Compare commits

...

348 Commits

Author SHA1 Message Date
eball
573d60c071 Update middleware-operator image version to 0.2.28 2025-12-09 16:44:47 +08:00
eball
0b5f927034 tapr: fix reconciling kvrocks creating event bug 2025-12-09 16:30:38 +08:00
eball
605b862937 opa: update image restriction to include docker.io prefix for beclab (#2172)
fix: update image restriction to include docker.io prefix for beclab
2025-12-08 21:38:32 +08:00
hysyeah
0110413528 tapr: kvrocks upgrade (#2173) 2025-12-08 21:32:59 +08:00
eball
0726d70b58 systemserver: remove default rbac authz 2025-12-08 16:37:12 +08:00
simon
8abf6d8b65 download-server: feat add download file remove api (#2168)
download server
2025-12-07 23:12:08 +08:00
salt
b0f495c37a feat: optimize highlight (#2167)
Co-authored-by: ubuntu <you@example.com>
2025-12-06 14:51:52 +08:00
wiy
4e9b8d840d feat(olares-app): update olares new version to v1.6.16 (#2166) 2025-12-05 23:41:42 +08:00
salt
57579813de feat: search scope change (#2159)
* Update search3-validation image to v0.0.80

* Update search3 and search3monitor images to v0.0.80

* Change LOG_FILE value to string 'true'

* Update search3-validation image version to v0.0.81

* Update search3 and search3monitor images to v0.0.81

* Update search3 and search3monitor images to v0.0.83

* Update search3-validation image to v0.0.83
2025-12-05 23:41:15 +08:00
hysyeah
97dd238c44 tapr: new middleware list api (#2165)
* tapr: new middleware list api

* Update middleware operator image version to 0.2.25

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-12-05 22:49:29 +08:00
eball
3095530d0d opa: add untrusted image policy (#2135)
* feat(opa): add untrusted image check and update webhook configuration

* fix: add separator before untrusted pod check ConfigMap

* fix: remove specific image checks from untrusted pod validation

* fix: remove specific image checks from untrusted pod validation

* feat: add priority class and node affinity for OPA deployment
2025-12-05 20:20:03 +08:00
dkeven
3e8120baf6 chore: clean up binary of module app-service (#2164) 2025-12-05 18:54:33 +08:00
eball
0685c4326b ci: update workflow triggers for linting and building to include specific paths (#2162)
* ci: update workflow triggers for linting and building to include specific paths

* ci: rename workflow to clarify purpose as App-Service Build test

* chore(ci): specify the path context when building for appservice

---------

Co-authored-by: dkeven <dkvvven@gmail.com>
2025-12-05 16:32:36 +08:00
dkeven
af9e1993d1 refactor: integrate app service into main repo (#2156)
* refactor: integrate app service into main repo

* Delete framework/app-service/LICENSE.md

* fix(manifest): remove unused manager deploy file

* refactor: change the output dir of CRDs to the standard path

---------

Co-authored-by: Peng Peng <billpengpeng@gmail.com>
2025-12-05 11:21:36 +08:00
eball
ba8868d771 tapr: add middleware label to nats deployment (#2160)
fix: update app-service image version to 0.4.54 and add middleware label to nats deployment
2025-12-05 00:17:24 +08:00
wiy
7ee1d7cae1 feat: update system-frontend version to v1.6.15 (#2158)
* feat: update system frontend version to v1.6.15

* feat: update login version to v1.6.15

---------

Co-authored-by: icebergtsn <zyh2433219116@gmail.com>
2025-12-05 00:16:58 +08:00
hysyeah
cb17633f57 authelia,lldap: firstfactor return more clear message (#2157) 2025-12-05 00:16:24 +08:00
eball
18e94af22b opa: enhance validating webhook with namespace selectors (#2154)
Added namespace selectors to validating webhook configuration to exclude specific namespaces.
2025-12-04 16:31:24 +08:00
dkeven
b81665afe1 fix(ci): use non-slash separator in sed for repo path (#2153) 2025-12-04 16:30:49 +08:00
berg
acb0fae406 settings: add env remoteOptions api, search rebuild api and bug fix. (#2152)
feat: update system-frontend and user-service version
2025-12-04 00:21:40 +08:00
hysyeah
e5fef95f4e node-exporter: fix disk scan open error (#2151) 2025-12-04 00:21:01 +08:00
dkeven
55fe22ed4c feat(app-service): add API to proxy remote options url (#2150) 2025-12-04 00:20:08 +08:00
eball
fee742d756 systemserver: combine system providers into one provider pod (#2149)
* feat: combine system provider configurations into a single deployment file

* feat: add auth-provider-nginx-config to system provider deployment

* feat: add auth-provider-nginx-config to system provider deployment
2025-12-04 00:19:28 +08:00
eball
36b4e792f6 daemon: add non-interactive flags to disk extend commands (#2148)
daemon: add non-interactive flags to disk extend commands
2025-12-04 00:19:12 +08:00
dkeven
8810a7657e feat(ci): distinguish different vendor in build & install script (#2147) 2025-12-04 00:18:56 +08:00
salt
59d87c860b feat: search3 add rebuild index api (#2146)
* feat: search3 add rebuild index api

* feat: add share url for FileParamShare

---------

Co-authored-by: ubuntu <you@example.com>
2025-12-04 00:18:28 +08:00
Yajing
8cda14a78c docs: add develop in a dev container using studio (#2141) 2025-12-03 16:07:20 +08:00
Yajing
a4c0161cb1 Apply suggestions
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-12-03 16:03:00 +08:00
eball
505a438fa3 fix: Update fsnotify_daemon.yaml (#2145) 2025-12-03 11:06:07 +08:00
wiy
1a794c9fc4 feat(olares-app): update olares-app version to v1.6.13 (#2144)
* feat: update system-frontend version to v1.6.13, market to v0.6.4 and chart repo to v0.6.4

* files: support internal sharing of external and cache

* feat(user-service): update desktop search

---------

Co-authored-by: icebergtsn <zyh2433219116@gmail.com>
Co-authored-by: aby913 <aby913@163.com>
2025-12-02 23:41:53 +08:00
eball
03e8dd0ac7 app-service, bfl: add a title to the shared entrance (#2143) 2025-12-02 23:41:04 +08:00
eball
eea2dfb67a download, search: add download and search3 provider configurations (#2140)
feat: add download and search3 provider configurations
2025-12-02 23:40:37 +08:00
dkeven
316ffe4f35 fix(gpu): add precheck and disable op for nouveau kernel module (#2139) 2025-12-02 23:40:09 +08:00
eball
08a380df61 jfsnotify: update jfsnotify daemon volume configurations (#2138) 2025-12-02 23:39:07 +08:00
wangyajing
58e869604a add screenshots 2025-12-02 18:18:34 +08:00
wangyajing
a61dff75b9 docs: add develop in a dev container using studio 2025-12-02 17:11:30 +08:00
hysyeah
0b9c1a09b9 fix: clone app upgrade (#2137) 2025-12-01 23:52:02 +08:00
Yajing
3178e06349 docs: add ace-step tutorial (#2113) 2025-12-01 21:28:22 +08:00
Meow33
69c341060b Apply suggestion from @fnalways
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-12-01 21:26:23 +08:00
Meow33
d56daad3f0 docs: apply suggestions 2025-12-01 21:21:21 +08:00
Meow33
2b239284b3 Apply suggestions from code review
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-12-01 19:38:44 +08:00
Meow33
e2e8b84eef docs: replace screenshots 2025-12-01 19:37:58 +08:00
Meow33
7afb59cd3a Apply suggestions from code review
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-12-01 19:36:12 +08:00
salt
6474487e75 fix: multiple node monitor setting webhook call fail (#2136)
Co-authored-by: ubuntu <you@example.com>
2025-12-01 17:48:24 +08:00
Yajing
3fd15d418b docs: update OlaresManifest example yaml (#2121) 2025-12-01 14:03:53 +08:00
hysyeah
243ad15e66 app-service: fix shared gpu inject,ns label,shared entrance (#2134)
* app-service: fix shared gpu inject,ns label,shared entrance

* fix: envoy outbound websocket
2025-11-29 00:02:15 +08:00
eball
56367c964e daemon: skip owner check if not installed successfully (#2133) 2025-11-29 00:00:53 +08:00
salt
8911b33d3e fix: process history data resource url without protocol (#2132)
Co-authored-by: ubuntu <you@example.com>
2025-11-29 00:00:16 +08:00
dkeven
f7c7939493 feat(gpu): clear GPU bindings of uninstalled App (#2129) 2025-11-28 23:59:45 +08:00
dkeven
8eee97f779 chore(cli): optimize error messages for some prechecks (#2128) 2025-11-28 23:59:15 +08:00
Meow33
d3c1a37378 docs: add prerequisites and modify expressions 2025-11-28 13:53:03 +08:00
Meow33
4a8303d050 docs: update LarePass link (#2106) 2025-11-28 13:09:09 +08:00
Power-One-2025
61df0056ba docs/revert changes to existing package-lock.json 2025-11-28 12:03:02 +08:00
Meow33
75c48ef5ee Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:08:34 +08:00
Meow33
4fed6bd618 Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:08:06 +08:00
Meow33
581e252f30 Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:07:53 +08:00
Meow33
f1d479cf1d Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:07:39 +08:00
Meow33
d070e53480 Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:07:23 +08:00
Meow33
89719a8d48 Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:07:06 +08:00
Meow33
085bef64b5 Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:06:45 +08:00
Meow33
963ca8ab48 Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:06:06 +08:00
Meow33
59922bc5cf Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:04:25 +08:00
Meow33
1f4b3f94ca Update docs/use-cases/ace-step.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-28 11:04:11 +08:00
simon
aa9e89c0c9 download-server: fix resume task bug (#2126)
download server v0.1.14
2025-11-27 23:56:19 +08:00
aby913
760aef5521 backup: fix bufio.Scanner token too long (#2125) 2025-11-27 23:55:51 +08:00
aby913
ca1d7ebd09 cli: windows username contains spaces (#2124)
fix: windows username contains spaces
2025-11-27 23:55:23 +08:00
berg
a282878cfe market: update market permission v2 (#2123)
feat: update market permission v2
2025-11-27 23:54:54 +08:00
hysyeah
95ad815142 app-service: add gpu memory size check (#2122) 2025-11-27 23:54:23 +08:00
Meow33
984582c520 Update docs/developer/develop/package/manifest.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-27 21:19:34 +08:00
Meow33
d10e6f0e20 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-27 21:19:24 +08:00
Meow33
0db6227f98 Update docs/developer/develop/package/manifest.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-27 21:19:15 +08:00
Meow33
46aa153989 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-27 21:19:07 +08:00
Meow33
3cfd619d9d docs: update olaresmanifest example yaml 2025-11-27 17:19:54 +08:00
salt
82e3d7d2d4 fix: missing POD_NAME, POD_NAMESPACE (#2117)
* fix: missing POD_NAME, POD_NAMESPACE

* feat(cli): switch to NVIDIA runfile to install/upgrade GPU driver (#2116)

* opa: ignore validating opa pod itself (#2118)

* opa: ignore validating opa pod itself

* opa: add uid to response in decision logic

* opa: add apiVersion and kind to admission review response

---------

Co-authored-by: ubuntu <you@example.com>
Co-authored-by: dkeven <82354774+dkeven@users.noreply.github.com>
Co-authored-by: eball <liuy102@hotmail.com>
2025-11-27 16:54:35 +08:00
Yajing
9188718cb6 docs: update wording in Steam Headless tutorial (#2119) 2025-11-27 16:54:16 +08:00
eball
7f27a03e84 opa: ignore validating opa pod itself (#2118)
* opa: ignore validating opa pod itself

* opa: add uid to response in decision logic

* opa: add apiVersion and kind to admission review response
2025-11-27 16:19:56 +08:00
Meow33
202a17dd6f docs: update wording 2025-11-27 15:31:24 +08:00
dkeven
fe6817ff78 feat(cli): switch to NVIDIA runfile to install/upgrade GPU driver (#2116) 2025-11-27 15:15:58 +08:00
eball
3991bc2e08 opa: add opa based admission control to Olares (#2110)
* opa: add opa based admission control to Olares

* fix(deployment): add selector to opa deployment spec

* fix(deployment): update Deployment API version to apps/v1

* fix(deployment): remove insecure address option from OPA deployment

* fix(deployment): update OPA image version and adjust service port configuration

* fix(deployment): add debug logging and enable policies in OPA deployment
2025-11-27 01:13:53 +08:00
aby913
c84e4deded files: fix cloud video playback, support external and cache sharing (#2112)
* files: fix cloud video playback, support external and cache sharing

* feat(olares-app): update olares-app version to v1.6.9

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-11-27 01:01:54 +08:00
aby913
3a19d380f3 backup(fix): app backup files path invalid (#2111) 2025-11-27 01:01:27 +08:00
hysyeah
21cf7466ee app-service,hami: hardware info inject to values (#2108)
* app-service,hami: hardware info inject to values

* copy embed files
2025-11-27 01:00:08 +08:00
salt
9a0db453d3 feat:add get include directory and get exclude pattern rest api (#2107)
Co-authored-by: ubuntu <you@example.com>
2025-11-27 00:59:39 +08:00
Meow33
3021a88e70 Merge branch 'main' into docs/add-ace-step-tutorial 2025-11-26 22:03:01 +08:00
Meow33
232c277412 docs: add user guide for ace-step 2025-11-26 21:59:21 +08:00
Power-One-2025
d5e0523c6a Update README_CN.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-26 16:21:05 +08:00
salt
03641fb388 feat: add crd setting for search3 and support full content search (#2105)
* feat: add search3_validation yaml

* feat: add more crd

* fix: remove repeated namespace

---------

Co-authored-by: ubuntu <you@example.com>
2025-11-26 15:20:12 +08:00
Power-One-2025
023208603c docs/delete package-lock.json 2025-11-26 14:56:54 +08:00
Power-One-2025
21d10c37b3 update LarePass link 2025-11-26 12:13:08 +08:00
eball
5be2c61091 hami: bump hami version to v2.6.4 (#2104) 2025-11-25 23:44:29 +08:00
aby913
da12178933 backup: adjust backup policy update response data structure (#2103) 2025-11-25 23:10:09 +08:00
aby913
b6484e1a19 files(fix): sync share improve (#2102)
* files(fix): sync share improve

* feat: update olares-app to v1.8.8

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-11-25 23:09:39 +08:00
eball
206c946408 app-service, tapr, bfl: add shared entrance url api and fix some bugs (#2101) 2025-11-25 21:12:57 +08:00
Meow33
c57c67db24 docs: update 0.10.0 changes for OlaresManifest.yaml (#2094) 2025-11-25 15:05:22 +08:00
Yajing
1ed26c8264 docs: update macOS Chrome local access instructions (#2097) 2025-11-25 15:03:08 +08:00
Meow33
18ece294ce Update docs/manual/larepass/private-network.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-25 14:59:00 +08:00
Yajing
2f44ae273f docs: add duix.avatar tutorial (#2095) 2025-11-25 14:27:48 +08:00
aby913
a6457f0a2a files(fix): sync shared improve (#2099)
* files(fix): sync share improve

* feat: update olares app version to v1.6.7

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-11-24 23:55:25 +08:00
eball
3f6bc2bf36 app-service, tapr: implement shared entrances (#2098)
* app-service, tapr: implement shared entrances

* Update app-service image version to 0.4.46
2025-11-24 23:54:06 +08:00
Yajing
f7248a1c74 Apply suggestions
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-24 22:36:39 +08:00
Meow33
54fc939ea3 docs: reset image size and info title 2025-11-24 16:16:38 +08:00
Meow33
420bb1d805 docs: add extra info and screenshot 2025-11-24 15:46:58 +08:00
wangyajing
39c0d2c777 update curl command for json file 2025-11-24 15:40:50 +08:00
wangyajing
d8e3a64b61 add screenshots 2025-11-24 15:40:50 +08:00
wangyajing
78dbda300b docs: add duix.avatar tutorial 2025-11-24 15:40:47 +08:00
Meow33
16440bc3c5 docs: Update macOS Chrome local access instructions 2025-11-24 13:29:36 +08:00
wiy
f5b8d226c9 feat(olares-app): update version to v1.6.6 (#2096)
* feat(olares-app): update version to v1.6.6

* fix: file uploads under sync shares

---------

Co-authored-by: aby913 <aby913@163.com>
2025-11-21 00:00:25 +08:00
RiddleMe
a80142cdd7 add middleware description 2025-11-20 22:08:10 +08:00
Teng
e69364d329 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:37:15 +08:00
Teng
6facfd93ee Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:37:07 +08:00
Teng
7e9b0bcdc5 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:36:54 +08:00
Teng
bb461e8573 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:36:31 +08:00
Teng
926058cbd0 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:36:19 +08:00
Teng
44d56f64e1 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:36:09 +08:00
Teng
8074e7dee9 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:35:57 +08:00
Teng
67af7ee3fa Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:35:45 +08:00
Teng
e6b3624bae Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:35:34 +08:00
Teng
c27c8a61f1 Update docs/zh/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:35:26 +08:00
Teng
79e6d4b6e6 Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:35:13 +08:00
Teng
ea15f6d04b Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:34:57 +08:00
Teng
dffcafbfd2 Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:34:02 +08:00
Teng
e30afb517b Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:33:40 +08:00
Teng
97a701c7e4 Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:33:26 +08:00
Teng
24c68ada0b Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:33:13 +08:00
Teng
ec5358f9b0 Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:32:38 +08:00
Teng
03bb1ab2b8 Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:32:28 +08:00
Teng
d5754b8977 Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:32:10 +08:00
Teng
8017975124 Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:31:56 +08:00
Teng
66b77ed5a1 Update docs/developer/develop/package/manifest.md
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-20 21:31:17 +08:00
Yajing
b990d50b01 docs: add the en version of Windows user guide (#2086) 2025-11-20 21:12:34 +08:00
Yajing
f1890e304b docs: fix typos discovered in Nov 2025 (#2093) 2025-11-20 21:03:31 +08:00
Meow33
587ea07a61 Update docs/use-cases/windows.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 20:17:03 +08:00
Meow33
e185931214 Update docs/use-cases/windows.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 20:16:44 +08:00
Meow33
78fe2b29d2 Update docs/use-cases/windows.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 20:15:58 +08:00
Meow33
9fc92b4f32 docs: changes made based on suggestions 2025-11-20 19:20:23 +08:00
Meow33
d33a8b7d31 Update docs/use-cases/windows.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 19:10:16 +08:00
Meow33
825a05b02f Update docs/use-cases/windows.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 19:10:04 +08:00
Meow33
6aa9b08b63 Update docs/use-cases/windows.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 19:08:59 +08:00
Meow33
dcb2505c8e Update docs/use-cases/windows.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 19:08:17 +08:00
Meow33
4917a2d2ab Apply suggestion from @fnalways
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 19:06:17 +08:00
Meow33
aba1d3336d Apply suggestion from @fnalways
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 19:05:52 +08:00
Meow33
7c2c68e03b Apply suggestion from @fnalways
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-20 19:05:35 +08:00
RiddleMe
ff30a31748 update change for 0.10.0 2025-11-20 18:16:22 +08:00
Meow33
3d8d351996 docs: fix typos 2025-11-20 17:51:38 +08:00
Meow33
eea8f607fa Update en.ts 2025-11-20 17:40:58 +08:00
Yajing
d3f357eb13 docs: fix reference issue in organize content (#2092) 2025-11-20 17:32:50 +08:00
Meow33
e19ef85071 docs: fix inference issue in organize content 2025-11-20 17:22:13 +08:00
dkeven
1e7cc5b6ad fix(manifest): handle the case of present secret with missing key (#2091) 2025-11-20 15:00:17 +08:00
Meow33
6e4c27136a Merge branch 'main' into docs/add-run-windows-vm 2025-11-20 11:28:40 +08:00
Meow33
afb1e5b9f7 Merge branch 'docs/add-run-windows-vm' of https://github.com/Meow33/Olares into docs/add-run-windows-vm 2025-11-20 11:09:03 +08:00
Meow33
ed90b16fd3 docs: resolved comments 2025-11-20 11:09:00 +08:00
aby913
2901fcfd24 files: media integration, share bug fix (#2090)
* files: media-server integrate, share videos play, bug fixes

* files: media-server integrate, share videos play, bug fixes

* share: fix some bugs
notification: add apps stop reason

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-11-20 00:05:50 +08:00
hysyeah
c918459a8e app-service: push event add title, stop reason field (#2089) 2025-11-20 00:04:43 +08:00
eball
9d3c560648 authelia: add policy for probe validating (#2088) 2025-11-20 00:03:51 +08:00
dkeven
c901c54716 chore(cli): merge env for nvidia repo mirror with cdn mirror (#2087) 2025-11-19 21:16:28 +08:00
Meow33
d925999a70 docs: add deerflow tutorial and update Ollama tutorial (#2082) 2025-11-19 21:00:22 +08:00
Meow33
aa5aa78677 Apply suggestion from @fnalways
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-19 20:31:20 +08:00
Meow33
fd37490fcd Apply suggestion from @fnalways
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-19 17:48:24 +08:00
Meow33
d55fb76a71 Apply suggestion from @fnalways
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-19 17:30:25 +08:00
Meow33
ba3954dc0f docs: add windows in use case index 2025-11-19 15:45:45 +08:00
Meow33
faf20cdf0b docs: add user case for windows vm 2025-11-19 15:24:55 +08:00
dkeven
6321909582 feat(upgrade): upgrade l4bflproxy to v0.3.9 (#2084) 2025-11-19 00:31:56 +08:00
eball
355f7c4e69 coredns,bfl,l4: resolving domain to cluster ip in pods (#2085) 2025-11-19 00:24:30 +08:00
dkeven
2c3c949bc9 feat(gpu): add an API to switch GPUBindings in bulk for app (#2083) 2025-11-18 23:39:13 +08:00
Yajing
babf756bd5 Update docs/use-cases/ollama.md 2025-11-18 21:51:10 +08:00
Yajing
c341e22f76 Apply suggestions
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-18 21:49:57 +08:00
wangyajing
0a0e52dd3d add deerflow tutorial 2025-11-18 20:10:32 +08:00
Meow33
081b4064a1 docs: add studio tutorial (#2064) 2025-11-18 13:25:29 +08:00
Yajing
9a224ea780 docs: update the en version of Steam user guide (#2070) 2025-11-18 13:24:31 +08:00
Yajing
ab3a6ba34e docs: remove prompts from use docker compose (#2081) 2025-11-18 11:57:05 +08:00
Meow33
2ec8300663 docs: remove prompts from the doc 2025-11-18 11:52:59 +08:00
Meow33
8762f26c04 docs: change expression 2025-11-18 11:13:35 +08:00
Meow33
65e50afd27 Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-18 11:11:09 +08:00
hysyeah
aff0b38c0b fix: set priority for ks,node-exporter,prometheus (#2079)
* fix: set priority for ks,node-exporter,prometheus

* fix: add priority class for download
2025-11-17 23:54:55 +08:00
eball
fefd635f6c cli: add disk management commands for extending and listing unmounted disks (#2078)
* feat: lvm commands

* feat: add disk management commands for extending and listing unmounted disks
2025-11-17 23:54:15 +08:00
wangyajing
a8b410a0da reorganize topics for better readability 2025-11-17 23:24:58 +08:00
Meow33
841b5229e6 Merge branch 'docs/update-user-guide-for-Steam' of https://github.com/Meow33/Olares into docs/update-user-guide-for-Steam 2025-11-17 21:14:39 +08:00
Meow33
89421058bc docs: fix errors 2025-11-17 21:14:09 +08:00
Meow33
4d5f69e9dc Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 21:13:23 +08:00
Yajing
8cb7ee6aad docs: update perplexica tutorial (#2071) 2025-11-17 21:11:28 +08:00
Yajing
ab62c06d07 Apply suggestions
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-17 21:06:31 +08:00
Yajing
d85c81ff57 Apply suggestions
Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-17 21:04:20 +08:00
Meow33
94d07adf9c Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 21:02:29 +08:00
Meow33
3eeefb18c2 docs: refined structure 2025-11-17 20:54:56 +08:00
Meow33
34b58757ec Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:14:43 +08:00
Meow33
0df243184c Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:14:30 +08:00
Meow33
99420a8a48 Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:14:07 +08:00
Meow33
b013bf6ea9 Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:13:46 +08:00
Meow33
1bedb4d182 Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:13:30 +08:00
Meow33
f844d1221e Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:13:19 +08:00
Meow33
7950d1be7d Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:13:09 +08:00
Meow33
ffdeb91dcd Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:12:53 +08:00
Meow33
a356b13d5a Update docs/use-cases/stream-game.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-17 20:11:31 +08:00
wangyajing
db61f05fb6 update perplexica 2025-11-14 22:52:00 +08:00
Meow33
26937ab505 docs: update the en version of Steam user guide 2025-11-14 22:15:09 +08:00
hysyeah
3dc2132e72 olares: change cadvisor scrape interval (#2068)
olares change cadvisor scrape interval
2025-11-14 21:14:13 +08:00
dkeven
b50f2bbf6c feat(upgrade): upgrade l4bflproxy to v0.3.8 (#2066)
* feat(upgrade): upgrade l4bflproxy to v0.3.7

* feat(upgrade): update L4BFLProxy version to v0.3.8

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-11-14 21:13:30 +08:00
wangyajing
16a0a5556d fix dead link 2025-11-14 11:06:36 +08:00
aby913
32166687ec files: paste files across users for internal sharing (#2063)
* files: copy files across users for internal sharing

* feat: update olares-app version to 1.6.2

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-11-14 00:22:22 +08:00
0x7fffff92
db3498e0a0 fix: video transcoding continuously consumes cpu (#2062)
Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-11-14 00:21:27 +08:00
Yajing
2dc70ede78 docs: add github trending badge and update olares intro (#2065)
add github trending badge and update olares intro
2025-11-13 23:04:19 +08:00
wangyajing
694f385d2b add studio en tutorial 2025-11-13 22:37:20 +08:00
eball
407c126419 bfl: fix vpn mode policy bug 2025-11-13 19:04:10 +08:00
aby913
18746c917e files: rebuild the samba image (#2061) 2025-11-13 14:01:29 +08:00
eball
01324970b4 daemon: Implement DSR Proxy for handling DNS requests and responses (#2057)
* daemon: Implement DSR Proxy for handling DNS requests and responses

* fix: update DSR proxy logging and improve DNS pod configuration handling

* fix: update sys-event deployment to include additional permissions and bump image version

* fix: update install step to include pcap-devel package

* fix: correct spelling in install step for udev-devel and pcap-devel

* fix: refactor DSRProxy implementation for better clarity and organization

* fix: build arm64

* fix: update sys-event image version to 0.2.12

---------

Co-authored-by: liuyu <>
2025-11-13 11:59:46 +08:00
simon
b068669c3c download-server: fix format id bug (#2059)
download
2025-11-13 00:46:20 +08:00
wiy
bc134283d9 feat(olares-app): add share application (#2058)
* files: share

* feat: update olares-app version to 1.6.1

---------

Co-authored-by: aby913 <aby913@163.com>
2025-11-13 00:45:50 +08:00
dkeven
9f3a0f3c32 feat(cli): expand NodePort range to allow sharing SMB service (#2056) 2025-11-13 00:45:19 +08:00
hysyeah
ca1ab3fef9 app-service: support specify pod that need inject outbound envoy sidecar (#2055)
app-service: support specify pod that need inject outbound envoy sidecar by pod selector
2025-11-13 00:44:36 +08:00
aby913
b6394cc39c integration: rename field expirationDate to expires (#2053) 2025-11-13 00:44:05 +08:00
eball
36915f5f03 Add libpcap-dev to udev-devel installation 2025-11-12 23:48:38 +08:00
hysyeah
1ad305f874 app-service: fix app clone version select (#2052) 2025-11-11 23:46:45 +08:00
dkeven
58cdd7de69 chore(cli): use preferred nvidia driver meta pkg name (#2051) 2025-11-11 21:13:05 +08:00
Yajing
4cee006a1e docs: update the en version of Jellyfin user guide (#2050) 2025-11-11 20:20:12 +08:00
Meow33
7bbc53bef9 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:16:18 +08:00
Meow33
1432168ec0 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:15:58 +08:00
Meow33
534ae8dd3a Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:15:20 +08:00
Meow33
0a25611cf5 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:01:52 +08:00
Meow33
17990b3558 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:01:32 +08:00
Meow33
cb80d04265 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:01:12 +08:00
Meow33
0194a493ab Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:00:55 +08:00
Meow33
06e49cb638 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:00:47 +08:00
Meow33
93dea60906 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:00:36 +08:00
Meow33
177f955a6b Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 20:00:11 +08:00
Meow33
324a0b4071 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 19:59:41 +08:00
Meow33
132d6432cc Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 19:59:02 +08:00
Meow33
4c51efb0b7 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 19:58:46 +08:00
Meow33
8f0f2e5844 Update docs/use-cases/stream-media.md
Co-authored-by: Yajing <110797546+fnalways@users.noreply.github.com>
2025-11-11 19:58:13 +08:00
Meow33
0ae1524682 docs:update en version of Jellyfin user guide 2025-11-11 18:03:35 +08:00
dkeven
b24ba06794 feat(app-service): add regex validation to env CRD (#2049) 2025-11-10 22:38:55 +08:00
hysyeah
ec6ce88e08 app-service,tapr: support app clone;es,minio prefix bucket,index manager (#2047) 2025-11-08 01:36:41 +08:00
Yajing
7839bed160 docs: refactor manage olares section and fix incorrect descriptions (#2046)
* docs: refactor manage olares section and fix incorrect descriptions

* Apply suggestions from code review

Co-authored-by: Meow33 <supermonkey03@163.com>

* Apply comment

* Apply suggestions from code review

Co-authored-by: Meow33 <supermonkey03@163.com>

---------

Co-authored-by: Meow33 <supermonkey03@163.com>
2025-11-07 23:01:23 +08:00
Meow33
39d3689d01 docs: update iso download link and remove concept from index (#2044) 2025-11-06 22:39:55 +08:00
Yajing
ef347ff8ef docs: update mirrors and cdn urls and hide Windows Docker installation guide (#2041) 2025-11-06 21:29:30 +08:00
eball
908629dd9a daemon: access local domain via proxy protocol (#2043) 2025-11-06 18:25:01 +08:00
dkeven
4cea6ab238 chore(manifest): lift GPU split count limit in timeslicing mode (#2042) 2025-11-06 17:54:41 +08:00
aby913
a0e8a69848 fix: wsl shutdown command not working on newer WSL versions (#2040) 2025-11-06 17:54:13 +08:00
hysyeah
df2b5b4274 authelia: fix ttlcache delete panic may due to some concurrency (#2039) 2025-11-06 17:53:24 +08:00
Yajing
f18d3af3b4 docs: update screenshot for cookie management and fix minor errors (#2033) 2025-11-06 16:49:27 +08:00
Meow33
b4a447b596 docs: update GPU mode descriptions (#1903) 2025-11-06 16:42:39 +08:00
Meow33
d329630509 docs: update mirrors and CDN URLs; hide Windows Docker installation section 2025-11-06 15:49:39 +08:00
yyh
1af84b046d chore: update olares-app and user-service version (#2038) 2025-11-05 21:27:50 +08:00
eball
84e8543309 authelia: improve cidr validation for remote ips in cloud environments (#2037)
* authelia: improve cidr validation for remote ips in cloud environments

* Update auth image version to 0.2.39
2025-11-05 20:42:54 +08:00
eball
09f7ecd295 infisical: add mutex lock for workspace creation (#2036) 2025-11-05 14:54:12 +08:00
salt
1a8dbf0f2c fix: wait drive, cache root directory create successfully (#2035)
Co-authored-by: ubuntu <you@example.com>
2025-11-05 11:34:35 +08:00
berg
3f1e695581 system frontend, user service: update system frontend and user service (#2034)
* feat: update system frontend and user service

* fix: change version
2025-11-05 00:22:54 +08:00
Meow33
8881503ca6 docs: fix minor errors 2025-11-04 20:42:05 +08:00
Meow33
317da8a13e Revert "docs:update screenshot and fix minor errors"
This reverts commit 6d5c2a5e2b.
2025-11-04 20:37:31 +08:00
berg
316d719d64 feat: update system frontend to v1.5.24 (#2032) 2025-11-04 19:28:28 +08:00
hysyeah
01e1b79674 app-service: skip entrance check if set skip filed true (#2031) 2025-11-04 19:28:05 +08:00
eball
9b7ff997b9 daemon: add local domain pattern as a host alias (#2030)
* fix: update zeroconf dependency to v0.2.2 and add host alias functionality

* fix: enhance intranet request handling for host patterns
2025-11-04 19:27:41 +08:00
Meow33
6d5c2a5e2b docs:update screenshot and fix minor errors 2025-11-04 17:53:39 +08:00
dkeven
d0185a484f feat(app-service): add APIs to batch update userenv & sysenv (#2029) 2025-11-04 00:43:42 +08:00
dkeven
aadacbf729 chore(cli): remove any left vgpu lock file (#2028) 2025-11-04 00:43:15 +08:00
wiy
86290d1ce9 feat(olares-app): update system-frontend new version (#2027) 2025-11-03 17:23:19 +08:00
berg
d5ddd59997 system frontend, user service: update system frontend to v1.5.21 and user-service to v0.0.66 (#2026)
feat: update system frontend to v1.5.21 and user-service to v0.0.66
2025-10-31 23:20:57 +08:00
dkeven
64883f1752 app-service: fix middleware netpol; rollback env schema (#2024) 2025-10-31 19:45:00 +08:00
dkeven
ef0b8d3180 fix(daemon): avoid concurrent execution of uninstall and change-ip (#2025) 2025-10-31 19:44:13 +08:00
Calvin W.
101379e6ba docs: add local URL for Olares access doc (#1995) 2025-10-31 15:59:19 +08:00
Calvin W.
80947af962 docs: add Set system environment variables in settings (#2004) 2025-10-31 13:55:31 +08:00
Calvin W.
9ebb80a111 docs: add Olares One specific operations across topics (#1978) 2025-10-31 13:55:06 +08:00
Calvin W.
37e99b977c docs: update docs about gpu passthrough and installation in PVE (#1986) 2025-10-31 13:52:41 +08:00
Calvin W.
dcbc505e7a docs: update FRP setting and related activation process (#1987) 2025-10-31 13:52:25 +08:00
Calvin W.
9f518d6c4b docs: add cookie management in Integrations (#2003) 2025-10-31 13:52:02 +08:00
Calvin W.
6f88df0570 docs: add feature comparison table for LarePass (#2009) 2025-10-31 13:51:47 +08:00
Calvin W.
f97c9521f3 docs: update screenshots for env variable setting (#2023) 2025-10-31 13:50:41 +08:00
cal-weng
61aa638be9 update screenshots 2025-10-31 13:45:19 +08:00
Calvin W.
6285359f31 docs: add documentation for the "user activate" CLI command (#1994) 2025-10-31 10:55:17 +08:00
eball
f72987d55f bfl: Update AUTHELIA_AUTH_URL in bfl_deploy.yaml (#2022) 2025-10-30 22:46:19 +08:00
berg
33292988bb system frontend: update system frontend to v1.5.19 (#2021)
* feat: update system frontend to v1.5.19

* feat: update vault-server version

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-10-30 22:45:56 +08:00
dkeven
261cd45535 feat(app-service): independent op & API for apply env (#2020) 2025-10-30 22:45:27 +08:00
hysyeah
f9994e7e88 app-service: fix set cookie with multi set-cookie in headers (#2018) 2025-10-30 19:27:57 +08:00
Calvin W.
b0ecfefa09 docs: update related docs for env var support (#2019) 2025-10-30 18:13:58 +08:00
cal-weng
e1e4528db6 update related docs 2025-10-30 17:32:13 +08:00
berg
6eecd514e4 system frontend: update system frontend to v1.5.18 (#2017)
feat: update system frontend to v1.5.18
2025-10-30 16:15:43 +08:00
dkeven
5b4464533b refactor(app-service): change Env CRD schema for future i18n (#2016) 2025-10-30 16:15:18 +08:00
eball
62233642ad daemon: improve error handling in CheckCurrentStatus function (#2015)
fix: improve error handling in CheckCurrentStatus function
2025-10-30 00:09:53 +08:00
cal-weng
26910b80b9 resolve comments 2025-10-29 19:37:55 +08:00
Meow33
306c7a2480 docs: update content 2025-10-29 19:28:53 +08:00
berg
d26f4f1ac2 system frontend: update system frontend to v1.5.16 (#2014)
feat: update system frontend to v1.5.16
2025-10-29 19:03:33 +08:00
dkeven
1509ab6435 feat(daemon): unified node disk size between olaresd and dashboard (#2013) 2025-10-29 19:03:10 +08:00
dkeven
df0fcb1801 chore(manifests): add default values for some user envs (#2012) 2025-10-29 19:02:41 +08:00
aby913
359a269e88 integration(fix): add user suffix to cookie (#2011) 2025-10-29 19:02:05 +08:00
dkeven
f621aeef54 feat(daemon): ensure dockerhub mirror in sysenv at the first (#2010) 2025-10-29 19:01:34 +08:00
cal-weng
10ce9b44fc add note on multi-GPU and improve accuracy 2025-10-29 17:16:55 +08:00
Meow33
6d5e66b73b docs: update doc based on feedback 2025-10-29 15:27:00 +08:00
cal-weng
2f701510e0 update support fact 2025-10-29 14:50:19 +08:00
cal-weng
ec38cbd285 fix case 2025-10-29 14:38:37 +08:00
cal-weng
640d8c1bf4 docs: add feature comparison table for LarePass 2025-10-29 14:33:41 +08:00
Meow33
c570cf8fc2 docs: update documentation based on review comments 2025-10-29 13:16:17 +08:00
Calvin W.
9e18f11822 Update docs/zh/manual/get-started/activate-olares.md 2025-10-29 11:54:56 +08:00
Meow33
121482528b docs: fix errors 2025-10-29 11:45:26 +08:00
Calvin W.
ac482bceae Update field names and relevant description (#1982) 2025-10-29 11:38:27 +08:00
Meow33
3692f5ed7d Merge branch 'docs/add-user-activate-cli-command' of https://github.com/beclab/Olares into docs/add-user-activate-cli-command 2025-10-29 11:27:39 +08:00
Meow33
ce32e32433 docs: improve formatting and fix minor errors 2025-10-29 11:27:36 +08:00
Meow33
fdeea2f4a1 Merge branch 'main' into docs/add-pve-gpu-passthrough-iso-install 2025-10-29 11:15:44 +08:00
Meow33
837aa2037f Merge branch 'main' into docs/add-user-activate-cli-command 2025-10-29 11:10:15 +08:00
berg
45065b03e3 system-frontend: update version to v1.5.15 (#2008)
feat: update system frontend to v1.5.15
2025-10-29 00:12:40 +08:00
Meow33
195f8c6ec7 docs: format the doc and add argument section 2025-10-28 21:18:35 +08:00
Sai
20202d1cdb feat: market support systemenv (#2006)
support systemenv
2025-10-28 20:33:06 +08:00
Meow33
e4d31241da docs: improve formatting and fix minor errors 2025-10-28 20:14:47 +08:00
Calvin W.
83dc24df94 docs: move concepts to developer doc (#1952) 2025-10-28 20:08:40 +08:00
dkeven
890eb8ea46 feat(cli): add upgrader for main release version 1.12.2 (#2005) 2025-10-28 18:27:07 +08:00
simon
d57f01f88b download: add file_type && modify aira2 percent calculate (#2002)
download 0.1.12
2025-10-28 17:08:33 +08:00
dkeven
3297f3088e feat(daemon): handle sysenv for dockerhub mirror endpoint (#2000) 2025-10-28 17:07:59 +08:00
Meow33
f34ab4d5ce docs: add set system environment variables for settings 2025-10-28 16:50:05 +08:00
cal-weng
2f775e098e docs: add cookie management in Integrations 2025-10-28 15:35:41 +08:00
eball
56600420f1 chore: update version to 1.12.3 in workflows and scripts (#2001) 2025-10-28 13:51:15 +08:00
Calvin W.
4e579bc934 Update docs/zh/manual/larepass/private-network.md 2025-10-28 11:49:52 +08:00
cal-weng
8571da9761 fix building error 2025-10-28 11:26:42 +08:00
aby913
0a591f7a3c fix: avoid glob parsing for special-char filenames (#1999) 2025-10-27 23:46:31 +08:00
berg
84dec294da system-frontend: update system-frontend to v1.5.14 (#1998)
feat: update system-frontend to v1.5.14
2025-10-27 23:46:04 +08:00
hysyeah
e3cb3e5a54 app-service: upgrade chart via appmgr controller in setup domain (#1997) 2025-10-27 23:45:25 +08:00
dkeven
9fb31d52b7 fix(daemon): handle LVM device when getting disksize (#1996) 2025-10-27 23:44:54 +08:00
cal-weng
5a7c8f539a fix error 2025-10-27 21:09:16 +08:00
cal-weng
9305b09717 docs: add local URL for Olares access doc 2025-10-27 19:43:07 +08:00
Meow33
25b2ff91af docs: add documentation for the "user activate" CLI command 2025-10-27 19:34:26 +08:00
eball
7f6091afb1 juicefs: bump version to v1.3.0 in Olares.yaml (#1993) 2025-10-27 18:52:31 +08:00
eball
fe3acf669e cli: fix some user activation bugs (#1992)
* fix(cli): update UserBindTerminus to return access token and adjust activation wizard call

* Update wizard.go

feat: ensure authUrl has worked

* Update wizard.go

* feat(cli): add reset password option to user activation command

* feat: add initializeAccount and upload mainvault

* fix: update UserBindTerminus to return access token and improve error handling in RunWizard

* feat: implement AES-GCM encryption in encryptAESGCM function and add necessary imports

* fix: improve account retrieval and error handling in Login and initializeAccount functions

* Update app.go

* feat: update

* fix: comment out TOTP initialization in Signup and adjust account retrieval in Login

---------

Co-authored-by: Peng Peng <billpengpeng@gmail.com>
2025-10-27 18:52:14 +08:00
dkeven
18950cc43b fix(bfl): use dynamical variable endpoint in cert manager (#1991) 2025-10-27 18:51:56 +08:00
cal-weng
d25bde12c3 add multiple cards for one app support and update GPU modes description 2025-10-27 15:30:45 +08:00
wiy
f0542c3ea5 feat(olares-app): update system-frontend version to v1.5.13 (#1990) 2025-10-25 00:35:04 +08:00
eball
70185da4a7 refactor: change the backend of JuiceFS notify daemon to inotify (#1989)
fix: update fsnotify daemon and proxy images to v0.1.4 and v0.1.11 respectively
2025-10-25 00:34:24 +08:00
hysyeah
1dc859f225 app-service: fix helm upgrade set recreate to false (#1988) 2025-10-25 00:33:46 +08:00
eball
7a84a51940 feat: refactor disk utility functions to improve disk size retrieval (#1985) 2025-10-25 00:33:14 +08:00
cal-weng
d5122fac17 docs: update FRP setting and related activation process 2025-10-24 21:20:47 +08:00
Meow33
36167790df Resolve merge conflicts and update internal links 2025-10-24 20:59:24 +08:00
Meow33
ad5e1328c5 Merge branch 'main' into docs/add-pve-gpu-passthrough-iso-install 2025-10-24 20:45:44 +08:00
Meow33
e2b8cf1cf2 update docs about gpu passthrough and installation in PVE 2025-10-24 16:32:29 +08:00
dkeven
6f8d9f15b2 fix(image-service): watch whole config dir to tolerate file removal (#1979) 2025-10-24 14:45:15 +08:00
cal-weng
64215b478f add anchor link to SSH password reset and fix format 2025-10-24 11:38:42 +08:00
hysyeah
f8faecdc36 app-sevice: fix upgrade chart context in setupdomain cause release failed (#1984) 2025-10-23 23:50:09 +08:00
wiy
656894e46a feat(olares-app): update system-frontend version to v1.5.12 (#1983) 2025-10-23 23:49:19 +08:00
aby913
3caaa6b63b files(fix): optimize the return value of the accounts query (#1981)
fix: optimize the return value of the accounts query
2025-10-23 23:48:49 +08:00
Sai
ad5acdbf1d fix: chartrepo support oci type in image manifest (#1980)
support oci type in image manifest
2025-10-23 23:47:15 +08:00
dkeven
24ef743d24 fix(cli): lazy load DID cache db upon invoke (#1977) 2025-10-23 23:45:33 +08:00
cal-weng
0e3e61afe3 fix links 2025-10-23 21:47:20 +08:00
cal-weng
de254bee66 fix links 2025-10-23 21:34:54 +08:00
Meow33
96f2aa5b30 Update field names and relevant description 2025-10-23 20:58:37 +08:00
cal-weng
f86c4e5e52 Add Olares One only badge for Fan panel 2025-10-23 20:48:53 +08:00
cal-weng
05c2fe8c35 add description for SSH password reset 2025-10-23 20:27:13 +08:00
Peng Peng
dcd8413dcf Revert "Update wizard.go"
This reverts commit b4b13b0aa9.
2025-10-23 19:34:05 +08:00
Peng Peng
b4b13b0aa9 Update wizard.go 2025-10-23 19:33:50 +08:00
cal-weng
d8d4b6d9f9 docs: update work mode and fan panel for Olares One 2025-10-23 15:33:40 +08:00
cal-weng
1305ffe910 docs: move concepts to developer doc 2025-10-20 13:37:27 +08:00
cal-weng
5a434b5b50 resolve comments 2025-10-11 21:45:15 +08:00
cal-weng
d8db9c458c update wording 2025-10-11 21:23:52 +08:00
cal-weng
861c5812b3 docs: update GPU mode descriptions to be more accurate 2025-10-11 15:06:45 +08:00
766 changed files with 62489 additions and 2216 deletions

View File

@@ -3,12 +3,28 @@ name: Lint and Test Charts
on:
push:
branches: [ "main", "release-*" ]
paths-ignore:
- 'docs/**'
paths:
- '!docs/**'
- 'apps/.olares/**'
- 'build/**'
- 'cli/**'
- 'daemon/**'
- 'framework/**/.olares/**'
- 'infrastructure/**/.olares/**'
- 'platform/**/.olares/**'
- 'vendor/**'
pull_request_target:
branches: [ "main", "release-*" ]
paths-ignore:
- 'docs/**'
paths:
- '!docs/**'
- 'apps/.olares/**'
- 'build/**'
- 'cli/**'
- 'daemon/**'
- 'framework/**/.olares/**'
- 'infrastructure/**/.olares/**'
- 'platform/**/.olares/**'
- 'vendor/**'
workflow_dispatch:
@@ -59,7 +75,7 @@ jobs:
steps:
- id: generate
run: |
v=1.12.2-$(echo $RANDOM$RANDOM)
v=1.12.3-$(echo $RANDOM$RANDOM)
echo "version=$v" >> "$GITHUB_OUTPUT"
upload-cli:

View File

@@ -0,0 +1,32 @@
name: App-Service Build test
on:
push:
branches:
- "module-appservice"
paths:
- 'framework/app-service/**'
- '!framework/app-service/.olares/**'
- '!framework/app-service/README.md'
- '!framework/app-service/PROJECT'
pull_request:
branches:
- "module-appservice"
paths:
- 'framework/app-service/**'
- '!framework/app-service/.olares/**'
- '!framework/app-service/README.md'
- '!framework/app-service/PROJECT'
jobs:
build0-main:
runs-on: ubuntu-latest
steps:
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install -y btrfs-progs libbtrfs-dev
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '1.24.6'
- run: make build
working-directory: framework/app-service

View File

@@ -0,0 +1,62 @@
name: Publish app-service to Dockerhub
on:
workflow_dispatch:
inputs:
tags:
description: 'Release Tags'
jobs:
publish_dockerhub_amd64:
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASS }}
- name: Build and push amd64 Docker image
uses: docker/build-push-action@v3
with:
push: true
tags: beclab/app-service:${{ github.event.inputs.tags }}-amd64
context: framework/app-service
file: framework/app-service/Dockerfile
platforms: linux/amd64
publish_dockerhub_arm64:
runs-on: self-hosted
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASS }}
- name: Build and push arm64 Docker image
uses: docker/build-push-action@v3
with:
push: true
tags: beclab/app-service:${{ github.event.inputs.tags }}-arm64
context: framework/app-service
file: framework/app-service/Dockerfile
platforms: linux/arm64
publish_manifest:
needs:
- publish_dockerhub_amd64
- publish_dockerhub_arm64
runs-on: ubuntu-latest
steps:
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASS }}
- name: Push manifest
run: |
docker manifest create beclab/app-service:${{ github.event.inputs.tags }} --amend beclab/app-service:${{ github.event.inputs.tags }}-amd64 --amend beclab/app-service:${{ github.event.inputs.tags }}-arm64
docker manifest push beclab/app-service:${{ github.event.inputs.tags }}

View File

@@ -0,0 +1,63 @@
name: Publish image-service to Dockerhub
on:
workflow_dispatch:
inputs:
tags:
description: 'Release Tags'
jobs:
publish_dockerhub_amd64:
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASS }}
- name: Build and push amd64 Docker image
uses: docker/build-push-action@v3
with:
push: true
tags: beclab/image-service:${{ github.event.inputs.tags }}-amd64
context: framework/app-service
file: framework/app-service/Dockerfile.image
platforms: linux/amd64
publish_dockerhub_arm64:
runs-on: self-hosted
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASS }}
- name: Build and push arm64 Docker image
uses: docker/build-push-action@v3
with:
push: true
tags: beclab/image-service:${{ github.event.inputs.tags }}-arm64
context: framework/app-service
file: framework/app-service/Dockerfile.image
platforms: linux/arm64
publish_manifest:
needs:
- publish_dockerhub_amd64
- publish_dockerhub_arm64
runs-on: ubuntu-latest
steps:
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASS }}
- name: Push manifest
run: |
docker manifest create beclab/image-service:${{ github.event.inputs.tags }} --amend beclab/image-service:${{ github.event.inputs.tags }}-amd64 --amend beclab/image-service:${{ github.event.inputs.tags }}-arm64
docker manifest push beclab/image-service:${{ github.event.inputs.tags }}

View File

@@ -44,9 +44,9 @@ jobs:
with:
go-version: 1.22.1
- name: install udev-devel
- name: install udev-devel and pcap-devel
run: |
sudo apt update && sudo apt install -y libudev-dev
sudo apt update && sudo apt install -y libudev-dev libpcap-dev
- name: Install x86_64 cross-compiler
run: sudo apt-get update && sudo apt-get install -y build-essential

View File

@@ -17,7 +17,7 @@ jobs:
steps:
- id: generate
run: |
v=1.12.2-$(date +"%Y%m%d")
v=1.12.3-$(date +"%Y%m%d")
echo "version=$v" >> "$GITHUB_OUTPUT"
release-id:

4
.gitignore vendored
View File

@@ -37,4 +37,6 @@ docs/.vitepress/dist/
docs/.vitepress/cache/
node_modules
.idea/
cli/olares-cli*
cli/olares-cli*
framework/app-service/bin

View File

@@ -10,6 +10,8 @@
[![Discord](https://img.shields.io/badge/Discord-7289DA?logo=discord&logoColor=white)](https://discord.gg/olares)
[![License](https://img.shields.io/badge/License-AGPL--3.0-blue)](https://github.com/beclab/olares/blob/main/LICENSE)
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
<p>
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
@@ -21,7 +23,7 @@
<p align="center">
<a href="https://olares.com">Website</a> ·
<a href="https://docs.olares.com">Documentation</a> ·
<a href="https://larepass.olares.com">Download LarePass</a> ·
<a href="https://www.olares.com/larepass">Download LarePass</a> ·
<a href="https://github.com/beclab/apps">Olares Apps</a> ·
<a href="https://space.olares.com">Olares Space</a>
</p>
@@ -33,7 +35,7 @@
![Personal Cloud](https://app.cdn.olares.com/github/olares/public-cloud-to-personal-cloud.jpg)
We believe you have a fundamental right to control your digital life. The most effective way to uphold this right is by hosting your data locally, on your own hardware.
Olares is an **open-source personal cloud operating system** designed to empower you to own and manage your digital assets locally. Instead of relying on public cloud services, you can deploy powerful open-source alternatives locally on Olares, such as Ollama for hosting LLMs, SD WebUI for image generation, and Mastodon for building censor free social space. Imagine the power of the cloud, but with you in complete command.
Olares is an **open-source personal cloud operating system** designed to empower you to own and manage your digital assets locally. Instead of relying on public cloud services, you can deploy powerful open-source alternatives locally on Olares, such as Ollama for hosting LLMs, ComfyUI for image generation, and Perplexica for private, AI-driven search and reasoning. Imagine the power of the cloud, but with you in complete command.
> 🌟 *Star us to receive instant notifications about new releases and updates.*

View File

@@ -10,6 +10,8 @@
[![Discord](https://img.shields.io/badge/Discord-7289DA?logo=discord&logoColor=white)](https://discord.gg/olares)
[![License](https://img.shields.io/badge/License-AGPL--3.0-blue)](https://github.com/beclab/olares/blob/main/LICENSE)
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
<p>
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
@@ -21,7 +23,7 @@
<p align="center">
<a href="https://olares.com">网站</a> ·
<a href="https://docs.olares.com">文档</a> ·
<a href="https://larepass.olares.com">下载 LarePass</a> ·
<a href="https://www.olares.cn/larepass">下载 LarePass</a> ·
<a href="https://github.com/beclab/apps">Olares 应用</a> ·
<a href="https://space.olares.com">Olares Space</a>
</p>
@@ -34,7 +36,7 @@
我们坚信,**您拥有掌控自己数字生活的基本权利**。维护这一权利最有效的方式,就是将您的数据托管在本地,在您自己的硬件上。
Olares 是一款开源个人云操作系统,旨在让您能够轻松在本地拥有并管理自己的数字资产。您无需再依赖公有云服务,而可以在 Olares 上本地部署强大的开源平替服务或应用,例如可以使用 Ollama 托管大语言模型,使用 SD WebUI 用于图像生成,以及使用 Mastodon 构建不受审查的社交空间。Olares 让坐拥云计算的强大威力,又能完全将其置于自己掌控之下。
Olares 是一款开源个人云操作系统,旨在让您能够轻松在本地拥有并管理自己的数字资产。您无需再依赖公有云服务,而可以在 Olares 上本地部署强大的开源平替服务或应用,例如可以使用 Ollama 托管大语言模型,使用 ComfyUI 生成图像,以及使用 Perplexica 打造本地化、注重隐私的 AI 搜索与问答体验。Olares 让坐拥云计算的强大威力,又能完全将其置于自己掌控之下。
> 为 Olares 点亮 🌟 以及时获取新版本和更新的通知。

View File

@@ -10,6 +10,8 @@
[![Discord](https://img.shields.io/badge/Discord-7289DA?logo=discord&logoColor=white)](https://discord.gg/olares)
[![License](https://img.shields.io/badge/License-AGPL--3.0-blue)](https://github.com/beclab/olares/blob/main/LICENSE)
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
<p>
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
@@ -21,7 +23,7 @@
<p align="center">
<a href="https://olares.com">ウェブサイト</a> ·
<a href="https://docs.olares.com">ドキュメント</a> ·
<a href="https://larepass.olares.com">LarePassをダウンロード</a> ·
<a href="https://www.olares.com/larepass">LarePassをダウンロード</a> ·
<a href="https://github.com/beclab/apps">Olaresアプリ</a> ·
<a href="https://space.olares.com">Olares Space</a>
</p>
@@ -34,8 +36,7 @@
私たちは、あなたが自身のデジタルライフをコントロールする基本的な権利を有すると確信しています。この権利を守る最も効果的な方法は、あなたのデータをローカルの、あなた自身のハードウェア上でホストすることです。
Olaresは、あなたが自身のデジタル資産をローカルで容易に所有し管理できるよう設計された、オープンソースのパーソナルクラウドOSです。もはやパブリッククラウドサービスに依存する必要はありません。Olares上で、例えばOllamaを利用した大規模言語モデルのホスティング、SD WebUIによる画像生成、Mastodonを用いた検閲のないソーシャルスペースの構築など、強力なオープンソースの代替サービスやアプリケーションをローカルにデプロイできます。Olaresは、クラウドコンピューティングの絶大な力を活用しつつ、それを完全に自身のコントロール下に置くことを可能にします
Olaresは、あなたが自身のデジタル資産をローカルで所有し管理できるよう設計された、オープンソースのパーソナルクラウドOSです。パブリッククラウドサービスに依存する代わりに、Olares上で強力なオープンソースの代替をローカルにデプロイできます。例えば、LLMのホスティングにはOllama、画像生成にはComfyUI、そしてプライバシーを重視したAI駆動の検索と推論にはPerplexicaを利用できます。クラウドの力をそのままに、主導権は常にあなたの手に
> 🌟 *新しいリリースや更新についての通知を受け取るために、スターを付けてください。*
## アーキテクチャ
@@ -44,7 +45,7 @@ Olaresは、あなたが自身のデジタル資産をローカルで容易に
![Olaresのアーキテクチ](https://app.cdn.olares.com/github/olares/olares-architecture.jpg)
各コンポーネントの詳細については、[Olares アーキテクチャ](https://docs.olares.com/manual/concepts/system-architecture.html)(英語版)をご参照ください。
各コンポーネントの詳細については、[Olares アーキテクチャ](https://docs.olares.com/developer/concepts/system-architecture.html)(英語版)をご参照ください。
> 🔍**OlaresとNASの違いは何ですか**
>

View File

@@ -51,6 +51,8 @@ rules:
- "/provider/get_dataset_folder_status"
- "/provider/update_dataset_folder_paths"
- "/seahub/api/*"
- "/system/configuration/encoding"
- "/api/search/get_directory/"
verbs: ["*"]
---

View File

@@ -209,6 +209,21 @@ spec:
port: 80
targetPort: 91
---
apiVersion: v1
kind: Service
metadata:
name: share-fe-service
namespace: user-space-{{ .Values.bfl.username }}
spec:
selector:
app: olares-app
type: ClusterIP
ports:
- protocol: TCP
name: share
port: 80
targetPort: 92
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -220,12 +235,12 @@ metadata:
applications.app.bytetrade.io/owner: '{{ .Values.bfl.username }}'
applications.app.bytetrade.io/author: bytetrade.io
annotations:
applications.app.bytetrade.io/default-thirdlevel-domains: '[{"appName": "olares-app","entranceName":"dashboard","thirdLevelDomain":"dashboard"},{"appName":"olares-app","entranceName":"control-hub","thirdLevelDomain":"control-hub"},{"appName":"olares-app","entranceName":"files","thirdLevelDomain":"files"},{"appName": "olares-app","entranceName":"vault","thirdLevelDomain":"vault"},{"appName":"olares-app","entranceName":"headscale","thirdLevelDomain":"headscale"},{"appName":"olares-app","entranceName":"settings","thirdLevelDomain":"settings"},{"appName": "olares-app","entranceName":"market","thirdLevelDomain":"market"},{"appName":"olares-app","entranceName":"profile","thirdLevelDomain":"profile"}]'
applications.app.bytetrade.io/default-thirdlevel-domains: '[{"appName": "olares-app","entranceName":"dashboard","thirdLevelDomain":"dashboard"},{"appName":"olares-app","entranceName":"control-hub","thirdLevelDomain":"control-hub"},{"appName":"olares-app","entranceName":"files","thirdLevelDomain":"files"},{"appName":"olares-app","entranceName":"share","thirdLevelDomain":"share"},{"appName": "olares-app","entranceName":"vault","thirdLevelDomain":"vault"},{"appName":"olares-app","entranceName":"headscale","thirdLevelDomain":"headscale"},{"appName":"olares-app","entranceName":"settings","thirdLevelDomain":"settings"},{"appName": "olares-app","entranceName":"market","thirdLevelDomain":"market"},{"appName":"olares-app","entranceName":"profile","thirdLevelDomain":"profile"}]'
applications.app.bytetrade.io/icon: https://app.cdn.olares.com/appstore/olaresapps/icon.png
applications.app.bytetrade.io/title: 'Olares Apps'
applications.app.bytetrade.io/version: '0.0.1'
applications.app.bytetrade.io/policies: '{"policies":[{"entranceName":"dashboard","uriRegex":"/js/script.js", "level":"public"},{"entranceName":"dashboard","uriRegex":"/js/api/send", "level":"public"}]}'
applications.app.bytetrade.io/entrances: '[{"name":"files", "host":"files-fe-service", "port":80,"title":"Files","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png"},{"name":"vault", "host":"vault-service", "port":80,"title":"Vault","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/vault/icon.png"},{"name":"market", "host":"appstore-fe-service", "port":80,"title":"Market","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/appstore/icon.png"},{"name":"settings", "host":"settings-service", "port":80,"title":"Settings","icon":"https://app.cdn.olares.com/appstore/settings/icon.png"},{"name":"profile", "host":"profile-service", "port":80,"title":"Profile","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/profile/icon.png"},{"name":"dashboard","host":"dashboard-service","port":80,"title":"Dashboard","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/dashboard/icon.png"},{"name":"control-hub","host":"control-hub-service","port":80,"title":"Control Hub","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/control-hub/icon.png"},{"name":"headscale", "host":"headscale-svc", "port":80,"title":"Headscale","invisible": true,"icon":"https://app.cdn.olares.com/appstore/headscale/icon.png"}]'
applications.app.bytetrade.io/entrances: '[{"name":"files", "host":"files-fe-service", "port":80,"title":"Files","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png"},{"name":"share","authLevel":"public", "host":"share-fe-service", "port":80,"title":"Share","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png","invisible":true},{"name":"vault", "host":"vault-service", "port":80,"title":"Vault","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/vault/icon.png"},{"name":"market", "host":"appstore-fe-service", "port":80,"title":"Market","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/appstore/icon.png"},{"name":"settings", "host":"settings-service", "port":80,"title":"Settings","icon":"https://app.cdn.olares.com/appstore/settings/icon.png"},{"name":"profile", "host":"profile-service", "port":80,"title":"Profile","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/profile/icon.png"},{"name":"dashboard","host":"dashboard-service","port":80,"title":"Dashboard","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/dashboard/icon.png"},{"name":"control-hub","host":"control-hub-service","port":80,"title":"Control Hub","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/control-hub/icon.png"},{"name":"headscale", "host":"headscale-svc", "port":80,"title":"Headscale","invisible": true,"icon":"https://app.cdn.olares.com/appstore/headscale/icon.png"}]'
spec:
replicas: 1
selector:
@@ -303,7 +318,7 @@ spec:
chown -R 1000:1000 /uploadstemp && \
chown -R 1000:1000 /appdata
- name: olares-app-init
image: beclab/system-frontend:v1.5.11
image: beclab/system-frontend:v1.6.16
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -352,6 +367,7 @@ spec:
- containerPort: 89
- containerPort: 90
- containerPort: 91
- containerPort: 92
- containerPort: 8090
command:
- /bin/sh
@@ -424,7 +440,7 @@ spec:
- name: NATS_SUBJECT_VAULT
value: os.vault.{{ .Values.bfl.username}}
- name: user-service
image: beclab/user-service:v0.0.62
image: beclab/user-service:v0.0.73
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000

View File

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Values.bfl.username }}:prometheus-k8s
annotations:
provider-registry-ref: {{ .Values.bfl.username }}/4ae9f19e
provider-service-ref: http://prometheus-k8s.kubesphere-monitoring-system:9090
rules:
- nonResourceURLs:
- "*"
verbs: ["*"]

View File

@@ -9,4 +9,7 @@ metadata:
rules:
- nonResourceURLs:
- "/document/search*"
- "/task/*"
- "/search/*"
- "/monitorsetting/*"
verbs: ["*"]

View File

@@ -57,4 +57,16 @@ metadata:
rules:
- nonResourceURLs:
- "/server/intent/send"
verbs: ["*"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Values.bfl.username }}:dashboard
annotations:
provider-registry-ref: {{ .Values.bfl.username }}/dashboard
provider-service-ref: prometheus-k8s.kubesphere-monitoring-system:9090
rules:
- nonResourceURLs:
- "*"
verbs: ["*"]

View File

@@ -29,7 +29,7 @@ spec:
containers:
- name: wizard
image: beclab/wizard:v1.5.11
image: beclab/wizard:v1.6.5
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80

View File

@@ -7,10 +7,18 @@ function command_exists() {
command -v "$@" > /dev/null 2>&1
}
if [[ x"$REPO_PATH" == x"" ]]; then
export REPO_PATH="#__REPO_PATH__"
fi
if [[ "x${REPO_PATH:3}" == "xREPO_PATH__" ]]; then
export REPO_PATH="/"
fi
if [[ x"$VERSION" == x"" ]]; then
if [[ "$LOCAL_RELEASE" == "1" ]]; then
ts=$(date +%Y%m%d%H%M%S)
export VERSION="1.12.2-$ts"
export VERSION="1.12.3-$ts"
echo "will build and use a local release of Olares with version: $VERSION"
echo ""
else
@@ -20,7 +28,7 @@ fi
if [[ "x${VERSION}" == "x" || "x${VERSION:3}" == "xVERSION__" ]]; then
echo "error: Olares version is unspecified, please set the VERSION env var and rerun this script."
echo "for example: VERSION=1.12.2-20241124 bash $0"
echo "for example: VERSION=1.12.3-20241124 bash $0"
exit 1
fi
@@ -92,13 +100,17 @@ if [[ "$LOCAL_RELEASE" == "1" ]]; then
fi
INSTALL_OLARES_CLI=$(which olares-cli)
else
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]]; then
expected_vendor="main"
if [[ "$(basename "$REPO_PATH")" == "olares-one" ]]; then
expected_vendor="OlaresOne"
fi
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]] && [[ "$(olares-cli --vendor)" == "$expected_vendor" ]]; then
INSTALL_OLARES_CLI=$(which olares-cli)
echo "olares-cli already installed and is the expected version"
echo ""
else
if [[ ! -f ${CLI_FILE} ]]; then
CLI_URL="${cdn_url}/${CLI_FILE}"
CLI_URL="${cdn_url}${REPO_PATH}${CLI_FILE}"
echo "downloading Olares installer from ${CLI_URL} ..."
echo ""

View File

@@ -7,6 +7,15 @@ function command_exists() {
command -v "$@" > /dev/null 2>&1
}
if [[ x"$REPO_PATH" == x"" ]]; then
export REPO_PATH="#__REPO_PATH__"
fi
if [[ "x${REPO_PATH:3}" == "xREPO_PATH__" ]]; then
export REPO_PATH="/"
fi
function read_tty() {
echo -n $1
read $2 < /dev/tty
@@ -149,7 +158,7 @@ export VERSION="#__VERSION__"
if [[ "x${VERSION}" == "x" || "x${VERSION:3}" == "xVERSION__" ]]; then
echo "error: Olares version is unspecified, please set the VERSION env var and rerun this script."
echo "for example: VERSION=1.12.2-20241124 bash $0"
echo "for example: VERSION=1.12.3-20241124 bash $0"
exit 1
fi
@@ -172,15 +181,17 @@ else
RELEASE_ID_SUFFIX=".$RELEASE_ID"
fi
CLI_FILE="olares-cli-v${VERSION}_linux_${ARCH}${RELEASE_ID_SUFFIX}.tar.gz"
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]]; then
expected_vendor="main"
if [[ "$(basename "$REPO_PATH")" == "olares-one" ]]; then
expected_vendor="OlaresOne"
fi
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]] && [[ "$(olares-cli --vendor)" == "$expected_vendor" ]]; then
INSTALL_OLARES_CLI=$(which olares-cli)
echo "olares-cli already installed and is the expected version"
echo ""
else
if [[ ! -f ${CLI_FILE} ]]; then
CLI_URL="${cdn_url}/${CLI_FILE}"
CLI_URL="${cdn_url}${REPO_PATH}${CLI_FILE}"
echo "downloading Olares installer from ${CLI_URL} ..."
echo ""

View File

@@ -17,6 +17,7 @@ metadata:
kubesphere.io/creator: '{{ .Values.user.name }}'
labels:
kubesphere.io/workspace: system-workspace
openpolicyagent.org/webhook: ignore
name: os-platform
---
@@ -27,6 +28,7 @@ metadata:
kubesphere.io/creator: '{{ .Values.user.name }}'
labels:
kubesphere.io/workspace: system-workspace
openpolicyagent.org/webhook: ignore
name: os-framework
---
@@ -37,6 +39,7 @@ metadata:
kubesphere.io/creator: '{{ .Values.user.name }}'
labels:
kubesphere.io/workspace: system-workspace
openpolicyagent.org/webhook: ignore
name: os-protected

View File

@@ -66,6 +66,12 @@ if [ ! -z $RELEASE_ID ]; then
sh -c "$SED 's/#__RELEASE_ID__/${RELEASE_ID}/' joincluster.sh"
fi
# replace repo path placeholder in scripts if provided
if [ ! -z "$REPO_PATH" ]; then
sh -c "$SED 's|#__REPO_PATH__|${REPO_PATH}|g' install.sh"
sh -c "$SED 's|#__REPO_PATH__|${REPO_PATH}|g' joincluster.sh"
fi
$TAR --exclude=wizard/tools --exclude=.git -zcvf ${BASE_DIR}/../install-wizard-${VERSION}.tar.gz .
popd

View File

@@ -21,6 +21,11 @@ systemEnvs:
type: url
editable: true
required: true
# docker hub mirror endpoint for docker.io registry
- envName: OLARES_SYSTEM_DOCKERHUB_SERVICE
type: url
editable: true
required: false
# the legacy OLARES_ROOT_DIR
- envName: OLARES_SYSTEM_ROOT_PATH
default: /olares

View File

@@ -33,6 +33,7 @@ userEnvs:
- envName: OLARES_USER_SMTP_SECURE
type: bool
editable: true
default: "true"
- envName: OLARES_USER_SMTP_USE_TLS
type: bool
editable: true
@@ -54,15 +55,18 @@ userEnvs:
- envName: OLARES_USER_HUGGINGFACE_SERVICE
type: url
editable: true
default: "https://huggingface.co/"
- envName: OLARES_USER_HUGGINGFACE_TOKEN
type: password
editable: true
- envName: OLARES_USER_PYPI_SERVICE
type: url
editable: true
default: "https://pypi.org/simple/"
- envName: OLARES_USER_GITHUB_SERVICE
type: url
editable: true
default: "https://github.com/"
- envName: OLARES_USER_GITHUB_TOKEN
type: password
editable: true

445
cli/cmd/ctl/disk/extend.go Normal file
View File

@@ -0,0 +1,445 @@
package disk
import (
	"fmt"
	"log"
	"os"
	"sort"
	"strconv"
	"strings"
	"text/tabwriter"

	"github.com/beclab/Olares/cli/pkg/utils"
	"github.com/beclab/Olares/cli/pkg/utils/lvm"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)
const defaultOlaresVGName = "olares-vg"
// NewExtendDiskCommand returns the `disk extend` subcommand, which
// interactively grows an existing LVM logical volume using a currently
// unmounted block device:
//  1. pick the volume group and logical volume to extend,
//  2. pick the spare unmounted device,
//  3. if the device carries existing partitions, wipe them after an explicit
//     "YES" confirmation (otherwise just confirm the extension),
//  4. add the device as a new PV, extend the LV, and print the resulting LVs.
//
// Every failure aborts the process via log.Fatalf, as this runs as a
// one-shot CLI operation.
func NewExtendDiskCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "extend",
		Short: "extend disk operations",
		Run: func(cmd *cobra.Command, args []string) {
			// early return if no unmounted disks found
			unmountedDevices, err := lvm.FindUnmountedDevices()
			if err != nil {
				log.Fatalf("Error finding unmounted devices: %v\n", err)
			}
			if len(unmountedDevices) == 0 {
				log.Println("No unmounted disks found to extend.")
				return
			}
			// select volume group to extend
			currentVgs, err := lvm.FindCurrentLVM()
			if err != nil {
				log.Fatalf("Error finding current LVM: %v\n", err)
			}
			if len(currentVgs) == 0 {
				log.Println("No valid volume groups found to extend.")
				return
			}
			selectedVg, err := selectExtendingVG(currentVgs)
			if err != nil {
				log.Fatalf("Error selecting volume group: %v\n", err)
			}
			log.Printf("Selected volume group to extend: %s\n", selectedVg)
			// select logical volume to extend
			lvInVg, err := lvm.FindLvByVgName(selectedVg)
			if err != nil {
				log.Fatalf("Error finding logical volumes in volume group %s: %v\n", selectedVg, err)
			}
			if len(lvInVg) == 0 {
				log.Printf("No logical volumes found in volume group %s to extend.\n", selectedVg)
				return
			}
			selectedLv, err := selectExtendingLV(selectedVg, lvInVg)
			if err != nil {
				log.Fatalf("Error selecting logical volume: %v\n", err)
			}
			log.Printf("Selected logical volume to extend: %s\n", selectedLv)
			// select unmounted devices to create physical volume
			selectedDevice, err := selectExtendingDevices(unmountedDevices)
			if err != nil {
				log.Fatalf("Error selecting unmounted device: %v\n", err)
			}
			log.Printf("Selected unmounted device to use: %s\n", selectedDevice)
			options := &LvmExtendOptions{
				VgName:     selectedVg,
				DevicePath: selectedDevice,
				LvName:     selectedLv,
				DeviceBlk:  unmountedDevices[selectedDevice],
			}
			log.Printf("Extending logical volume %s in volume group %s using device %s\n", options.LvName, options.VgName, options.DevicePath)
			// A device with existing partitions must be wiped first, which
			// destroys its data — that path requires the stronger warning.
			cleanupNeeded, err := options.cleanupDiskParts()
			if err != nil {
				log.Fatalf("Error during disk partition cleanup check: %v\n", err)
			}
			if cleanupNeeded {
				do, err := options.destroyWarning()
				if err != nil {
					log.Fatalf("Error during partition cleanup confirmation: %v\n", err)
				}
				if !do {
					log.Println("Operation aborted by user.")
					return
				}
				err = options.deleteDevicePartitions()
				if err != nil {
					log.Fatalf("Error deleting device partitions: %v\n", err)
				}
			} else {
				do, err := options.makeDecision()
				if err != nil {
					log.Fatalf("Error during extension confirmation: %v\n", err)
				}
				if !do {
					log.Println("Operation aborted by user.")
					return
				}
			}
			err = options.extendLVM()
			if err != nil {
				log.Fatalf("Error extending LVM: %v\n", err)
			}
			log.Println("Disk extension completed successfully.")
			// end of command run, and show result
			// show the result of the extension
			lvInVg, err = lvm.FindLvByVgName(selectedVg)
			if err != nil {
				log.Fatalf("Error finding logical volumes in volume group %s: %v\n", selectedVg, err)
			}
			w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
			fmt.Fprint(w, "id\tLV\tVG\tLSize\tMountpoints\n")
			for idx, lv := range lvInVg {
				fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\n", idx+1, lv.LvName, lv.VgName, lv.LvSize, strings.Join(lv.Mountpoints, ","))
			}
			w.Flush()
		},
	}
	return cmd
}
// LvmExtendOptions bundles the user's selections for one disk-extension run:
// which volume group / logical volume to grow and which spare device to use.
type LvmExtendOptions struct {
	// VgName is the volume group that will be extended.
	VgName string
	// DevicePath is the unmounted block device to be added as a new PV.
	DevicePath string
	// LvName is the logical volume inside VgName to grow.
	LvName string
	// DeviceBlk describes DevicePath; its Children are inspected to detect
	// pre-existing partitions that must be wiped first.
	DeviceBlk *lvm.BlkPart
}
// selectExtendingVG asks the user which volume group should be extended and
// returns its name. When exactly one candidate exists the prompt is skipped.
func selectExtendingVG(vgs []*lvm.VgItem) (string, error) {
	// a single candidate needs no interaction
	if len(vgs) == 1 {
		return vgs[0].VgName, nil
	}
	in, err := utils.GetBufIOReaderOfTerminalInput()
	if err != nil {
		return "", errors.Wrap(err, "failed to get terminal input reader")
	}
	fmt.Println("Multiple volume groups found. Please select one to extend:")
	fmt.Println("")
	// render the candidates as an aligned table with 1-based ids
	tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprint(tw, "id\tVG\tVSize\tVFree\n")
	for i, vg := range vgs {
		fmt.Fprintf(tw, "%d\t%s\t%s\t%s\n", i+1, vg.VgName, vg.VgSize, vg.VgFree)
	}
	tw.Flush()
	// re-prompt until a valid id is entered
	for {
		fmt.Printf("\nEnter the volume group id to extend: ")
		line, readErr := in.ReadString('\n')
		if readErr != nil && readErr.Error() != "EOF" {
			return "", errors.Wrap(errors.WithStack(readErr), "read volume group id failed")
		}
		line = strings.TrimSpace(line)
		if line == "" {
			fmt.Printf("\ninvalid volume group id, please try again")
			continue
		}
		id, convErr := strconv.Atoi(line)
		if convErr != nil || id < 1 || id > len(vgs) {
			fmt.Printf("\ninvalid volume group id, please try again")
			continue
		}
		return vgs[id-1].VgName, nil
	}
}
// selectExtendingLV asks the user which logical volume inside vgName should be
// extended and returns its name.
// Prompt-skipping cases: a single candidate is returned directly, and for the
// default Olares volume group "data" is auto-selected when present, with
// "root" as the fallback.
func selectExtendingLV(vgName string, lvs []*lvm.LvItem) (string, error) {
	if len(lvs) == 1 {
		return lvs[0].LvName, nil
	}
	if vgName == defaultOlaresVGName {
		// scan stops at "data", so it wins over "root" whenever present
		choice := ""
		for _, lv := range lvs {
			if lv.LvName == "root" {
				choice = lv.LvName
				continue
			}
			if lv.LvName == "data" {
				choice = lv.LvName
				break
			}
		}
		if choice != "" {
			return choice, nil
		}
	}
	in, err := utils.GetBufIOReaderOfTerminalInput()
	if err != nil {
		return "", errors.Wrap(err, "failed to get terminal input reader")
	}
	fmt.Println("Multiple logical volumes found. Please select one to extend:")
	fmt.Println("")
	// render the candidates as an aligned table with 1-based ids
	tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprint(tw, "id\tLV\tVG\tLSize\tMountpoints\n")
	for i, lv := range lvs {
		fmt.Fprintf(tw, "%d\t%s\t%s\t%s\t%s\n", i+1, lv.LvName, lv.VgName, lv.LvSize, strings.Join(lv.Mountpoints, ","))
	}
	tw.Flush()
	// re-prompt until a valid id is entered
	for {
		fmt.Printf("\nEnter the logical volume id to extend: ")
		line, readErr := in.ReadString('\n')
		if readErr != nil && readErr.Error() != "EOF" {
			return "", errors.Wrap(errors.WithStack(readErr), "read logical volume id failed")
		}
		line = strings.TrimSpace(line)
		if line == "" {
			fmt.Printf("\ninvalid logical volume id, please try again")
			continue
		}
		id, convErr := strconv.Atoi(line)
		if convErr != nil || id < 1 || id > len(lvs) {
			fmt.Printf("\ninvalid logical volume id, please try again")
			continue
		}
		return lvs[id-1].LvName, nil
	}
}
// selectExtendingDevices prompts the user to choose one of the unmounted
// block devices and returns its path.
// With exactly one candidate it is returned without prompting; with none an
// error is returned.
func selectExtendingDevices(unmountedDevices map[string]*lvm.BlkPart) (string, error) {
	if len(unmountedDevices) == 0 {
		return "", errors.New("no unmounted devices available for selection")
	}
	if len(unmountedDevices) == 1 {
		for path := range unmountedDevices {
			return path, nil
		}
	}
	reader, err := utils.GetBufIOReaderOfTerminalInput()
	if err != nil {
		return "", errors.Wrap(err, "failed to get terminal input reader")
	}
	fmt.Println("Multiple unmounted devices found. Please select one to use:")
	fmt.Println("")
	// Sort the paths so the displayed ids are deterministic across runs;
	// Go randomizes map iteration order, which previously shuffled the list.
	devicePaths := make([]string, 0, len(unmountedDevices))
	for path := range unmountedDevices {
		devicePaths = append(devicePaths, path)
	}
	sort.Strings(devicePaths)
	// print header
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprint(w, "id\tDevice\tSize\n")
	for i, path := range devicePaths {
		fmt.Fprintf(w, "%d\t%s\t%s\n", i+1, path, unmountedDevices[path].Size)
	}
	w.Flush()
	// re-prompt until a valid id is entered
	for {
		fmt.Printf("\nEnter the device id to use: ")
		input, readErr := reader.ReadString('\n')
		if readErr != nil && readErr.Error() != "EOF" {
			return "", errors.Wrap(errors.WithStack(readErr), "read device id failed")
		}
		input = strings.TrimSpace(input)
		if input == "" {
			fmt.Printf("\ninvalid device id, please try again")
			continue
		}
		selectedIdx, convErr := strconv.Atoi(input)
		if convErr != nil || selectedIdx < 1 || selectedIdx > len(devicePaths) {
			fmt.Printf("\ninvalid device id, please try again")
			continue
		}
		return devicePaths[selectedIdx-1], nil
	}
}
// destroyWarning warns that all data on the selected device will be destroyed
// and blocks until the user types "YES" (case-insensitive); pressing CTRL+C
// is the only way to abort.
func (o LvmExtendOptions) destroyWarning() (bool, error) {
	in, err := utils.GetBufIOReaderOfTerminalInput()
	if err != nil {
		return false, errors.Wrap(err, "failed to get terminal input reader")
	}
	fmt.Printf("WARNING: This will DESTROY all data on %s\n", o.DevicePath)
	for {
		fmt.Printf("Type 'YES' to continue, CTRL+C to abort: ")
		line, readErr := in.ReadString('\n')
		if readErr != nil && readErr.Error() != "EOF" {
			return false, errors.Wrap(errors.WithStack(readErr), "read confirmation input failed")
		}
		if strings.ToUpper(strings.TrimSpace(line)) == "YES" {
			return true, nil
		}
	}
}
// makeDecision asks the user to confirm extending onto a clean device and
// blocks until "YES" (case-insensitive) is typed; pressing CTRL+C is the only
// way to abort.
func (o LvmExtendOptions) makeDecision() (bool, error) {
	in, err := utils.GetBufIOReaderOfTerminalInput()
	if err != nil {
		return false, errors.Wrap(err, "failed to get terminal input reader")
	}
	fmt.Printf("NOTICE: Extending LVM will begin on device %s\n", o.DevicePath)
	for {
		fmt.Printf("Type 'YES' to continue, CTRL+C to abort: ")
		line, readErr := in.ReadString('\n')
		if readErr != nil && readErr.Error() != "EOF" {
			return false, errors.Wrap(errors.WithStack(readErr), "read confirmation input failed")
		}
		if strings.ToUpper(strings.TrimSpace(line)) == "YES" {
			return true, nil
		}
	}
}
// cleanupDiskParts reports whether the selected device carries existing
// partitions (child block devices) that must be wiped before use.
func (o LvmExtendOptions) cleanupDiskParts() (bool, error) {
	if o.DeviceBlk == nil {
		return false, errors.New("device block is nil")
	}
	return len(o.DeviceBlk.Children) > 0, nil
}
// deleteDevicePartitions wipes all existing partitions on the selected
// device. Any LVM objects residing on those partitions (logical volumes,
// volume groups, physical volumes) are torn down first so the partitions can
// be deleted cleanly. It is a no-op when the device has no partitions.
func (o LvmExtendOptions) deleteDevicePartitions() error {
	log.Printf("Selected device %s has existing partitions. Cleaning up...\n", o.DevicePath)
	if o.DeviceBlk == nil {
		return errors.New("device block is nil")
	}
	if len(o.DeviceBlk.Children) == 0 {
		return nil
	}
	log.Printf("Deleting existing partitions on device %s...\n", o.DevicePath)
	partitions := make([]string, 0, len(o.DeviceBlk.Children))
	for _, part := range o.DeviceBlk.Children {
		partitions = append(partitions, "/dev/"+part.Name)
	}
	vgs, err := lvm.FindVgsOnDevice(partitions)
	if err != nil {
		return errors.Wrap(err, "failed to find volume groups on device partitions")
	}
	if len(vgs) > 0 {
		log.Println("existing volume group on device, delete it first")
		for _, vg := range vgs {
			lvs, err := lvm.FindLvByVgName(vg.VgName)
			if err != nil {
				return errors.Wrapf(err, "failed to find logical volumes in volume group %s", vg.VgName)
			}
			// deactivate first so the LVs are not busy during removal
			if err = lvm.DeactivateLv(vg.VgName); err != nil {
				return errors.Wrapf(err, "failed to deactivate volume group %s", vg.VgName)
			}
			for _, lv := range lvs {
				if err = lvm.RemoveLv(lv.LvPath); err != nil {
					return errors.Wrapf(err, "failed to remove logical volume %s", lv.LvPath)
				}
			}
			// fix: the error message previously formatted the *VgItem struct
			// (vg) with %s instead of its name
			if err = lvm.RemoveVg(vg.VgName); err != nil {
				return errors.Wrapf(err, "failed to remove volume group %s", vg.VgName)
			}
			if err = lvm.RemovePv(vg.PvName); err != nil {
				return errors.Wrapf(err, "failed to remove physical volume %s", vg.PvName)
			}
		}
	}
	log.Printf("Deleting partitions on device %s...\n", o.DevicePath)
	if err = lvm.DeleteDevicePartitions(o.DevicePath); err != nil {
		return errors.Wrapf(err, "failed to delete partitions on device %s", o.DevicePath)
	}
	return nil
}
// extendLVM performs the actual extension: create a partition on the device,
// add it to the volume group as a new physical volume, and grow the logical
// volume.
func (o LvmExtendOptions) extendLVM() error {
	log.Printf("Creating partition on device %s...\n", o.DevicePath)
	if err := lvm.MakePartOnDevice(o.DevicePath); err != nil {
		return errors.Wrapf(err, "failed to create partition on device %s", o.DevicePath)
	}
	log.Printf("Creating physical volume on device %s...\n", o.DevicePath)
	if err := lvm.AddNewPV(o.DevicePath, o.VgName); err != nil {
		return errors.Wrapf(err, "failed to create physical volume on device %s", o.DevicePath)
	}
	log.Printf("Extending volume group %s with logic volume %s on device %s...\n", o.VgName, o.LvName, o.DevicePath)
	if err := lvm.ExtendLv(o.VgName, o.LvName); err != nil {
		return errors.Wrapf(err, "failed to extend logical volume %s in volume group %s", o.LvName, o.VgName)
	}
	return nil
}

View File

@@ -0,0 +1,34 @@
package disk
import (
"fmt"
"log"
"os"
"text/tabwriter"
"github.com/beclab/Olares/cli/pkg/utils/lvm"
"github.com/spf13/cobra"
)
// NewListUnmountedDisksCommand returns the `disk list-unmounted` subcommand,
// which prints every unmounted block device and its size as a tab-aligned
// table on stdout.
func NewListUnmountedDisksCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "list-unmounted",
		Short: "List unmounted disks",
		Run: func(cmd *cobra.Command, args []string) {
			devices, err := lvm.FindUnmountedDevices()
			if err != nil {
				log.Fatalf("Error finding unmounted devices: %v\n", err)
			}
			// render the devices as an aligned table
			tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
			fmt.Fprint(tw, "Device\tSize\n")
			for path, blk := range devices {
				fmt.Fprintf(tw, "%s\t%s\n", path, blk.Size)
			}
			tw.Flush()
		},
	}
}

15
cli/cmd/ctl/disk/root.go Normal file
View File

@@ -0,0 +1,15 @@
package disk
import "github.com/spf13/cobra"
// NewDiskCommand returns the root `disk` command that groups all disk
// management subcommands.
func NewDiskCommand() *cobra.Command {
	root := &cobra.Command{
		Use:   "disk",
		Short: "disk management operations",
	}
	root.AddCommand(
		NewListUnmountedDisksCommand(),
		NewExtendDiskCommand(),
	)
	return root
}

View File

@@ -0,0 +1,23 @@
package gpu
import (
"log"
"github.com/beclab/Olares/cli/pkg/pipelines"
"github.com/spf13/cobra"
)
// NewCmdDisableNouveau returns the `gpu disable-nouveau` subcommand, which
// runs the pipeline that blacklists and disables the nouveau kernel module.
func NewCmdDisableNouveau() *cobra.Command {
	return &cobra.Command{
		Use:   "disable-nouveau",
		Short: "Blacklist and disable the nouveau kernel module",
		Run: func(cmd *cobra.Command, args []string) {
			err := pipelines.DisableNouveau()
			if err != nil {
				log.Fatalf("error: %v", err)
			}
		},
	}
}

View File

@@ -14,7 +14,7 @@ func NewCmdGpu() *cobra.Command {
rootGpuCmd.AddCommand(NewCmdUninstallpu())
rootGpuCmd.AddCommand(NewCmdEnableGpu())
rootGpuCmd.AddCommand(NewCmdDisableGpu())
rootGpuCmd.AddCommand(NewCmdUpgradeGpu())
rootGpuCmd.AddCommand(NewCmdGpuStatus())
rootGpuCmd.AddCommand(NewCmdDisableNouveau())
return rootGpuCmd
}

View File

@@ -1,24 +0,0 @@
package gpu
import (
"log"
"github.com/beclab/Olares/cli/cmd/ctl/options"
"github.com/beclab/Olares/cli/pkg/pipelines"
"github.com/spf13/cobra"
)
// NewCmdUpgradeGpu returns the `gpu upgrade` subcommand, which runs the
// GPU-driver upgrade pipeline using the standard install-GPU options.
// NOTE(review): this file is removed in this changeset (the command is no
// longer registered in NewCmdGpu); kept documented here for reference.
func NewCmdUpgradeGpu() *cobra.Command {
	o := options.NewInstallGpuOptions()
	cmd := &cobra.Command{
		Use:   "upgrade",
		Short: "upgrade GPU drivers for Olares",
		Run: func(cmd *cobra.Command, args []string) {
			if err := pipelines.UpgradeGpuDrivers(o); err != nil {
				log.Fatalf("error: %v", err)
			}
		},
	}
	// expose the install-GPU flags on this command as well
	o.AddFlags(cmd)
	return cmd
}

View File

@@ -49,7 +49,7 @@ func NewCmdRelease() *cobra.Command {
}
if version == "" {
version = fmt.Sprintf("1.12.2-%s", time.Now().Format("20060102150405"))
version = fmt.Sprintf("1.12.3-%s", time.Now().Format("20060102150405"))
fmt.Printf("--version unspecified, using: %s\n", version)
time.Sleep(1 * time.Second)
}

View File

@@ -1,6 +1,7 @@
package ctl
import (
"github.com/beclab/Olares/cli/cmd/ctl/disk"
"github.com/beclab/Olares/cli/cmd/ctl/gpu"
"github.com/beclab/Olares/cli/cmd/ctl/node"
"github.com/beclab/Olares/cli/cmd/ctl/os"
@@ -33,6 +34,7 @@ func NewDefaultCommand() *cobra.Command {
cmds.AddCommand(node.NewNodeCommand())
cmds.AddCommand(gpu.NewCmdGpu())
cmds.AddCommand(user.NewUserCommand())
cmds.AddCommand(disk.NewDiskCommand())
return cmds
}

View File

@@ -10,11 +10,12 @@ import (
)
type activateUserOptions struct {
Mnemonic string
BflUrl string
VaultUrl string
Password string
OlaresId string
Mnemonic string
BflUrl string
VaultUrl string
Password string
OlaresId string
ResetPassword string
Location string
Language string
@@ -53,6 +54,7 @@ func (o *activateUserOptions) AddFlags(cmd *cobra.Command) {
cmd.Flags().BoolVar(&o.EnableTunnel, "enable-tunnel", false, "Enable tunnel mode (default: false)")
cmd.Flags().StringVar(&o.Host, "host", "", "FRP host (only used when tunnel is enabled)")
cmd.Flags().StringVar(&o.Jws, "jws", "", "FRP JWS token (only used when tunnel is enabled)")
cmd.Flags().StringVar(&o.ResetPassword, "reset-password", "", "New password for resetting (required for password reset)")
}
func (o *activateUserOptions) Validate() error {
@@ -65,6 +67,9 @@ func (o *activateUserOptions) Validate() error {
if o.Mnemonic == "" {
return fmt.Errorf("Mnemonic is required")
}
if o.ResetPassword == "" {
return fmt.Errorf("Reset password is required")
}
return nil
}
@@ -88,7 +93,7 @@ func (c *activateUserOptions) Run() error {
return fmt.Errorf("failed to initialize global stores: %v", err)
}
err = wizard.UserBindTerminus(c.Mnemonic, c.BflUrl, c.VaultUrl, c.Password, c.OlaresId, localName)
accessToken, err := wizard.UserBindTerminus(c.Mnemonic, c.BflUrl, c.VaultUrl, c.Password, c.OlaresId, localName)
if err != nil {
return fmt.Errorf("user bind failed: %v", err)
}
@@ -96,7 +101,7 @@ func (c *activateUserOptions) Run() error {
log.Printf("✅ Vault activation completed successfully!")
log.Printf("🚀 Starting system activation wizard...")
wizardConfig := wizard.CustomWizardConfig(c.Location, c.Language, c.EnableTunnel, c.Host, c.Jws, c.Password, c.Password)
wizardConfig := wizard.CustomWizardConfig(c.Location, c.Language, c.EnableTunnel, c.Host, c.Jws, c.Password, c.ResetPassword)
log.Printf("Wizard configuration:")
log.Printf(" Location: %s", wizardConfig.System.Location)
@@ -107,7 +112,7 @@ func (c *activateUserOptions) Run() error {
log.Printf(" FRP JWS: %s", wizardConfig.System.FRP.Jws)
}
err = wizard.RunActivationWizard(c.BflUrl, "", wizardConfig)
err = wizard.RunActivationWizard(c.BflUrl, accessToken, wizardConfig)
if err != nil {
return fmt.Errorf("activation wizard failed: %v", err)
}

View File

@@ -5,7 +5,7 @@ go 1.24.2
toolchain go1.24.6
replace (
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.10
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.41
bytetrade.io/web3os/backups-sdk => github.com/Above-Os/backups-sdk v0.1.17
github.com/containers/image/v5 => github.com/containers/image/v5 v5.21.1
github.com/containers/storage => github.com/containers/storage v1.40.0

View File

@@ -45,8 +45,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beclab/api v0.0.2 h1:aD5RcMie2uqa/FZI7aQBa1F4yVEib8/x3IIZSLiHkBM=
github.com/beclab/api v0.0.2/go.mod h1:ESZLe8cf4934QFkU6cqbskKfiTyNk67i1qbv/ctS6js=
github.com/beclab/app-service v0.4.10 h1:0CT8sl5K+qwQsrKO6FYxbUFNXcRJVkkErw3sB7V7OQw=
github.com/beclab/app-service v0.4.10/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
github.com/beclab/app-service v0.4.41 h1:WSIXEqHSAepHweBooPkc+pedVaGGn335RugNwixkciY=
github.com/beclab/app-service v0.4.41/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=

View File

@@ -447,6 +447,7 @@ var (
"/etc/kubekey",
"/etc/kke/version",
"/etc/systemd/system/olares-swap.service",
"/tmp/vgpulock",
}
networkResetCmds = []string{

View File

@@ -78,6 +78,8 @@ func (m *RunPrechecksModule) Init() {
new(SystemdCheck),
new(RequiredPortsCheck),
new(ConflictingContainerdCheck),
new(NvidiaCardArchChecker),
new(NouveauChecker),
new(CudaChecker),
}
runPreChecks := &task.LocalTask{

View File

@@ -23,6 +23,7 @@ import (
"fmt"
"net"
"os"
"slices"
"strings"
"time"
@@ -114,7 +115,7 @@ func (t *RequiredPortsCheck) Check(runtime connector.Runtime) error {
defer l.Close()
}
if len(unbindablePorts) > 0 {
return fmt.Errorf("port %v required by Olares cannot be bound", unbindablePorts)
return fmt.Errorf("port %v required by Olares cannot be bound, you can check which process using the command `sudo netstat -tlnp`", unbindablePorts)
}
return nil
}
@@ -133,13 +134,15 @@ func (t *ConflictingContainerdCheck) Check(runtime connector.Runtime) error {
if kubeRuntime.Arg.IsCloudInstance {
return nil
}
fixMSG := "\nIf it is installed as a component of Docker, it should be uninstalled per the official doc https://docs.docker.com/engine/install/ubuntu/#uninstall-old-versions"
fixMSG += "\nIf it is left over from a previous installation of Olares, clean it up using the command `sudo olares-cli uninstall --all`"
containerdBin, err := util.GetCommand("containerd")
if err == nil && containerdBin != "" {
return fmt.Errorf("found existing containerd binary: %s, a containerd managed by Olares is required to ensure normal function", containerdBin)
return fmt.Errorf("found existing containerd binary: %s, a containerd managed by Olares is required to ensure normal function%s", containerdBin, fixMSG)
}
containerdSocket := "/run/containerd/containerd.sock"
if util.IsExist(containerdSocket) {
return fmt.Errorf("found existing containerd socket: %s, a containerd managed by Olares is required to ensure normal function", containerdSocket)
return fmt.Errorf("found existing containerd socket: %s, a containerd managed by Olares is required to ensure normal function%s", containerdSocket, fixMSG)
}
return nil
}
@@ -269,20 +272,104 @@ func (t *ValidResolvConfCheck) Check(runtime connector.Runtime) error {
return nil
}
type CudaChecker struct {
CudaCheckTask
type NvidiaCardArchChecker struct{}
func (t *NvidiaCardArchChecker) Name() string {
return "NvidiaCardArch"
}
func (c *CudaChecker) Check(runtime connector.Runtime) error {
err := c.CudaCheckTask.Execute(runtime)
func (t *NvidiaCardArchChecker) Check(runtime connector.Runtime) error {
supportedArchs := []string{"Blackwell", "Hopper", "Ada Lovelace", "Ampere", "Turing"}
model, arch, err := utils.DetectNvidiaModelAndArch(runtime)
if err != nil {
return err
}
if strings.TrimSpace(model) == "" {
return nil
}
if !slices.Contains(supportedArchs, arch) {
return fmt.Errorf("unsupported NVIDIA card %s of architecture: %s, Olares only supports the following architectures: %s", model, arch, strings.Join(supportedArchs, ", "))
}
return nil
}
// the command `precheck` will check the cuda version,
// only if the cuda is installed and the current version is not supported, it will return an error
if err == ErrCudaInstalled {
// NouveauChecker checks whether nouveau is loaded and has modeset=1 or -1.
// This check only runs when an NVIDIA GPU is present.
type NouveauChecker struct{}
func (n *NouveauChecker) Name() string {
return "NouveauKernelModule"
}
func (n *NouveauChecker) Check(runtime connector.Runtime) error {
if !runtime.GetSystemInfo().IsLinux() {
return nil
}
model, _, err := utils.DetectNvidiaModelAndArch(runtime)
if err != nil {
fmt.Println("Error detecting NVIDIA card:", err)
os.Exit(1)
}
if strings.TrimSpace(model) == "" {
return nil
}
return err
if !util.IsExist("/sys/module/nouveau") {
return nil
}
const modesetPath = "/sys/module/nouveau/parameters/modeset"
data, err := os.ReadFile(modesetPath)
if err != nil {
fmt.Printf("Error reading modeset parameter of nouveau kernel module by reading file %s: %v", modesetPath, err)
os.Exit(1)
}
val := strings.TrimSpace(string(data))
if val == "1" || val == "-1" {
return fmt.Errorf("detected nouveau kernel module loaded with modeset=%s; this conflicts with the NVIDIA driver that Olares will install, please disable it by running `sudo olares-cli gpu disable-nouveau`, REBOOT your machine, and try again", val)
}
return nil
}
type CudaChecker struct{}
func (c *CudaChecker) Name() string {
return "CUDA"
}
func (c *CudaChecker) Check(runtime connector.Runtime) error {
if !runtime.GetSystemInfo().IsLinux() {
return nil
}
st, err := utils.GetNvidiaStatus(runtime)
if err != nil {
return err
}
if st == nil || !st.Installed {
if st != nil && st.Running {
return ErrKernelDriverUninstalledButRunning
}
logger.Info("NVIDIA driver is not installed")
return nil
}
if st.Mismatch {
return ErrDriverLibraryVersionMismatch
}
if st.InstallMethod != utils.GPUDriverInstallMethodRunfile && !runtime.GetSystemInfo().IsWsl() {
return ErrNotInstalledByRunfile
}
logger.Infof("NVIDIA driver is installed, version: %s, cuda version: %s", st.DriverVersion, st.CudaVersion)
oldestVer := semver.MustParse(supportedCudaVersions[0])
newestVer := semver.MustParse(supportedCudaVersions[len(supportedCudaVersions)-1])
currentVer := semver.MustParse(st.CudaVersion)
if oldestVer.GreaterThan(currentVer) {
return ErrUnsupportedCudaVersion
}
if newestVer.LessThan(currentVer) {
logger.Info("CUDA version is too new, there might be compatibility issues with some applications, use at your own risk")
}
return nil
}
//////////////////////////////////////////////
@@ -474,44 +561,8 @@ func (t *RemoveWSLChattr) Execute(runtime connector.Runtime) error {
return nil
}
var ErrUnsupportedCudaVersion = errors.New("unsupported cuda version, please uninstall it, REBOOT your machine, and try again")
var ErrCudaInstalled = errors.New("cuda is installed")
var supportedCudaVersions = []string{"12.8", common.CurrentVerifiedCudaVersion}
// CudaCheckTask checks the cuda version, if the current version is not supported, it will return an error
// before executing the command `olares-cli gpu install`, we need to check the cuda version
// if the cuda if not installed, it will return nil and the command can be executed.
// if the cuda is installed and the version is unsupported, the command can not be executed,
// or the cuda version is supported, executing the command is unnecessary.
type CudaCheckTask struct{}
func (t *CudaCheckTask) Name() string {
return "Cuda"
}
func (t *CudaCheckTask) Execute(runtime connector.Runtime) error {
if !runtime.GetSystemInfo().IsLinux() {
return nil
}
info, installed, err := utils.ExecNvidiaSmi(runtime)
switch {
case err != nil:
return err
case !installed:
logger.Info("NVIDIA driver is not installed")
return nil
default:
logger.Infof("NVIDIA driver is installed, version: %s, cuda version: %s", info.DriverVersion, info.CudaVersion)
oldestVer := semver.MustParse(supportedCudaVersions[0])
newestVer := semver.MustParse(supportedCudaVersions[len(supportedCudaVersions)-1])
currentVer := semver.MustParse(info.CudaVersion)
if oldestVer.GreaterThan(currentVer) {
return ErrUnsupportedCudaVersion
}
if newestVer.LessThan(currentVer) {
logger.Info("CUDA version is too new, there might be compatibility issues with some applications, use at your own risk")
}
return ErrCudaInstalled
}
}
var ErrUnsupportedCudaVersion = errors.New("unsupported cuda version, please uninstall it using the command `sudo olares-cli gpu uninstall`, REBOOT your machine, and try again")
var ErrKernelDriverUninstalledButRunning = errors.New("NVIDIA driver is uninstalled, but the kernel driver is still running, please REBOOT your machine, and try again")
var ErrNotInstalledByRunfile = errors.New("NVIDIA driver is installed, but not installed by runfile, please uninstall it using the command `sudo olares-cli gpu uninstall`, REBOOT your machine, and try again")
var ErrDriverLibraryVersionMismatch = errors.New("NVIDIA driver is installed, but the library version with the running version is mismatched, please REBOOT your machine, and try again")
var supportedCudaVersions = []string{common.CurrentVerifiedCudaVersion}

View File

@@ -26,7 +26,7 @@ const (
DefaultKubernetesVersion = ""
DefaultKubeSphereVersion = "v3.3.0"
DefaultTokenMaxAge = 31536000
CurrentVerifiedCudaVersion = "12.9"
CurrentVerifiedCudaVersion = "13.0"
)
const (
@@ -279,30 +279,29 @@ const (
)
const (
ENV_OLARES_BASE_DIR = "OLARES_BASE_DIR"
ENV_OLARES_VERSION = "OLARES_VERSION"
ENV_TERMINUS_IS_CLOUD_VERSION = "TERMINUS_IS_CLOUD_VERSION"
ENV_KUBE_TYPE = "KUBE_TYPE"
ENV_REGISTRY_MIRRORS = "REGISTRY_MIRRORS"
ENV_NVIDIA_CONTAINER_REPO_MIRROR = "NVIDIA_CONTAINER_REPO_MIRROR"
ENV_OLARES_CDN_SERVICE = "OLARES_SYSTEM_CDN_SERVICE"
ENV_STORAGE = "STORAGE"
ENV_S3_BUCKET = "S3_BUCKET"
ENV_LOCAL_GPU_ENABLE = "LOCAL_GPU_ENABLE"
ENV_AWS_ACCESS_KEY_ID_SETUP = "AWS_ACCESS_KEY_ID_SETUP"
ENV_AWS_SECRET_ACCESS_KEY_SETUP = "AWS_SECRET_ACCESS_KEY_SETUP"
ENV_AWS_SESSION_TOKEN_SETUP = "AWS_SESSION_TOKEN_SETUP"
ENV_BACKUP_KEY_PREFIX = "BACKUP_KEY_PREFIX"
ENV_BACKUP_SECRET = "BACKUP_SECRET"
ENV_CLUSTER_ID = "CLUSTER_ID"
ENV_BACKUP_CLUSTER_BUCKET = "BACKUP_CLUSTER_BUCKET"
ENV_TOKEN_MAX_AGE = "TOKEN_MAX_AGE"
ENV_HOST_IP = "HOST_IP"
ENV_PREINSTALL = "PREINSTALL"
ENV_DISABLE_HOST_IP_PROMPT = "DISABLE_HOST_IP_PROMPT"
ENV_AUTO_ADD_FIREWALL_RULES = "AUTO_ADD_FIREWALL_RULES"
ENV_TERMINUS_OS_DOMAINNAME = "TERMINUS_OS_DOMAINNAME"
ENV_DEFAULT_WSL_DISTRO_LOCATION = "DEFAULT_WSL_DISTRO_LOCATION" // If set to 1, the default WSL distro storage will be used.
ENV_OLARES_BASE_DIR = "OLARES_BASE_DIR"
ENV_OLARES_VERSION = "OLARES_VERSION"
ENV_TERMINUS_IS_CLOUD_VERSION = "TERMINUS_IS_CLOUD_VERSION"
ENV_KUBE_TYPE = "KUBE_TYPE"
ENV_REGISTRY_MIRRORS = "REGISTRY_MIRRORS"
ENV_OLARES_CDN_SERVICE = "OLARES_SYSTEM_CDN_SERVICE"
ENV_STORAGE = "STORAGE"
ENV_S3_BUCKET = "S3_BUCKET"
ENV_LOCAL_GPU_ENABLE = "LOCAL_GPU_ENABLE"
ENV_AWS_ACCESS_KEY_ID_SETUP = "AWS_ACCESS_KEY_ID_SETUP"
ENV_AWS_SECRET_ACCESS_KEY_SETUP = "AWS_SECRET_ACCESS_KEY_SETUP"
ENV_AWS_SESSION_TOKEN_SETUP = "AWS_SESSION_TOKEN_SETUP"
ENV_BACKUP_KEY_PREFIX = "BACKUP_KEY_PREFIX"
ENV_BACKUP_SECRET = "BACKUP_SECRET"
ENV_CLUSTER_ID = "CLUSTER_ID"
ENV_BACKUP_CLUSTER_BUCKET = "BACKUP_CLUSTER_BUCKET"
ENV_TOKEN_MAX_AGE = "TOKEN_MAX_AGE"
ENV_HOST_IP = "HOST_IP"
ENV_PREINSTALL = "PREINSTALL"
ENV_DISABLE_HOST_IP_PROMPT = "DISABLE_HOST_IP_PROMPT"
ENV_AUTO_ADD_FIREWALL_RULES = "AUTO_ADD_FIREWALL_RULES"
ENV_TERMINUS_OS_DOMAINNAME = "TERMINUS_OS_DOMAINNAME"
ENV_DEFAULT_WSL_DISTRO_LOCATION = "DEFAULT_WSL_DISTRO_LOCATION" // If set to 1, the default WSL distro storage will be used.
ENV_CONTAINER = "container"
ENV_CONTAINER_MODE = "CONTAINER_MODE" // running in docker container

View File

@@ -58,7 +58,7 @@ func NewLocalRuntime(debug, ingoreErr bool) (LocalRuntime, error) {
host.Address = ""
host.InternalAddress = ""
host.Port = 22
host.User = u.Name
host.User = u.Username
host.Password = ""
host.PrivateKeyPath = fmt.Sprintf("%s/.ssh/id_rsa", u.HomeDir)
host.Arch = ""

View File

@@ -1,9 +1,10 @@
package gpu
import (
"github.com/beclab/Olares/cli/pkg/container"
"time"
"github.com/beclab/Olares/cli/pkg/container"
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/core/prepare"
"github.com/beclab/Olares/cli/pkg/core/task"
@@ -20,11 +21,6 @@ type InstallDriversModule struct {
// 1. no card is found (which skips the driver installation)
// 2. no driver is found (which skips the container toolkit installation)
FailOnNoInstallation bool
// currently, this is only used to skip the nvidia-smi check after driver upgrade
// because the nvidia-smi will not work after upgrade (Failed to initialize NVML: Driver/library version mismatch)
// otherwise, always check the driver is running properly after installation to fail early and avoid other issues
SkipNVMLCheckAfterInstall bool
}
func (m *InstallDriversModule) IsSkip() bool {
@@ -34,14 +30,14 @@ func (m *InstallDriversModule) IsSkip() bool {
func (m *InstallDriversModule) Init() {
m.Name = "InstallGPUDriver"
installCudaDeps := &task.RemoteTask{
Name: "InstallCudaKeyRing",
installCudaDriver := &task.RemoteTask{ // not for WSL
Name: "InstallNvidiaDriver",
Hosts: m.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(CudaNotInstalled),
&NvidiaGraphicsCard{ExitOnNotFound: m.FailOnNoInstallation},
},
Action: &InstallCudaDeps{
Action: &InstallCudaDriver{
ManifestAction: manifest.ManifestAction{
Manifest: m.Manifest,
BaseDir: m.BaseDir,
@@ -51,20 +47,7 @@ func (m *InstallDriversModule) Init() {
Retry: 1,
}
installCudaDriver := &task.RemoteTask{ // not for WSL
Name: "InstallNvidiaDriver",
Hosts: m.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(CudaNotInstalled),
&NvidiaGraphicsCard{ExitOnNotFound: m.FailOnNoInstallation},
},
Action: &InstallCudaDriver{SkipNVMLCheckAfterInstall: m.SkipNVMLCheckAfterInstall},
Parallel: false,
Retry: 1,
}
m.Tasks = []task.Interface{
installCudaDeps,
installCudaDriver,
}
}
@@ -364,13 +347,20 @@ func (l *UninstallCudaModule) Init() {
}
type ExitIfNoDriverUpgradeNeededModule struct {
type DisableNouveauModule struct {
common.KubeModule
}
func (l *ExitIfNoDriverUpgradeNeededModule) Init() {
l.Tasks = append(l.Tasks, &task.LocalTask{
Action: new(ExitIfNoDriverUpgradeNeeded),
})
func (m *DisableNouveauModule) Init() {
m.Name = "DisableNouveau"
writeBlacklist := &task.LocalTask{
Name: "WriteNouveauBlacklist",
Action: new(WriteNouveauBlacklist),
Retry: 1,
}
m.Tasks = []task.Interface{
writeBlacklist,
}
}

View File

@@ -3,12 +3,14 @@ package gpu
import (
"context"
"os"
"strings"
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
"github.com/beclab/Olares/cli/pkg/clientset"
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/utils"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,35 +34,33 @@ func (p *GPUEnablePrepare) PreCheck(runtime connector.Runtime) (bool, error) {
type CudaInstalled struct {
common.KubePrepare
precheck.CudaCheckTask
FailOnNoInstallation bool
}
func (p *CudaInstalled) PreCheck(runtime connector.Runtime) (bool, error) {
err := p.CudaCheckTask.Execute(runtime)
st, err := utils.GetNvidiaStatus(runtime)
if err != nil {
if err == precheck.ErrCudaInstalled {
return true, nil
}
return false, err
}
return false, nil
if st == nil || !st.Installed {
return false, nil
}
return true, nil
}
type CudaNotInstalled struct {
common.KubePrepare
precheck.CudaCheckTask
}
func (p *CudaNotInstalled) PreCheck(runtime connector.Runtime) (bool, error) {
err := p.CudaCheckTask.Execute(runtime)
st, err := utils.GetNvidiaStatus(runtime)
if err != nil {
if err == precheck.ErrCudaInstalled {
return false, nil
}
return false, err
}
return true, nil
if st == nil || !st.Installed {
return true, nil
}
return false, nil
}
type K8sNodeInstalled struct {
@@ -97,9 +97,6 @@ type NvidiaGraphicsCard struct {
}
func (p *NvidiaGraphicsCard) PreCheck(runtime connector.Runtime) (found bool, err error) {
if runtime.RemoteHost().GetOs() == common.Darwin {
return false, nil
}
defer func() {
if !p.ExitOnNotFound {
return
@@ -109,20 +106,15 @@ func (p *NvidiaGraphicsCard) PreCheck(runtime connector.Runtime) (found bool, er
os.Exit(1)
}
}()
output, err := runtime.GetRunner().SudoCmd(
"lspci | grep -i -e vga -e 3d | grep -i nvidia", false, false)
// an empty grep also results in the exit code to be 1
// and thus a non-nil err
model, _, err := utils.DetectNvidiaModelAndArch(runtime)
if err != nil {
logger.Debug("try to find nvidia graphics card error ", err)
logger.Debug("ignore card driver installation")
logger.Debugf("detect NVIDIA GPU error: %v", err)
}
if strings.TrimSpace(model) == "" {
return false, nil
}
if output != "" {
logger.Info("found nvidia graphics card: ", output)
}
return output != "", nil
logger.Infof("found NVIDIA GPU: %s", model)
return true, nil
}
type ContainerdInstalled struct {

View File

@@ -10,11 +10,8 @@ import (
"strings"
"time"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/apis/kubekey/v1alpha2"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
"github.com/beclab/Olares/cli/pkg/clientset"
"github.com/beclab/Olares/cli/pkg/common"
cc "github.com/beclab/Olares/cli/pkg/core/common"
@@ -39,10 +36,7 @@ type CheckWslGPU struct {
func (t *CheckWslGPU) CheckNvidiaSmiFileExists() bool {
var nvidiaSmiFile = "/usr/lib/wsl/lib/nvidia-smi"
if !util.IsExist(nvidiaSmiFile) {
return false
}
return true
return util.IsExist(nvidiaSmiFile)
}
func (t *CheckWslGPU) Execute(runtime *common.KubeRuntime) {
@@ -66,88 +60,41 @@ func (t *CheckWslGPU) Execute(runtime *common.KubeRuntime) {
runtime.Arg.SetGPU(true)
}
type InstallCudaDeps struct {
type InstallCudaDriver struct {
common.KubeAction
manifest.ManifestAction
}
func (t *InstallCudaDeps) Execute(runtime connector.Runtime) error {
var systemInfo = runtime.GetSystemInfo()
var cudaKeyringVersion string
var osVersion string
switch {
case systemInfo.IsUbuntu():
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_0
if systemInfo.IsUbuntuVersionEqual(connector.Ubuntu24) || systemInfo.IsUbuntuVersionEqual(connector.Ubuntu25) {
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_1
osVersion = "24.04"
} else if systemInfo.IsUbuntuVersionEqual(connector.Ubuntu22) {
osVersion = "22.04"
} else {
osVersion = "20.04"
}
case systemInfo.IsDebian():
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_1
if systemInfo.IsDebianVersionEqual(connector.Debian12) {
osVersion = connector.Debian12.String()
} else {
osVersion = connector.Debian11.String()
}
func (t *InstallCudaDriver) Execute(runtime connector.Runtime) error {
_, _ = runtime.GetRunner().SudoCmd("apt-get update", false, true)
// install build deps for dkms
if _, err := runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends dkms build-essential linux-headers-$(uname -r)", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to install kernel build dependencies for nvidia runfile")
}
var fileId = fmt.Sprintf("%s-%s_cuda-keyring_%s-1",
strings.ToLower(systemInfo.GetOsPlatformFamily()), osVersion, cudaKeyringVersion)
cudakeyring, err := t.Manifest.Get(fileId)
// fetch runfile from manifest
item, err := t.Manifest.Get("cuda-driver")
if err != nil {
return err
}
path := cudakeyring.FilePath(t.BaseDir)
var exists = util.IsExist(path)
if !exists {
return fmt.Errorf("Failed to find %s binary in %s", cudakeyring.Filename, path)
runfile := item.FilePath(t.BaseDir)
if !util.IsExist(runfile) {
return fmt.Errorf("failed to find %s binary in %s", item.Filename, runfile)
}
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("dpkg -i --force all %s", path), false, true); err != nil {
return err
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("chmod +x %s", runfile), false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to chmod +x runfile")
}
return nil
}
type InstallCudaDriver struct {
common.KubeAction
SkipNVMLCheckAfterInstall bool
}
func (t *InstallCudaDriver) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("apt-get update", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to apt-get update")
}
if runtime.GetSystemInfo().IsDebian() {
_, err := runtime.GetRunner().SudoCmd("apt-get -y install nvidia-open", false, true)
return errors.Wrap(err, "failed to apt-get install nvidia-open")
}
if _, err := runtime.GetRunner().SudoCmd("apt-get -y install nvidia-kernel-open-575", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to apt-get install nvidia-kernel-open-575")
}
if t.SkipNVMLCheckAfterInstall {
return nil
// execute runfile with required flags
cmd := fmt.Sprintf("sh %s -z --no-x-check --allow-installation-with-running-driver --no-check-for-alternate-installs --dkms --rebuild-initramfs -s", runfile)
if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to install nvidia driver via runfile")
}
// now that the nvidia driver is installed,
// the nvidia-smi should work correctly,
// if not, a manual reboot is needed by the user
_, installed, err := utils.ExecNvidiaSmi(runtime)
if err != nil {
return fmt.Errorf("failed to check nvidia driver status by executing nvidia-smi: %v", err)
}
if !installed {
st, err := utils.GetNvidiaStatus(runtime)
if err != nil || st == nil || !st.Installed || st.Mismatch {
logger.Error("ERROR: nvidia driver has been installed, but is not running properly, please reboot the machine and try again")
os.Exit(1)
}
@@ -170,7 +117,7 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
keyPath := gpgkey.FilePath(t.BaseDir)
if !util.IsExist(keyPath) {
return fmt.Errorf("Failed to find %s binary in %s", gpgkey.Filename, keyPath)
return fmt.Errorf("failed to find %s binary in %s", gpgkey.Filename, keyPath)
}
if _, err := runtime.GetRunner().SudoCmd("install -d -m 0755 /usr/share/keyrings", false, true); err != nil {
@@ -190,7 +137,7 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
libPath := libnvidia.FilePath(t.BaseDir)
if !util.IsExist(libPath) {
return fmt.Errorf("Failed to find %s binary in %s", libnvidia.Filename, libPath)
return fmt.Errorf("failed to find %s binary in %s", libnvidia.Filename, libPath)
}
// remove any conflicting libnvidia-container.list
@@ -209,19 +156,30 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
return err
}
mirrorRepo := os.Getenv(common.ENV_NVIDIA_CONTAINER_REPO_MIRROR)
if mirrorRepo == "" {
// decide mirror based on OLARES_SYSTEM_CDN_SERVICE
var mirrorHost string
cdnService := os.Getenv(common.ENV_OLARES_CDN_SERVICE)
if cdnService != "" {
cdnRaw := cdnService
if !strings.HasPrefix(cdnRaw, "http") {
cdnRaw = "https://" + cdnRaw
}
if cdnURL, err := url.Parse(cdnRaw); err == nil {
host := cdnURL.Host
if host == "" {
host = cdnService
}
if strings.HasSuffix(host, "olares.cn") {
mirrorHost = "mirrors.ustc.edu.cn"
}
} else if strings.HasSuffix(cdnService, "olares.cn") {
mirrorHost = "mirrors.ustc.edu.cn"
}
}
if mirrorHost == "" {
return nil
}
mirrorRepoRawURL := mirrorRepo
if !strings.HasPrefix(mirrorRepoRawURL, "http") {
mirrorRepoRawURL = "https://" + mirrorRepoRawURL
}
mirrorRepoURL, err := url.Parse(mirrorRepoRawURL)
if err != nil || mirrorRepoURL.Host == "" {
return fmt.Errorf("invalid mirror for nvidia container: %s", mirrorRepo)
}
cmd = fmt.Sprintf("sed -i 's#nvidia.github.io#%s#g' %s", mirrorRepoURL.Host, dstPath)
cmd = fmt.Sprintf("sed -i 's#nvidia.github.io#%s#g' %s", mirrorHost, dstPath)
if _, err := runtime.GetRunner().SudoCmd(cmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to switch nvidia container repo to mirror site")
}
@@ -233,9 +191,21 @@ type InstallNvidiaContainerToolkit struct {
}
func (t *InstallNvidiaContainerToolkit) Execute(runtime connector.Runtime) error {
containerdDropInDir := "/etc/containerd/config.d"
containerdConfigFile := "/etc/containerd/config.toml"
if util.IsExist(containerdDropInDir) {
if err := os.RemoveAll(containerdDropInDir); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to remove containerd drop-in directory")
}
}
if util.IsExist(containerdConfigFile) {
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("sed -i '/^import/d' %s", containerdConfigFile), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to remove import section from containerd config file")
}
}
logger.Debugf("install nvidia-container-toolkit")
if _, err := runtime.GetRunner().SudoCmd("apt-get update && sudo apt-get install -y nvidia-container-toolkit=1.17.9-1 nvidia-container-toolkit-base=1.17.9-1 jq", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to apt-get install nvidia-container-toolkit")
if _, err := runtime.GetRunner().SudoCmd("apt-get update && sudo apt-get install -y --allow-downgrades nvidia-container-toolkit=1.17.9-1 nvidia-container-toolkit-base=1.17.9-1 jq", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to apt-get install nvidia-container-toolkit")
}
return nil
}
@@ -382,7 +352,7 @@ func (g *GetCudaVersion) Execute(runtime connector.Runtime) error {
lines := strings.Split(res, "\n")
if lines == nil || len(lines) == 0 {
if len(lines) == 0 {
return nil
}
for _, line := range lines {
@@ -403,7 +373,6 @@ func (g *GetCudaVersion) Execute(runtime connector.Runtime) error {
type UpdateNodeLabels struct {
common.KubeAction
precheck.CudaCheckTask
}
func (u *UpdateNodeLabels) Execute(runtime connector.Runtime) error {
@@ -412,32 +381,26 @@ func (u *UpdateNodeLabels) Execute(runtime connector.Runtime) error {
return errors.Wrap(errors.WithStack(err), "kubeclient create error")
}
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
st, err := utils.GetNvidiaStatus(runtime)
if err != nil {
return err
}
if !installed {
logger.Info("nvidia-smi not exists")
if st == nil || !st.Installed {
logger.Info("NVIDIA driver is not installed")
return nil
}
supported := "false"
err = u.CudaCheckTask.Execute(runtime)
switch {
case err == precheck.ErrCudaInstalled:
if st.Installed {
supported = "true"
case err == precheck.ErrUnsupportedCudaVersion:
// bypass
case err != nil:
return err
case err == nil:
// impossible
logger.Warn("check impossible")
}
return UpdateNodeGpuLabel(context.Background(), client.Kubernetes(), &gpuInfo.DriverVersion, &gpuInfo.CudaVersion, &supported)
driverVersion := st.DriverVersion
if st.Mismatch && st.LibraryVersion != "" {
driverVersion = st.LibraryVersion
}
return UpdateNodeGpuLabel(context.Background(), client.Kubernetes(), &driverVersion, &st.CudaVersion, &supported)
}
type RemoveNodeLabels struct {
@@ -586,16 +549,44 @@ type UninstallNvidiaDrivers struct {
}
func (t *UninstallNvidiaDrivers) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("apt-get -y remove nvidia*", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to apt-get remove nvidia*")
_, _ = runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get -y autoremove --purge", false, true)
_, _ = runtime.GetRunner().SudoCmd("dpkg --configure -a || true", false, true)
listCmd := "dpkg -l | awk '/^(ii|i[UuFHWt]|rc|..R)/ {print $2}' | grep nvidia | grep -v container"
pkgs, _ := runtime.GetRunner().SudoCmd(listCmd, false, false)
pkgs = strings.ReplaceAll(pkgs, "\n", " ")
pkgs = strings.TrimSpace(pkgs)
if pkgs != "" {
removeCmd := fmt.Sprintf("DEBIAN_FRONTEND=noninteractive apt-get -y --auto-remove --purge remove %s", pkgs)
if _, err := runtime.GetRunner().SudoCmd(removeCmd, false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to remove nvidia packages via apt-get")
}
_, _ = runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get -y autoremove --purge", false, true)
}
if _, err := runtime.GetRunner().SudoCmd("apt-get -y remove libnvidia*", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to apt-get remove libnvidia*")
// also try to uninstall runfile-installed drivers if present
if out, _ := runtime.GetRunner().SudoCmd("test -x /usr/bin/nvidia-uninstall && echo yes || true", false, false); strings.TrimSpace(out) == "yes" {
if _, err := runtime.GetRunner().SudoCmd("/usr/bin/nvidia-uninstall -s", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to uninstall NVIDIA driver via nvidia-uninstall")
}
} else if out2, _ := runtime.GetRunner().SudoCmd("test -x /usr/bin/nvidia-installer && echo yes || true", false, false); strings.TrimSpace(out2) == "yes" {
if _, err := runtime.GetRunner().SudoCmd("/usr/bin/nvidia-installer --uninstall -s", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to uninstall NVIDIA driver via nvidia-installer --uninstall")
}
}
// clean up any leftover dkms-installed kernel modules for nvidia if present
// only remove .ko files under updates/dkms to avoid removing other modules
checkLeftoverCmd := "sh -c 'test -d /lib/modules/$(uname -r)/updates/dkms && find /lib/modules/$(uname -r)/updates/dkms -maxdepth 1 -type f -name \"nvidia*.ko\" -print -quit | grep -q . && echo yes || true'"
if out, _ := runtime.GetRunner().SudoCmd(checkLeftoverCmd, false, false); strings.TrimSpace(out) == "yes" {
if _, err := runtime.GetRunner().SudoCmd("find /lib/modules/$(uname -r)/updates/dkms -maxdepth 1 -type f -name 'nvidia*.ko' -print -delete", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to remove leftover nvidia dkms kernel modules")
}
// refresh module dependency maps
if _, err := runtime.GetRunner().SudoCmd("depmod -a $(uname -r)", false, true); err != nil {
logger.Error("Failed to refresh module dependency maps: ", err)
}
}
logger.Infof("uninstall nvidia drivers success, please reboot the system to take effect if you reinstall the new nvidia drivers")
return nil
}
@@ -604,19 +595,43 @@ type PrintGpuStatus struct {
}
func (t *PrintGpuStatus) Execute(runtime connector.Runtime) error {
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
st, err := utils.GetNvidiaStatus(runtime)
if err != nil {
return err
}
if !installed {
logger.Info("cuda not exists")
if st == nil {
logger.Info("no NVIDIA GPU status available")
return nil
}
logger.Infof("GPU Driver Version: %s", gpuInfo.DriverVersion)
logger.Infof("CUDA Version: %s", gpuInfo.CudaVersion)
// basic status
logger.Infof("Installed: %t", st.Installed)
if st.Installed {
logger.Infof("Install method: %s", st.InstallMethod)
}
logger.Infof("Running: %t", st.Running)
// running (kernel) driver version
if st.Running && strings.TrimSpace(st.DriverVersion) != "" {
logger.Infof("Running driver version (kernel): %s", st.DriverVersion)
}
// userland info from nvidia-smi (when available)
if st.Installed {
if st.Info != nil && strings.TrimSpace(st.Info.DriverVersion) != "" {
logger.Infof("Installed driver version (nvidia-smi): %s", st.Info.DriverVersion)
}
if strings.TrimSpace(st.CudaVersion) != "" {
logger.Infof("CUDA version (nvidia-smi): %s", st.CudaVersion)
}
if st.Mismatch {
if strings.TrimSpace(st.LibraryVersion) != "" {
logger.Warnf("Driver/library version mismatch, NVML library version: %s", st.LibraryVersion)
} else {
logger.Warn("Driver/library version mismatch detected")
}
}
}
if !st.Installed && !st.Running {
logger.Info("no NVIDIA driver detected (neither installed nor running)")
}
return nil
}
@@ -688,31 +703,35 @@ func (t *RestartPlugin) Execute(runtime connector.Runtime) error {
return nil
}
type ExitIfNoDriverUpgradeNeeded struct {
type WriteNouveauBlacklist struct {
common.KubeAction
}
func (t *ExitIfNoDriverUpgradeNeeded) Execute(runtime connector.Runtime) error {
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
if err != nil {
logger.Warn("error checking whether the GPU need upgrade:")
logger.Warn(err.Error())
logger.Warn("assuming an upgrade is needed and continue upgrading")
func (t *WriteNouveauBlacklist) Execute(runtime connector.Runtime) error {
if !runtime.GetSystemInfo().IsLinux() {
return nil
}
if !installed {
logger.Info("GPU driver not installed, will just install it")
return nil
const dir = "/usr/lib/modprobe.d"
const dst = "/usr/lib/modprobe.d/olares-disable-nouveau.conf"
const content = "blacklist nouveau\nblacklist lbm-nouveau\nalias nouveau off\nalias lbm-nouveau off\n"
if _, err := runtime.GetRunner().SudoCmd("install -d -m 0755 "+dir, false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to ensure /usr/lib/modprobe.d exists")
}
installedVersion, err := semver.NewVersion(gpuInfo.CudaVersion)
if err != nil {
logger.Warn("error parsing the current CUDA version of GPU driver \"%s\": %v", gpuInfo.CudaVersion, err)
logger.Warn("assuming an upgrade is needed and continue installing")
return nil
tmpPath := path.Join(runtime.GetBaseDir(), cc.PackageCacheDir, "gpu", "olares-disable-nouveau.conf")
if err := os.MkdirAll(path.Dir(tmpPath), 0755); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create temp dir for nouveau blacklist")
}
targetVersion, _ := semver.NewVersion(common.CurrentVerifiedCudaVersion)
if !targetVersion.GreaterThan(installedVersion) {
logger.Info("current GPU driver version is up to date, no need to upgrade")
if err := util.WriteFile(tmpPath, []byte(content), 0644); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to write temp nouveau blacklist file")
}
if err := runtime.GetRunner().SudoScp(tmpPath, dst); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to install nouveau blacklist file")
}
if out, _ := runtime.GetRunner().SudoCmd("test -d /sys/module/nouveau && echo loaded || true", false, false); strings.TrimSpace(out) == "loaded" {
logger.Infof("the disable file for nouveau kernel module has been written, but the nouveau kernel module is currently loaded. Please REBOOT your machine to make the disabling effective.")
os.Exit(0)
}
return nil

View File

@@ -202,12 +202,17 @@ func (g *GenerateK3sService) Execute(runtime connector.Runtime) error {
"runtime-request-timeout": "5m",
"image-gc-high-threshold": "91",
"image-gc-low-threshold": "90",
"housekeeping_interval": "5s",
}
defaultKubeProxyArgs := map[string]string{
"proxy-mode": "ipvs",
}
kubeApiserverArgs, _ := util.GetArgs(map[string]string{}, g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
defaultKubeApiServerArgs := map[string]string{
"service-node-port-range": "445-32767",
}
kubeApiserverArgs, _ := util.GetArgs(defaultKubeApiServerArgs, g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
kubeControllerManager, _ := util.GetArgs(map[string]string{
"terminated-pod-gc-threshold": "1",
}, g.KubeConf.Cluster.Kubernetes.ControllerManagerArgs)

View File

@@ -162,17 +162,19 @@ var (
}
ApiServerArgs = map[string]string{
"bind-address": "0.0.0.0",
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"audit-log-maxsize": "100",
"bind-address": "0.0.0.0",
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"audit-log-maxsize": "100",
"service-node-port-range": "445-32767",
}
ApiServerSecurityArgs = map[string]string{
"bind-address": "0.0.0.0",
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"audit-log-maxsize": "100",
"authorization-mode": "Node,RBAC",
"bind-address": "0.0.0.0",
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"audit-log-maxsize": "100",
"service-node-port-range": "445-32767",
"authorization-mode": "Node,RBAC",
// --enable-admission-plugins=EventRateLimit must have a configuration file
"enable-admission-plugins": "AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity",
// "audit-log-path": "/var/log/apiserver/audit.log", // need audit policy

View File

@@ -44,14 +44,14 @@ var assets = func() http.FileSystem {
},
"/build/ks-config/templates": &vfsgen۰DirInfo{
name: "templates",
modTime: time.Date(2025, 7, 31, 8, 50, 53, 949729535, time.UTC),
modTime: time.Date(2025, 11, 17, 9, 8, 56, 269518280, time.UTC),
},
"/build/ks-config/templates/kubesphere-config.yaml": &vfsgen۰CompressedFileInfo{
name: "kubesphere-config.yaml",
modTime: time.Date(2025, 7, 31, 8, 50, 53, 949762785, time.UTC),
uncompressedSize: 418,
modTime: time.Date(2025, 11, 17, 9, 8, 56, 269634566, time.UTC),
uncompressedSize: 419,
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\x84\x90\xb1\x6a\xc3\x30\x10\x86\x77\x3d\xc5\xbd\x80\x1d\x87\x94\x82\x6f\xed\xd0\x29\xd0\xa5\xdd\x2f\xf6\xd9\x3e\x22\x9d\x84\x74\x36\x04\xfa\xf0\xa5\xc6\xa9\xd3\xa1\x74\x14\xdf\xaf\xff\xfb\x39\x4a\xf2\xc1\xb9\x48\x54\x84\xe5\xe8\xae\xa2\x3d\xc2\x4b\xd4\x41\xc6\x33\x25\x17\xd8\xa8\x27\x23\x74\x00\x4a\x81\x11\xae\xf3\x85\x4b\x9a\x38\x73\xd5\xad\xb1\x8d\x94\x44\xdd\x6f\x5c\x6e\xc5\x38\xb8\xfb\xf7\x9d\xd4\x37\x0a\x1e\xe1\xd3\x01\x00\x84\xa8\x62\x31\x8b\x8e\xb8\xbe\x01\x58\xfb\x14\x45\x0d\x61\x32\x4b\x78\x38\xa4\x1c\x03\xdb\xc4\x73\xa9\x62\xe2\x4c\xc6\x7d\xfd\xe0\xd9\x1b\x36\x65\x5d\x96\x0e\xdb\xa6\x6d\x7e\x0a\xe9\xe2\xf9\xf5\xed\xfd\xbc\xbb\x60\x20\x5f\x78\x0d\x68\x34\x19\xa4\x23\xfb\x3e\xc2\x5f\x1b\x1e\x43\x55\x20\xa5\x91\x73\x55\x96\xee\xff\x21\xc7\xb6\x69\x4f\x6b\xad\x71\x0e\xa2\xe4\xef\x12\x09\x34\x32\x02\xf9\x24\xca\x78\xaa\x8f\x4f\x1b\x30\x09\x1c\x67\x43\x78\x6e\x1a\xf7\x15\x00\x00\xff\xff\xa8\x81\xab\xba\xa2\x01\x00\x00"),
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\x84\x90\xc1\x6a\x83\x40\x10\x86\xef\xfb\x14\xf3\x02\x1a\x6d\x0a\xc5\xb9\xf6\xd0\x53\xa0\x97\xf6\x3e\xd1\x51\x87\xb8\xb3\xcb\xee\x28\x04\xfa\xf0\xa5\x62\x6a\x7a\x28\x39\x2e\xdf\xbf\xff\xf7\x33\x14\xe5\x93\x53\x96\xa0\x08\x4b\xed\x2e\xa2\x1d\xc2\x6b\xd0\x5e\x86\x13\x45\xe7\xd9\xa8\x23\x23\x74\x00\x4a\x9e\x11\x2e\xf3\x99\x73\x1c\x39\x71\xd1\xae\xb1\x8d\xe4\x48\xed\x5f\x9c\xaf\xd9\xd8\xbb\xdb\xf7\x9d\x94\x57\xf2\x13\xc2\x97\x03\x00\xf0\x41\xc5\x42\x12\x1d\x70\x7d\x03\xb0\x76\x31\x88\x1a\xc2\x68\x16\xf1\x70\x88\x29\x78\xb6\x91\xe7\x5c\x84\xc8\x89\x8c\xbb\xf2\xce\xb3\x37\x6c\xca\x32\x2f\x2d\x36\x55\x53\xfd\x16\xd2\x79\xe2\xb7\xf7\x8f\xd3\xee\x82\x9e\xa6\xcc\x6b\x40\x83\x49\x2f\x2d\xd9\xcf\x11\xfe\xdb\x70\x1f\x2a\x3c\x29\x0d\x9c\x8a\xbc\xb4\x8f\x87\xd4\x4d\xd5\x1c\xd7\x5a\xe3\xe4\x45\x69\xba\x49\xc4\xd3\xc0\x08\x34\x45\x51\xc6\x63\x59\x3f\x6f\xc0\xc4\x73\x98\x0d\xe1\xe5\xa9\xaa\xdc\x77\x00\x00\x00\xff\xff\x8a\xb9\xa0\x58\xa3\x01\x00\x00"),
},
"/build/ks-config/values.yaml": &vfsgen۰FileInfo{
name: "values.yaml",
@@ -78,7 +78,7 @@ var assets = func() http.FileSystem {
},
"/build/ks-core/templates": &vfsgen۰DirInfo{
name: "templates",
modTime: time.Date(2025, 9, 4, 11, 58, 15, 814111089, time.UTC),
modTime: time.Date(2025, 11, 20, 6, 44, 5, 165722497, time.UTC),
},
"/build/ks-core/templates/NOTES.txt": &vfsgen۰FileInfo{
name: "NOTES.txt",
@@ -94,10 +94,10 @@ var assets = func() http.FileSystem {
},
"/build/ks-core/templates/ks-apiserver.yml": &vfsgen۰CompressedFileInfo{
name: "ks-apiserver.yml",
modTime: time.Date(2025, 9, 4, 11, 58, 15, 814831582, time.UTC),
uncompressedSize: 3060,
modTime: time.Date(2025, 11, 20, 6, 44, 5, 165886081, time.UTC),
uncompressedSize: 3111,
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xbc\x56\x4d\x6f\xe3\x36\x13\xbe\xeb\x57\x0c\xf2\x9e\x25\x3b\xc1\xbe\x45\x96\x40\x0f\x41\xb2\x68\x83\x36\xad\x11\xa7\x01\x7a\xa4\xc9\xb1\x44\x98\x22\x59\x72\xe4\x46\x70\xf7\xbf\x17\xb4\x6c\x99\xb2\x94\xd4\xc5\x02\xd5\x49\x98\xe1\x7c\x3d\x33\xf3\x90\xdc\xa9\x57\xf4\x41\x59\xc3\x80\x3b\x17\x66\xdb\xeb\x6c\xa3\x8c\x64\xf0\x80\x4e\xdb\xb6\x46\x43\x59\x8d\xc4\x25\x27\xce\x32\x00\xcd\x57\xa8\x43\xfc\x83\x68\xc0\x60\x13\x72\xee\x54\x40\xbf\x45\xbf\x97\x92\x42\xcf\x60\xc5\xc5\x06\x8d\xdc\x4b\xb6\xc7\x08\xbb\x1d\x14\xf7\x15\xf7\x54\xdc\x39\x77\x88\x0b\x5f\xbf\x66\x00\x86\xd7\x78\xe6\x2b\x38\x14\x31\x4e\x20\xcf\x09\xcb\xb6\x8b\xe9\xad\xd6\xca\x94\xbf\x39\xc9\x09\x3b\x11\x40\xcd\xdf\x96\x8d\x2f\x91\xc1\xbc\x4b\xa1\x75\xc8\xe0\x39\x3d\x9a\x01\x78\x74\x5a\x09\x1e\xba\x3c\x9e\x51\x23\x0f\x58\x3c\x77\xd2\x7b\xdb\x18\xea\x72\x09\xa8\x51\x90\xf5\x9d\xf7\x9a\x93\xa8\x7e\x4e\xaa\x7e\xaf\xee\xa9\xca\x01\xfe\x77\x49\xf5\x84\xb5\xd3\x7d\x3d\x29\xdc\xf1\xd3\x83\xe0\xef\x87\x9f\x4e\xe0\xc2\x14\x00\x8e\x78\xc7\x6f\xb7\xcb\xe1\x4f\x45\x15\x14\xaf\x5c\x37\x18\x0a\x55\xf3\x12\x17\x8d\xd6\x4b\x14\x1e\x29\x1c\x8d\x00\xce\x35\xa7\x3c\xa3\x13\xb2\xbf\xf3\x5a\x43\x01\x7f\x81\x51\x46\xa2\x21\xb8\x3d\xd9\xc6\x13\x68\xe4\x49\x20\xac\x21\xae\x0c\xfa\xde\x4d\x0e\xc2\xd6\x35\x37\xf2\xe4\x37\x9f\x2e\x3e\x87\x3c\xd7\xb6\x24\x1b\x48\xa2\xf7\xdf\x93\x6f\xb0\x57\xee\xb3\x64\xb0\x42\xa1\xf9\x6a\x96\xda\xb3\x79\x31\x2f\x6e\x3e\x0d\x4f\xc6\x7a\x16\x56\x2b\xd1\x76\x98\xa5\x28\x14\xae\x57\x9e\x12\x9f\x9c\xe0\xa3\xca\x59\x9f\xe2\x92\x9f\xca\x5c\x58\x4f\x0c\x3e\xcf\x3f\xcf\x7b\x2d\x80\xf3\x96\xac\xb0\x9a\xc1\xcb\xfd\xa2\x97\x7b\x0c\xb6\xf1\x02\x13\x47\x43\x88\x0f\x39\xf6\xe1\x8b\xde\x22\x01\xff\xfa\x26\xcd\x79\x6b\x75\x53\xe3\x53\x1c\xfd\x41\x7e\x75\x94\x2c\x38\x55\x0c\x66\x48\x62\xb6\x69\x56\x18\x5c\x85\x1e\x67\x49\xf0\x43\xc5\xbd\x2e\x17\xd6\xac\x55\xf9\x81\x1f\x6d\x05\xd7\xa4\x6a\x1c\x79\xa9\x6c\xa0\xfc\x4c\xe3\x91\xcb\x5f\x8d\x6e\x19\x0c\x5a\x19\
x6b\x56\xeb\x89\x7a\xf1\x8d\x3c\x7f\x4d\x4a\x4a\x4b\xfd\x07\xac\xc6\xb6\x93\x03\x3b\x31\xb2\x00\x5a\x6d\xd1\x60\x08\x0b\x6f\x57\x98\x76\x67\xcd\x95\x6e\x3c\xbe\x54\x1e\x43\x65\xb5\x64\x70\x9b\x68\x2b\x22\xf7\x03\x52\x6a\x00\xe0\x3a\xb0\x36\x31\xb3\xd9\x61\x6d\x87\x07\xa6\x26\x06\x20\x88\x0a\x23\x8e\x3f\xbe\xbc\x2c\x12\x85\x32\x8a\x14\xd7\x0f\xa8\x79\xbb\x44\x61\x8d\x0c\x0c\xae\xff\x9f\x9c\x88\x98\xdb\x86\xc6\xca\x08\x8c\x12\x78\x27\x44\xc4\xe3\x97\x7d\x97\x76\x3b\x50\x46\xe8\x46\x22\x5c\x6d\x42\x2e\xac\xc7\x62\x7c\xee\x0a\x8a\xe1\x8a\x0f\x98\x84\xac\x46\xcf\x49\x59\x93\xf4\x27\x11\x7e\x13\x7f\x8c\xa2\x19\x2b\x71\x79\xe0\xf3\xd3\xb1\x54\xfa\x4d\xf1\xf8\x7a\x1d\x21\x6e\x59\xb2\xeb\xf2\xce\x90\xba\x1b\x29\xe2\x3c\xff\xd1\x28\x8f\xf2\xa1\xf1\xca\x94\x4b\x51\xa1\x6c\xe2\x05\xf5\x58\x1a\xdb\x8b\xbf\xbc\xa1\x68\x22\x10\xa9\x65\xcc\xcb\x59\x6d\xcb\xf6\x27\x6c\xbb\x95\xf3\x06\x29\x32\x92\x9d\xc5\xd5\x89\x4b\x34\x98\x87\xfd\x9d\x31\x2e\xf1\x78\x5d\x92\xa8\xbe\xbc\x39\x8f\x21\x0c\x21\xef\x09\x36\x86\xe1\xce\x9d\x29\x00\xac\x8b\x7d\xb2\x9e\xc1\xa3\x19\x29\xb7\x7b\xcc\xd9\x48\xfe\x0e\x61\x1f\xb7\x3f\x38\x2e\xce\xcd\xf2\x94\x57\x42\x1b\x08\xeb\x6c\xb2\xb9\x47\xa0\x07\x54\x9c\xc8\x59\xb6\xdb\xa5\x5d\x3d\xf2\xe0\x3c\x1a\x8c\xfa\xd9\x11\xe2\xe0\xf2\x89\x94\xf6\xc4\x5d\x9a\x9e\xc4\x35\x6f\x34\x3d\x59\x89\x0c\x3e\xdd\xcc\xff\x05\x27\x7e\xac\xcf\xf7\x3c\xb8\x27\xcc\x6c\xc4\x09\xef\x11\x68\xf7\xd8\xb9\xba\xca\x3e\x66\xd4\x4b\x68\x33\x0c\x99\xee\x22\xba\x4c\x99\xf2\xbb\xc9\x55\xc9\xf3\x3c\xcb\xd2\x87\x66\xff\xc6\x5c\x76\xdc\x31\x78\x60\x72\x63\x2c\xa5\x54\x30\x9c\x76\xe1\x91\x13\xca\x7c\xd5\xa6\x30\x46\xcd\xd9\x8c\xfd\xf7\x0f\xd5\xfe\x9a\xcf\x0f\x2c\x7d\xdb\x4d\xc6\xf8\x3e\x27\xee\x4b\xa4\xc1\xe5\x3f\x7c\x74\x5e\x9a\xf0\x65\xaf\xcb\xfd\x80\xdc\xeb\x26\x10\xfa\xc7\x45\xf6\x77\x00\x00\x00\xff\xff\x30\xa7\xa6\x88\xf4\x0b\x00\x00"),
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xbc\x56\xd1\x6e\xeb\x36\x0f\xbe\xf7\x53\x10\xf9\xaf\xed\xa4\xc5\xf9\x87\x1e\x03\xbb\x28\xd2\x83\xad\xd8\xba\x05\x4d\x57\x60\x97\x8c\xcc\xc4\x42\x64\x49\x93\xe8\xac\x46\x76\xde\x7d\x50\x9c\x38\x72\xec\x76\x19\x0e\x30\x5f\x19\xa4\x28\x92\x1f\xc9\x8f\x42\x2b\x5f\xc9\x79\x69\x74\x0e\x68\xad\x9f\xee\x6e\x92\xad\xd4\x45\x0e\x0f\x64\x95\x69\x2a\xd2\x9c\x54\xc4\x58\x20\x63\x9e\x00\x28\x5c\x91\xf2\xe1\x0f\x82\x41\x0e\x5b\x9f\xa2\x95\x9e\xdc\x8e\xdc\x41\xca\x92\x5c\x0e\x2b\x14\x5b\xd2\xc5\x41\xb2\x3b\x79\xd8\xef\x21\x9b\x97\xe8\x38\xbb\xb7\xf6\xe8\x17\xbe\x7e\x4d\x00\x34\x56\x74\x71\x97\xb7\x24\x82\x1f\xcf\x0e\x99\x36\x4d\xeb\xd3\x19\xa5\xa4\xde\xfc\x66\x0b\x64\x6a\x45\x00\x15\xbe\x2d\x6b\xb7\xa1\x1c\x66\x6d\x08\x8d\xa5\x1c\x9e\xe3\xa3\x09\x80\x23\xab\xa4\x40\xdf\xc6\xf1\x4c\x8a\xd0\x53\xf6\xdc\x4a\xe7\xa6\xd6\xdc\xc6\xe2\x49\x91\x60\xe3\xda\xdb\x2b\x64\x51\xfe\x1c\x65\xfd\x5e\xde\x63\x99\x03\xfc\xef\x9a\xec\x99\x2a\xab\xba\x7c\x62\xb8\xc3\xa7\x7a\xce\xdf\x77\x3f\x1e\xc0\x95\x21\x00\x9c\xf0\x0e\xdf\x7e\x9f\xc2\x9f\x92\x4b\xc8\x5e\x51\xd5\xe4\x33\x59\xe1\x86\x16\xb5\x52\x4b\x12\x8e\xd8\x9f\x8c\x00\x2e\x35\xe7\x38\xc3\x25\x6c\x7e\xc7\x4a\x41\x06\x7f\x81\x96\xba\x20\xcd\x70\x77\xb6\x0d\x27\x48\x17\x67\x81\x75\xd2\x38\xc9\xcd\x5c\xa1\xf7\xbf\x1c\x7a\x62\xe2\x1b\xcf\x54\xa5\x42\xd5\x9e\xc9\xa5\xc2\x49\x96\x02\xd5\xe4\x68\x22\x8c\x66\x94\x9a\x5c\xe7\x39\x05\x61\xaa\x0a\x75\x71\x0e\x25\x1d\xc7\x2b\x85\x34\x55\x66\xc3\xc6\x73\x41\xce\x7d\xcf\xae\xa6\x4e\x79\x48\x2c\x87\x15\x09\x85\xab\x69\x6c\x9f\xcf\xb2\x59\x76\xfb\xa9\x7f\x32\x40\xb0\x30\x4a\x8a\xa6\x85\x39\x06\x2e\xb3\x9d\xf2\x9c\xeb\x68\xd3\x9f\x54\xd6\xb8\x18\xca\xf4\x9c\xe6\xc2\x38\xce\xe1\xf3\xec\xf3\xac\xd3\x06\xdc\x0c\x1b\x61\x54\x0e\x2f\xf3\x45\x27\x77\xe4\x4d\xed\x04\x45\x17\xf5\xab\x72\x8c\xb1\x73\x9f\x75\x16\x51\xbd\x6e\x6e\xe3\x98\x77\x46\xd5\x15\x3d\x85\x69\xe9\xc5\x57\x05\xc9\x02\xb9\xcc\x61\x4a\x2c\xa6\xdb\x7a\x45\xde\x96\xe4\x68\x1a\x39\x3f\x66\xdc\
xe9\x52\x61\xf4\x5a\x6e\x3e\xb8\x47\x19\x81\x8a\x65\x45\x83\x5b\x4a\xe3\x39\xbd\xd0\x38\xc2\xe2\x57\xad\x9a\x1c\x7a\xa5\x0c\x39\xcb\xf5\x48\xbe\xf4\xc6\x0e\x5f\xa3\x94\xe2\x54\xff\x01\xab\xa1\xed\x68\x8f\x8f\x74\x39\x80\x92\x3b\xd2\xe4\xfd\xc2\x99\x15\xc5\xd5\x59\xa3\x54\xb5\xa3\x97\xd2\x91\x2f\x8d\x2a\x72\xb8\x8b\xb4\x25\xb3\xfd\x81\x38\x36\x00\xb0\x2d\x58\xdb\x10\xd9\xf4\x38\xe9\xfd\x03\x63\x1d\x03\xe0\x45\x49\x01\xc7\x1f\x5f\x5e\x16\x91\x42\x6a\xc9\x12\xd5\x03\x29\x6c\x96\x24\x8c\x2e\x7c\x0e\x37\xff\x8f\x4e\x04\xcc\x4d\xcd\x43\x65\x00\x46\x0a\xba\x17\x22\xe0\xd1\x8e\xef\x7e\x0f\x52\x0b\x55\x17\x04\x93\xad\x4f\x85\x71\x94\x0d\xcf\x4d\x20\xeb\xb3\x42\x8f\x7c\xd8\x28\x72\xc8\xd2\xe8\xa8\x3e\x91\xf0\x9b\x28\x67\xe0\x4d\x9b\x82\x96\xc7\x15\x70\x3e\x16\x4b\xbf\xc9\x1f\xae\xd7\x01\xe2\x26\x8f\x66\xbd\xb8\xd7\x2c\xef\x07\x8a\xd0\xcf\x7f\xd4\xd2\x51\xf1\x50\x3b\xa9\x37\x4b\x51\x52\x51\x87\x9d\xf6\xb8\xd1\xa6\x13\x7f\x79\x23\x51\x07\x20\x62\xcb\x10\x97\x35\xca\x6c\x9a\x9f\xa8\x69\x47\xce\x69\xe2\xc0\x48\x66\x1a\x46\x27\x0c\x51\xaf\x1f\x0e\x6b\x66\x98\xe2\x69\xc3\xb2\x28\xbf\xbc\x59\x47\xde\xf7\x21\xef\x08\x36\xb8\x41\x6b\x2f\x14\x00\xc6\x86\x3a\x19\x97\xc3\xa3\x1e\x28\x77\x07\xcc\xf3\x81\xfc\x1d\xc2\x3e\x4d\xbf\xb7\x28\x2e\xcd\xd2\x98\x57\xda\xa5\x91\x8c\x16\xf7\x04\x74\x8f\x8a\x23\x79\x9e\xec\xf7\x71\x55\x4f\x3c\x38\x0b\x06\x83\x7a\xb6\x84\xd8\x5b\x3e\x81\xd2\x9e\xd0\xc6\xe1\x15\xb4\xc6\x5a\xf1\x93\x29\x28\x87\x4f\xb7\xb3\x7f\xc1\x89\x1f\xeb\xd3\x03\x0f\x1e\x08\x33\x19\x70\xc2\x7b\x04\xda\xbe\x8f\x26\x93\xe4\x63\x46\xbd\x86\x36\x7d\x9f\xe9\xae\xa2\xcb\x98\x29\xbf\x1b\x1d\x95\x34\x4d\x93\x24\x7e\x9b\x76\xcf\xd2\x65\xcb\x1d\xbd\x37\x29\x6a\x6d\x38\xa6\x82\x7e\xb7\x0b\x47\xc8\x54\xa4\xab\x26\x86\x31\x68\x2e\x7a\xec\xbf\x7f\xdb\x76\x6b\x3e\x3d\xb2\xf4\x5d\xdb\x19\xc3\x7d\xce\xe8\x36\xc4\xbd\xe5\xdf\x7f\xa7\x5e\x1b\xf0\x75\x0f\xd2\x43\x83\xcc\xdb\x27\xd7\xe3\x22\xf9\x3b\x00\x00\xff\xff\x06\x39\x6d\x22\x27\x0c\x00\x00"),
},
"/build/ks-core/values.yaml": &vfsgen۰CompressedFileInfo{
name: "values.yaml",
@@ -239,7 +239,7 @@ var assets = func() http.FileSystem {
},
"/build/prometheus/kube-state-metrics": &vfsgen۰DirInfo{
name: "kube-state-metrics",
modTime: time.Date(2025, 9, 30, 6, 56, 49, 643530930, time.UTC),
modTime: time.Date(2025, 11, 20, 6, 44, 5, 166070457, time.UTC),
},
"/build/prometheus/kube-state-metrics/kube-state-metrics-clusterRole.yaml": &vfsgen۰CompressedFileInfo{
name: "kube-state-metrics-clusterRole.yaml",
@@ -257,10 +257,10 @@ var assets = func() http.FileSystem {
},
"/build/prometheus/kube-state-metrics/kube-state-metrics-deployment.yaml": &vfsgen۰CompressedFileInfo{
name: "kube-state-metrics-deployment.yaml",
modTime: time.Date(2025, 9, 30, 6, 56, 49, 643763923, time.UTC),
uncompressedSize: 4124,
modTime: time.Date(2025, 11, 20, 6, 44, 5, 166198083, time.UTC),
uncompressedSize: 4175,
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x57\x5b\x6f\xdb\x46\x13\x7d\xd7\xaf\x58\xf8\xc3\x07\xc8\x75\x48\x5d\x12\xa7\x0e\x01\xa1\x70\x15\x23\x0e\x10\xbb\x82\x15\xb7\x0f\x45\x41\xac\x96\x23\x69\xeb\xbd\x75\x77\x28\x8b\x80\x7e\x7c\xb1\xbc\x8a\xb6\x24\x47\x46\x80\x3e\xb4\x7a\x12\x67\x66\xcf\xce\x9e\x99\x33\x4b\x52\xc3\x7f\x05\xeb\xb8\x56\x11\xa1\xc6\xb8\xde\x6a\xd0\x79\xe0\x2a\x89\xc8\x47\x30\x42\x67\x12\x14\x76\x24\x20\x4d\x28\xd2\xa8\x43\x88\xa0\x33\x10\xce\xff\x23\x7e\x41\xf8\x90\xce\xc0\x2a\x40\x70\x21\xd7\x3d\xa6\xa5\xd1\x0a\x14\x46\x04\xd6\x46\x5b\x04\xbb\x27\x52\x51\x09\x11\xf1\xb6\xc0\x21\x45\x08\x24\xa0\xe5\xcc\xed\x09\x37\xd4\x62\xa0\xe7\xe5\x0a\x63\xb5\x04\x5c\x42\xba\x2f\x7c\x55\x9d\x69\x18\xbe\x0d\xfb\x01\xac\x31\x1c\x90\xff\x11\xd4\x89\xee\x10\x72\x60\x6f\xef\x72\x86\xb2\xd2\xef\xcc\x12\x2c\x04\x52\x2b\x8e\xda\x72\xb5\x08\x5c\xe6\x10\x64\xc7\x19\x60\x9e\x04\x0b\x46\x70\x46\x5d\x44\x06\x1d\x42\x1c\x08\x60\xa8\x6d\x41\x8f\xa4\xc8\x96\x5f\xb6\xf8\x3a\x86\xb1\xa3\x39\x3b\x92\x35\x87\x96\x22\x2c\xb2\xa8\xa4\x85\xfc\x94\xa3\x58\x2d\x04\x57\x8b\x7b\x93\x50\x84\x2a\x6b\x49\xd7\xd3\xd4\x2e\x20\x22\xc3\xf3\xff\x37\xb6\x7b\x45\x57\x94\x0b\x3a\x13\x5b\x1e\xcc\x0c\x44\xe4\x6e\x1b\xa6\x43\x08\x82\x34\xa2\x46\xdc\xee\xa7\x3c\x71\xa5\x34\x52\xe4\x5a\xd5\x44\x91\x3c\x65\x86\xe2\xc9\x89\x12\x98\xd3\x54\x60\xc0\xb4\x42\xca\x15\xd8\x03\x7c\x88\x16\xf5\xc7\x91\xff\x0a\xfa\x8f\x6e\xdb\xd7\xb4\x2e\x21\x55\xe7\xf9\xdf\x4a\x8b\x54\x42\x7d\xc4\xa0\xec\xec\xa5\x76\x18\x20\x97\x50\x6f\xe3\x2d\x13\x8a\xcb\x86\x0c\x42\x8c\x7f\x26\x3d\x40\xd6\x13\x9a\x51\xd1\x5a\x50\x15\xf2\xe4\xa4\x34\xd5\x84\x6f\xed\x46\xed\x62\x8b\xde\x80\x04\x81\xdf\x67\x34\x18\xfe\x18\xf6\xc3\x7e\x38\x68\xb9\x3c\xbd\xa3\x8b\xfe\x45\xdb\x8a\x20\xc0\xd3\x98\x1d\x5a\xda\x04\x55\x20\xc3\x96\xbf\xa8\x43\x90\x80\xca\x04\x77\x38\xf2\x84\xc6\x46\x27\x71\x9d\x74\xec\x0b\x96\xba\x38\xfc\x01\xc1\x4a\xae\x28\x42\x12\x5b\xa0\x4e\xab\x37\x79\x74\x78\x16\x97\xc4\xd7\xcf\xcc\x82\x0f\x2b\x9e\x93\x7a\
x1a\xc6\x5d\x5f\x81\xd8\xd0\xd4\x41\xb2\xc9\xff\x57\x5a\x8a\x4b\xf5\xa4\x79\xdb\xc7\xe1\xd9\x69\xb1\x18\x54\x62\x34\xf7\x4b\xb9\x9a\xeb\x0d\x4d\x12\x0b\xce\x35\xfe\x3f\xf5\xac\x74\xe9\x47\x05\xb6\x00\xed\x1a\x6a\xa9\x10\x20\xb8\x93\x1b\xca\x90\xaf\x7c\x16\x34\x11\x5c\x41\xec\x80\x69\x95\xb8\xd3\x4d\x79\xae\x6e\x11\xb0\x09\xcf\x62\x5f\xc6\xd3\x12\x98\x59\xad\x1a\xf0\x8a\x83\xb3\x62\x83\x7a\xfb\x7a\xe6\xc5\xdd\x32\xc4\x2c\xa9\x83\xd2\x6b\x3c\x2d\x0e\x41\x61\xd1\x6d\x25\x18\xa3\x86\x32\x8e\x59\x03\xf3\x34\x90\x09\xca\x65\xdc\xb5\xe0\x74\x6a\x99\xa7\x63\x43\x19\x6b\x1d\xdc\x01\xb3\x80\x71\xd7\xf7\x5a\x6d\xb2\x2b\x9e\xa7\x52\xe4\xd8\x64\x5d\x06\x70\xb5\xc8\xd9\x2b\xd2\xf0\x4d\xbc\x41\xe1\x4a\x67\x35\x8e\x3d\xe8\xb3\xe3\x16\xec\x56\xd9\xea\x24\xe1\xce\xa6\xc6\x8f\x9d\x59\x9a\x2c\x00\xeb\x26\x39\x6b\x81\xf9\x00\xdf\x48\xbe\xb8\x60\x6b\xaf\xd2\x89\x4f\x66\xae\x8b\xc7\xee\xd2\xd0\x4d\xb3\xfd\x66\xe7\xe2\x53\xdf\x57\x0b\x50\x60\x73\xc7\xae\x26\x2e\x86\x56\x40\x85\xd0\x8f\x79\x33\xd7\xd5\x71\xa3\xdf\x9b\x2b\xc9\x4f\x8a\x47\x6d\x1f\x72\xcf\x9b\x59\x86\x80\x96\x26\xb9\x59\xb9\x20\x3f\xe8\x1f\x35\x3c\x97\xd4\x0f\xef\x99\x2f\xc9\xac\xf7\x7c\x80\x45\xab\xad\x59\xd3\x5e\x35\x49\x85\x98\x68\xc1\x59\x16\x91\xcf\xf3\x5b\x8d\x13\x0b\xce\xbf\x13\x54\x51\x2f\x0e\xc5\xaa\xfe\x6e\x7b\xf6\x08\x2e\x39\xb6\x2c\x84\x30\x93\x46\xe4\x64\x70\xd2\x32\x4a\x90\xda\x66\x11\xb9\xf8\xc4\xb7\xec\x16\xfe\x4a\xc1\xed\x04\x18\xf4\xfb\x72\x27\xc2\xe0\xbc\x7f\xd3\x60\x38\x60\xa9\xe5\x98\x8d\xb5\x42\x58\xe3\x36\x90\x4d\xd5\xa5\xbb\x77\xfe\x6a\x79\x7f\x7e\xfe\xf6\x5d\xed\x2a\xda\xfa\x46\xa7\x0a\x5b\x63\x4f\x7a\xcb\xe4\xf0\x34\xdd\x37\x98\xfd\x59\x68\xf2\x8b\x12\x59\x44\xd0\xa6\x70\x60\xb6\x0a\xbd\x40\xed\x30\x01\x6b\x5b\xf6\xfc\x28\x10\x88\x5c\x7b\x41\x39\x5c\x46\xd1\xc5\xbb\x77\x6f\xdb\x53\x54\xb8\x80\x71\xdf\x3d\x81\x4b\x39\x82\x1b\x7d\xfd\x32\x8d\xaf\xc6\x1f\xaf\xaf\xe2\xbb\xe9\x65\xfc\xdb\xe7\xaf\xd7\xf1\xe5\xd5\x34\x1e\x0c\x2f\xe2\x4f\xe3\x9b\x78\x7a\x7d\x39\x3c\x7f\xff\xa6\x89\xba\x1a\x7f\x7c\x21\xee\x19\
xce\xf8\xe7\xf1\x37\xe1\xec\x8c\x3b\x80\xd6\x3a\x59\x6a\x1c\x5a\xa0\x72\xb4\x44\x34\x51\xaf\x57\xdf\x22\x91\xbf\x6f\x7a\x87\x74\x60\x67\x94\xf9\x7b\x79\x9d\x45\xfd\x70\xf0\x21\xec\xbf\xbe\xfd\x1b\xa8\x40\x52\xde\x88\xdb\xdf\x59\xad\x52\xd6\xb7\xd2\x44\x5b\x8c\x48\xab\x50\x75\xa7\x20\x1a\xd7\xc6\xf9\x3e\x42\x1a\xf4\xb7\x65\xf0\x82\x94\x76\x2b\x69\x78\x8c\x90\x3e\x59\x9d\x9a\x42\x49\xc3\xa7\xbe\x5b\xad\xee\xb4\xc6\x56\xe7\xef\x50\xe0\xf0\xfb\x89\xe2\xc3\x7f\xa2\xc8\x45\x31\xfc\x07\x44\xe1\x40\xcc\xbf\x59\x14\x1f\xf6\x8b\xa2\x85\xf3\xaf\x17\x85\x7f\x0d\x99\xb6\x3e\x3b\xab\x8f\xa7\xe6\x93\x42\xbb\x88\x08\xae\xd2\x75\xa7\x4a\x2f\x7f\xc7\xba\x64\xcc\xdf\x5c\xb7\xfb\x6e\xf0\xbf\x03\x00\x00\xff\xff\x22\x0f\x2a\x59\x1c\x10\x00\x00"),
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x57\x6f\x6f\xdb\xb6\x13\x7e\xef\x4f\x41\xe4\x87\x1f\xe0\x2c\x95\xfc\xa7\x4d\x97\x0a\x30\x86\x2c\x0d\xda\x02\x6d\x16\x34\xed\xf6\x62\x18\x04\x9a\xba\xd8\x5c\x29\x1e\xc7\x3b\xb9\x11\xe0\x0f\x3f\x50\x7f\xad\xd6\x71\x9b\xa2\xc0\x5e\x6c\x7e\x65\xdd\x1d\x1f\x1e\x9f\xbb\xe7\x28\x49\xa7\x7f\x05\x4f\x1a\x6d\x22\xa4\x73\x34\xd9\xcc\x46\x1f\xb4\xcd\x12\xf1\x1c\x9c\xc1\x32\x07\xcb\xa3\x1c\x58\x66\x92\x65\x32\x12\xc2\xc8\x25\x18\x0a\xff\x44\x58\x10\x7f\x28\x96\xe0\x2d\x30\x50\xac\x71\xa2\x30\x77\x68\xc1\x72\x22\xe0\xce\xa1\x67\xf0\xf7\x44\x5a\x99\x43\x22\x82\x2d\x22\x96\x0c\x51\x0e\xec\xb5\xa2\x7b\xc2\x9d\xf4\x1c\xe1\x6d\xb3\xc2\x79\xcc\x81\xd7\x50\xdc\x17\xbe\x69\xcf\x34\x8f\x1f\xc7\xd3\x08\xee\x38\x9e\x89\xff\x09\xc6\x0c\x47\x42\x1c\xd8\x3b\xb8\xc8\x49\xd5\xf8\xc9\xad\xc1\x43\x94\xa3\xd5\x8c\x5e\xdb\x55\x44\x25\x31\xe4\x23\x72\xa0\x02\x09\x1e\x9c\xd1\x4a\x52\x22\x66\x23\x21\x08\x0c\x28\x46\x5f\xd3\x93\x4b\x56\xeb\xd7\x3b\x7c\x3d\x84\xb1\x07\x73\xf6\x40\xd6\x88\xbd\x64\x58\x95\x49\x43\x8b\xf8\xa9\x42\xf1\x68\x8c\xb6\xab\xf7\x2e\x93\x0c\x6d\xd6\xb9\xbc\xbb\x29\xfc\x0a\x12\x31\x3f\xfd\x7f\x6f\x7b\x6f\xe5\x46\x6a\x23\x97\x66\xc7\xc3\xa5\x83\x44\xbc\xdd\x85\x19\x09\xc1\x90\x3b\xd3\x21\xee\xf6\x53\x95\xb8\xb5\xc8\x92\x35\xda\x8e\x28\x51\xa5\xac\xd8\x7c\x72\xa2\x0c\x6e\x65\x61\x38\x52\x68\x59\x6a\x0b\xfe\x00\x1f\x66\x40\xfd\xc3\xc8\xff\x06\xfa\x1f\xdc\xb6\xdf\xd2\xba\x42\xb4\x9d\x17\x7e\x1b\x34\x45\x0e\xdd\x11\xa3\xa6\xb3\xd7\x48\x1c\xb1\xce\xa1\xdb\x26\x58\xae\x25\xaf\x7b\x32\x84\x70\xe1\x59\x4c\x80\xd5\xc4\xa0\x92\x66\xb0\xa0\x2d\xe4\xd1\x51\x63\x72\x5e\xa3\xd7\x5c\x5e\x18\x49\x74\x55\x6d\x73\x54\x6b\x21\x52\xa6\x20\x06\x1f\x29\xaf\x59\x2b\x69\xda\x25\x5d\x8d\x76\x12\x94\x7e\xb5\x53\x91\x48\x44\x51\x48\x6d\x31\x9b\xff\x18\x4f\xe3\x69\x3c\x1b\xb8\x42\x45\x16\x67\xd3\xb3\xa1\x95\xc1\x40\x60\xbe\x3c\xb4\xb4\x0f\x6a\x41\xe6\x03\x7f\x5d\xba\x28\x03\x5b\x1a\x4d\xbc\x08\x35\x48\x1d\x66\x69\x97\x74\x1a\x6a\x5c\x50\x1a\
xff\xc0\xe0\x73\x6d\x25\x43\x96\x7a\x90\x84\xf6\x51\x15\x1d\x9f\xa4\x4d\xad\xba\x67\xe5\x21\x84\xd5\xcf\x59\x37\x40\xd3\x71\x28\x5a\xea\x64\x41\x90\x6d\xab\xff\xad\xfc\xd2\x46\x70\x45\xa5\x94\x34\x3e\x39\xae\x17\x83\xcd\x1c\xea\xb0\x54\xdb\x5b\xdc\xca\x2c\xf3\x40\xd4\xfb\xff\xc4\x65\xe3\xc2\x8f\x16\x7c\x0d\x3a\x76\xd2\x4b\x63\xc0\x68\xca\xb7\x52\xb1\xde\x84\x2c\x64\x66\xb4\x85\x94\x40\xa1\xcd\xe8\x78\xdb\x9c\x6b\x5c\x07\x6c\xe3\x93\x34\x54\xfe\xb8\x01\x56\x1e\x6d\x0f\xde\x72\x70\x52\x6f\xd0\x6d\xdf\x8d\xc9\x74\xdc\x84\xb8\xb5\x24\x68\xbc\x2e\xd0\x42\x0c\x96\xeb\x06\x6d\xc0\x94\x74\x52\x69\x2e\x7b\x98\x4f\x03\x95\x91\x3a\x4f\xc7\x1e\x08\x0b\xaf\x02\x1d\x5b\xa9\xd4\xe0\xe0\x04\xca\x03\xa7\xe3\xd0\x9e\x9d\xc9\x6f\x74\x95\x4a\x9d\x63\x9f\x75\x13\xa0\xed\xaa\x62\xaf\x4e\x23\xf4\xfd\x96\x0d\x35\xce\x76\x82\x07\xd0\xcf\x8e\x5b\xb3\xdb\x66\x8b\x59\xa6\xc9\x17\x2e\x4c\xaa\x65\x91\xad\x80\xbb\x26\x39\x19\x80\x85\x80\xd0\x48\xa1\xb8\xe0\x3b\xaf\xc5\x2c\x24\x73\x8b\xf5\xe3\x78\xed\xe4\xb6\xdf\x7e\xbb\x77\xf1\x71\xe8\xab\x15\x58\xf0\x95\x63\x5f\x13\xd7\x73\x2e\x92\xc6\xe0\xc7\xaa\x99\xbb\xea\xd0\xe2\xf7\xfe\x16\x0b\xc3\xe5\x23\xfa\x0f\x95\xe7\xd1\xb2\x64\x60\x2f\xb3\xca\x6c\x29\xaa\x0e\xfa\x47\x07\xaf\x73\x19\xe6\xfd\x32\x94\x64\x39\xf9\x7c\xe6\x25\x9b\x9d\xf1\x34\x5c\x75\x5d\x18\x73\x8d\x46\xab\x32\x11\xaf\x6e\xaf\x90\xaf\x3d\x50\x78\x8d\x68\xa3\xbe\x38\x47\xdb\xfa\xd3\xee\xb8\x32\x3a\xd7\x3c\xb0\x08\xa1\x5c\x91\x88\xa3\xd9\xd1\xc0\x98\x43\x8e\xbe\x4c\xc4\xd9\x0b\xbd\x63\xf7\xf0\x57\x01\xb4\x17\x60\x36\x9d\xe6\x7b\x11\x66\xa7\xd3\x37\x3d\x06\x81\x2a\xaa\x09\x88\x96\xe1\x8e\x77\x81\x7c\x61\xcf\xe9\x3d\x85\xdb\xe8\xe9\xe9\xe9\xe3\x27\x9d\xab\x6e\xeb\x37\x58\x58\x1e\x8c\xbd\x3c\x58\xae\x0f\x0f\xe0\xfb\x66\x79\x38\x8b\xcc\x7e\xb1\xa6\x4c\x04\xfb\x02\x0e\xcc\x56\x83\x2b\x46\xe2\x0c\xbc\x1f\xd8\xab\xa3\x40\x64\x2a\xed\x45\xcd\x70\x59\x24\x67\x4f\x9e\x3c\x1e\x4e\x51\x43\x91\xd2\xa1\x7b\x22\x2a\x34\x03\x2d\xde\xbd\xbe\x49\x2f\x2f\x9e\xbf\xbc\x4c\xdf\xde\x9c\xa7\xbf\xbd\x7a\xf7\x32\x3d\
xbf\xbc\x49\x67\xf3\xb3\xf4\xc5\xc5\x9b\xf4\xe6\xe5\xf9\xfc\xf4\xe9\xa3\x3e\xea\xf2\xe2\xf9\x17\xe2\x3e\xc3\xb9\xf8\xf9\xe2\xab\x70\xf6\xc6\x1d\x40\x1b\x9c\xac\x70\xc4\x1e\x64\xbe\x58\x33\xbb\x64\x32\xe9\x6e\x91\x24\xdc\x37\x93\x43\x3a\xf0\x4b\xa9\xc2\x55\x7e\x57\x26\xd3\x78\xf6\x2c\x9e\x7e\x7b\xfb\xf7\x50\x51\x2e\x75\x2f\xee\x70\x67\x0d\x4a\xd9\xdd\x4a\xd7\xe8\x39\x11\x83\x42\x75\x9d\xc2\xec\x68\x88\xf3\x7d\x84\x34\x9b\xee\xca\xe0\x0b\x52\xda\xaf\xa4\xf9\x43\x84\xf4\xc2\x63\xe1\x6a\x25\xcd\x3f\xf5\x5d\xa1\x7d\x8b\xc8\x83\xce\xdf\xa3\xc0\xf9\xf7\x13\xc5\xb3\xff\x44\x51\x89\x62\xfe\x0f\x88\x82\xc0\xdc\x7e\xb5\x28\x9e\xdd\x2f\x8a\x01\xce\xbf\x5e\x14\xe1\x35\xe4\x66\xf0\xa5\xda\x7e\x6f\xf5\x5f\x21\x48\x89\x30\xda\x16\x77\xa3\x36\xbd\xea\x1d\xeb\x5c\xa9\x70\x73\x5d\xdd\x77\x83\xff\x1d\x00\x00\xff\xff\x16\xa9\x3b\xd7\x4f\x10\x00\x00"),
},
"/build/prometheus/kube-state-metrics/kube-state-metrics-prometheusRule.yaml": &vfsgen۰CompressedFileInfo{
name: "kube-state-metrics-prometheusRule.yaml",
@@ -292,7 +292,7 @@ var assets = func() http.FileSystem {
},
"/build/prometheus/kubernetes": &vfsgen۰DirInfo{
name: "kubernetes",
modTime: time.Date(2025, 7, 31, 8, 50, 53, 953644484, time.UTC),
modTime: time.Date(2025, 11, 17, 9, 8, 56, 269747477, time.UTC),
},
"/build/prometheus/kubernetes/kubernetes-prometheusRule.yaml": &vfsgen۰CompressedFileInfo{
name: "kubernetes-prometheusRule.yaml",
@@ -345,14 +345,14 @@ var assets = func() http.FileSystem {
},
"/build/prometheus/kubernetes/kubernetes-serviceMonitorKubelet.yaml": &vfsgen۰CompressedFileInfo{
name: "kubernetes-serviceMonitorKubelet.yaml",
modTime: time.Date(2025, 7, 31, 8, 50, 53, 953683110, time.UTC),
uncompressedSize: 1717,
modTime: time.Date(2025, 11, 17, 9, 8, 56, 269943343, time.UTC),
uncompressedSize: 1718,
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xe4\x55\xb1\x6e\xdb\x30\x10\xdd\xfd\x15\x1c\xdb\x04\x96\x90\x55\x6b\x81\x4e\x69\x87\xa6\xc8\x4a\xd0\xd4\xb3\xc4\x5a\xe2\x11\x77\x27\xb7\x06\xf2\xf1\x05\x29\xd9\x71\xec\x18\x45\xc7\xa2\x9b\xfd\xee\xf1\xf8\xee\xdd\x03\xe5\x52\x78\x06\x4b\xa0\xd8\x98\x91\x62\x50\xe2\x10\xbb\xca\x13\x83\xa4\xf2\x34\xd6\xfb\x87\xd5\x2e\xc4\xb6\x31\x4f\xe0\x7d\xf0\xf8\x32\xb3\x56\x23\xd4\xb5\x4e\x5d\xb3\x32\x66\x70\x1b\x0c\x92\x7f\x19\xe3\x52\xaa\x76\xd3\x06\x1c\xa1\x90\x2a\x50\x1d\xdd\x88\xc6\x64\x6c\x80\xde\xe0\x24\xc7\xba\xa6\xed\x4c\x5b\x27\xa6\x11\xda\x63\x92\x1b\xf4\x3d\x62\x4b\x3c\xb3\x25\xf5\x60\xac\x8c\xb9\xbc\x27\xff\x97\xe4\x3c\xce\x79\xeb\xd7\x29\xd7\x72\x10\xc5\xb8\x92\x04\x9f\xb5\x23\xb6\x89\x42\xd4\x32\xc8\xda\x6c\xe0\x18\xfc\x9d\x76\x88\x9f\xc3\x80\xc6\xd4\x7b\xc7\x35\x4f\xb1\x16\x78\x86\x4a\xfd\x56\x93\xcc\xfe\x38\xef\x69\x8a\x5a\x6b\x3e\x58\xe4\xf7\x14\x89\x1f\x67\x8b\x8c\xf2\x84\x82\x86\xa8\xe0\xbd\x1b\x1a\xf3\x30\x16\x60\x84\x72\xf0\xdf\x50\xcc\x0c\xb1\x5b\xfc\x5c\x1b\xe7\xb5\xec\x67\x07\xa4\x02\x19\xc3\xe8\xf0\xeb\x34\xab\x8d\xd4\xc2\xe6\x71\x5f\x8e\x08\x4f\x31\x86\xd8\x59\x4f\x51\x5d\x88\x60\x5b\x54\x5d\xd5\x13\xb5\x17\x95\x3d\x0d\xd3\x08\x2b\xea\x54\xaa\xbb\x13\x9c\x06\x74\x96\x31\x04\x51\xdb\x4e\xec\xb2\x24\x2b\xf0\x14\x5b\xb1\xd5\xfd\xa2\x4b\x68\x62\x8f\xc7\xb3\x38\xe4\x01\x6c\xd1\x66\x6d\x01\x12\xb1\x36\xa6\x57\x4d\xb2\x9e\x47\x9e\x97\xcc\xd7\x83\xdf\xea\xb6\x1c\xb3\xc9\x69\xbf\x74\x35\x46\x1d\x77\xd0\x42\x6e\xcc\x39\xe3\xc2\xc5\x72\x4b\xcb\x74\x61\xe5\x87\x65\x7d\x2f\xc7\x14\x7c\x2c\x75\xf1\x3d\x72\xaa\x8a\xde\x82\xe8\x20\x9f\x28\x6e\x43\x77\x54\x14\xa2\xc0\x4f\x8c\xa7\x5d\x48\xcf\xe0\xb0\x3d\x9c\xb6\xfc\x4f\x84\xe8\x2c\x22\x69\xb2\x93\xb8\x0e\xa7\xc5\x2a\xa9\x1b\x5e\x5e\x19\x23\x46\xe2\xc3\x42\xda\x1c\x14\x72\x5d\xf4\xce\xf7\x38\x83\x23\xf4\x27\xf1\xce\x56\xf7\xf3\x89\x5b\x4d\x33\x29\x47\x52\xa0\x57\xad\xb3\x32\xbf\x15\x5b\xdd\x25\x70\xa0\x77\x94\x25\x26\x0f\x11\xe4\xcc\xbe\xa2\xda\x33\x5c\x2b\
xd5\xdd\x5f\xe4\xd3\x69\xdf\x98\x7a\x49\x50\xed\x5d\xbb\x0f\x42\xfc\xdf\x65\xf7\x07\x6d\x16\x41\xef\xbf\xe8\xe7\x0f\xec\x13\x06\x78\x25\x9e\xdb\x8e\x4e\x7d\xff\x35\xd7\x8e\x66\x94\x47\x7d\x79\x6d\x8d\x91\x6b\xf6\x5b\x9f\xfe\xf4\x09\xf9\x1d\x00\x00\xff\xff\xff\x8d\xbb\xd9\xb5\x06\x00\x00"),
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xe4\x55\xb1\x8e\xdb\x3a\x10\xec\xfd\x15\x2c\xdf\xbb\x83\xa5\x77\xad\xda\x07\xa4\xba\xa4\xc8\x05\xd7\x12\x34\x35\x96\x18\x4b\x5c\x62\x77\xe5\xc4\xc0\x7d\x7c\x40\x4a\xf6\xf9\xec\x33\x82\xa4\x48\x93\xce\x9e\x1d\x2e\x67\x67\x07\x94\x4b\xe1\x19\x2c\x81\x62\x63\x46\x8a\x41\x89\x43\xec\x2a\x4f\x0c\x92\xca\xd3\x58\xef\x1f\x56\xbb\x10\xdb\xc6\x3c\x81\xf7\xc1\xe3\xe3\xcc\x5a\x8d\x50\xd7\x3a\x75\xcd\xca\x98\xc1\x6d\x30\x48\xfe\x65\x8c\x4b\xa9\xda\x4d\x1b\x70\x84\x42\xaa\x40\x75\x74\x23\x1a\x93\xb1\x01\x7a\x83\x93\x1c\xeb\x9a\xb6\x33\x6d\x9d\x98\x46\x68\x8f\x49\x6e\xd0\xf7\x88\x2d\xf1\xcc\x96\xd4\x83\xb1\x32\xe6\xf2\x9e\xfc\x5f\x92\xf3\x38\xe7\xad\x5f\xa7\x5c\xcb\x41\x14\xe3\x4a\x12\x7c\xd6\x8e\xd8\x26\x0a\x51\xcb\x20\x6b\xb3\x81\x63\xf0\x17\xda\x21\x7e\x08\x03\x1a\x53\xef\x1d\xd7\x3c\xc5\x5a\xe0\x19\x2a\xf5\x5b\x4d\x32\xfb\xe3\xbc\xa7\x29\x6a\xad\xf9\x60\x91\xdf\x53\x24\x7e\x9c\x2d\x32\xca\x13\x0a\x1a\xa2\x82\xf7\x6e\x68\xcc\xc3\x58\x80\x11\xca\xc1\x7f\x46\x31\x33\xc4\x6e\xf1\x73\x6d\x9c\xd7\xb2\x9f\x1d\x90\x0a\x64\x0c\xa3\xc3\xf7\xd3\xac\x36\x52\x0b\x9b\xc7\x7d\x39\x22\x3c\xc5\x18\x62\x67\x3d\x45\x75\x21\x82\x6d\x51\x75\x55\x4f\xd4\x5e\x54\xf6\x34\x4c\x23\xac\xa8\x53\xa9\xee\x4e\x70\x1a\xd0\x59\xc6\x10\x44\x6d\x3b\xb1\xcb\x92\xac\xc0\x53\x6c\xc5\x56\xf7\x8b\x2e\xa1\x89\x3d\x1e\xcf\xe2\x90\x07\xb0\x45\x9b\xb5\x05\x48\xc4\xda\x98\x5e\x35\xc9\x7a\x1e\x79\x5e\x32\x5f\x0f\x7e\xab\xdb\x72\xcc\x26\xa7\xfd\xd2\xd5\x18\x75\xdc\x41\x0b\xb9\x31\xe7\x8c\x0b\x17\xcb\x2d\x2d\xd3\x85\x95\xff\x2c\xeb\x7b\x39\xa6\xe0\xdf\x52\x17\xdf\x23\xa7\xaa\xe8\x2d\x88\x0e\xf2\x3f\xc5\x6d\xe8\x8e\x8a\x42\x14\xf8\x89\xf1\xb4\x0b\xe9\x19\x1c\xb6\x87\xd3\x96\xff\x64\x88\xfe\x93\xdf\x4d\xd1\x59\x46\xd2\x64\x27\x71\x1d\x4e\x9b\x55\x52\x37\xbc\xbc\x32\x46\x8c\xc4\x87\x85\xb4\x39\x28\xe4\xba\xe8\x9d\xef\x71\x06\x47\xe8\x37\xe2\x9d\xad\xee\xe7\x13\xb7\x9a\x66\x52\xce\xa4\x40\xaf\x5a\x67\x65\x7e\x2b\xb6\xba\x4b\xe0\x40\xef\x28\x4b\x4c\x1e\x22\xc8\
xa1\x7d\x45\xb5\x67\xb8\x56\xaa\xbb\x5f\x08\xa8\xd3\xbe\x31\xf5\x12\xa1\xda\xbb\x76\x1f\x84\xf8\xaf\x0b\xef\x57\xda\x2c\x82\xde\x7f\xd2\xcf\x5f\xd8\x27\x0c\xf0\x4a\x3c\xb7\x1d\x9d\xfa\xfe\x53\xae\x1d\xcd\x28\xaf\xfa\xf2\xdc\x1a\x23\xd7\xec\xb7\x3e\xfd\xec\x1b\xf2\x23\x00\x00\xff\xff\xa4\x9b\xbc\x26\xb6\x06\x00\x00"),
},
"/build/prometheus/node-exporter": &vfsgen۰DirInfo{
name: "node-exporter",
modTime: time.Date(2025, 9, 30, 6, 56, 49, 643873836, time.UTC),
modTime: time.Date(2025, 12, 3, 13, 36, 55, 20776255, time.UTC),
},
"/build/prometheus/node-exporter/node-exporter-clusterRole.yaml": &vfsgen۰CompressedFileInfo{
name: "node-exporter-clusterRole.yaml",
@@ -370,10 +370,10 @@ var assets = func() http.FileSystem {
},
"/build/prometheus/node-exporter/node-exporter-daemonset.yaml": &vfsgen۰CompressedFileInfo{
name: "node-exporter-daemonset.yaml",
modTime: time.Date(2025, 9, 30, 6, 56, 49, 644065414, time.UTC),
uncompressedSize: 3536,
modTime: time.Date(2025, 12, 3, 13, 36, 55, 20659302, time.UTC),
uncompressedSize: 3587,
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x56\x51\x6f\xdb\x38\x12\x7e\xcf\xaf\x20\x8a\x3e\x24\xb8\x93\x6c\xa7\x4d\xaf\x15\x90\x87\x5c\xe2\x6b\x02\xb4\x39\xa3\xce\xee\x3e\x2c\xb6\x06\x4d\x8d\x6c\x22\x14\x87\xe5\x0c\x5d\x1b\xd0\x8f\x5f\xd0\x8a\x1d\x49\x76\xdc\x18\xd8\x97\x58\x19\x0e\x3f\x72\x66\xbe\xf9\x86\xd2\xe9\xdf\xc1\x93\x46\x9b\x09\xe9\x1c\xf5\x16\x83\x93\x47\x6d\xf3\x4c\xdc\x48\x28\xd1\x8e\x81\x4f\x4a\x60\x99\x4b\x96\xd9\x89\x10\x46\x4e\xc1\x50\xfc\x12\xd1\x3f\x7d\x0c\x53\xf0\x16\x18\x28\xd5\xd8\x53\x58\x3a\xb4\x60\x39\x13\xb0\x74\xe8\x19\xfc\x0b\x9e\x56\x96\x90\x09\x8b\x39\x24\xbf\xf0\x74\xd2\x73\x82\x45\x26\xa2\x39\x71\x1e\x4b\xe0\x39\x04\x7a\xc1\x7d\xb1\x89\x66\x90\xbe\x4b\x07\x27\x42\xec\x3f\x29\x5a\xc9\x49\x05\x35\x2e\xb9\x39\x78\x48\x4a\xb4\x9a\xd1\x6b\x3b\x4b\x68\x45\x0c\xe5\x09\x39\x50\x31\x5a\x02\x03\x8a\xd1\xd7\x91\x97\x92\xd5\xfc\x4b\x23\x15\xc7\x24\xe3\x98\x74\x1c\x99\x10\x86\xd2\x19\xc9\xf0\x74\xcb\x46\xdd\xd6\x50\xd6\x22\x4b\xd6\x68\xb7\xb7\x16\x6b\x10\xc5\xa6\x73\x46\x0e\x85\x0c\x86\x13\x85\x96\xa5\xb6\xe0\xf7\x5f\xce\xb4\x52\x70\x5c\x12\x8e\x4b\xc3\xd1\xcc\x78\x25\x3b\x84\xd8\x54\x78\xbd\xa1\x28\xb4\xd5\xbc\x7a\x8e\x28\xde\xe5\x6a\xc7\x2a\x84\x87\x1f\x41\x7b\xc8\x6f\x42\x64\xcb\x58\xcd\x21\x0f\x46\xdb\xd9\xdd\xcc\xe2\xd6\x3c\x5c\x82\x0a\x31\xdf\xcd\x9d\x35\xe6\xf8\x89\x4f\x0f\xe0\x4b\x6a\x2f\x27\x35\xbd\x86\x4b\xe7\x81\xa8\x5d\xad\x8d\xc7\x23\xac\x9e\xf2\xe4\xd1\x40\x27\x48\xc8\x67\xd0\xd9\x21\x04\x3a\xf0\x32\x12\x58\xdc\x20\xd0\x3d\xf2\x70\xa9\x89\x9f\xdc\xb6\x65\xde\x1e\x95\x08\xe9\x67\x8d\x83\x13\x91\x24\x3f\x61\x9a\x1a\x4d\x0c\x36\x91\x79\x1e\x2f\x77\x39\x38\xff\x4f\xda\x4f\xfb\xe9\x20\xfb\x34\xe8\xf7\x5b\xde\x4e\xf2\x3c\xa5\x15\x15\x74\xd9\x9b\x23\x71\x8f\x56\xb4\xeb\xe0\x11\x79\xeb\x11\xff\x69\xb9\x58\x4c\x14\x9a\x3a\x53\xe9\x4f\x5d\xe8\xd6\xea\xf3\x92\x72\x21\xd5\xb6\xc0\x17\x96\x2d\xf0\x4f\xf4\x8f\x13\x8f\x81\xe1\x65\x9f\x1c\x16\xe9\x53\x60\xc9\x01\xb4\x42\x1b\xa8\xd5\x21\xd5\x75\xb5\x93\x12\x83\xe5\xc4\xa1\
xb6\x4c\x97\xdf\x7b\xa7\x39\x2c\x2a\xe7\x51\x55\xb4\xa2\x6a\x21\x7d\xcf\xe8\x69\x2f\x47\xf5\x08\xbe\x97\xfe\xeb\xec\xf4\x6d\xd5\x3b\x7b\x3d\x7a\x41\x09\xaf\x1c\xd0\xe5\xf7\x53\x19\x18\x0b\xaa\xa6\xda\x16\x25\x4f\x4a\x4d\xaa\x52\x33\x8f\xc1\x55\x0a\x6d\xa1\x67\x05\x55\x39\x4c\x43\xfd\xbb\x70\xbc\xfe\xe1\xd2\x15\x54\x15\x81\x62\xa3\x57\xf3\x30\x03\x36\xd3\x82\xaa\xf2\x47\x80\x00\x15\x2e\xc0\x1b\xb9\xaa\x2f\x1c\xff\x14\x54\x39\x62\xf4\x50\x79\xa7\x26\x4e\x3b\x28\xa8\x22\x50\xc1\x6b\x5e\xc5\xcf\x58\xd4\x8a\xbd\x54\x50\xd0\xd9\xdb\x6d\x20\xba\x94\x33\xc8\xc4\x14\x94\x91\xd3\x5e\xab\x8b\xb3\x48\x92\xf7\xcf\x5d\x75\xa0\xcf\x37\x07\x5d\xa3\x65\x58\x72\x93\xfb\xce\xeb\x85\x36\x30\x83\x3c\x13\xec\x43\x93\xe4\x3e\xd8\x2b\xfa\x8d\xa2\x50\x3d\xd3\xd0\x03\x61\xf0\x0a\x5a\x0d\x64\x74\xa9\xb9\xd3\x52\xca\x85\x4c\xbc\x19\xbc\x69\x19\x4b\x28\xd1\xaf\x32\x71\xd1\xef\x7f\xd5\x9d\xd6\x07\xda\x0b\x31\xe8\x9f\x97\x7b\x31\x06\x1f\x9b\x18\x0b\x34\xa1\x84\xaf\x91\x34\xad\x16\x5b\xd3\x68\x24\x79\x9e\x89\xba\x21\x62\x35\x1a\x78\x75\xda\x3a\x46\x0f\x32\xff\xbf\x35\xab\x4e\x4a\xf6\xa0\x35\x1b\x70\x03\xd6\xb6\xbd\x1e\xab\xd5\xaa\xe2\x69\xdd\xa3\x93\xb3\xf5\x80\xc9\xc4\x2d\x12\x3f\xe0\xf5\x46\x58\x76\x0e\xee\x00\xec\x3d\x79\x9f\x08\x19\x9c\x31\x12\xe7\xe0\x7d\xcb\xbe\xa6\x0d\x24\x1d\x7d\xfa\xf3\xed\xe9\xdd\xe8\xec\xaf\x5d\x75\x62\x43\x89\xd2\x71\xea\x27\x14\x34\x03\x5d\x3e\x7c\x19\x4f\x86\xd7\x37\xb7\xc3\xc9\xb7\xf1\xd5\xe4\x8f\xbb\x87\xdb\xc9\xd5\x70\x3c\x19\x9c\x7f\x9c\x7c\xbe\xfe\x3a\x19\xdf\x5e\x9d\x5f\x7c\xf8\xf7\xb3\xd7\xf0\xfa\xe6\x17\x7e\x3b\x38\xd7\xff\xbd\x7e\x15\xce\x5e\xbf\x03\x68\xad\xc8\x82\x23\xf6\x20\xcb\xcb\x39\xb3\xcb\x7a\xbd\xb6\x44\xf7\xb6\xbe\x60\x17\xcd\xcc\xd6\x55\xb9\x1b\x35\x6a\xb2\x90\x26\xc0\xff\x3c\x96\x6d\xa6\x17\x1a\x4c\xfe\x0d\x8a\xee\x54\x5a\xdb\x6b\x8e\x10\x4b\x0e\x94\x3a\xcc\x1b\x80\x6d\x79\x58\xcf\x6c\x3f\x95\x2a\x0e\xee\xe5\x2a\xeb\xa7\x83\x4f\x69\xbf\xed\x3c\x0a\xc6\x8c\xd0\x68\xb5\xca\xc4\x5d\x71\x8f\x3c\xf2\x40\x60\xb9\xa3\x23\x1d\xa8\xed\x6a\x14\x96\x16\x7b\
xb6\x63\x6e\x84\x9e\x33\xd1\x22\x85\x10\x91\xd7\x7b\x17\xea\x53\x62\x3a\xe9\x1f\x96\x96\xc1\x31\xd2\xb2\x5f\x59\xce\x9b\x08\x07\xb4\x73\x2d\x90\x9f\xe3\xa8\xc8\xc4\x87\x8b\x8b\x77\xe7\xdd\xb5\x7b\xb4\xdf\x10\xf9\x90\xb2\x36\xf7\xc5\x6c\xdd\xd7\x73\xb5\xb5\x65\x9d\xc5\xbb\x9b\x96\xad\xf9\xe0\x69\xbf\x3c\x9f\x1f\x2d\x48\x99\x30\xda\x86\xe5\xc9\x2f\x42\xe9\xdc\xe7\xfd\xd6\xdf\x2f\xb4\x82\x2b\xa5\xa2\x14\xdd\xbf\x38\x60\x18\x4d\x7c\x08\x35\x9f\x55\x49\xe3\x71\xb4\x7e\x15\x6d\xca\x5c\xeb\x74\xc3\x6f\x1d\x5c\x64\x78\x73\x28\xd5\xaa\xd8\x52\xe5\x1d\xa1\x3e\xb8\xb5\xa9\xc1\x5d\x55\x3e\xb8\xf1\x64\x57\x52\xff\x0e\x00\x00\xff\xff\x5d\xda\x6d\x1d\xd0\x0d\x00\x00"),
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x56\x51\x6f\xdb\x36\x10\x7e\xcf\xaf\x20\x82\x3e\x24\xd8\x24\xdb\xe9\xd2\xb5\x02\xf2\x90\x39\x59\x13\xa0\xcd\x82\x26\xdb\x1e\x86\xd5\xa0\xa9\x93\x4d\x84\xe2\xb1\xbc\xa3\x1b\x03\xfa\xf1\x03\xa5\xd8\x91\x64\xc7\x8d\x81\xbd\xc4\xca\xf1\xe3\x47\xde\xf1\xbb\x8f\x94\x4e\xff\x05\x9e\x34\xda\x4c\x48\xe7\x68\xb0\x18\x1d\x3c\x68\x9b\x67\xe2\x42\x42\x89\xf6\x0e\xf8\xa0\x04\x96\xb9\x64\x99\x1d\x08\x61\xe4\x14\x0c\xc5\x2f\x11\xf1\xe9\x43\x98\x82\xb7\xc0\x40\xa9\xc6\x81\xc2\xd2\xa1\x05\xcb\x99\x80\x47\x87\x9e\xc1\xbf\x80\xb4\xb2\x84\x4c\x58\xcc\x21\xf9\x01\xd2\x49\xcf\x09\x16\x99\x88\xe1\xc4\x79\x2c\x81\xe7\x10\xe8\x05\xf8\x62\x95\xcd\x28\x7d\x9b\x8e\x0e\x84\xd8\xbe\x52\x8c\x92\x93\x0a\x1a\x5e\x72\x73\xf0\x90\x94\x68\x35\xa3\xd7\x76\x96\xd0\x92\x18\xca\x03\x72\xa0\x62\xb6\x04\x06\x14\xa3\x6f\x32\x2f\x25\xab\xf9\xa7\x56\x29\xf6\x29\xc6\x3e\xe5\xd8\xb3\x20\x0c\xa5\x33\x92\xe1\x69\x97\xad\x73\xab\xa9\xac\x45\x96\xac\xd1\xae\x77\x2d\x6a\x12\xc5\xa6\xb7\x46\x0e\x85\x0c\x86\x13\x85\x96\xa5\xb6\xe0\xb7\x6f\xce\x74\x4a\xb0\x5f\x11\xf6\x2b\xc3\xde\xca\x78\xa5\x3a\x84\x58\x9d\x70\x3d\xa1\x28\xb4\xd5\xbc\x7c\xce\x28\xee\xe5\x7c\x23\x2a\x84\x87\x6f\x41\x7b\xc8\x2f\x42\x54\xcb\x9d\x9a\x43\x1e\x8c\xb6\xb3\xeb\x99\xc5\x75\xf8\xf2\x11\x54\x88\xf5\x6e\xcf\x6c\x38\xef\x9e\xf4\x74\x0f\xbe\xa4\xee\x70\xd2\xc8\xeb\xf2\xd1\x79\x20\xea\x9e\xd6\x0a\xf1\x00\xcb\xa7\x3a\x79\x34\xd0\x4b\x12\xf2\x19\xf4\x66\x08\x81\x0e\xbc\x8c\x02\x16\x17\x08\x74\x83\x7c\xf9\xa8\x89\x9f\x60\xce\x6b\xf4\x9a\x97\x63\x23\x89\x6e\xea\x43\x38\x6c\x1a\x20\x51\x26\x10\x83\x4f\x94\xd7\xac\x95\x34\x87\x4f\x53\xd6\xca\x58\xef\x2e\x11\xd2\xcf\x5a\x7b\x4d\x44\x92\x7c\x87\x69\x6a\x34\x31\xd8\x44\xe6\x79\xcc\xe7\x6c\x74\xf2\x6b\x3a\x4c\x87\xe9\x28\xfb\x30\x1a\x0e\x3b\x68\x27\x79\x9e\xd2\x92\x0a\x3a\x1b\xcc\x91\x78\x40\x4b\xda\x04\x78\x44\x5e\x23\xe2\x3f\x1d\x88\xc5\x44\xa1\x69\x8a\x9b\x7e\xd7\x85\xee\x8c\x3e\x0f\x29\x17\x52\x6d\x0b\x7c\x61\xd8\x02\x7f\x47\xff\x30\xf1\
x18\x18\x5e\xc6\xe4\xb0\x48\x9f\x12\x4b\x76\xb0\x15\xda\x40\x53\xcf\x54\x37\x02\x49\x4a\x0c\x96\x13\x87\xda\x32\x9d\x7d\x1d\x1c\xe5\xb0\xa8\x9c\x47\x55\xd1\x92\xaa\x85\xf4\x03\xa3\xa7\x83\x1c\xd5\x03\xf8\x41\xfa\xd3\xf1\xd1\x9b\x6a\x70\xfc\x7a\xf6\x82\x12\x5e\x3a\xa0\xb3\xaf\x47\x32\x30\x16\x54\x4d\xb5\x2d\x4a\x9e\x94\x9a\x54\xa5\x66\x1e\x83\xab\x14\xda\x42\xcf\x0a\xaa\x72\x98\x86\xe6\x77\xe1\xb8\xfe\xe1\xd2\x15\x54\x15\x81\xa2\x37\x54\xf3\x30\x03\x36\xd3\x82\xaa\xf2\x5b\x80\x00\x15\x2e\xc0\x1b\xb9\x6c\x36\x1c\xff\x14\x54\x39\x62\xf4\x50\x79\xa7\x26\x4e\x3b\x28\xa8\x22\x50\x21\xaa\x2a\x7e\xc6\x43\xad\xd8\x4b\x05\x05\x1d\xbf\x59\x27\xa2\x4b\x39\x83\x4c\x4c\x41\x19\x39\x1d\x74\x1a\x3f\x8b\x22\x39\x7d\x6e\xc4\x1d\xd6\xb0\x5a\x68\x8c\x96\xe1\x91\xdb\xed\xe2\xbc\x5e\x68\x03\x33\xc8\x33\xc1\x3e\xb4\xfb\xc2\x07\x7b\x4e\x7f\x52\xf4\xb6\x67\x19\x7a\x20\x0c\x5e\x41\xa7\xe7\x8c\x2e\x35\xf7\xba\x50\xb9\x90\x89\xc3\xd1\x61\x27\x58\x42\x89\x7e\x99\x89\xd3\xe1\xf0\xb3\xee\xb9\x05\xd0\x56\x8a\xd1\xf0\xa4\xdc\xca\x31\x7a\xdf\xe6\x58\xa0\x09\x25\x7c\x8e\xa2\xe9\xb4\x58\x2d\xa3\x5b\xc9\xf3\x4c\x34\x0d\x11\x4f\xa3\xc5\xd7\x94\xad\x17\xf4\x20\xf3\x3f\xac\x59\xf6\x4a\xb2\x85\xad\xdd\x80\x2b\xb2\x6e\xec\xf5\x5c\x9d\x56\x15\x4f\xe3\x1e\x9d\x9c\xd5\x77\x52\x26\xae\x90\xf8\x1e\xc7\x2b\x63\xd9\x58\xb8\x47\xb0\x75\xe5\x6d\x26\x64\x70\xc6\x48\x9c\x83\xf7\x9d\x78\x2d\x1b\x48\x7a\xfe\xf4\xcf\x9b\xa3\xeb\xdb\xe3\x7f\x37\xdd\x89\x0d\x25\x4a\xc7\x87\x42\x42\x41\x33\xd0\xd9\xfd\xa7\xbb\xc9\xe5\xf8\xe2\xea\x72\xf2\xe5\xee\x7c\xf2\xf7\xf5\xfd\xd5\xe4\xfc\xf2\x6e\x32\x3a\x79\x3f\xf9\x38\xfe\x3c\xb9\xbb\x3a\x3f\x39\x7d\xf7\xf3\x33\xea\x72\x7c\xf1\x03\xdc\x06\xcf\xf8\xb7\xf1\xab\x78\xb6\xe2\x76\xb0\x75\x32\x0b\x8e\xd8\x83\x2c\xcf\xe6\xcc\x2e\x1b\x0c\xba\x16\x3d\x58\x63\xc1\x2e\xda\x95\x6d\x4e\xe5\xfa\xb6\x75\x26\x0b\x69\x02\xfc\xee\xb1\xec\x2a\xbd\xd0\x60\xf2\x2f\x50\xf4\x2f\xb2\x3a\xde\x68\x84\x58\x72\xa0\xd4\x61\xde\x22\xec\xda\x43\x7d\xcd\xfb\xa9\x54\xf1\xae\x7f\x5c\x66\xc3\x74\xf4\x21\x1d\x76\xc1\xb7\
xc1\x98\x5b\x34\x5a\x2d\x33\x71\x5d\xdc\x20\xdf\x7a\x20\xb0\xdc\xf3\x91\x1e\xd5\x7a\x34\x1a\x4b\x47\x3d\xeb\x6b\xee\x16\x3d\x67\xa2\x23\x0a\x21\xa2\xae\xb7\x0e\x34\xab\xc4\x72\xd2\xff\x6c\x2d\xa3\x7d\xac\x65\xbb\xb3\x9c\xb4\x19\x76\x78\x67\x6d\x90\x1f\xe3\x55\x91\x89\x77\xa7\xa7\x6f\x4f\xfa\x63\x37\x68\xbf\x20\xf2\x2e\x67\x6d\xcf\x8b\xd5\xba\x69\xee\xd5\xce\x94\xba\x8a\xd7\x17\x9d\x58\xfb\x8d\xd4\x7d\xac\x3e\xbf\x73\x90\x32\x61\xb4\x0d\x8f\x07\x3f\x48\xa5\xb7\x9f\x5f\xd6\x78\xbf\xd0\x0a\xce\x95\x8a\x56\x74\xf3\xe2\x05\xc3\x68\xe2\xdb\xa9\xfd\x12\x4b\x5a\xef\xa9\xfa\x21\xb5\x3a\xe6\xc6\xa7\x5b\xb8\x3a\xb9\xa8\xf0\xf6\xa5\xd4\xb8\x62\xc7\x95\x37\x8c\x7a\xe7\xd4\xb6\x07\xf7\x5d\x79\xe7\xc4\x83\x4d\x4b\xfd\x2f\x00\x00\xff\xff\x16\xe8\xae\x25\x03\x0e\x00\x00"),
},
"/build/prometheus/node-exporter/node-exporter-prometheusRule.yaml": &vfsgen۰CompressedFileInfo{
name: "node-exporter-prometheusRule.yaml",
@@ -405,7 +405,7 @@ var assets = func() http.FileSystem {
},
"/build/prometheus/prometheus": &vfsgen۰DirInfo{
name: "prometheus",
modTime: time.Date(2025, 7, 31, 8, 50, 53, 956818053, time.UTC),
modTime: time.Date(2025, 11, 20, 6, 44, 5, 168147675, time.UTC),
},
"/build/prometheus/prometheus/prometheus-clusterRole.yaml": &vfsgen۰CompressedFileInfo{
name: "prometheus-clusterRole.yaml",
@@ -430,10 +430,10 @@ var assets = func() http.FileSystem {
},
"/build/prometheus/prometheus/prometheus-prometheus.yaml": &vfsgen۰CompressedFileInfo{
name: "prometheus-prometheus.yaml",
modTime: time.Date(2025, 7, 31, 8, 50, 53, 956443425, time.UTC),
uncompressedSize: 2390,
modTime: time.Date(2025, 11, 20, 6, 44, 5, 168271717, time.UTC),
uncompressedSize: 2437,
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x95\x5f\x6f\xe3\x36\x0c\xc0\xdf\xfd\x29\x88\xdb\xb3\xd3\xa4\x3b\x60\x83\xde\x8a\xae\x38\x1c\xb6\x2b\x86\xdd\x6d\xef\x8a\xcc\x24\x42\x25\x51\xa5\xa8\x5c\x82\x61\xdf\x7d\x90\xed\xc4\x7f\xd6\xa6\x29\x30\x3f\xd9\x14\x49\xf3\xcf\x8f\x94\x8e\xf6\x2f\xe4\x64\x29\x28\xf0\x14\xac\x10\xdb\xb0\x5d\x18\x62\xa4\xb4\x30\xe4\x6f\xf6\xab\xea\xc9\x86\x46\xc1\xef\x4c\x1e\x65\x87\x39\x55\x1e\x45\x37\x5a\xb4\xaa\x00\x9c\x5e\xa3\x4b\xe5\x0d\x40\xc7\xb8\x78\xca\x6b\xe4\x80\x82\x69\x61\xe9\xc6\x90\x8f\x14\x30\x88\x82\x38\xd8\xbf\xac\x6b\x43\x12\x1d\x0c\x2a\x78\xfa\xf9\x35\x9d\xa0\x3d\x5e\xe1\x2a\x6a\x96\x9a\x36\x0a\x8a\xb8\x7e\x53\x7d\x7f\x2a\xc1\xed\xe2\xc7\x8f\x8b\x65\x05\xd0\xfd\xa7\x8b\xa3\xbc\xa7\xa8\xdb\xc0\xf2\x1a\x53\xdc\x21\x63\x3d\x54\xab\x4e\xc7\x24\xe8\xab\x14\xd1\x94\x3a\xe8\xcd\xc6\x06\x2b\xc7\xae\x26\x81\x1a\xbc\x9b\x48\x00\x22\xe3\x06\x99\xb1\xf9\x25\x17\x07\x5f\xcd\x0e\x9b\xec\x6c\xd8\x7e\xde\x06\x3a\x8b\x1f\x0e\x68\xb2\x94\xb8\x7a\xb3\xba\x37\xc4\x52\xa4\x5e\x56\x1e\xaf\xc5\xec\x1e\x0e\x91\x31\x95\x34\xd2\xf8\xac\x86\x27\x3c\xaa\x36\x8a\x9a\xc9\xe1\x2c\xf3\x21\x8b\x91\x0d\x00\x45\x64\x2d\xc4\x0a\x1e\x0e\x36\x49\x3a\x1f\x7e\x47\xbb\xdd\x89\x82\xd5\x72\xd9\xca\x22\x35\x77\x41\xec\xff\x97\x20\x35\x27\x5f\xdf\x90\xfd\x38\x93\x16\xb4\xaf\xe8\xd0\x94\xc0\x26\xe1\xb6\x05\xf8\x6d\x04\xe2\xf0\xbc\x07\xc9\x4b\x56\xff\x85\xf3\x92\xf6\x8b\x98\x5e\x32\xb8\x08\x6c\xf7\x9c\x31\x9c\xf7\xf7\x12\x93\x83\xa2\x50\x24\x47\xdb\xe3\xaf\x05\x87\xe9\xcf\x77\x94\xa4\x78\x7f\xa5\xcb\xda\x21\x8b\x0d\xdb\x7e\xc6\xcb\x97\xd7\x41\x6f\x91\xfb\x48\x6a\x18\xaf\x91\xfd\x6d\x35\x04\xac\x26\xfa\xb5\xd7\x36\x54\xb3\x74\xd4\x35\x19\x44\x62\x51\xf0\x1d\xd7\x15\x00\xee\xb5\xcb\xba\x70\xf3\x39\x08\xf2\x5e\x3b\x05\xab\xa2\x88\x07\x41\x0e\xda\xf5\x24\xc0\xdf\xff\x54\x00\xd6\xeb\x6d\xdf\x8b\x9b\xa1\xae\x6a\x3f\x8c\x3a\x35\x38\xc5\x6a\x5a\x1d\x4a\x0a\x9c\x0d\xf9\x50\xb5\xb8\x7f\x19\xed\xbe\xe9\xf6\x7b\x2f\x6c\xd7\x41\x76\x35\x5c\xef\x86\xea\x9a\
x3d\x28\xe4\xca\x26\x38\x6d\x95\x1a\x70\xb3\x41\x23\x0a\x1e\xa9\x1f\xea\x8e\x9a\x76\xc9\x34\xd8\x58\xa3\x05\x9b\x6a\xb6\x43\x9e\xb3\x76\xad\xac\x74\x0e\xc7\x37\x4d\x5f\xd3\xee\xfb\xf1\x84\xc4\xb9\x1d\x5d\x0b\x07\x8d\xf9\x01\xd3\x1a\x5f\xb5\x2a\x87\x33\xd9\x73\x46\xee\x97\x94\xd7\x87\x7b\x0a\x26\x73\x59\xa7\xc7\x96\xf5\x92\x30\x63\x74\xd6\xe8\xa4\x60\x05\x3f\x80\x50\x43\xad\x30\x51\xe6\xf3\xe4\x39\xeb\xad\x9c\x9b\x6e\x62\x56\xf0\x61\xf5\xa1\xff\xf4\xe8\x89\x8f\x0a\x3e\x7e\xb2\xad\x84\xf1\x39\x63\x9a\xa9\xdf\x2e\x97\x7e\xae\xbf\x5c\x7e\x29\x16\x9c\xdd\xab\x29\x95\xb3\x29\xaa\x2f\x6c\xbe\x11\xe4\x23\x8c\xca\xee\xef\x87\xb1\x2e\x6e\x8a\x3c\x19\xd6\x11\x67\x43\x94\xd0\x64\xb6\x72\xbc\xa7\x20\x78\x90\xce\xeb\x26\x7d\x62\xca\x51\x41\xb7\xf5\x39\x87\xbb\xf4\x48\xe1\x0f\x22\x51\xb0\xd1\x2e\xe1\x20\xff\x33\x21\x77\x8a\x09\x79\x6f\x0d\xde\x19\x43\x39\xc8\xe3\x0c\xdb\xba\x0b\xae\x57\x7a\x03\x81\xa9\xd6\xbc\x2c\x28\x18\xda\x7b\x04\x7e\x2a\xec\x25\x21\x2e\x43\xdf\x21\x47\x2e\x7b\xbc\x77\xda\xfa\x6f\xe8\xa3\xd3\x72\xbe\x3b\x4f\x17\x76\x5f\xa1\x69\x93\x4f\xc2\x69\xf7\x7a\xc3\xfe\x07\x70\xbb\x6c\xbb\x3c\x9f\x9b\x7f\x03\x00\x00\xff\xff\xf1\x72\x17\xdb\x56\x09\x00\x00"),
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x55\x4d\x6f\xe3\x36\x10\xbd\xeb\x57\x0c\xd2\xb3\x1c\x27\x5d\xa0\x05\x6f\x41\x1a\x2c\x16\xed\x1a\x45\x77\xdb\x3b\x4d\x8d\x6d\x22\x24\x87\x19\x0e\xbd\x36\x8a\xfe\xf7\x82\x92\x6c\x7d\x34\xf1\x3a\x40\x75\x12\x87\x6f\x86\xf3\xf1\xf8\xa8\xa3\xfd\x0b\x39\x59\x0a\x0a\x3c\x05\x2b\xc4\x36\x6c\x17\x86\x18\x29\x2d\x0c\xf9\xdb\xfd\x5d\xf5\x6c\x43\xa3\xe0\x77\x26\x8f\xb2\xc3\x9c\x2a\x8f\xa2\x1b\x2d\x5a\x55\x00\x4e\xaf\xd1\xa5\xf2\x07\xa0\x63\x5c\x3c\xe7\x35\x72\x40\xc1\xb4\xb0\x74\x6b\xc8\x47\x0a\x18\x44\x41\x1c\xfc\x5f\xc7\xda\x90\x44\x07\x83\x0a\x9e\x7f\x7e\x0b\x13\xb4\xc7\x2b\x42\x45\xcd\x52\xd3\x46\x41\x31\xd7\xdf\x85\xef\x4f\x2d\xb8\x5f\xfc\xf8\x61\xb1\xac\x00\xba\x73\xba\x3c\xca\x7f\x8a\xba\x4d\x2c\xaf\x31\xc5\x1d\x32\xd6\x43\xb7\xea\x74\x4c\x82\xbe\x4a\x11\x4d\xe9\x43\x64\x4b\x6c\xe5\xf8\xe8\x74\x4a\xab\x36\xd0\x4d\x07\xa9\x8d\xcb\x49\x90\x6b\xc3\x56\xac\xd1\xee\xa6\x02\xd0\x9b\x8d\x0d\x56\x8e\x5d\x0b\x03\x35\xf8\x30\xb1\x94\x80\xb8\x41\x66\x6c\x7e\xc9\xe5\xbc\x2f\x66\x87\x4d\x76\x36\x6c\x3f\x6d\x03\x9d\xcd\x4f\x07\x34\x59\x4a\x19\xbd\x5b\xdd\x3b\x62\xe9\x69\x6f\x2b\x9f\xd7\x62\x76\x4f\x87\xc8\x98\x4a\xd5\x69\xbc\x57\xc3\x33\x1e\x55\x9b\x45\xcd\xe4\x70\xd6\xa8\xa1\xe8\x91\x0f\x00\x45\x64\x2d\xc4\x0a\x9e\x0e\x36\x49\x3a\x6f\x7e\x43\xbb\xdd\x89\x82\xbb\xe5\xb2\xb5\x45\x6a\x1e\x82\xd8\xff\xaf\x40\x6a\x4e\xb1\xbe\x22\xfb\x71\x25\x2d\x2f\xbf\xa0\x43\x53\x12\x9b\xa4\xdb\x36\xe0\xb7\x11\x6f\x87\xef\x3d\x0c\xbe\xe4\xf5\x5f\x2e\x5f\x42\xbf\xca\xea\x4b\x0e\x17\xf9\xdd\x7d\x67\xd6\xce\xe7\x7b\x89\xc2\x03\x50\x28\x92\xa3\xed\xf1\xd7\x42\x87\xe9\xe1\x3b\x4a\x52\xa2\xbf\x31\x65\xed\x90\xc5\x86\x6d\x2f\x09\x65\xe5\x75\xd0\x5b\xe4\x3e\x93\x1a\xc6\xaa\xb3\xbf\xaf\x86\x84\xd5\x04\x5f\x7b\x6d\x43\x35\x2b\x47\x5d\x53\x41\x24\x16\x05\xdf\x70\x5d\x01\xe0\x5e\xbb\xac\x0b\x6f\x3e\x05\x41\xde\x6b\xa7\xe0\xae\x00\xf1\x20\xc8\x41\xbb\x9e\x09\xf0\xf7\x3f\x15\x80\xf5\x7a\xdb\xcf\xe2\x76\xe8\xab\xda\x0f\xca\x40\x0d\x4e\x69\x35\xed\
x0e\x25\x05\xce\x86\x7c\xa8\x5a\xba\x7f\x1e\x49\xe5\x54\x2c\xdf\x4b\xb6\xeb\x48\x76\x35\xb9\xde\x4d\xaa\x6b\x64\x53\xc8\x15\x25\x38\xa9\x4a\x0d\xb8\xd9\xa0\x11\x05\x2b\xea\x2f\x75\xc7\x9a\x56\x64\x1a\x6c\xac\xd1\x82\x4d\x35\xd3\x90\x97\xac\x5d\x6b\x2b\x93\xc3\xf1\xc3\xd4\xf7\xb4\x5b\xaf\x4e\x94\x38\x8f\xa3\x1b\xe1\x80\x98\x6f\x30\xad\xf1\x4d\xaf\xb2\x39\xb3\xbd\x64\xe4\x5e\xa4\xbc\x3e\x3c\x52\x30\x99\x8b\x9c\x1e\x5b\xae\x97\x82\x19\xa3\xb3\x46\x27\x05\x77\xf0\x03\x08\x35\xd4\x1a\x13\x65\x3e\xdf\x3c\x67\xbd\x95\xf3\xd0\x4d\xcc\x0a\x6e\xee\x6e\xfa\xa5\x47\x4f\x7c\x54\xf0\xe1\xa3\x6d\x2d\x8c\x2f\x19\xd3\x0c\x7e\xbf\x5c\xfa\x39\x7e\xb9\xfc\x5c\x3c\x38\xbb\x37\x4b\x2a\x7b\x53\xaa\xbe\xa2\x7c\x23\x92\x8f\x68\x54\xb4\xbf\xbf\x8c\x75\x09\x53\xec\xc9\xb0\x8e\x38\xbb\x44\x09\x4d\x6e\x1f\x3b\x0a\x82\x07\xe9\xa2\x6e\xd2\x47\xa6\x1c\x15\x74\xaa\xcf\x39\x3c\xa4\x15\x85\x3f\x88\x44\xc1\x46\xbb\x84\x83\xfd\xcf\x84\xdc\x01\x13\xf2\xde\x1a\x7c\x30\x86\x72\x90\xd5\x8c\xb6\x75\x97\x5c\x0f\xfa\x0e\x05\xa6\xa8\x79\x5b\x50\x30\xb4\xef\x08\xfc\x54\xb8\x97\x84\xb8\x5c\xfa\x8e\x72\xe4\xb2\xc7\x47\xa7\xad\xff\x8a\x3e\x3a\x2d\xe7\xb7\xf3\xf4\xbe\xf7\x1d\x9a\x0e\xf9\x64\x9c\x4e\xaf\x77\xec\x0f\x80\xfb\x65\x3b\xe5\xf9\xbd\xf9\x37\x00\x00\xff\xff\xf9\x77\x43\xb8\x85\x09\x00\x00"),
},
"/build/prometheus/prometheus/prometheus-prometheusRule.yaml": &vfsgen۰CompressedFileInfo{
name: "prometheus-prometheusRule.yaml",

View File

@@ -11,5 +11,5 @@ data:
notification:
endpoint: http://notification-manager-svc.kubesphere-monitoring-system.svc:19093
terminal:
image: alpine:3.14
timeout: 600
image: beclab/alpine:3.14
timeout: 7200

View File

@@ -28,6 +28,7 @@ spec:
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
priorityClassName: "system-cluster-critical"
containers:
- command:
- ks-apiserver

View File

@@ -35,6 +35,7 @@ spec:
hostPath:
path: /etc/localtime
type: ""
priorityClassName: "system-cluster-critical"
containers:
- args:
- --host=127.0.0.1

View File

@@ -29,7 +29,7 @@ spec:
insecureSkipVerify: true
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
interval: 1m
interval: 10s
metricRelabelings:
- action: keep
regex: container_cpu_usage_seconds_total|container_memory_usage_bytes|container_memory_cache|container_network_.+_bytes_total|container_memory_working_set_bytes|container_cpu_cfs_.*periods_total|container_processes.*|container_threads.*

View File

@@ -31,6 +31,7 @@ spec:
- matchExpressions:
- key: node-role.kubernetes.io/edge
operator: DoesNotExist
priorityClassName: "system-cluster-critical"
containers:
- args:
- --web.listen-address=127.0.0.1:9100
@@ -42,7 +43,7 @@ spec:
- --collector.netdev.address-info
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
image: beclab/node-exporter:0.0.4
image: beclab/node-exporter:0.0.5
name: node-exporter
securityContext:
privileged: true

View File

@@ -10,6 +10,7 @@ metadata:
name: k8s
namespace: kubesphere-monitoring-system
spec:
priorityClassName: "system-cluster-critical"
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:

View File

@@ -0,0 +1,41 @@
package plugins
import (
"context"
"path"
"github.com/beclab/Olares/cli/pkg/common"
cc "github.com/beclab/Olares/cli/pkg/core/common"
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/utils"
ctrl "sigs.k8s.io/controller-runtime"
)
// ApplyKsConfigManifests installs or upgrades the embedded KubeSphere config
// chart (common.ChartNameKsConfig) in the kubesphere-system namespace via Helm.
type ApplyKsConfigManifests struct {
	common.KubeAction
}

// Execute discovers the cluster REST config, locates the ks-config chart under
// the installer's build cache, and runs a Helm upgrade with empty values.
// Returns any error from config discovery, Helm initialization, or the chart
// upgrade itself.
func (t *ApplyKsConfigManifests) Execute(runtime connector.Runtime) error {
	// Kubeconfig / in-cluster config via controller-runtime's standard discovery.
	config, err := ctrl.GetConfig()
	if err != nil {
		return err
	}
	var appKsConfigName = common.ChartNameKsConfig
	// Chart location: <installer-dir>/<build-cache-dir>/<build-dir>/<chart-name>.
	var appPath = path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, appKsConfigName)
	actionConfig, settings, err := utils.InitConfig(config, common.NamespaceKubesphereSystem)
	if err != nil {
		return err
	}
	// No value overrides: the chart's defaults are applied as-is.
	var values = make(map[string]interface{})
	if err := utils.UpgradeCharts(context.Background(), actionConfig, settings, appKsConfigName,
		appPath, "", common.NamespaceKubesphereSystem, values, false); err != nil {
		logger.Errorf("failed to install %s chart: %v", appKsConfigName, err)
		return err
	}
	return nil
}

View File

@@ -0,0 +1,30 @@
package pipelines
import (
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/core/module"
"github.com/beclab/Olares/cli/pkg/core/pipeline"
"github.com/beclab/Olares/cli/pkg/gpu"
)
// DisableNouveau runs a single-module pipeline that disables the nouveau
// kernel driver, mirroring console output to gpudisable-nouveau.log.
func DisableNouveau() error {
	cliArg := common.NewArgument()
	cliArg.SetConsoleLog("gpudisable-nouveau.log", true)

	rt, err := common.NewKubeRuntime(common.AllInOne, *cliArg)
	if err != nil {
		return err
	}

	disablePipeline := &pipeline.Pipeline{
		Name:    "DisableNouveau",
		Runtime: rt,
		Modules: []module.Module{
			&gpu.DisableNouveauModule{},
		},
	}
	return disablePipeline.Start()
}

View File

@@ -1,103 +0,0 @@
package pipelines
import (
"fmt"
"io"
"os/exec"
"path"
"strings"
"github.com/beclab/Olares/cli/cmd/ctl/options"
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/core/module"
"github.com/beclab/Olares/cli/pkg/core/pipeline"
"github.com/beclab/Olares/cli/pkg/gpu"
"github.com/beclab/Olares/cli/pkg/manifest"
"github.com/beclab/Olares/cli/pkg/utils"
)
// UpgradeGpuDrivers runs the GPU driver upgrade pipeline (uninstall CUDA,
// reinstall the driver and container toolkit, restart containerd, relabel the
// node) and then interactively offers to reboot the machine.
//
// opt selects the target Olares/CUDA versions and the base directory; console
// output is additionally written to gpuupgrade.log.
func UpgradeGpuDrivers(opt *options.InstallGpuOptions) error {
	arg := common.NewArgument()
	arg.SetOlaresVersion(opt.Version)
	arg.SetCudaVersion(opt.Cuda)
	arg.SetBaseDir(opt.BaseDir)
	arg.SetConsoleLog("gpuupgrade.log", true)
	runtime, err := common.NewKubeRuntime(common.AllInOne, *arg)
	if err != nil {
		return err
	}
	// The installation manifest lists the downloadable artifacts (driver
	// runfile, toolkit packages) consumed by the modules below.
	manifestFile := path.Join(runtime.GetInstallerDir(), "installation.manifest")
	runtime.Arg.SetManifest(manifestFile)
	manifestMap, err := manifest.ReadAll(runtime.Arg.Manifest)
	if err != nil {
		logger.Fatal(err)
	}
	p := &pipeline.Pipeline{
		Name: "UpgradeGpuDrivers",
		Modules: []module.Module{
			// Bail out early when no driver upgrade is actually needed.
			&gpu.ExitIfNoDriverUpgradeNeededModule{},
			&gpu.UninstallCudaModule{},
			&gpu.InstallDriversModule{
				ManifestModule: manifest.ManifestModule{
					Manifest: manifestMap,
					BaseDir:  runtime.Arg.BaseDir,
				},
				FailOnNoInstallation:      true,
				SkipNVMLCheckAfterInstall: true,
			},
			&gpu.InstallContainerToolkitModule{
				ManifestModule: manifest.ManifestModule{
					Manifest: manifestMap,
					BaseDir:  runtime.Arg.BaseDir,
				},
				// when nvidia driver is just upgraded
				// nvidia-smi will fail to execute
				SkipCudaCheck: true,
			},
			&gpu.RestartContainerdModule{},
			&gpu.NodeLabelingModule{},
		},
		Runtime: runtime,
	}
	if err := p.Start(); err != nil {
		return err
	}
	fmt.Println()
	fmt.Println("The GPU driver has been upgraded, for it to work properly, the machine needs to be rebooted.")
	// No usable terminal (e.g. piped input): skip the interactive prompt and
	// leave the reboot to the operator — deliberately not an error.
	reader, err := utils.GetBufIOReaderOfTerminalInput()
	if err != nil {
		return nil
	}
	for {
		fmt.Printf("Reboot now? [yes/no]: ")
		input, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return fmt.Errorf("failed to read user input for reboot confirmation: %v", err)
		}
		input = strings.ToLower(strings.TrimSpace(input))
		if input == "" {
			continue
		}
		// Accept any prefix of "yes" or "no" ("y", "ye", "n", ...); any other
		// input re-prompts. Note the argument order: input must be a prefix of
		// the literal, not vice versa.
		if strings.HasPrefix("yes", input) {
			output, err := exec.Command("reboot").CombinedOutput()
			if err != nil {
				return fmt.Errorf("failed to reboot: %v", err)
			}
			fmt.Println(string(output))
			return nil
		} else if strings.HasPrefix("no", input) {
			return nil
		}
	}
}

View File

@@ -675,7 +675,7 @@ func (m *ChangeIPModule) addKubernetesTasks() {
},
&task.LocalTask{
Name: "RegenerateK8sFilesWithKubeadm",
Action: new(RegenerateFilesForK8sIPChange),
Action: new(RegenerateFilesForK8s),
},
&task.LocalTask{
Name: "CopyNewKubeConfig",

View File

@@ -366,11 +366,11 @@ func (a *ApplySystemEnv) Execute(runtime connector.Runtime) error {
envItem.Default = procVal
}
err = apputils.CheckEnvValueByType(envItem.Value, envItem.Type)
err = envItem.ValidateValue(envItem.Value)
if err != nil {
return fmt.Errorf("invalid system env value: %s", envItem.Value)
}
err = apputils.CheckEnvValueByType(envItem.Default, envItem.Type)
err = envItem.ValidateValue(envItem.Default)
if err != nil {
return fmt.Errorf("invalid system env default value: %s", envItem.Value)
}

View File

@@ -447,11 +447,11 @@ func (a *PrepareFilesForK8sIPChange) Execute(runtime connector.Runtime) error {
})
}
type RegenerateFilesForK8sIPChange struct {
type RegenerateFilesForK8s struct {
common.KubeAction
}
func (a *RegenerateFilesForK8sIPChange) Execute(runtime connector.Runtime) error {
func (a *RegenerateFilesForK8s) Execute(runtime connector.Runtime) error {
initCmd := "/usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --skip-phases=preflight,mark-control-plane,bootstrap-token,addon,show-join-command"
if _, err := runtime.GetRunner().SudoCmd(initCmd, false, false); err != nil {

110
cli/pkg/upgrade/1_12_2.go Normal file
View File

@@ -0,0 +1,110 @@
package upgrade
import (
"fmt"
"os/exec"
"strings"
"time"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
"github.com/beclab/Olares/cli/pkg/gpu"
"github.com/beclab/Olares/cli/version"
)
// version_1_12_2 is the baseline release version this upgrader targets.
var version_1_12_2 = semver.MustParse("1.12.2")

// upgrader_1_12_2 handles the upgrade to the 1.12.2 release line, which
// introduced a breaking change.
type upgrader_1_12_2 struct {
	breakingUpgraderBase
}

// Version returns the version this upgrader advertises: the CLI's own version
// when the CLI is itself a 1.12.2 patch-level main-line build, otherwise the
// plain 1.12.2 baseline.
func (u upgrader_1_12_2) Version() *semver.Version {
	cliVersion, err := semver.NewVersion(version.VERSION)
	// tolerate local dev version
	if err != nil {
		return version_1_12_2
	}
	if samePatchLevelVersion(version_1_12_2, cliVersion) && getReleaseLineOfVersion(cliVersion) == mainLine {
		return cliVersion
	}
	return version_1_12_2
}

// AddedBreakingChange reports whether the advertised version is exactly the
// 1.12.2 baseline (the build that introduced the breaking change).
func (u upgrader_1_12_2) AddedBreakingChange() bool {
	if u.Version().Equal(version_1_12_2) {
		// if this version introduced breaking change
		return true
	}
	return false
}
// nvidiactkNeedsMigration reports whether an installed nvidia-ctk binary is at
// version 1.18.0 or newer. A missing binary is not an error: it simply means
// no migration is needed.
func nvidiactkNeedsMigration() (bool, error) {
	if _, err := exec.LookPath("nvidia-ctk"); err != nil {
		// Toolkit CLI not installed: nothing to migrate.
		return false, nil
	}
	out, err := exec.Command("nvidia-ctk", "-v").Output()
	if err != nil {
		return false, err
	}
	// Scan the output for the CLI version line. The local is named ctkVersion
	// (rather than "version") to avoid shadowing the imported version package.
	var ctkVersion *semver.Version
	for _, line := range strings.Split(string(out), "\n") {
		var raw string
		n, scanErr := fmt.Sscanf(line, "NVIDIA Container Toolkit CLI version %s", &raw)
		if n != 1 || scanErr != nil {
			continue
		}
		parsed, parseErr := semver.NewVersion(strings.TrimSpace(raw))
		if parseErr != nil {
			continue
		}
		ctkVersion = parsed
		break
	}
	if ctkVersion == nil {
		return false, fmt.Errorf("failed to parse nvidia-ctk version")
	}
	return ctkVersion.GreaterThanEqual(semver.MustParse("1.18.0")), nil
}
// PrepareForUpgrade conditionally prepends NVIDIA container toolkit migration
// tasks (reinstall the toolkit, then reconfigure the containerd runtime) ahead
// of the base preparation steps.
func (u upgrader_1_12_2) PrepareForUpgrade() []task.Interface {
	var preTasks []task.Interface
	needsMigration, err := nvidiactkNeedsMigration()
	// On a detection error we conservatively run the migration tasks anyway.
	if err != nil || needsMigration {
		preTasks = append(preTasks,
			&task.LocalTask{
				Name:   "InstallNvidiaContainerToolkit",
				Action: new(gpu.InstallNvidiaContainerToolkit),
				Retry:  5,
				Delay:  10 * time.Second,
			},
			&task.LocalTask{
				Name:   "ConfigureContainerdRuntime",
				Action: new(gpu.ConfigureContainerdRuntime),
				Retry:  5,
				Delay:  10 * time.Second,
			},
		)
	}
	preTasks = append(preTasks, u.upgraderBase.PrepareForUpgrade()...)
	return preTasks
}
// UpgradeSystemComponents prepends an L4 BFL proxy upgrade task to the base
// system-component upgrade sequence.
//
// NOTE(review): new(upgradeL4BFLProxy) leaves Tag empty, so Execute will
// render the image reference "beclab/l4-bfl-proxy:" — confirm an explicit tag
// isn't required here.
func (u upgrader_1_12_2) UpgradeSystemComponents() []task.Interface {
	l4Task := &task.LocalTask{
		Name:   "UpgradeL4",
		Action: new(upgradeL4BFLProxy),
		Retry:  5,
		Delay:  10 * time.Second,
	}
	tasks := []task.Interface{l4Task}
	return append(tasks, u.upgraderBase.UpgradeSystemComponents()...)
}

// Register this upgrader on the main release line.
func init() {
	registerMainUpgrader(upgrader_1_12_2{})
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/core/task"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apixclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -46,7 +45,7 @@ func (u upgrader_1_12_2_20251020) UpgradeSystemComponents() []task.Interface {
},
&task.LocalTask{
Name: "UpgradeL4BflProxy",
Action: new(upgradeL4),
Action: &upgradeL4BFLProxy{Tag: "v0.3.6"},
Retry: 3,
Delay: 5 * time.Second,
},
@@ -147,20 +146,6 @@ func (d *deleteUserEnvsIfExists) Execute(runtime connector.Runtime) error {
return nil
}
type upgradeL4 struct {
common.KubeAction
}
func (u *upgradeL4) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd(
"/usr/local/bin/kubectl set image deployment/l4-bfl-proxy proxy=beclab/l4-bfl-proxy:v0.3.6 -n os-network", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to upgrade L4 network proxy")
}
logger.Infof("L4 upgrade to version v0.3.5 completed successfully")
return nil
}
func init() {
registerDailyUpgrader(upgrader_1_12_2_20251020{})
}

View File

@@ -0,0 +1,22 @@
package upgrade
import (
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader_1_12_3_20251112 is the daily upgrader for build 1.12.3-20251112.
type upgrader_1_12_3_20251112 struct {
	breakingUpgraderBase
}

// Version returns the fixed daily build version this upgrader targets.
func (u upgrader_1_12_3_20251112) Version() *semver.Version {
	return semver.MustParse("1.12.3-20251112")
}

// PrepareForUpgrade regenerates the kube config files (k3s service or kubeadm
// files) before running the base preparation steps.
func (u upgrader_1_12_3_20251112) PrepareForUpgrade() []task.Interface {
	return append(regenerateKubeFiles(), u.upgraderBase.PrepareForUpgrade()...)
}

func init() {
	registerDailyUpgrader(upgrader_1_12_3_20251112{})
}

View File

@@ -0,0 +1,32 @@
package upgrade
import (
"time"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader_1_12_3_20251114 is the daily upgrader for build 1.12.3-20251114.
type upgrader_1_12_3_20251114 struct {
	breakingUpgraderBase
}

// Version returns the fixed daily build version this upgrader targets.
func (u upgrader_1_12_3_20251114) Version() *semver.Version {
	return semver.MustParse("1.12.3-20251114")
}

// UpgradeSystemComponents prepends an L4 BFL proxy image bump to v0.3.8 ahead
// of the base system-component upgrade sequence.
func (u upgrader_1_12_3_20251114) UpgradeSystemComponents() []task.Interface {
	bumpProxy := &task.LocalTask{
		Name:   "UpgradeL4BFLProxy",
		Action: &upgradeL4BFLProxy{Tag: "v0.3.8"},
		Retry:  3,
		Delay:  5 * time.Second,
	}
	return append([]task.Interface{bumpProxy}, u.upgraderBase.UpgradeSystemComponents()...)
}

func init() {
	registerDailyUpgrader(upgrader_1_12_3_20251114{})
}

View File

@@ -0,0 +1,32 @@
package upgrade
import (
"time"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader_1_12_3_20251118 is the daily upgrader for build 1.12.3-20251118.
type upgrader_1_12_3_20251118 struct {
	breakingUpgraderBase
}

// Version returns the fixed daily build version this upgrader targets.
func (u upgrader_1_12_3_20251118) Version() *semver.Version {
	return semver.MustParse("1.12.3-20251118")
}

// UpgradeSystemComponents prepends an L4 BFL proxy image bump to v0.3.9 ahead
// of the base system-component upgrade sequence.
func (u upgrader_1_12_3_20251118) UpgradeSystemComponents() []task.Interface {
	proxyUpgrade := &task.LocalTask{
		Name:   "UpgradeL4BFLProxy",
		Action: &upgradeL4BFLProxy{Tag: "v0.3.9"},
		Retry:  3,
		Delay:  5 * time.Second,
	}
	return append([]task.Interface{proxyUpgrade}, u.upgraderBase.UpgradeSystemComponents()...)
}

func init() {
	registerDailyUpgrader(upgrader_1_12_3_20251118{})
}

View File

@@ -0,0 +1,29 @@
package upgrade
import (
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader_1_12_3_20251126 is the daily upgrader for build 1.12.3-20251126.
type upgrader_1_12_3_20251126 struct {
	breakingUpgraderBase
}

// Version returns the fixed daily build version this upgrader targets.
func (u upgrader_1_12_3_20251126) Version() *semver.Version {
	return semver.MustParse("1.12.3-20251126")
}

// PrepareForUpgrade runs, in order: the ks-config chart upgrade, the kubelet
// ServiceMonitor re-apply, the KS core upgrade, kube config file regeneration,
// and finally the base preparation steps.
func (u upgrader_1_12_3_20251126) PrepareForUpgrade() []task.Interface {
	tasks := make([]task.Interface, 0)
	tasks = append(tasks, upgradeKsConfig()...)
	tasks = append(tasks, upgradePrometheusServiceMonitorKubelet()...)
	tasks = append(tasks, upgradeKSCore()...)
	tasks = append(tasks, regenerateKubeFiles()...)
	tasks = append(tasks, u.upgraderBase.PrepareForUpgrade()...)
	return tasks
}

func init() {
	registerDailyUpgrader(upgrader_1_12_3_20251126{})
}

View File

@@ -0,0 +1,41 @@
package upgrade
import (
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader_1_12_3_20251127 is the daily upgrader for build 1.12.3-20251127.
type upgrader_1_12_3_20251127 struct {
	breakingUpgraderBase
}

// Version returns the fixed daily build version this upgrader targets.
func (u upgrader_1_12_3_20251127) Version() *semver.Version {
	return semver.MustParse("1.12.3-20251127")
}

// NeedRestart signals that this build may require a machine reboot (the GPU
// driver upgrade task below can set the reboot flag in the pipeline cache).
func (u upgrader_1_12_3_20251127) NeedRestart() bool {
	return true
}

// put GPU driver upgrade step at the very end right before updating the version
func (u upgrader_1_12_3_20251127) UpdateOlaresVersion() []task.Interface {
	var tasks []task.Interface
	tasks = append(tasks,
		&task.LocalTask{
			Name:   "UpgradeGPUDriver",
			Action: new(upgradeGPUDriverIfNeeded),
		},
	)
	tasks = append(tasks, u.upgraderBase.UpdateOlaresVersion()...)
	// Reboot (if the driver upgrade flagged it) only after the version update
	// has been recorded.
	tasks = append(tasks,
		&task.LocalTask{
			Name:   "RebootIfNeeded",
			Action: new(rebootIfNeeded),
		},
	)
	return tasks
}

func init() {
	registerDailyUpgrader(upgrader_1_12_3_20251127{})
}

View File

@@ -0,0 +1,25 @@
package upgrade
import (
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader_1_12_3_20251203 is the daily upgrader for build 1.12.3-20251203.
type upgrader_1_12_3_20251203 struct {
	breakingUpgraderBase
}

// Version returns the fixed daily build version this upgrader targets.
func (u upgrader_1_12_3_20251203) Version() *semver.Version {
	return semver.MustParse("1.12.3-20251203")
}

// PrepareForUpgrade re-applies the node-exporter manifests before running the
// base preparation steps.
func (u upgrader_1_12_3_20251203) PrepareForUpgrade() []task.Interface {
	tasks := make([]task.Interface, 0)
	tasks = append(tasks, upgradeNodeExporter()...)
	tasks = append(tasks, u.upgraderBase.PrepareForUpgrade()...)
	return tasks
}

func init() {
	registerDailyUpgrader(upgrader_1_12_3_20251203{})
}

View File

@@ -42,6 +42,10 @@ func (u upgraderBase) AddedBreakingChange() bool {
return false
}
func (u upgraderBase) NeedRestart() bool {
return false
}
func (u upgraderBase) PrepareForUpgrade() []task.Interface {
var tasks []task.Interface
tasks = append(tasks, upgradeKSCore()...)

View File

@@ -16,6 +16,7 @@ type upgrader interface {
UpdateOlaresVersion() []task.Interface
PostUpgrade() []task.Interface
AddedBreakingChange() bool
NeedRestart() bool
}
type breakingUpgrader interface {

View File

@@ -1,16 +1,38 @@
package upgrade
import (
"context"
"fmt"
"path"
"strings"
"time"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
"github.com/beclab/Olares/cli/pkg/clientset"
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/container"
cc "github.com/beclab/Olares/cli/pkg/core/common"
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/core/task"
"github.com/beclab/Olares/cli/pkg/core/util"
"github.com/beclab/Olares/cli/pkg/gpu"
"github.com/beclab/Olares/cli/pkg/k3s"
k3stemplates "github.com/beclab/Olares/cli/pkg/k3s/templates"
"github.com/beclab/Olares/cli/pkg/kubernetes"
"github.com/beclab/Olares/cli/pkg/kubesphere"
"github.com/beclab/Olares/cli/pkg/kubesphere/plugins"
"github.com/beclab/Olares/cli/pkg/manifest"
"time"
"github.com/beclab/Olares/cli/pkg/phase"
"github.com/beclab/Olares/cli/pkg/terminus"
"github.com/beclab/Olares/cli/pkg/utils"
"github.com/pkg/errors"
"k8s.io/utils/ptr"
)
const cacheRebootNeeded = "reboot.needed"
type upgradeContainerdAction struct {
common.KubeAction
}
@@ -63,3 +85,251 @@ func upgradeKSCore() []task.Interface {
},
}
}
// upgradePrometheusServiceMonitorKubelet returns the single task that
// re-applies the prometheus kubelet ServiceMonitor manifest.
func upgradePrometheusServiceMonitorKubelet() []task.Interface {
	applyTask := &task.LocalTask{
		Name:   "ApplyKubeletServiceMonitor",
		Action: new(applyKubeletServiceMonitorAction),
		Retry:  5,
		Delay:  5 * time.Second,
	}
	return []task.Interface{applyTask}
}
// upgradeKsConfig returns tasks that copy the embedded KubeSphere manifests to
// the build cache and then helm-upgrade the ks-config chart.
func upgradeKsConfig() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "CopyEmbeddedKSManifests",
			Action: new(plugins.CopyEmbedFiles),
		},
		&task.LocalTask{
			Name:   "ApplyKsConfigManifests",
			Action: new(plugins.ApplyKsConfigManifests),
			Retry:  5,
			Delay:  5 * time.Second,
		},
	}
}
// applyKubeletServiceMonitorAction applies embedded prometheus kubelet ServiceMonitor
type applyKubeletServiceMonitorAction struct {
	common.KubeAction
}

// Execute locates kubectl and applies the kubelet ServiceMonitor manifest from
// the installer build cache via `kubectl apply -f`.
func (a *applyKubeletServiceMonitorAction) Execute(runtime connector.Runtime) error {
	kubectlpath, err := util.GetCommand(common.CommandKubectl)
	if err != nil {
		return errors.Wrap(errors.WithStack(err), "kubectl not found")
	}
	// Manifest path: <installer-dir>/<build-cache>/<build>/prometheus/kubernetes/...
	manifest := path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, "prometheus", "kubernetes", "kubernetes-serviceMonitorKubelet.yaml")
	if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("%s apply -f %s", kubectlpath, manifest), false, true); err != nil {
		return errors.Wrap(errors.WithStack(err), "apply kubelet ServiceMonitor failed")
	}
	return nil
}
// applyNodeExporterAction applies embedded node-exporter
type applyNodeExporterAction struct {
	common.KubeAction
}

// Execute locates kubectl and applies the node-exporter DaemonSet manifest
// from the installer build cache via `kubectl apply -f`.
func (a *applyNodeExporterAction) Execute(runtime connector.Runtime) error {
	kubectlpath, err := util.GetCommand(common.CommandKubectl)
	if err != nil {
		return errors.Wrap(errors.WithStack(err), "kubectl not found")
	}
	// Manifest path: <installer-dir>/<build-cache>/<build>/prometheus/node-exporter/...
	manifest := path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, "prometheus", "node-exporter", "node-exporter-daemonset.yaml")
	if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("%s apply -f %s", kubectlpath, manifest), false, true); err != nil {
		return errors.Wrap(errors.WithStack(err), "apply node-exporter failed")
	}
	return nil
}
// upgradeNodeExporter returns tasks that refresh the embedded KS manifests in
// the build cache and then re-apply the node-exporter DaemonSet.
func upgradeNodeExporter() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "CopyEmbeddedKSManifests",
			Action: new(plugins.CopyEmbedFiles),
		},
		&task.LocalTask{
			// Name casing and retry policy aligned with the sibling apply
			// tasks (e.g. ApplyKubeletServiceMonitor above).
			Name:   "ApplyNodeExporterManifests",
			Action: new(applyNodeExporterAction),
			Retry:  5,
			Delay:  5 * time.Second,
		},
	}
}
// regenerateKubeFiles returns tasks that regenerate the cluster's config
// files for whichever distribution is in use (k3s service file + restart, or
// kubeadm config + a targeted `kubeadm init` re-run), then wait for the kube
// API server to come back up.
func regenerateKubeFiles() []task.Interface {
	var tasks []task.Interface
	kubeType := phase.GetKubeType()
	if kubeType == common.K3s {
		tasks = append(tasks,
			&task.LocalTask{
				Name:   "RegenerateK3sService",
				Action: new(k3s.GenerateK3sService),
			},
			&task.LocalTask{
				Name: "RestartK3sService",
				Action: &terminus.SystemctlCommand{
					Command:             "restart",
					UnitNames:           []string{k3stemplates.K3sService.Name()},
					DaemonReloadPreExec: true,
				},
			},
		)
	} else {
		tasks = append(tasks,
			&task.LocalTask{
				Name: "RegenerateKubeadmConfig",
				Action: &kubernetes.GenerateKubeadmConfig{
					IsInitConfiguration: true,
				},
			},
			&task.LocalTask{
				Name:   "RegenerateK8sFilesWithKubeadm",
				Action: new(terminus.RegenerateFilesForK8s),
			},
		)
	}
	tasks = append(tasks,
		&task.LocalTask{
			Name:   "WaitForKubeAPIServerUp",
			Action: new(precheck.GetKubernetesNodesStatus),
			Retry:  10,
			// BUGFIX: Delay is a time.Duration, so the previous bare literal
			// 10 meant 10 nanoseconds — an effectively delay-free retry loop.
			// Every sibling task spells this as N * time.Second.
			Delay: 10 * time.Second,
		},
	)
	return tasks
}
// upgradeL4BFLProxy sets the l4-bfl-proxy deployment's image to the given Tag.
type upgradeL4BFLProxy struct {
	common.KubeAction
	// Tag is the image tag to roll the deployment to, e.g. "v0.3.8".
	Tag string
}

// Execute updates deployment/l4-bfl-proxy in the os-network namespace to image
// beclab/l4-bfl-proxy:<Tag> via `kubectl set image`.
func (u *upgradeL4BFLProxy) Execute(runtime connector.Runtime) error {
	if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(
		"/usr/local/bin/kubectl set image deployment/l4-bfl-proxy proxy=beclab/l4-bfl-proxy:%s -n os-network", u.Tag), false, true); err != nil {
		return errors.Wrapf(errors.WithStack(err), "failed to upgrade L4 network proxy to version %s", u.Tag)
	}
	logger.Infof("L4 upgrade to version %s completed successfully", u.Tag)
	return nil
}
// upgradeGPUDriverIfNeeded upgrades the NVIDIA driver to the version shipped
// in the installation manifest, when the host has an NVIDIA GPU and the
// installed driver is older (or unreadable). Records whether a reboot is
// needed in the pipeline cache under cacheRebootNeeded.
type upgradeGPUDriverIfNeeded struct {
	common.KubeAction
}

// Execute performs the conditional driver upgrade. It is a no-op on WSL, on
// non-Ubuntu/Debian systems, and on machines without a detected NVIDIA GPU.
func (a *upgradeGPUDriverIfNeeded) Execute(runtime connector.Runtime) error {
	sys := runtime.GetSystemInfo()
	if sys.IsWsl() {
		return nil
	}
	if !(sys.IsUbuntu() || sys.IsDebian()) {
		return nil
	}
	// No NVIDIA GPU model detected → nothing to upgrade.
	model, _, err := utils.DetectNvidiaModelAndArch(runtime)
	if err != nil {
		return err
	}
	if strings.TrimSpace(model) == "" {
		return nil
	}
	m, err := manifest.ReadAll(a.KubeConf.Arg.Manifest)
	if err != nil {
		return err
	}
	item, err := m.Get("cuda-driver")
	if err != nil {
		return err
	}
	// The target driver version is encoded as the last dash-separated segment
	// of the runfile name, e.g. "NVIDIA-Linux-x86_64-550.90.07.run".
	var targetDriverVersionStr string
	if parts := strings.Split(item.Filename, "-"); len(parts) >= 3 {
		targetDriverVersionStr = strings.TrimSuffix(parts[len(parts)-1], ".run")
	}
	if targetDriverVersionStr == "" {
		return fmt.Errorf("failed to parse target CUDA driver version from %s", item.Filename)
	}
	targetVersion, err := semver.NewVersion(targetDriverVersionStr)
	if err != nil {
		return fmt.Errorf("invalid target driver version '%s': %v", targetDriverVersionStr, err)
	}
	var needUpgrade bool
	status, derr := utils.GetNvidiaStatus(runtime)
	// for now, consider it as not installed if error occurs
	// and continue to upgrade
	if derr != nil {
		logger.Warnf("failed to detect NVIDIA driver status, assuming upgrade is needed: %v", derr)
		needUpgrade = true
	}
	if status != nil && status.Installed {
		// On a driver/library mismatch the library version is the more
		// meaningful "current" version to compare against.
		currentStr := status.DriverVersion
		if status.Mismatch && status.LibraryVersion != "" {
			currentStr = status.LibraryVersion
		}
		if v, perr := semver.NewVersion(currentStr); perr == nil {
			needUpgrade = targetVersion.GreaterThan(v)
		} else {
			// cannot parse current version, assume upgrade needed
			needUpgrade = true
		}
	} else {
		needUpgrade = true
	}
	changed := false
	if needUpgrade {
		// if apt-installed, uninstall apt nvidia packages but keep toolkit
		if status != nil && status.InstallMethod != utils.GPUDriverInstallMethodRunfile {
			if err := new(gpu.UninstallNvidiaDrivers).Execute(runtime); err != nil {
				return err
			}
		}
		// Best-effort apt metadata refresh; failure here is non-fatal.
		_, _ = runtime.GetRunner().SudoCmd("apt-get update", false, true)
		if _, err := runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends dkms build-essential linux-headers-$(uname -r)", false, true); err != nil {
			return errors.Wrap(errors.WithStack(err), "failed to install kernel build dependencies for NVIDIA runfile")
		}
		// install runfile
		runfile := item.FilePath(runtime.GetBaseDir())
		if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("chmod +x %s", runfile), false, true); err != nil {
			return errors.Wrap(errors.WithStack(err), "failed to chmod +x runfile")
		}
		cmd := fmt.Sprintf("sh %s -z --no-x-check --allow-installation-with-running-driver --no-check-for-alternate-installs --dkms --rebuild-initramfs -s", runfile)
		if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
			return errors.Wrap(errors.WithStack(err), "failed to install NVIDIA driver via runfile")
		}
		// Advertise the new driver/CUDA versions on the node's labels.
		client, err := clientset.NewKubeClient()
		if err != nil {
			return errors.Wrap(errors.WithStack(err), "kubeclient create error")
		}
		err = gpu.UpdateNodeGpuLabel(context.Background(), client.Kubernetes(), &targetDriverVersionStr, ptr.To(common.CurrentVerifiedCudaVersion), ptr.To("true"))
		if err != nil {
			return err
		}
		changed = true
	}
	// A reboot is needed when we installed a new driver, or when a
	// driver/library mismatch was detected (even without an install).
	needReboot := changed || (status != nil && status.Mismatch)
	a.PipelineCache.Set(cacheRebootNeeded, needReboot)
	return nil
}
// rebootIfNeeded reboots the machine when a preceding task (the GPU driver
// upgrade) recorded the reboot flag in the pipeline cache.
type rebootIfNeeded struct {
	common.KubeAction
}

// Execute reads the cached reboot flag and, when set, issues a reboot. The
// command's result is deliberately ignored: on success this process dies with
// the machine anyway.
func (r *rebootIfNeeded) Execute(runtime connector.Runtime) error {
	val, ok := r.PipelineCache.GetMustBool(cacheRebootNeeded)
	if ok && val {
		// NOTE(review): "reboot now" passes an argument that many reboot
		// implementations reject ("now" belongs to shutdown) — confirm the
		// intended command; the error is swallowed either way.
		_, _ = runtime.GetRunner().SudoCmd("reboot now", false, false)
	}
	return nil
}

View File

@@ -20,6 +20,7 @@ type VersionSpec struct {
ReleaseNum int `json:"releaseNum"`
PreRelease bool `json:"prerelease"`
AddedBreakingChange bool `json:"addedBreakingChange"`
NeedRestart bool `json:"needRestart"`
MinimumUpgradableVersions MinimumVersionConstraints `json:"minimumUpgradableVersions"`
}
@@ -178,6 +179,7 @@ func CurrentVersionSpec() (spec *VersionSpec, err error) {
}
u := getUpgraderByVersion(v)
spec.AddedBreakingChange = u.AddedBreakingChange()
spec.NeedRestart = u.NeedRestart()
if spec.ReleaseType == releaseTypeDaily {
lastBreakingVersion := getLastBreakingVersion(dailyUpgraders, v)
if lastBreakingVersion == nil {

View File

@@ -3,15 +3,29 @@ package utils
import (
"encoding/xml"
"errors"
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/util"
"k8s.io/klog/v2"
)
const (
	// NVIDIA driver install method constants: how the driver currently on
	// the host was installed, as detected by GetNvidiaStatus.
	GPUDriverInstallMethodUnknown = "unknown" // no recognizable apt package or runfile installer found
	GPUDriverInstallMethodApt     = "apt"     // a nvidia-driver package is present in dpkg's database
	GPUDriverInstallMethodRunfile = "runfile" // NVIDIA's .run installer/uninstaller binaries are present
	// GPU status/message constants parsed from nvidia-smi outputs
	GPUStatusDriverLibraryMismatch     = "Driver/library version mismatch"             // userland and kernel driver versions differ (typically after an upgrade without reboot)
	GPUStatusCouldntCommunicateWithDrv = "couldn't communicate with the NVIDIA driver" // nvidia-smi exists but the kernel driver is not reachable
	GPUStatusNvmlLibraryVersionPrefix  = "NVML library version:"                       // prefix of the line carrying the NVML library version in mismatch output
)
type GPU struct {
ID string `xml:"id,attr" json:"id"`
ProductName string `xml:"product_name" json:"product_name"`
@@ -165,7 +179,19 @@ type NvidiaGpuInfo struct {
GPUS []GPU `xml:"gpu" json:"gpus"`
}
func ExecNvidiaSmi(execRuntime connector.Runtime) (gpuInfo *NvidiaGpuInfo, installed bool, err error) {
// NvidiaStatus is the unified GPU/driver status, combining nvidia-smi XML info and driver health.
type NvidiaStatus struct {
	Installed      bool           // a working (or at least present) userland driver installation was detected
	Running        bool           // whether kernel driver module is loaded
	Info           *NvidiaGpuInfo // parsed `nvidia-smi -q -x` output; nil when nvidia-smi did not run cleanly
	DriverVersion  string         // driver version from nvidia-smi, or /sys/module/nvidia/version as fallback
	CudaVersion    string         // CUDA version reported by nvidia-smi
	LibraryVersion string         // NVML library version when mismatch occurs
	Mismatch       bool           // whether nvidia-smi reports Driver/library version mismatch
	InstallMethod  string         // apt | runfile | unknown
}
func findNvidiaSmiPath() (string, error) {
cmd := "nvidia-smi"
if runtime.GOOS == "windows" {
cmd += ".exe"
@@ -179,32 +205,149 @@ func ExecNvidiaSmi(execRuntime connector.Runtime) (gpuInfo *NvidiaGpuInfo, insta
_, e := os.Stat(nvidiaSmiFile)
if e != nil {
if os.IsNotExist(e) {
return nil, false, nil
return "", exec.ErrNotFound
}
return nil, false, err
return "", e
}
cmdPath = nvidiaSmiFile
} else {
return nil, false, err
return "", err
}
}
out, err := execRuntime.GetRunner().SudoCmd(cmdPath+" -q -x", false, false)
if err != nil {
// when nvidia-smi command is installed but cuda is not installed
if strings.Contains(out, "couldn't communicate with the NVIDIA driver") {
return nil, false, nil
}
klog.Error("Error running nvidia-smi:", err)
return nil, false, err
}
var data NvidiaGpuInfo
if err := xml.Unmarshal([]byte(out), &data); nil != err {
klog.Error("Error unmarshalling from XML:", err)
return nil, false, err
}
return &data, true, nil
return cmdPath, nil
}
// GetNvidiaStatus probes the host and returns a consolidated NVIDIA
// driver/GPU status. Partial states (version mismatch, kernel-only after an
// uninstall, userland not communicating) are reported in the status rather
// than returned as hard errors where possible.
func GetNvidiaStatus(execRuntime connector.Runtime) (*NvidiaStatus, error) {
	status := &NvidiaStatus{InstallMethod: GPUDriverInstallMethodUnknown}
	// Install method: any nvidia-driver package in dpkg's database (installed,
	// half-installed, or removed-but-configured states) counts as apt;
	// otherwise the presence of NVIDIA's runfile installer/uninstaller
	// binaries indicates a .run install.
	if out, _ := execRuntime.GetRunner().SudoCmd("dpkg -l | awk '/^(ii|i[UuFHWt]|rc|..R)/ {print $2}' | grep -i nvidia-driver", false, false); strings.TrimSpace(out) != "" {
		status.InstallMethod = GPUDriverInstallMethodApt
	} else {
		if util.IsExist("/usr/bin/nvidia-uninstall") || util.IsExist("/usr/bin/nvidia-installer") {
			status.InstallMethod = GPUDriverInstallMethodRunfile
		}
	}
	// detect whether any NVIDIA kernel module is loaded (driver running)
	// this is a separate status besides the installed status
	if out, _ := execRuntime.GetRunner().SudoCmd("lsmod | grep -i nvidia 2>/dev/null", false, false); strings.TrimSpace(out) != "" {
		status.Running = true
	}
	// read running kernel driver version from sysfs if available
	var kernelDriverVersion string
	if status.Running {
		if v, _ := execRuntime.GetRunner().SudoCmd("cat /sys/module/nvidia/version 2>/dev/null", false, false); strings.TrimSpace(v) != "" {
			kernelDriverVersion = strings.TrimSpace(v)
		}
	}
	cmdPath, pathErr := findNvidiaSmiPath()
	if pathErr == nil {
		out, err := execRuntime.GetRunner().SudoCmd(cmdPath+" -q -x", false, false)
		if err == nil {
			var data NvidiaGpuInfo
			uerr := xml.Unmarshal([]byte(out), &data)
			if uerr == nil {
				// healthy path: nvidia-smi produced parseable XML
				status.Installed = true
				// nvidia-smi works => kernel driver is active
				status.Running = true
				status.Info = &data
				status.DriverVersion = data.DriverVersion
				status.CudaVersion = data.CudaVersion
				return status, nil
			}
			return status, fmt.Errorf("failed to unmarshal nvidia-smi XML: %v", uerr)
		}
		// nvidia-smi failed; classify by well-known messages in its output
		if strings.Contains(out, GPUStatusDriverLibraryMismatch) {
			status.Installed = true
			status.Mismatch = true
			status.LibraryVersion = parseNvmlLibraryVersion(out)
			// kernel may still be running; prefer kernel driver version if available
			if kernelDriverVersion != "" {
				status.DriverVersion = kernelDriverVersion
			}
			return status, nil
		}
		// for now, consider as not installed
		if strings.Contains(out, GPUStatusCouldntCommunicateWithDrv) {
			// even if userland not communicating, kernel may be running
			if kernelDriverVersion != "" {
				status.DriverVersion = kernelDriverVersion
			}
			return status, nil
		}
		return status, fmt.Errorf("failed to get NVIDIA driver status: %v", out)
	}
	// nvidia-smi binary not found: consider as not installed
	// if kernel is running after uninstall (without reboot), reflect the running version
	if kernelDriverVersion != "" {
		status.DriverVersion = kernelDriverVersion
	}
	return status, nil
}
// parseNvmlLibraryVersion extracts the NVML library version from nvidia-smi
// mismatch output, e.g. from a line containing "NVML library version: 575.57".
// It returns the bare version string, or "" when no version token is found.
func parseNvmlLibraryVersion(out string) string {
	for _, line := range strings.Split(out, "\n") {
		line = strings.TrimSpace(line)
		idx := strings.Index(line, GPUStatusNvmlLibraryVersionPrefix)
		if idx < 0 {
			continue
		}
		// Slice past the prefix wherever it occurs in the line.
		// (strings.TrimPrefix would silently do nothing when the prefix is
		// not at the very start, leaving the prefix text in the result.)
		v := strings.TrimSpace(line[idx+len(GPUStatusNvmlLibraryVersionPrefix):])
		// strip trailing decorations, e.g. a closing parenthesis: "575.57)"
		fields := strings.FieldsFunc(v, func(r rune) bool {
			return r == ' ' || r == '\t' || r == '\r' || r == ')' || r == '('
		})
		if len(fields) == 0 {
			// prefix present but nothing followed it; keep scanning instead
			// of panicking on fields[0]
			continue
		}
		return fields[0]
	}
	return ""
}
// DetectNvidiaModelAndArch inspects lspci output for an NVIDIA VGA/3D
// controller and returns the GPU model (the bracketed marketing name when
// present, otherwise the whole lspci line) and its architecture name
// ("Unknown" when the codename cannot be classified). Both are empty on
// Darwin or when no NVIDIA device is listed.
func DetectNvidiaModelAndArch(execRuntime connector.Runtime) (model string, architecture string, err error) {
	if execRuntime.GetSystemInfo().IsDarwin() {
		return "", "", nil
	}
	out, e := execRuntime.GetRunner().SudoCmd("lspci | grep -i -e vga -e 3d | grep -i nvidia || true", false, false)
	if e != nil {
		klog.Error("Error running lspci:", e)
		return "", "", e
	}
	out = strings.TrimSpace(out)
	if out == "" {
		return "", "", nil
	}
	model = out
	architecture = detectNvidiaArch(out)
	// prefer the bracketed marketing name as the model when present,
	// e.g. "NVIDIA Corporation AD106 [GeForce RTX 4060 Ti]"
	if i := strings.Index(out, "["); i >= 0 {
		if j := strings.Index(out[i:], "]"); j > 0 {
			model = strings.TrimSpace(out[i+1 : i+j])
		}
	}
	return model, architecture, nil
}

// detectNvidiaArch maps a GPU codename (e.g. "AD106", "TU104") found in an
// lspci line to its architecture name. A two-letter prefix only counts when
// it is immediately followed by a digit, so incidental substrings — most
// notably the "GA" inside "VGA compatible controller" — are not
// misclassified (the old plain Contains check reported almost everything as
// Ampere). Returns "Unknown" when no codename matches.
func detectNvidiaArch(line string) string {
	// AD(Ada), GB(Blackwell), GH(Hopper), GA(Ampere), TU(Turing), GV(Volta),
	// GP(Pascal), GM(Maxwell), GK(Kepler), GF(Fermi)
	codePrefixes := []struct {
		Prefix string
		Arch   string
	}{
		{"AD", "Ada Lovelace"},
		{"GB", "Blackwell"},
		{"GH", "Hopper"},
		{"GA", "Ampere"},
		{"TU", "Turing"},
		{"GV", "Volta"},
		{"GP", "Pascal"},
		{"GM", "Maxwell"},
		{"GK", "Kepler"},
		{"GF", "Fermi"},
	}
	upper := strings.ToUpper(line)
	for _, p := range codePrefixes {
		// scan every occurrence of the prefix, accepting only "<prefix><digit>"
		for i := 0; ; {
			rel := strings.Index(upper[i:], p.Prefix)
			if rel < 0 {
				break
			}
			pos := i + rel + len(p.Prefix)
			if pos < len(upper) && upper[pos] >= '0' && upper[pos] <= '9' {
				return p.Arch
			}
			i += rel + 1
		}
	}
	return "Unknown"
}

View File

@@ -0,0 +1,57 @@
package lvm
import (
"bytes"
"errors"
"log"
"os/exec"
)
// command wraps an external CLI tool whose stdout is parsed into a value of
// type T. cmd is the resolved binary path ("" when the binary was not found
// at construction time, see findCmd); defaultArgs are prepended to every
// invocation; format parses the command's stdout.
type command[T any] struct {
	cmd         string
	defaultArgs []string
	format      func(data []byte) (T, error)
}

// Run executes the command with defaultArgs followed by args and parses its
// stdout via format. It returns the parsed result, the captured stderr text
// (non-empty only on failure), and an error. An unresolved binary yields
// errors.ErrUnsupported.
func (c *command[T]) Run(args ...string) (*T, string, error) {
	if c.cmd == "" {
		// the binary was not found on PATH when this command was built
		return nil, "", errors.ErrUnsupported
	}
	// Assemble arguments in a fresh slice: appending directly to
	// c.defaultArgs could share (and on repeated calls clobber) its backing
	// array if it ever has spare capacity.
	allArgs := make([]string, 0, len(c.defaultArgs)+len(args))
	allArgs = append(allArgs, c.defaultArgs...)
	allArgs = append(allArgs, args...)
	o, e, err := runCommandSplit(c.cmd, allArgs...)
	if err != nil {
		return nil, string(e), err
	}
	result, err := c.format(o)
	if err != nil {
		// stdout was produced but could not be parsed; surface stderr too
		// for context when diagnosing malformed output
		return nil, string(e), err
	}
	return &result, "", nil
}

// runCommandSplit runs command with args and returns stdout and stderr
// separately, along with the process error (non-nil on non-zero exit or
// failure to start).
func runCommandSplit(command string, args ...string) ([]byte, []byte, error) {
	var stdoutBuf bytes.Buffer
	var stderrBuf bytes.Buffer
	cmd := exec.Command(command, args...)
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf
	err := cmd.Run()
	return stdoutBuf.Bytes(), stderrBuf.Bytes(), err
}
// findCmd resolves cmd to a full path via PATH lookup. When the binary is
// unavailable it logs the failure and returns "" (callers treat an empty
// path as "tool not installed").
func findCmd(cmd string) string {
	p, lookErr := exec.LookPath(cmd)
	if lookErr == nil {
		return p
	}
	log.Printf("failed to find command %s: %v\n", cmd, lookErr)
	return ""
}

179
cli/pkg/utils/lvm/disk.go Normal file
View File

@@ -0,0 +1,179 @@
package lvm
import (
"bytes"
"encoding/json"
)
/*
lsblk -J
{
"blockdevices": [
{
"name": "nvme0n1",
"maj:min": "259:0",
"rm": false,
"size": "1.9T",
"ro": false,
"type": "disk",
"mountpoints": [
null
],
"children": [
{
"name": "nvme0n1p1",
"maj:min": "259:1",
"rm": false,
"size": "512M",
"ro": false,
"type": "part",
"mountpoints": [
"/boot/efi"
]
},{
"name": "nvme0n1p2",
"maj:min": "259:2",
"rm": false,
"size": "1.9T",
"ro": false,
"type": "part",
"mountpoints": [
null
],
"children": [
{
"name": "olares--vg-swap",
"maj:min": "252:0",
"rm": false,
"size": "1G",
"ro": false,
"type": "lvm",
"mountpoints": [
"[SWAP]"
]
},{
"name": "olares--vg-root",
"maj:min": "252:1",
"rm": false,
"size": "100G",
"ro": false,
"type": "lvm",
"mountpoints": [
"/"
]
},{
"name": "olares--vg-data",
"maj:min": "252:2",
"rm": false,
"size": "1.8T",
"ro": false,
"type": "lvm",
"mountpoints": [
"/olares", "/var"
]
}
]
}
]
}
]
}
*/
// LBLK is the block-device listing binary, invoked with -J for JSON output.
const LBLK = "lsblk"
// BlkPart is one lsblk tree node: a disk, a partition, or an LVM volume,
// together with its children (partitions under a disk, LVs under a
// partition) — see the sample document above.
type BlkPart struct {
	Name        string           `json:"name"` // kernel device name, without the /dev/ prefix
	MajMin      string           `json:"maj:min"`
	Rm          bool             `json:"rm"` // removable-device flag
	Size        string           `json:"size"` // human-readable size as printed by lsblk
	Ro          bool             `json:"ro"` // read-only flag
	Type        string           `json:"type"` // "disk" | "part" | "lvm" | ...
	Mountpoints BlkList[string]  `json:"mountpoints"` // lsblk emits [null] for unmounted nodes; BlkList filters those out
	Children    BlkList[BlkPart] `json:"children,omitempty"`
}
// BlkList is a slice whose JSON decoding drops null entries; see its
// UnmarshalJSON implementation.
type BlkList[T any] []T
// BlkResult is the top-level `lsblk -J` document.
type BlkResult struct {
	Blockdevices BlkList[BlkPart] `json:"blockdevices"`
}
// CommandLBLK builds an lsblk runner that requests JSON output (-J) and
// decodes it into a BlkResult.
func CommandLBLK() *command[BlkResult] {
	decode := func(raw []byte) (BlkResult, error) {
		var parsed BlkResult
		err := json.Unmarshal(raw, &parsed)
		return parsed, err
	}
	return &command[BlkResult]{
		cmd:         findCmd(LBLK),
		defaultArgs: []string{"-J"},
		format:      decode,
	}
}
// UnmarshalJSON decodes a JSON array into the list while discarding null
// elements (lsblk emits e.g. "mountpoints": [null] for unmounted devices).
// A top-level null decodes to a nil list.
func (s *BlkList[T]) UnmarshalJSON(b []byte) error {
	trimmed := bytes.TrimSpace(b)
	if bytes.Equal(trimmed, []byte("null")) {
		*s = nil
		return nil
	}
	var elems []json.RawMessage
	if err := json.Unmarshal(trimmed, &elems); err != nil {
		return err
	}
	var items []T
	for _, raw := range elems {
		if bytes.Equal(bytes.TrimSpace(raw), []byte("null")) {
			continue // drop null placeholders
		}
		var item T
		if err := json.Unmarshal(raw, &item); err != nil {
			return err
		}
		items = append(items, item)
	}
	*s = items
	return nil
}
/*
findmnt -n -J --target /olares
{
"filesystems": [
{
"target": "/olares",
"source": "/dev/mapper/olares--vg-data[/olares]",
"fstype": "ext4",
"options": "rw,relatime"
}
]
}
*/
// Filesystem is one entry of `findmnt -J` output (see the sample above).
type Filesystem struct {
	Target  string `json:"target"` // mountpoint
	Source  string `json:"source"` // backing device, possibly with a [subdir] suffix
	Fstype  string `json:"fstype"`
	Options string `json:"options"`
}
// FindMntResult is the top-level findmnt JSON document.
type FindMntResult struct {
	Filesystems []Filesystem `json:"filesystems"`
}
// FINDMNT is the mount-table query binary, invoked with -J for JSON output.
const FINDMNT = "findmnt"
// CommandFindMnt builds a findmnt runner that requests JSON output (-J) and
// decodes it into a FindMntResult.
func CommandFindMnt() *command[FindMntResult] {
	decode := func(raw []byte) (FindMntResult, error) {
		var parsed FindMntResult
		err := json.Unmarshal(raw, &parsed)
		return parsed, err
	}
	return &command[FindMntResult]{
		cmd:         findCmd(FINDMNT),
		defaultArgs: []string{"-J"},
		format:      decode,
	}
}

147
cli/pkg/utils/lvm/lvm.go Normal file
View File

@@ -0,0 +1,147 @@
package lvm
import (
"encoding/json"
)
// Binary names of the LVM reporting tools; each is resolved to a full path
// via findCmd when the corresponding Command* constructor runs.
const (
	LVS = "lvs" // logical volumes
	VGS = "vgs" // volume groups
	PVS = "pvs" // physical volumes
)
/*
{
"report": [
{
"lv": [
{"lv_name":"data", "vg_name":"olares-vg", "lv_attr":"-wi-ao----", "lv_size":"1.76t", "pool_lv":"", "origin":"", "data_percent":"", "metadata_percent":"", "move_pv":"", "mirror_log":"", "copy_percent":"", "convert_lv":""},
{"lv_name":"root", "vg_name":"olares-vg", "lv_attr":"-wi-ao----", "lv_size":"100.00g", "pool_lv":"", "origin":"", "data_percent":"", "metadata_percent":"", "move_pv":"", "mirror_log":"", "copy_percent":"", "convert_lv":""},
{"lv_name":"swap", "vg_name":"olares-vg", "lv_attr":"-wi-ao----", "lv_size":"1.00g", "pool_lv":"", "origin":"", "data_percent":"", "metadata_percent":"", "move_pv":"", "mirror_log":"", "copy_percent":"", "convert_lv":""}
]
}
]
}
*/
// LvItem is one row of `lvs --reportformat json` output; LvPath/LvDmPath are
// only present when the extra columns are requested (-o +lv_dm_path,lv_path).
// All values are the raw strings printed by lvs.
type LvItem struct {
	LvName          string   `json:"lv_name"`
	VgName          string   `json:"vg_name"`
	LvAttr          string   `json:"lv_attr"`
	LvSize          string   `json:"lv_size"`
	PoolLv          string   `json:"pool_lv"`
	Origin          string   `json:"origin"`
	DataPercent     string   `json:"data_percent"`
	MetadataPercent string   `json:"metadata_percent"`
	MovePv          string   `json:"move_pv"`
	MirrorLog       string   `json:"mirror_log"`
	CopyPercent     string   `json:"copy_percent"`
	ConvertLv       string   `json:"convert_lv"`
	LvPath          string   `json:"lv_path"`
	LvDmPath        string   `json:"lv_dm_path"`
	Mountpoints     []string `json:"mountpoints"` // not an lvs column; filled in via findmnt by FindLvByVgName
}
// LvsResult is the top-level lvs JSON report document.
type LvsResult struct {
	Report []struct {
		Lv []LvItem `json:"lv"`
	} `json:"report"`
}
// CommandLVS builds an lvs runner reporting in JSON and decoding into an
// LvsResult.
func CommandLVS() *command[LvsResult] {
	decode := func(raw []byte) (LvsResult, error) {
		var parsed LvsResult
		err := json.Unmarshal(raw, &parsed)
		return parsed, err
	}
	return &command[LvsResult]{
		cmd:         findCmd(LVS),
		defaultArgs: []string{"--reportformat", "json"},
		format:      decode,
	}
}
/*
{
"report": [
{
"vg": [
{"vg_name":"olares-vg", "pv_count":"1", "lv_count":"3", "snap_count":"0", "vg_attr":"wz--n-", "vg_size":"1.86t", "vg_free":"0 "}
]
}
]
}
*/
// VgItem is one row of `vgs --reportformat json` output. All counts/sizes
// are the raw strings printed by vgs. PvName is only populated when the
// pv_name column is requested (-o +pv_name, see FindVgsOnDevice).
type VgItem struct {
	VgName    string `json:"vg_name"`
	PvCount   string `json:"pv_count"`
	LvCount   string `json:"lv_count"`
	SnapCount string `json:"snap_count"`
	VgAttr    string `json:"vg_attr"`
	VgSize    string `json:"vg_size"`
	VgFree    string `json:"vg_free"`
	PvName    string `json:"pv_name"` // extra column; empty unless explicitly requested
}
// VgsResult is the top-level vgs JSON report document.
type VgsResult struct {
	Report []struct {
		Vg []VgItem `json:"vg"`
	} `json:"report"`
}
// CommandVGS builds a vgs runner reporting in JSON and decoding into a
// VgsResult.
func CommandVGS() *command[VgsResult] {
	decode := func(raw []byte) (VgsResult, error) {
		var parsed VgsResult
		err := json.Unmarshal(raw, &parsed)
		return parsed, err
	}
	return &command[VgsResult]{
		cmd:         findCmd(VGS),
		defaultArgs: []string{"--reportformat", "json"},
		format:      decode,
	}
}
/*
{
"report": [
{
"pv": [
{"pv_name":"/dev/nvme0n1p2", "vg_name":"olares-vg", "pv_fmt":"lvm2", "pv_attr":"a--", "pv_size":"1.86t", "pv_free":"0 "}
]
}
]
}
*/
// PvItem is one row of `pvs --reportformat json` output; values are the raw
// strings printed by pvs.
type PvItem struct {
	PvName string `json:"pv_name"` // e.g. /dev/nvme0n1p2
	VgName string `json:"vg_name"`
	PvFmt  string `json:"pv_fmt"`
	PvAttr string `json:"pv_attr"`
	PvSize string `json:"pv_size"`
	PvFree string `json:"pv_free"`
}
// PvsResult is the top-level pvs JSON report document.
type PvsResult struct {
	Report []struct {
		Pv []PvItem `json:"pv"`
	} `json:"report"`
}
// CommandPVS builds a pvs runner reporting in JSON and decoding into a
// PvsResult.
func CommandPVS() *command[PvsResult] {
	decode := func(raw []byte) (PvsResult, error) {
		var parsed PvsResult
		err := json.Unmarshal(raw, &parsed)
		return parsed, err
	}
	return &command[PvsResult]{
		cmd:         findCmd(PVS),
		defaultArgs: []string{"--reportformat", "json"},
		format:      decode,
	}
}

272
cli/pkg/utils/lvm/tools.go Normal file
View File

@@ -0,0 +1,272 @@
package lvm
import (
"errors"
"log"
"os"
"os/exec"
"slices"
)
// FindCurrentLVM returns every volume group that has at least one physical
// and one logical volume. It errors when vgs fails or when no usable VG is
// found.
func FindCurrentLVM() ([]*VgItem, error) {
	result, errmsg, err := CommandVGS().Run()
	if err != nil {
		log.Printf("failed to run vgs command: %s \n%s\n", err, errmsg)
		return nil, err
	}
	if len(result.Report) == 0 || len(result.Report[0].Vg) == 0 {
		return nil, errors.New("no volume groups found")
	}
	var vgs []*VgItem
	for i := range result.Report[0].Vg {
		// address the slice element by index: taking &vg of the range
		// variable would alias every appended pointer on Go < 1.22
		vg := &result.Report[0].Vg[i]
		if vg.PvCount == "0" || vg.LvCount == "0" {
			continue // skip empty/degenerate groups
		}
		vgs = append(vgs, vg)
	}
	if len(vgs) == 0 {
		return nil, errors.New("no valid volume groups found")
	}
	return vgs, nil
}
// FindUnmountedDevices returns the disks (keyed by "/dev/<name>") on which
// nothing — no partition or LV at any depth — is currently mounted.
func FindUnmountedDevices() (map[string]*BlkPart, error) {
	result, errmsg, err := CommandLBLK().Run()
	if err != nil {
		log.Printf("failed to run lsblk command: %s \n%s\n", err, errmsg)
		return nil, err
	}
	unmountedDevices := make(map[string]*BlkPart)
	// unmountedPart reports whether part and all of its descendants carry no
	// mountpoints. (BlkList decoding already drops the nulls lsblk emits for
	// unmounted nodes, so a non-empty Mountpoints means "really mounted".)
	var unmountedPart func(part BlkPart) bool
	unmountedPart = func(part BlkPart) bool {
		if len(part.Mountpoints) > 0 {
			return false
		}
		for _, child := range part.Children {
			if !unmountedPart(child) {
				return false
			}
		}
		return true
	}
	for i := range result.Blockdevices {
		// point into the result slice, not at the loop variable, so the map
		// holds distinct entries on all Go versions
		dev := &result.Blockdevices[i]
		if dev.Type != "disk" {
			continue
		}
		if unmountedPart(*dev) {
			unmountedDevices["/dev/"+dev.Name] = dev
		}
	}
	return unmountedDevices, nil
}
// FindLvByVgName lists the logical volumes that belong to vgName, with the
// device-mapper/LV path columns requested and current mountpoints filled in
// on a best-effort basis. Returns (nil, nil) when the report is empty.
func FindLvByVgName(vgName string) ([]*LvItem, error) {
	result, errmsg, err := CommandLVS().Run("-o", "+lv_dm_path,lv_path")
	if err != nil {
		log.Printf("failed to run lvs command: %s \n%s\n", err, errmsg)
		return nil, err
	}
	if len(result.Report) == 0 || len(result.Report[0].Lv) == 0 {
		return nil, nil
	}
	var lvs []*LvItem
	for i := range result.Report[0].Lv {
		// address the slice element by index: &lv of the range variable
		// would alias every appended pointer on Go < 1.22
		lv := &result.Report[0].Lv[i]
		if lv.VgName != vgName {
			continue
		}
		// mountpoint lookup is best-effort; on error the field stays empty
		if mountpoints, merr := FindMountpointsByLvDmPath(lv.LvDmPath); merr == nil {
			lv.Mountpoints = mountpoints
		}
		lvs = append(lvs, lv)
	}
	return lvs, nil
}
// FindMountpointsByLvDmPath returns where the device-mapper path of an LV is
// currently mounted. findmnt exits non-zero with empty stderr when the
// device simply is not mounted, which is treated as "no mountpoints" rather
// than an error.
func FindMountpointsByLvDmPath(lvDmPath string) ([]string, error) {
	result, errmsg, err := CommandFindMnt().Run(lvDmPath)
	if err != nil && errmsg != "" {
		log.Printf("failed to run findmnt command: %s \n%s\n", err, errmsg)
		return nil, err
	}
	if result == nil || len(result.Filesystems) == 0 {
		return nil, nil
	}
	mountpoints := make([]string, 0, len(result.Filesystems))
	for _, fs := range result.Filesystems {
		mountpoints = append(mountpoints, fs.Target)
	}
	return mountpoints, nil
}
// DeleteDevicePartitions wipes all filesystem and partition-table signatures
// from devicePath via `wipefs -a`. A follow-up `sgdisk --zap-all` was
// considered but is currently disabled.
func DeleteDevicePartitions(devicePath string) error {
	out, err := exec.Command("wipefs", "-a", devicePath).CombinedOutput()
	if err != nil {
		log.Printf("failed to wipe device %s: %s\n", devicePath, out)
		return err
	}
	// c, err = exec.Command("sgdisk", "--zap-all", devicePath).CombinedOutput()
	// if err != nil {
	// 	log.Printf("failed to zap device %s: %s\n", devicePath, c)
	// 	return err
	// }
	return nil
}
// MakePartOnDevice writes a fresh GPT label on devicePath and creates a
// single primary partition spanning 1MiB..100% with optimal alignment:
//
//	parted -s /dev/sdX mklabel gpt
//	parted -s -a optimal /dev/sdX mkpart primary 1MiB 100%
func MakePartOnDevice(devicePath string) error {
	out, err := exec.Command("parted", "-s", devicePath, "mklabel", "gpt").CombinedOutput()
	if err != nil {
		log.Printf("failed to make partition table on device %s: %s\n", devicePath, out)
		return err
	}
	// -s keeps this invocation non-interactive as well; without it parted
	// may prompt (e.g. alignment confirmation) and hang an unattended run
	out, err = exec.Command("parted", "-s", "-a", "optimal", devicePath, "mkpart", "primary", "1MiB", "100%").CombinedOutput()
	if err != nil {
		log.Printf("failed to make partition on device %s: %s\n", devicePath, out)
		return err
	}
	return nil
}
// AddNewPV creates an LVM physical volume on the first partition of
// devicePath and adds it to the volume group vg:
//
//	pvcreate -f <partition>
//	vgextend <vg> <partition>
func AddNewPV(devicePath string, vg string) error {
	partition := firstPartitionPath(devicePath)
	out, err := exec.Command("pvcreate", "-f", partition).CombinedOutput()
	if err != nil {
		log.Printf("failed to create physical volume on device %s: %s\n", partition, out)
		return err
	}
	out, err = exec.Command("vgextend", vg, partition).CombinedOutput()
	if err != nil {
		log.Printf("failed to extend volume group %s with device %s: %s\n", vg, partition, out)
		return err
	}
	return nil
}

// firstPartitionPath returns the kernel name of the first partition on a
// disk. Devices whose name ends in a digit (nvme0n1, mmcblk0) use a "p"
// separator ("nvme0n1p1"); others (sda, vdb) append the number directly
// ("sda1"). The previous unconditional "p1" suffix produced invalid paths
// like /dev/sdap1 for SATA/virtio disks.
func firstPartitionPath(devicePath string) string {
	if devicePath != "" {
		last := devicePath[len(devicePath)-1]
		if last >= '0' && last <= '9' {
			return devicePath + "p1"
		}
	}
	return devicePath + "1"
}
// ExtendLv grows logical volume lv of volume group vg to consume all free
// extents and resizes its ext filesystem online:
//
//	lvextend -l +100%FREE /dev/<vg>/<lv>
//	resize2fs /dev/<vg>/<lv>
func ExtendLv(vg, lv string) error {
	lvDevice := "/dev/" + vg + "/" + lv
	if out, err := exec.Command("lvextend", "-l", "+100%FREE", lvDevice).CombinedOutput(); err != nil {
		log.Printf("failed to extend logical volume %s in volume group %s: %s\n", lv, vg, out)
		return err
	}
	if out, err := exec.Command("resize2fs", lvDevice).CombinedOutput(); err != nil {
		log.Printf("failed to resize filesystem on logical volume %s in volume group %s: %s\n", lv, vg, out)
		return err
	}
	return nil
}
// DeactivateLv deactivates every logical volume in the given volume group
// (`lvchange -an <vg>`).
func DeactivateLv(vg string) error {
	out, err := exec.Command("lvchange", "-an", vg).CombinedOutput()
	if err == nil {
		return nil
	}
	log.Printf("failed to deactivate logical volume in volume group %s: %s\n", vg, out)
	return err
}
// RemoveLv force-removes the logical volume at lvpath. A path that no
// longer exists is treated as already removed and returns nil.
func RemoveLv(lvpath string) error {
	if _, statErr := os.Stat(lvpath); statErr != nil {
		if os.IsNotExist(statErr) {
			return nil // nothing to do
		}
		log.Printf("failed to stat logical volume %s: %s\n", lvpath, statErr)
		return statErr
	}
	out, err := exec.Command("lvremove", "-f", lvpath).CombinedOutput()
	if err != nil {
		log.Printf("failed to remove logical volume %s: %s\n", lvpath, out)
		return err
	}
	return nil
}
// RemoveVg force-removes the volume group vg (`vgremove -f <vg>`).
func RemoveVg(vg string) error {
	out, err := exec.Command("vgremove", "-f", vg).CombinedOutput()
	if err == nil {
		return nil
	}
	log.Printf("failed to remove volume group %s: %s\n", vg, out)
	return err
}
// RemovePv force-removes the physical volume pv (`pvremove -f <pv>`).
func RemovePv(pv string) error {
	out, err := exec.Command("pvremove", "-f", pv).CombinedOutput()
	if err == nil {
		return nil
	}
	log.Printf("failed to remove physical volume %s: %s\n", pv, out)
	return err
}
// FindVgsOnDevice returns the volume groups whose physical volume lives on
// one of devicePaths (matched against the pv_name column requested via
// -o +pv_name). Returns (nil, nil) when vgs reports nothing.
func FindVgsOnDevice(devicePaths []string) ([]*VgItem, error) {
	result, errmsg, err := CommandVGS().Run("-o", "+pv_name")
	if err != nil {
		log.Printf("failed to run vgs command: %s \n%s\n", err, errmsg)
		return nil, err
	}
	// guard against an empty report before indexing Report[0]
	if len(result.Report) == 0 {
		return nil, nil
	}
	var vgs []*VgItem
	for i := range result.Report[0].Vg {
		// address the slice element by index: &vg of the range variable
		// would alias every appended pointer on Go < 1.22
		vg := &result.Report[0].Vg[i]
		if slices.Contains(devicePaths, vg.PvName) {
			vgs = append(vgs, vg)
		}
	}
	return vgs, nil
}

View File

@@ -1,30 +0,0 @@
package utils
import (
"fmt"
"path/filepath"
"testing"
"github.com/beclab/Olares/cli/pkg/common"
)
func TestA(t *testing.T) {
var a = "/home/ubuntu/.terminus/versions/v1.8.0-20240928/wizard/config/apps/argo"
var b = filepath.Base(a)
fmt.Println("---b---", b)
}
func TestExecNvidiaSmi(t *testing.T) {
runtime := common.LocalRuntime{}
info, installed, err := ExecNvidiaSmi(&runtime)
if err != nil {
t.Log(err)
t.Fail()
return
}
t.Log(installed)
t.Log(info)
}

View File

@@ -9,6 +9,7 @@ import (
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/beclab/Olares/cli/pkg/web5/crypto/dsa"
@@ -24,10 +25,18 @@ var (
)
var (
db *leveldb.DB
db *leveldb.DB
dbOnce sync.Once
)
func init() {
func getDB() *leveldb.DB {
dbOnce.Do(func() {
initDB()
})
return db
}
func initDB() {
var (
err error
info os.FileInfo
@@ -84,7 +93,7 @@ type CheckJWSResult struct {
func ResolveOlaresName(olares_id string) (*didcore.ResolutionResult, error) {
name := strings.Replace(olares_id, "@", ".", -1)
// Try to get from cache first
cached, err := db.Get([]byte(name), nil)
cached, err := getDB().Get([]byte(name), nil)
if err == nil {
var result didcore.ResolutionResult
if err := json.Unmarshal(cached, &result); err == nil {
@@ -117,7 +126,7 @@ func ResolveOlaresName(olares_id string) (*didcore.ResolutionResult, error) {
}
// Cache the result
if err := db.Put([]byte(name), body, nil); err != nil {
if err := getDB().Put([]byte(name), body, nil); err != nil {
// Log error but don't fail
fmt.Printf("failed to cache DID document: %v\n", err)
}

View File

@@ -78,7 +78,7 @@ func (i *InstallAppxPackage) Execute(runtime connector.Runtime) error {
wslAppxPackage := wslAppxPackageObj.(*files.KubeBinary)
var ps = &utils.PowerShellCommandExecutor{
Commands: []string{fmt.Sprintf("Add-AppxPackage %s -ForceUpdateFromAnyVersion", wslAppxPackage.Path())},
Commands: []string{fmt.Sprintf("Add-AppxPackage \"%s\" -ForceUpdateFromAnyVersion", wslAppxPackage.Path())},
}
if _, err := ps.Run(); err != nil {
@@ -216,7 +216,7 @@ func (i *InstallWSLDistro) Execute(runtime connector.Runtime) error {
logger.Infof("%s path: %s", ubuntuTool, installerPath)
var checkInstallerPs = &utils.PowerShellCommandExecutor{
Commands: []string{fmt.Sprintf("Test-Path %s", installerPath)},
Commands: []string{fmt.Sprintf("Test-Path \"%s\"", installerPath)},
}
installerExists, err := checkInstallerPs.Run()
if err != nil {
@@ -314,7 +314,7 @@ func (c *ConfigWslConf) Execute(runtime connector.Runtime) error {
}
cmd = &utils.DefaultCommandExecutor{
Commands: []string{"--shutdown", distro},
Commands: []string{"-t", distro},
}
if _, err := cmd.RunCmd("wsl", utils.DEFAULT); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("shutdown wsl %s failed", distro))
@@ -493,7 +493,6 @@ func (i *InstallTerminus) Execute(runtime connector.Runtime) error {
fmt.Sprintf("export %s=%s", common.ENV_HOST_IP, systemInfo.GetLocalIp()),
fmt.Sprintf("export %s=%s", common.ENV_DISABLE_HOST_IP_PROMPT, os.Getenv(common.ENV_DISABLE_HOST_IP_PROMPT)),
fmt.Sprintf("export %s=%s", common.ENV_OLARES_CDN_SERVICE, i.KubeConf.Arg.OlaresCDNService),
fmt.Sprintf("export %s=%s", common.ENV_NVIDIA_CONTAINER_REPO_MIRROR, os.Getenv(common.ENV_NVIDIA_CONTAINER_REPO_MIRROR)),
}
var bashUrl = fmt.Sprintf("https://%s", cc.DefaultBashUrl)

View File

@@ -1,8 +1,13 @@
package wizard
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"log"
@@ -21,10 +26,10 @@ type App struct {
func NewApp(sender Sender) *App {
// Create simplified client state (backend CLI doesn't need complex state management)
state := &SimpleClientState{}
// Initialize Client (corresponds to original TypeScript's new Client(this.state, sender, hook))
client := NewClient(state, sender)
return &App{
Version: "3.0",
API: client,
@@ -35,7 +40,7 @@ func NewApp(sender Sender) *App {
func NewAppWithBaseURL(baseURL string) *App {
// Create HTTP Sender
sender := NewHTTPSender(baseURL)
// Create App with HTTP Sender
return NewApp(sender)
}
@@ -76,7 +81,7 @@ func (s *SimpleClientState) GetDevice() *DeviceInfo {
// Signup function - based on original TypeScript signup method (ref: app.ts)
func (a *App) Signup(params SignupParams) (*CreateAccountResponse, error) {
log.Printf("Starting signup process for DID: %s", params.DID)
// 1. Initialize account object (ref: app.ts line 954-959)
account := &Account{
ID: generateUUID(),
@@ -90,27 +95,34 @@ func (a *App) Signup(params SignupParams) (*CreateAccountResponse, error) {
},
Orgs: []OrgInfo{}, // Initialize as empty array to prevent undefined
Settings: AccountSettings{},
Version: "3.0.14",
}
// Initialize account with master password (ref: account.ts line 182-190)
err := a.initializeAccount(account, params.MasterPassword)
if err != nil {
return nil, fmt.Errorf("failed to initialize account: %v", err)
}
log.Printf("Account initialized: ID=%s, DID=%s, Name=%s", account.ID, account.DID, account.Name)
// 2. Initialize auth object (ref: app.ts line 964-970)
auth := NewAuth(params.DID)
authKey, err := auth.GetAuthKey(params.MasterPassword)
if err != nil {
return nil, fmt.Errorf("failed to get auth key: %v", err)
}
// Calculate verifier (ref: app.ts line 968-970)
srpClient := NewSRPClient(SRPGroup4096)
err = srpClient.Initialize(authKey)
if err != nil {
return nil, fmt.Errorf("failed to initialize SRP client: %v", err)
}
auth.Verifier = srpClient.GetV()
log.Printf("SRP verifier generated: %x...", auth.Verifier[:8])
// 3. Send create account request to server (ref: app.ts line 973-987)
createParams := CreateAccountParams{
Account: *account,
@@ -121,36 +133,45 @@ func (a *App) Signup(params SignupParams) (*CreateAccountResponse, error) {
BFLUser: params.BFLUser,
JWS: params.JWS,
}
response, err := a.API.CreateAccount(createParams)
if err != nil {
return nil, fmt.Errorf("failed to create account on server: %v", err)
}
log.Printf("Account created on server successfully")
log.Printf("MFA token received: %s", response.MFA)
// 4. Login to newly created account (ref: app.ts line 991)
loginParams := LoginParams{
DID: params.DID,
Password: params.MasterPassword,
}
err = a.Login(loginParams)
if err != nil {
return nil, fmt.Errorf("failed to login after signup: %v", err)
}
log.Printf("Login after signup successful")
// 5. Activate account (ref: app.ts line 1039-1046)
// 5. Initialize main vault and create TOTP item (ref: app.ts line 1003-1038)
// err = a.initializeMainVaultWithTOTP(response.MFA)
// if err != nil {
// log.Printf("Warning: Failed to initialize main vault with TOTP: %v", err)
// // Don't return error as account creation was successful
// } else {
// log.Printf("Main vault initialized with TOTP item successfully")
// }
// 6. Activate account (ref: app.ts line 1039-1046)
activeParams := ActiveAccountParams{
ID: a.API.State.GetAccount().ID, // Use logged-in account ID
BFLToken: params.BFLToken,
BFLUser: params.BFLUser,
JWS: params.JWS,
}
err = a.API.ActiveAccount(activeParams)
if err != nil {
log.Printf("Warning: Failed to activate account: %v", err)
@@ -158,7 +179,7 @@ func (a *App) Signup(params SignupParams) (*CreateAccountResponse, error) {
} else {
log.Printf("Account activated successfully")
}
log.Printf("Signup completed successfully for DID: %s", params.DID)
return response, nil
}
@@ -166,21 +187,21 @@ func (a *App) Signup(params SignupParams) (*CreateAccountResponse, error) {
// Login function - simplified version
func (a *App) Login(params LoginParams) error {
log.Printf("Starting login process for DID: %s", params.DID)
// 1. Start creating session
startParams := StartCreateSessionParams{
DID: params.DID,
AuthToken: params.AuthToken,
AsAdmin: params.AsAdmin,
}
startResponse, err := a.API.StartCreateSession(startParams)
if err != nil {
return fmt.Errorf("failed to start create session: %v", err)
}
log.Printf("Session creation started for Account ID: %s", startResponse.AccountID)
// 2. Use SRP for authentication
authKey, err := deriveKeyPBKDF2(
[]byte(params.Password),
@@ -191,56 +212,60 @@ func (a *App) Login(params LoginParams) error {
if err != nil {
return fmt.Errorf("failed to derive auth key: %v", err)
}
// 3. SRP client negotiation
srpClient := NewSRPClient(SRPGroup4096)
err = srpClient.Initialize(authKey)
if err != nil {
return fmt.Errorf("failed to initialize SRP client: %v", err)
}
err = srpClient.SetB(startResponse.B.Bytes())
if err != nil {
return fmt.Errorf("failed to set B value: %v", err)
}
log.Printf("SRP negotiation completed")
// 4. Complete session creation
completeParams := CompleteCreateSessionParams{
SRPId: startResponse.SRPId,
AccountID: startResponse.AccountID,
A: Base64Bytes(srpClient.GetA()),
M: Base64Bytes(srpClient.GetM1()),
AddTrustedDevice: false, // Don't add trusted device by default
Kind: "oe", // Based on server logs, kind should be "oe"
Version: "4.0.0", // Based on server logs, version should be "4.0.0"
AddTrustedDevice: false, // Don't add trusted device by default
Kind: "oe", // Based on server logs, kind should be "oe"
Version: "4.0.0", // Based on server logs, version should be "4.0.0"
}
session, err := a.API.CompleteCreateSession(completeParams)
if err != nil {
return fmt.Errorf("failed to complete create session: %v", err)
}
// 5. Set session key
sessionKey := srpClient.GetK()
session.Key = sessionKey
a.API.State.SetSession(session)
log.Printf("Session created: %s", session.ID)
log.Printf("Session key length: %d bytes", len(sessionKey))
log.Printf("Session key (hex): %x", sessionKey)
// 6. Temporarily skip GetAccount call due to signature verification issues
// Create a simplified account object for subsequent operations
// account, err := a.API.GetAccount()
// if err != nil {
// return fmt.Errorf("failed to get account: %v", err)
// }
account := &Account{
ID: startResponse.AccountID,
DID: params.DID,
ID: startResponse.AccountID,
DID: params.DID,
Name: params.DID,
}
a.API.State.SetAccount(account)
log.Printf("Login completed successfully for DID: %s (skipped GetAccount due to signature issue)", params.DID)
return nil
}
@@ -271,12 +296,12 @@ func (c *Client) CreateAccount(params CreateAccountParams) (*CreateAccountRespon
if err != nil {
return nil, err
}
var result CreateAccountResponse
if err := c.parseResponse(response.Result, &result); err != nil {
return nil, fmt.Errorf("failed to parse CreateAccount response: %v", err)
}
return &result, nil
}
@@ -292,17 +317,17 @@ func (c *Client) StartCreateSession(params StartCreateSessionParams) (*StartCrea
if err != nil {
return nil, err
}
// Add debug info: print raw response
if responseBytes, err := json.Marshal(response.Result); err == nil {
log.Printf("StartCreateSession raw response: %s", string(responseBytes))
}
var result StartCreateSessionResponse
if err := c.parseResponse(response.Result, &result); err != nil {
return nil, fmt.Errorf("failed to parse StartCreateSession response: %v", err)
}
return &result, nil
}
@@ -312,12 +337,12 @@ func (c *Client) CompleteCreateSession(params CompleteCreateSessionParams) (*Ses
if err != nil {
return nil, err
}
var result Session
if err := c.parseResponse(response.Result, &result); err != nil {
return nil, fmt.Errorf("failed to parse CompleteCreateSession response: %v", err)
}
return &result, nil
}
@@ -327,12 +352,27 @@ func (c *Client) GetAccount() (*Account, error) {
if err != nil {
return nil, err
}
var result Account
if err := c.parseResponse(response.Result, &result); err != nil {
return nil, fmt.Errorf("failed to parse GetAccount response: %v", err)
}
return &result, nil
}
// UpdateVault sends the given vault to the server via the "updateVault"
// RPC method and returns the server's parsed view of the vault.
func (c *Client) UpdateVault(vault Vault) (*Vault, error) {
    response, err := c.call("updateVault", []interface{}{vault})
    if err != nil {
        return nil, err
    }
    updated := &Vault{}
    if err := c.parseResponse(response.Result, updated); err != nil {
        return nil, fmt.Errorf("failed to parse UpdateVault response: %v", err)
    }
    return updated, nil
}
@@ -359,7 +399,7 @@ type ActiveAccountParams struct {
}
type StartCreateSessionParams struct {
DID string `json:"did"`
DID string `json:"did"`
AuthToken *string `json:"authToken,omitempty"`
AsAdmin *bool `json:"asAdmin,omitempty"`
}
@@ -418,7 +458,7 @@ func (a *Auth) GetAuthKey(password string) ([]byte, error) {
if len(a.KeyParams.Salt) == 0 {
a.KeyParams.Salt = Base64Bytes(generateRandomBytes(16))
}
// Use PBKDF2 to derive key (ref: auth.ts line 284 and crypto.ts line 78-101)
return deriveKeyPBKDF2(
[]byte(password),
@@ -451,3 +491,236 @@ func generateRandomBytes(length int) []byte {
func getCurrentTimeISO() string {
return time.Now().UTC().Format(time.RFC3339)
}
// initializeMainVaultWithTOTP creates the account's main vault in memory and
// seeds it with a single TOTP vault item holding the given MFA token, then
// uploads the vault to the server (ref: app.ts line 1003-1038).
//
// The vault is uploaded with empty IV/additionalData, i.e. without encrypted
// content; per the note below, items are encrypted later when the vault is
// first unlocked client-side.
func (a *App) initializeMainVaultWithTOTP(mfaToken string) error {
    account := a.API.State.GetAccount()
    if account == nil {
        return fmt.Errorf("account is null")
    }
    // 1. Initialize main vault with a fresh UUID owned by the current account
    // (ref: server.ts line 1573-1579).
    vault := &Vault{
        Kind:    "vault", // Serializable.kind getter (ref: vault.ts line 18-20)
        ID:      generateUUID(),
        Name:    "My Vault",
        Owner:   account.ID,
        Created: getCurrentTimeISO(),
        Updated: getCurrentTimeISO(),
        Items:   []VaultItem{}, // explicit empty array (not nil) so JSON emits []
        Version: "4.0.0",       // serialization version (ref: encoding.ts toRaw)
    }
    // 2. Initialize parent class fields (SharedContainer extends BaseContainer).
    // BaseContainer has: encryptionParams: AESEncryptionParams = new AESEncryptionParams()
    vault.EncryptionParams = EncryptionParams{
        Algorithm:      "AES-GCM",
        TagSize:        128,
        KeySize:        256,
        IV:             "", // empty, will be set when data is encrypted
        AdditionalData: "", // empty, will be set when data is encrypted
        Version:        "4.0.0",
    }
    // SharedContainer has: keyParams: RSAEncryptionParams = new RSAEncryptionParams()
    vault.KeyParams = map[string]any{
        "algorithm": "RSA-OAEP",
        "hash":      "SHA-256",
        "kind":      "c",
        "version":   "4.0.0",
    }
    // SharedContainer has: accessors: Accessor[] = []
    vault.Accessors = []map[string]any{} // empty array, will be populated via updateAccessors()
    log.Printf("Main vault initialized: ID=%s, Name=%s, Owner=%s", vault.ID, vault.Name, vault.Owner)
    // 3. Get the authenticator (TOTP) item template (ref: app.ts line 1008-1014).
    template := GetAuthenticatorTemplate()
    if template == nil {
        return fmt.Errorf("authenticator template is null")
    }
    // 4. Store the MFA token in the template's single TOTP field
    // (ref: app.ts line 1015). Only a prefix is logged.
    template.Fields[0].Value = mfaToken
    log.Printf("TOTP template prepared with MFA token: %s...", mfaToken[:min(8, len(mfaToken))])
    // 5. Create the vault item named after the account (ref: app.ts line 1024-1033).
    item, err := a.createVaultItem(CreateVaultItemParams{
        Name:   account.Name,
        Vault:  vault,
        Fields: template.Fields,
        Tags:   []string{},
        Icon:   template.Icon,
        Type:   VaultTypeTerminusTotp,
    })
    if err != nil {
        return fmt.Errorf("failed to create vault item: %v", err)
    }
    log.Printf("TOTP vault item created: ID=%s, Name=%s", item.ID, item.Name)
    log.Printf("TOTP field value: %s", item.Fields[0].Value)
    // 6. Add the item to the vault's item list.
    vault.Items = append(vault.Items, *item)
    // 7. Update vault on server (ref: app.ts line 2138: await this.addItems([item], vault)).
    // Note: The vault is created empty without encryption. Items will be encrypted when
    // the user unlocks the vault for the first time via vault.unlock() -> vault.updateAccessors()
    err = a.updateVault(vault)
    if err != nil {
        return fmt.Errorf("failed to update vault on server: %v", err)
    }
    log.Printf("Vault updated on server successfully")
    return nil
}
// CreateVaultItemParams are the parameters for createVaultItem.
type CreateVaultItemParams struct {
    Name   string    // display name for the item
    Vault  *Vault    // target vault (currently not read by createVaultItem itself)
    Fields []Field   // initial fields, e.g. taken from an ItemTemplate
    Tags   []string  // initial tags
    Icon   string    // icon identifier
    Type   VaultType // item type, e.g. VaultTypeTerminusTotp
}
// createVaultItem builds a new in-memory vault item from the given
// parameters (ref: app.ts line 2096-2141). The item is not persisted here;
// callers append it to a vault and upload the vault separately. Returns an
// error when no account is available in the app state.
func (a *App) createVaultItem(params CreateVaultItemParams) (*VaultItem, error) {
    account := a.API.State.GetAccount()
    if account == nil {
        return nil, fmt.Errorf("account is null")
    }
    // Assemble the item with a fresh ID and current timestamp,
    // attributed to the current account (ref: item.ts line 451-475).
    newItem := VaultItem{
        ID:        generateUUID(),
        Name:      params.Name,
        Type:      params.Type,
        Icon:      params.Icon,
        Fields:    params.Fields,
        Tags:      params.Tags,
        Updated:   getCurrentTimeISO(),
        UpdatedBy: account.ID,
    }
    log.Printf("Vault item created: ID=%s, Name=%s, Type=%d", newItem.ID, newItem.Name, newItem.Type)
    return &newItem, nil
}
// updateVault stamps the vault with a new revision UUID and an updated
// timestamp, then pushes it to the server via the API
// (ref: app.ts line 1855-2037).
func (a *App) updateVault(vault *Vault) error {
    vault.Revision = generateUUID()
    vault.Updated = getCurrentTimeISO()
    remote, err := a.API.UpdateVault(*vault)
    if err != nil {
        return fmt.Errorf("failed to update vault on server: %v", err)
    }
    log.Printf("Vault updated on server: ID=%s, Revision=%s", remote.ID, remote.Revision)
    return nil
}
// min returns the smaller of two integers.
func min(a, b int) int {
    if b < a {
        return b
    }
    return a
}
// initializeAccount provisions an account with an RSA key pair, PBKDF2
// key-derivation parameters, and an AES-GCM-encrypted secrets blob containing
// the RSA private key plus a random HMAC signing key
// (ref: account.ts line 182-190).
//
// The master password is used only to derive the symmetric encryption key;
// it is never stored on the account.
func (a *App) initializeAccount(account *Account, masterPassword string) error {
    // 1. Generate a 2048-bit RSA key pair (ref: account.ts line 183-186).
    privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        return fmt.Errorf("failed to generate RSA key pair: %v", err)
    }
    // 2. DER-encode the public key (PKIX) and store it Base64-encoded on the
    // account (ref: account.ts line 186).
    publicKeyDER, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)
    if err != nil {
        return fmt.Errorf("failed to marshal public key: %v", err)
    }
    account.PublicKey = base64.StdEncoding.EncodeToString(publicKeyDER)
    // 3. Record the PBKDF2 parameters with a fresh 16-byte salt so clients
    // can re-derive the same key later (ref: container.ts line 125-133).
    salt := generateRandomBytes(16)
    account.KeyParams = KeyParams{
        Algorithm:  "PBKDF2",
        Hash:       "SHA-256",
        KeySize:    256,
        Iterations: 100000,
        Salt:       base64.StdEncoding.EncodeToString(salt),
        Version:    "3.0.14",
    }
    // 4. Derive the 32-byte AES key from the master password using the
    // parameters recorded above.
    encryptionKey := pbkdf2.Key([]byte(masterPassword), salt, account.KeyParams.Iterations, 32, sha256.New)
    // 5. Set up AES-GCM encryption parameters. A 16-byte IV is used to match
    // encryptAESGCM's NewGCMWithNonceSize(16) (GCM's default nonce is 12
    // bytes) (ref: container.ts line 48-56).
    iv := generateRandomBytes(16)
    additionalData := generateRandomBytes(16)
    account.EncryptionParams = EncryptionParams{
        Algorithm:      "AES-GCM",
        TagSize:        128,
        KeySize:        256,
        IV:             base64.StdEncoding.EncodeToString(iv),
        AdditionalData: base64.StdEncoding.EncodeToString(additionalData),
        Version:        "3.0.14",
    }
    // 6. Create account secrets: the PKCS#1-encoded RSA private key plus a
    // random 32-byte signing key, serialized as JSON.
    privateKeyDER := x509.MarshalPKCS1PrivateKey(privateKey)
    signingKey := generateRandomBytes(32) // HMAC key
    // Combine private key and signing key into account secrets
    accountSecrets := struct {
        SigningKey []byte `json:"signingKey"`
        PrivateKey []byte `json:"privateKey"`
    }{
        SigningKey: signingKey,
        PrivateKey: privateKeyDER,
    }
    accountSecretsBytes, err := json.Marshal(accountSecrets)
    if err != nil {
        return fmt.Errorf("failed to marshal account secrets: %v", err)
    }
    // 7. Encrypt the secrets under the derived key with AES-GCM
    // (ref: container.ts line 59-63).
    encryptedData, err := a.encryptAESGCM(encryptionKey, accountSecretsBytes, iv, additionalData)
    if err != nil {
        return fmt.Errorf("failed to encrypt account secrets: %v", err)
    }
    account.EncryptedData = base64.StdEncoding.EncodeToString(encryptedData)
    log.Printf("Account initialized with RSA key pair and encryption parameters")
    log.Printf("Public key length: %d bytes", len(publicKeyDER))
    log.Printf("Encrypted data length: %d bytes", len(encryptedData))
    return nil
}
// encryptAESGCM encrypts plaintext with AES-GCM under the given key, using
// iv as the nonce and additionalData as the additional authenticated data.
// The GCM authentication tag is appended to the returned ciphertext.
func (a *App) encryptAESGCM(key, plaintext, iv, additionalData []byte) ([]byte, error) {
    blockCipher, err := aes.NewCipher(key)
    if err != nil {
        return nil, fmt.Errorf("failed to create cipher: %v", err)
    }
    // A 16-byte nonce is non-standard for GCM (12 bytes is typical); the
    // decrypting side must use the same nonce size.
    aead, err := cipher.NewGCMWithNonceSize(blockCipher, 16)
    if err != nil {
        return nil, fmt.Errorf("failed to create GCM: %v", err)
    }
    return aead.Seal(nil, iv, plaintext, additionalData), nil
}

View File

@@ -24,12 +24,12 @@ type Token struct {
// FirstFactorRequest represents first factor request structure
type FirstFactorRequest struct {
Username string `json:"username"`
Password string `json:"password"`
KeepMeLoggedIn bool `json:"keepMeLoggedIn"`
RequestMethod string `json:"requestMethod"`
TargetURL string `json:"targetURL"`
AcceptCookie bool `json:"acceptCookie"`
Username string `json:"username"`
Password string `json:"password"`
KeepMeLoggedIn bool `json:"keepMeLoggedIn"`
RequestMethod string `json:"requestMethod"`
TargetURL string `json:"targetURL"`
AcceptCookie bool `json:"acceptCookie"`
}
// FirstFactorResponse represents first factor response structure
@@ -41,10 +41,10 @@ type FirstFactorResponse struct {
// OnFirstFactor implements first factor authentication (ref: BindTerminusBusiness.ts)
func OnFirstFactor(baseURL, terminusName, osUser, osPwd string, acceptCookie, needTwoFactor bool) (*Token, error) {
log.Printf("Starting onFirstFactor for user: %s", osUser)
// Process password (salted MD5)
processedPassword := passwordAddSort(osPwd)
// Build request
reqData := FirstFactorRequest{
Username: osUser,
@@ -54,51 +54,51 @@ func OnFirstFactor(baseURL, terminusName, osUser, osPwd string, acceptCookie, ne
TargetURL: baseURL,
AcceptCookie: acceptCookie,
}
jsonData, err := json.Marshal(reqData)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %v", err)
}
// Send HTTP request
client := &http.Client{
Timeout: 10 * time.Second,
}
reqURL := fmt.Sprintf("%s/api/firstfactor?hideCookie=true", baseURL)
req, err := http.NewRequest("POST", reqURL, strings.NewReader(string(jsonData)))
if err != nil {
return nil, fmt.Errorf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", "application/json")
log.Printf("Sending request to: %s", reqURL)
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("request failed: %v", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response: %v", err)
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
}
var response FirstFactorResponse
if err := json.Unmarshal(body, &response); err != nil {
return nil, fmt.Errorf("failed to unmarshal response: %v", err)
}
if response.Status != "OK" {
return nil, fmt.Errorf("authentication failed: %s", response.Status)
}
log.Printf("First factor authentication successful")
return &response.Data, nil
}
@@ -111,7 +111,6 @@ func passwordAddSort(password string) string {
return fmt.Sprintf("%x", hash)
}
// Main authentication function - corresponds to original TypeScript _authenticate function
func Authenticate(req AuthenticateRequest) (*AuthenticateResponse, error) {
if platform == nil {
@@ -124,7 +123,7 @@ func Authenticate(req AuthenticateRequest) (*AuthenticateResponse, error) {
// Step 1: If no pending request, start new authentication request
if authReq == nil {
log.Printf("[%s] Step %d: req is empty, starting auth request...", req.Caller, step)
opts := StartAuthRequestOptions{
Type: &req.Type,
Purpose: req.Purpose,
@@ -170,49 +169,48 @@ func Authenticate(req AuthenticateRequest) (*AuthenticateResponse, error) {
return res, nil
}
// UserBindTerminus main user binding function (ref: TypeScript version)
func UserBindTerminus(mnemonic, bflUrl, vaultUrl, osPwd, terminusName, localName string) error {
func UserBindTerminus(mnemonic, bflUrl, vaultUrl, osPwd, terminusName, localName string) (string, error) {
log.Printf("Starting userBindTerminus for user: %s", terminusName)
// 1. Initialize global storage
if globalUserStore == nil {
log.Printf("Initializing global stores...")
err := InitializeGlobalStores(mnemonic, terminusName)
if err != nil {
return fmt.Errorf("failed to initialize global stores: %w", err)
return "", fmt.Errorf("failed to initialize global stores: %w", err)
}
log.Printf("Global stores initialized successfully")
}
// 2. Initialize platform and App (if not already initialized)
var app *App
if platform == nil {
log.Printf("Initializing platform...")
// Create App using vaultUrl as base URL
app = NewAppWithBaseURL(vaultUrl)
// Create and set WebPlatform (no need to pass mnemonic, uses global storage)
webPlatform := NewWebPlatform(app.API)
SetPlatform(webPlatform)
log.Printf("Platform initialized successfully with base URL: %s", vaultUrl)
} else {
// If platform already initialized, create new App instance for signup
app = NewAppWithBaseURL(vaultUrl)
}
log.Printf("Using bflUrl: %s", bflUrl)
// 3. Call onFirstFactor to get token (ref: TypeScript implementation)
token, err := OnFirstFactor(bflUrl, terminusName, localName, osPwd, false, false)
if err != nil {
return fmt.Errorf("onFirstFactor failed: %v", err)
return "", fmt.Errorf("onFirstFactor failed: %v", err)
}
log.Printf("First factor authentication successful, session_id: %s", token.SessionID)
// 4. Execute authentication - call _authenticate function from pkg/activate
authRes, err := Authenticate(AuthenticateRequest{
DID: localName,
@@ -222,14 +220,14 @@ func UserBindTerminus(mnemonic, bflUrl, vaultUrl, osPwd, terminusName, localName
Caller: "E001",
})
if err != nil {
return fmt.Errorf("authentication failed: %v", err)
return "", fmt.Errorf("authentication failed: %v", err)
}
log.Printf("Authentication successful for DID: %s", authRes.DID)
// 5. Generate JWS - ref: BindTerminusBusiness.ts
log.Printf("Creating JWS for signup...")
// Extract domain (ref: TypeScript implementation)
domain := vaultUrl
if strings.HasPrefix(domain, "http://") {
@@ -237,7 +235,7 @@ func UserBindTerminus(mnemonic, bflUrl, vaultUrl, osPwd, terminusName, localName
} else if strings.HasPrefix(domain, "https://") {
domain = domain[8:]
}
// Use globalUserStore to sign JWS (ref: userStore.signJWS in TypeScript)
jws, err := globalUserStore.SignJWS(map[string]any{
"name": terminusName,
@@ -246,14 +244,14 @@ func UserBindTerminus(mnemonic, bflUrl, vaultUrl, osPwd, terminusName, localName
"time": fmt.Sprintf("%d", time.Now().UnixMilli()),
})
if err != nil {
return fmt.Errorf("JWS signing failed: %v", err)
return "", fmt.Errorf("JWS signing failed: %v", err)
}
log.Printf("JWS created successfully: %s...", jws[:50])
// 6. Execute signup (call real implementation in app.go)
log.Printf("Executing signup...")
// Build SignupParams (ref: app.signup in BindTerminusBusiness.ts)
signupParams := SignupParams{
DID: authRes.DID,
@@ -265,15 +263,15 @@ func UserBindTerminus(mnemonic, bflUrl, vaultUrl, osPwd, terminusName, localName
BFLUser: localName,
JWS: jws,
}
// Call real app.Signup function
signupResponse, err := app.Signup(signupParams)
if err != nil {
return fmt.Errorf("signup failed: %v", err)
return "", fmt.Errorf("signup failed: %v", err)
}
log.Printf("Signup successful! MFA: %s", signupResponse.MFA)
// Save MFA token to UserStore for next stage use
err = globalUserStore.SetMFA(signupResponse.MFA)
if err != nil {
@@ -282,8 +280,8 @@ func UserBindTerminus(mnemonic, bflUrl, vaultUrl, osPwd, terminusName, localName
} else {
log.Printf("MFA token saved to UserStore for future use")
}
log.Printf("User bind to Terminus completed successfully!")
return nil
return token.AccessToken, nil
}

View File

@@ -83,27 +83,27 @@ type ErrorCode string
const (
ErrorCodeAuthenticationFailed ErrorCode = "email_verification_failed"
ErrorCodeNotFound ErrorCode = "not_found"
ErrorCodeServerError ErrorCode = "server_error"
ErrorCodeNotFound ErrorCode = "not_found"
ErrorCodeServerError ErrorCode = "server_error"
)
// AccountProvisioning represents account provisioning information
type AccountProvisioning struct {
ID string `json:"id"`
DID string `json:"did"`
Name *string `json:"name,omitempty"`
AccountID *string `json:"accountId,omitempty"`
Status string `json:"status"`
StatusLabel string `json:"statusLabel"`
StatusMessage string `json:"statusMessage"`
ActionURL *string `json:"actionUrl,omitempty"`
ActionLabel *string `json:"actionLabel,omitempty"`
MetaData map[string]any `json:"metaData,omitempty"`
SkipTos bool `json:"skipTos"`
BillingPage any `json:"billingPage,omitempty"`
Quota map[string]any `json:"quota"`
Features map[string]any `json:"features"`
Orgs []string `json:"orgs"`
ID string `json:"id"`
DID string `json:"did"`
Name *string `json:"name,omitempty"`
AccountID *string `json:"accountId,omitempty"`
Status string `json:"status"`
StatusLabel string `json:"statusLabel"`
StatusMessage string `json:"statusMessage"`
ActionURL *string `json:"actionUrl,omitempty"`
ActionLabel *string `json:"actionLabel,omitempty"`
MetaData map[string]any `json:"metaData,omitempty"`
SkipTos bool `json:"skipTos"`
BillingPage any `json:"billingPage,omitempty"`
Quota map[string]any `json:"quota"`
Features map[string]any `json:"features"`
Orgs []string `json:"orgs"`
}
type StartAuthRequestResponse struct {
@@ -130,11 +130,11 @@ type AuthenticateRequest struct {
}
// AuthenticateResponse is the result of a completed authentication flow.
//
// NOTE: the original block declared every field twice (stale diff residue),
// which does not compile; the duplicates are removed here.
type AuthenticateResponse struct {
    DID           string              `json:"did"`
    Token         string              `json:"token"`
    AccountStatus AccountStatus       `json:"accountStatus"`
    Provisioning  AccountProvisioning `json:"provisioning"`
    DeviceTrusted bool                `json:"deviceTrusted"`
}
type StartAuthRequestOptions struct {
@@ -161,9 +161,9 @@ type CompleteAuthRequestParams struct {
}
// CompleteAuthRequestResponse is returned when an authentication request
// is completed.
//
// NOTE: the original block declared every field twice (stale diff residue),
// which does not compile; the duplicates are removed here.
type CompleteAuthRequestResponse struct {
    AccountStatus AccountStatus       `json:"accountStatus"`
    DeviceTrusted bool                `json:"deviceTrusted"`
    Provisioning  AccountProvisioning `json:"provisioning"`
}
// Session represents a user session
@@ -192,19 +192,43 @@ type AccountSettings struct {
// Simplified version, can be extended as needed
}
// EncryptionParams represents AES-GCM encryption parameters stored alongside
// an encrypted container. IV and AdditionalData may be empty until the
// container's data is actually encrypted.
type EncryptionParams struct {
    Algorithm      string `json:"algorithm"`      // cipher name, e.g. "AES-GCM"
    TagSize        int    `json:"tagSize"`        // GCM tag size in bits, e.g. 128
    KeySize        int    `json:"keySize"`        // key size in bits, e.g. 256
    IV             string `json:"iv"`             // Base64 encoded initialization vector
    AdditionalData string `json:"additionalData"` // Base64 encoded additional authenticated data
    Version        string `json:"version"`        // serialization version, e.g. "3.0.14"
}
// KeyParams represents PBKDF2 key derivation parameters; recording them on
// the account lets clients re-derive the same key from the master password.
type KeyParams struct {
    Algorithm  string `json:"algorithm"`  // KDF name, e.g. "PBKDF2"
    Hash       string `json:"hash"`       // digest, e.g. "SHA-256"
    KeySize    int    `json:"keySize"`    // derived key size in bits, e.g. 256
    Iterations int    `json:"iterations"` // PBKDF2 iteration count, e.g. 100000
    Salt       string `json:"salt"`       // Base64 encoded salt
    Version    string `json:"version"`    // serialization version, e.g. "3.0.14"
}
type Account struct {
ID string `json:"id"`
DID string `json:"did"`
Name string `json:"name"`
Local bool `json:"local,omitempty"`
Created string `json:"created,omitempty"` // ISO 8601 format
Updated string `json:"updated,omitempty"` // ISO 8601 format
PublicKey []byte `json:"publicKey,omitempty"` // RSA public key
MainVault MainVault `json:"mainVault"` // Main vault information
Orgs []OrgInfo `json:"orgs"` // Organization list (important: prevent undefined)
Revision string `json:"revision,omitempty"` // Version control
Kid string `json:"kid,omitempty"` // Key ID
Settings AccountSettings `json:"settings,omitempty"` // Account settings
ID string `json:"id"`
DID string `json:"did"`
Name string `json:"name"`
Local bool `json:"local,omitempty"`
Created string `json:"created,omitempty"` // ISO 8601 format
Updated string `json:"updated,omitempty"` // ISO 8601 format
PublicKey string `json:"publicKey,omitempty"` // Base64 encoded RSA public key
EncryptedData string `json:"encryptedData,omitempty"` // Base64 encoded encrypted data
EncryptionParams EncryptionParams `json:"encryptionParams,omitempty"` // AES encryption parameters
KeyParams KeyParams `json:"keyParams,omitempty"` // PBKDF2 key derivation parameters
MainVault MainVault `json:"mainVault"` // Main vault information
Orgs []OrgInfo `json:"orgs"` // Organization list (important: prevent undefined)
Revision string `json:"revision,omitempty"` // Version control
Kid string `json:"kid,omitempty"` // Key ID
Settings AccountSettings `json:"settings,omitempty"` // Account settings
Version string `json:"version,omitempty"` // Version
}
type DeviceInfo struct {
@@ -215,10 +239,10 @@ type DeviceInfo struct {
// Request represents an RPC request sent to the server.
//
// NOTE: the original block declared every field twice (stale diff residue),
// which does not compile; the duplicates are removed here.
type Request struct {
    Method string        `json:"method"`
    Params []interface{} `json:"params,omitempty"`
    Device *DeviceInfo   `json:"device,omitempty"`
    Auth   *RequestAuth  `json:"auth,omitempty"`
}
type Response struct {
@@ -247,12 +271,12 @@ func (t *ISOTime) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &str); err != nil {
return err
}
parsed, err := time.Parse("2006-01-02T15:04:05.000Z", str)
if err != nil {
return err
}
*t = ISOTime(parsed)
return nil
}
@@ -282,7 +306,7 @@ func (b *Base64Bytes) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &str); err != nil {
return err
}
// Server uses URL-safe base64 encoding by default (ref: encoding.ts line 366: urlSafe = true)
// Try base64url decoding first
decoded, err := base64.URLEncoding.DecodeString(str)
@@ -297,7 +321,7 @@ func (b *Base64Bytes) UnmarshalJSON(data []byte) error {
}
}
}
*b = Base64Bytes(decoded)
return nil
}
@@ -313,5 +337,100 @@ func (b Base64Bytes) Bytes() []byte {
return []byte(b)
}
// ============================================================================
// Vault and VaultItem Structures
// ============================================================================
// VaultType represents the type of a vault item; it is serialized as an
// integer in the item's JSON (see VaultItem.Type).
type VaultType int

// Known vault item types. VaultTypeTerminusTotp is the type used for the
// TOTP item created during account initialization.
const (
    VaultTypeDefault           VaultType = 0
    VaultTypeLogin             VaultType = 1
    VaultTypeCard              VaultType = 2
    VaultTypeTerminusTotp      VaultType = 3
    VaultTypeOlaresSSHPassword VaultType = 4
)
// FieldType represents the semantic type of a field in a vault item; it is
// serialized as a string in the field's JSON (see Field.Type).
type FieldType string

// Supported field types.
const (
    FieldTypeUsername  FieldType = "username"
    FieldTypePassword  FieldType = "password"
    FieldTypeApiSecret FieldType = "apiSecret"
    FieldTypeMnemonic  FieldType = "mnemonic"
    FieldTypeUrl       FieldType = "url"
    FieldTypeEmail     FieldType = "email"
    FieldTypeDate      FieldType = "date"
    FieldTypeMonth     FieldType = "month"
    FieldTypeCredit    FieldType = "credit"
    FieldTypePhone     FieldType = "phone"
    FieldTypePin       FieldType = "pin"
    FieldTypeTotp      FieldType = "totp"
    FieldTypeNote      FieldType = "note"
    FieldTypeText      FieldType = "text"
)
// Field represents a single named value within a vault item (for example a
// username, password, or TOTP secret).
type Field struct {
    Name  string    `json:"name"`  // display name of the field
    Type  FieldType `json:"type"`  // semantic type, see FieldType constants
    Value string    `json:"value"` // the field's content (may be a secret)
}
// VaultItem represents a single item stored in a vault.
type VaultItem struct {
    ID        string    `json:"id"` // unique item ID (UUID, see createVaultItem)
    Name      string    `json:"name"`
    Type      VaultType `json:"type"`
    Icon      string    `json:"icon,omitempty"`
    Fields    []Field   `json:"fields"`
    Tags      []string  `json:"tags"`
    Updated   string    `json:"updated"`   // ISO 8601 format
    UpdatedBy string    `json:"updatedBy"` // account ID of the last editor
}
// Vault represents a vault containing items, together with the encryption
// metadata mirroring the TypeScript SharedContainer/BaseContainer fields.
// KeyParams/EncryptionParams/Accessors/EncryptedData are loosely typed
// because their concrete shape is produced by the client/server, not here.
type Vault struct {
    Kind             string      `json:"kind"` // Always "vault" for Vault objects
    ID               string      `json:"id"`
    Name             string      `json:"name"`
    Owner            string      `json:"owner"`              // owning account ID
    Created          string      `json:"created"`            // ISO 8601 format
    Updated          string      `json:"updated"`            // ISO 8601 format
    Revision         string      `json:"revision,omitempty"` // fresh UUID on every update (see updateVault)
    Items            []VaultItem `json:"items,omitempty"`
    KeyParams        interface{} `json:"keyParams,omitempty"`        // RSA key params (map in initializeMainVaultWithTOTP)
    EncryptionParams interface{} `json:"encryptionParams,omitempty"` // AES-GCM params
    Accessors        interface{} `json:"accessors,omitempty"`        // accounts with access; empty until unlock
    EncryptedData    interface{} `json:"encryptedData,omitempty"`
    Version          string      `json:"version,omitempty"` // Serialization version
}
// ItemTemplate represents a template for creating vault items.
type ItemTemplate struct {
    ID     string  `json:"id"` // template identifier, e.g. "authenticator"
    Name   string  `json:"name"`
    Icon   string  `json:"icon"`
    Fields []Field `json:"fields"` // pre-defined fields; values filled in by the caller
}
// GetAuthenticatorTemplate returns the item template used for TOTP
// (authenticator) vault items. The single "One-Time Password" field is left
// blank; callers fill in the MFA token before creating the item.
func GetAuthenticatorTemplate() *ItemTemplate {
    totpField := Field{
        Name:  "One-Time Password",
        Type:  FieldTypeTotp,
        Value: "", // Will be set with MFA token
    }
    return &ItemTemplate{
        ID:     "authenticator",
        Name:   "Authenticator",
        Icon:   "authenticator",
        Fields: []Field{totpField},
    }
}
// JWS-related data structures removed, using Web5 library's jwt.Sign() method directly
// UserItem and JWSSignatureInput removed as they were not actually used

View File

@@ -189,25 +189,14 @@ func (u *UserStore) SignJWS(payload map[string]any) (string, error) {
const TerminusDefaultDomain = "olares.cn"
// GetTerminusURL builds the https URL of the user's Terminus instance.
// A terminus name of the form "local@domain" yields
// "https://<localURL><local>.<domain>"; names without an "@domain" part
// fall back to TerminusDefaultDomain.
func (u *UserStore) GetTerminusURL() string {
    parts := strings.Split(u.terminusName, "@")
    localURL := u.getLocalURL()
    domain := TerminusDefaultDomain
    if len(parts) == 2 {
        domain = parts[1]
    }
    return fmt.Sprintf("https://%s%s.%s", localURL, parts[0], domain)
}
// GetAuthURL builds the https URL of the auth service for the user's
// Terminus instance (no trailing slash). Names without an "@domain" part
// fall back to TerminusDefaultDomain.
//
// NOTE: the original block contained both the old (trailing-slash) and new
// return statements per branch (stale diff residue), leaving unreachable
// code; only the new, slash-free returns are kept here.
func (u *UserStore) GetAuthURL() string {
    array := strings.Split(u.terminusName, "@")
    localURL := u.getLocalURL()
    if len(array) == 2 {
        return fmt.Sprintf("https://auth.%s%s.%s", localURL, array[0], array[1])
    }
    return fmt.Sprintf("https://auth.%s%s.%s", localURL, array[0], TerminusDefaultDomain)
}

View File

@@ -107,12 +107,18 @@ func (w *ActivationWizard) RunWizard() error {
case "wait_reset_password":
log.Println("🔐 Resetting password...")
// Directly perform password reset, no need for complex DNS waiting logic
if err := w.performPasswordReset(); err != nil {
return fmt.Errorf("password reset failed: %v", err)
status, err := w.authRequestTerminusInfo()
if err != nil {
log.Printf("failed to get terminus info by authurl: %v retry ...\n", err)
} else {
if status == "wait_reset_password" {
// Directly perform password reset, no need for complex DNS waiting logic
if err := w.performPasswordReset(); err != nil {
return fmt.Errorf("password reset failed: %v", err)
}
log.Println("✅ Password reset completed")
}
}
log.Println("✅ Password reset completed")
default:
log.Printf("⏳ Unknown status: %s, waiting...", status)
@@ -196,15 +202,11 @@ func (w *ActivationWizard) updateTerminusInfo() (string, error) {
// authRequestTerminusInfo backup Terminus information request
func (w *ActivationWizard) authRequestTerminusInfo() (string, error) {
// Use globalUserStore to generate correct terminus_url
var terminusURL string
if globalUserStore != nil {
terminusURL = globalUserStore.GetTerminusURL()
} else {
terminusURL = w.BaseURL
}
var terminusURL = globalUserStore.GetAuthURL()
// Build backup URL (usually terminus_url + '/api/olares-info')
url := fmt.Sprintf("%s/api/olares-info?t=%d", terminusURL, time.Now().UnixMilli())
url := fmt.Sprintf("%s/bfl/info/v1/olares-info?t=%d", terminusURL, time.Now().UnixMilli())
client := &http.Client{
Timeout: 5 * time.Second,
@@ -232,12 +234,15 @@ func (w *ActivationWizard) authRequestTerminusInfo() (string, error) {
return "", fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
}
var terminusInfo TerminusInfo
if err := json.Unmarshal(body, &terminusInfo); err != nil {
var response struct {
Data TerminusInfo `json:"data"`
}
if err := json.Unmarshal(body, &response); err != nil {
return "", fmt.Errorf("failed to parse response: %v", err)
}
return terminusInfo.WizardStatus, nil
return response.Data.WizardStatus, nil
}
// performPasswordReset performs password reset - simplified version

View File

@@ -26,4 +26,4 @@ build-linux-in-docker:
-w /olaresd \
-e DEBIAN_FRONTEND=noninteractive \
golang:1.24 \
sh -c "apt-get -y update; apt-get -y install libudev-dev; make build-linux"
sh -c "apt-get -y update; apt-get -y install libudev-dev libpcap-dev; make build-linux"

View File

@@ -5,7 +5,7 @@ FROM golang:1.23 as builder
WORKDIR /workspace
COPY go.mod go.sum ./
RUN apt update && apt install -y libudev-dev
RUN apt update && apt install -y libudev-dev libpcap-dev
RUN \
echo ">> Downloading go modules..." && \
go mod download

View File

@@ -5,7 +5,7 @@ go 1.24.2
toolchain go1.24.4
replace (
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.23
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.37
bytetrade.io/web3os/backups-sdk => github.com/Above-Os/backups-sdk v0.1.17
bytetrade.io/web3os/bfl => github.com/beclab/bfl v0.3.36
github.com/labstack/echo/v4 => github.com/eball/echo/v4 v4.13.4-patch
@@ -26,9 +26,10 @@ require (
github.com/containerd/containerd v1.7.28
github.com/distribution/distribution/v3 v3.0.0
github.com/dustin/go-humanize v1.0.1
github.com/eball/zeroconf v0.2.1
github.com/eball/zeroconf v0.2.2
github.com/godbus/dbus/v5 v5.1.0
github.com/gofiber/fiber/v2 v2.52.9
github.com/google/gopacket v1.1.19
github.com/hirochachacha/go-smb2 v1.1.0
github.com/jaypipes/ghw v0.13.0
github.com/jochenvg/go-udev v0.0.0-20171110120927-d6b62d56d37b
@@ -37,6 +38,7 @@ require (
github.com/labstack/echo/v4 v4.0.0-00010101000000-000000000000
github.com/libp2p/go-netroute v0.2.2
github.com/mackerelio/go-osstat v0.2.5
github.com/mdlayher/raw v0.1.0
github.com/muka/network_manager v0.0.0-20200903202308-ae5ede816e07
github.com/nxadm/tail v1.4.11
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
@@ -48,6 +50,7 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/spf13/pflag v1.0.7
github.com/txn2/txeh v1.5.5
github.com/vishvananda/netlink v1.3.0
go.opentelemetry.io/otel/trace v1.36.0
golang.org/x/crypto v0.41.0
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
@@ -110,7 +113,6 @@ require (
github.com/golang/snappy v0.0.3 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
@@ -125,6 +127,8 @@ require (
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mdlayher/packet v0.0.0-20220221164757-67998ac0ff93 // indirect
github.com/mdlayher/socket v0.2.1 // indirect
github.com/miekg/dns v1.1.55 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/locker v1.0.1 // indirect
@@ -162,6 +166,7 @@ require (
github.com/valyala/fasthttp v1.51.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opencensus.io v0.24.0 // indirect

View File

@@ -26,8 +26,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beclab/Olares/cli v0.0.0-20251016092744-6241cceceb89 h1:5s9hXV8K3faToQtE9DbiM7O6jt5kIiEsLAaKn6F0UfA=
github.com/beclab/Olares/cli v0.0.0-20251016092744-6241cceceb89/go.mod h1:iEvZxM6PnFxFRppneTzV3hgr2tIxDnsI3dhp4pi7pFg=
github.com/beclab/app-service v0.4.23 h1:6kjpq7rie62FafQRBGXtM9MQD3CEMGmrOC7aGPbvLJY=
github.com/beclab/app-service v0.4.23/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
github.com/beclab/app-service v0.4.37 h1:gt60wQxgPWMc3oN94TNSdiQAvzqTyCv/OUP93jNSQTY=
github.com/beclab/app-service v0.4.37/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
github.com/beclab/bfl v0.3.36 h1:PgeSPGc+XoONiwFsKq9xX8rqcL4kVM1G/ut0lYYj/js=
github.com/beclab/bfl v0.3.36/go.mod h1:A82u38MxYk1C3Lqnm4iUUK4hBeY9HHIs+xU4V93OnJk=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -85,8 +85,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eball/echo/v4 v4.13.4-patch h1:5w83KQrEqrxhc1BO0BpRBHssC37vFrWualUM27Rt2sg=
github.com/eball/echo/v4 v4.13.4-patch/go.mod h1:ORgy8LWTq8knpwgaz538rAJMri7WgpoAD6H3zYccn84=
github.com/eball/zeroconf v0.2.1 h1:PZ89f6J2k2Z7q3oSzcZGFXJf97S7NPmj7H04ACw9v8c=
github.com/eball/zeroconf v0.2.1/go.mod h1:eIbIjGYo9sSMaKWLcveHEPRWdyblz7q9ih2R1HnNw5M=
github.com/eball/zeroconf v0.2.2 h1:y23X67tLFlU+b35LyM9THXGsdC88IUz803G+mzfeSeE=
github.com/eball/zeroconf v0.2.2/go.mod h1:eIbIjGYo9sSMaKWLcveHEPRWdyblz7q9ih2R1HnNw5M=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
@@ -228,6 +228,12 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mdlayher/packet v0.0.0-20220221164757-67998ac0ff93 h1:elUwhY+HQaIV9kMgmsU9zOF413pDKoo2uFNypgP5SxM=
github.com/mdlayher/packet v0.0.0-20220221164757-67998ac0ff93/go.mod h1:K9sWKMgN6wa78BbuJL+dT1ZZdiAfhkc2fb6XXLjHulk=
github.com/mdlayher/raw v0.1.0 h1:K4PFMVy+AFsp0Zdlrts7yNhxc/uXoPVHi9RzRvtZF2Y=
github.com/mdlayher/raw v0.1.0/go.mod h1:yXnxvs6c0XoF/aK52/H5PjsVHmWBCFfZUfoh/Y5s9Sg=
github.com/mdlayher/socket v0.2.1 h1:F2aaOwb53VsBE+ebRS9bLd7yPOfYUMC8lOODdCBDY6w=
github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
@@ -358,6 +364,10 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -458,8 +468,10 @@ golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

View File

@@ -1,6 +1,7 @@
package handlers
import (
"fmt"
"net/http"
"github.com/beclab/Olares/daemon/internel/client"
@@ -59,7 +60,18 @@ func (h *Handlers) RequireOwner(next func(ctx *fiber.Ctx) error) func(ctx *fiber
// get owner from release file
envOlaresID, err := utils.GetOlaresNameFromReleaseFile()
if err != nil {
return h.ErrJSON(ctx, http.StatusInternalServerError, "failed to get Olares ID from release file")
return h.ErrJSON(ctx, http.StatusInternalServerError, fmt.Sprintf("failed to get Olares ID from release file: %v", err))
}
if envOlaresID == "" {
if isInstalled, err := state.IsTerminusInstalled(); err != nil {
return h.ErrJSON(ctx, http.StatusInternalServerError, fmt.Sprintf("failed to check if Olares is installed: %v", err))
} else {
// not installed, skip owner check
if !isInstalled {
return next(ctx)
}
}
}
if c.OlaresID() != envOlaresID {

View File

@@ -4,6 +4,7 @@ import (
"errors"
"net"
"slices"
"strings"
"github.com/beclab/Olares/daemon/pkg/nets"
"github.com/eball/zeroconf"
@@ -17,6 +18,7 @@ type DNSConfig struct {
type instanceServer struct {
queryServer *zeroconf.Server
host *DNSConfig
aliases []string
}
type mDNSServer struct {
@@ -66,6 +68,14 @@ func (s *mDNSServer) StartAll() error {
return err
}
// add host alias
domainTokens := strings.Split(domain, ".")
alias := []string{strings.Join(domainTokens, "-") + ".local."}
// TODO: add more alias if needed
klog.Info("add host alias, ", alias[0])
server.AddHostAlias(alias[0])
s.servers[domain] = &instanceServer{
queryServer: server,
host: &DNSConfig{Domain: domain},

View File

@@ -0,0 +1,141 @@
package intranet
import "encoding/binary"
// ipv4Checksum computes the 16-bit one's-complement Internet checksum
// (RFC 1071) over hdr. hdr must have even length, which always holds
// for IPv4 headers (their length is a multiple of 4 bytes).
func ipv4Checksum(hdr []byte) uint16 {
	var sum uint32
	// header length is multiple of 2
	for i := 0; i < len(hdr); i += 2 {
		sum += uint32(binary.BigEndian.Uint16(hdr[i : i+2]))
	}
	// Fold carries back into the low 16 bits until none remain.
	for (sum >> 16) != 0 {
		sum = (sum & 0xffff) + (sum >> 16)
	}
	return ^uint16(sum)
}

// fragmentIPv4 attempts to split an Ethernet frame carrying an IPv4 packet
// into multiple Ethernet frames where each IP fragment fits within the
// given interface MTU. mtu is the interface MTU (i.e., maximum IP packet
// size including IP header). Returns a slice of full ethernet frames ready
// to send. If the frame is not IPv4 or can't be fragmented (DF bit set,
// or the MTU cannot hold the IP header plus one 8-byte fragment unit)
// an error is returned.
//
// The input may itself already be an IP fragment: its fragment offset is
// added to every produced fragment's offset, and its MF (more fragments)
// bit is preserved on the last fragment emitted here, so reassembly at
// the receiver stays correct.
func fragmentIPv4(frame []byte, mtu int) ([][]byte, error) {
	// Need at least Ethernet + minimum IP header.
	if len(frame) < 14+20 {
		return nil, fmtError("frame too short for IPv4")
	}
	const etherTypeIPv4 = 0x0800
	if binary.BigEndian.Uint16(frame[12:14]) != etherTypeIPv4 {
		return nil, fmtError("not an IPv4 ethernet frame")
	}
	ipStart := 14
	verIhl := frame[ipStart]
	if verIhl>>4 != 4 {
		return nil, fmtError("not IPv4")
	}
	ipHeaderLen := int(verIhl&0x0f) * 4
	if ipHeaderLen < 20 || len(frame) < ipStart+ipHeaderLen {
		return nil, fmtError("invalid ip header length")
	}
	// Read total length from IP header.
	totalLen := int(binary.BigEndian.Uint16(frame[ipStart+2 : ipStart+4]))
	if totalLen < ipHeaderLen {
		return nil, fmtError("invalid total length")
	}
	payloadLen := totalLen - ipHeaderLen
	if len(frame) < ipStart+ipHeaderLen+payloadLen {
		// Allow pcap frames whose capture truncated the payload: fall back
		// to however many payload bytes are actually present.
		available := len(frame) - (ipStart + ipHeaderLen)
		if available <= 0 {
			return nil, fmtError("no ip payload available")
		}
		payloadLen = available
		totalLen = ipHeaderLen + payloadLen
	}
	// Check DF (Don't Fragment) and remember the original fragmentation
	// state (offset and MF) so it can be carried into the new fragments.
	flagsFrag := binary.BigEndian.Uint16(frame[ipStart+6 : ipStart+8])
	const (
		dfMask = 0x4000
		mfMask = 0x2000
	)
	if flagsFrag&dfMask != 0 {
		return nil, fmtError("DF set; cannot fragment")
	}
	origMF := flagsFrag & mfMask
	origOffset := flagsFrag & 0x1fff
	// Per-fragment payload size is mtu minus the IP header, rounded down
	// to a multiple of 8 (fragment offsets are expressed in 8-byte units).
	if mtu <= ipHeaderLen {
		return nil, fmtError("mtu too small for ip header")
	}
	maxPayload := (mtu - ipHeaderLen) &^ 7
	if maxPayload <= 0 {
		return nil, fmtError("mtu too small for fragmentation unit")
	}
	ipHeader := make([]byte, ipHeaderLen)
	copy(ipHeader, frame[ipStart:ipStart+ipHeaderLen])
	payload := make([]byte, payloadLen)
	copy(payload, frame[ipStart+ipHeaderLen:ipStart+ipHeaderLen+payloadLen])

	// Iterate and build fragments.
	var frags [][]byte
	offset := 0
	for offset < payloadLen {
		chunk := maxPayload
		if remaining := payloadLen - offset; remaining <= maxPayload {
			chunk = remaining
		}
		// Create new IP header for this fragment.
		newIP := make([]byte, ipHeaderLen)
		copy(newIP, ipHeader)
		// Set total length.
		binary.BigEndian.PutUint16(newIP[2:4], uint16(ipHeaderLen+chunk))
		// MF is set on every fragment but the last; the last fragment
		// inherits the original packet's MF bit. (DF is known to be 0
		// here — it was rejected above.)
		mf := origMF
		if offset+chunk < payloadLen {
			mf = mfMask
		}
		fragOffset := origOffset + uint16(offset/8)
		binary.BigEndian.PutUint16(newIP[6:8], mf|(fragOffset&0x1fff))
		// Zero checksum and recompute over the updated header.
		newIP[10] = 0
		newIP[11] = 0
		binary.BigEndian.PutUint16(newIP[10:12], ipv4Checksum(newIP))
		// Assemble the full frame: original Ethernet header, new IP
		// header, then this fragment's slice of the payload.
		fragFrame := make([]byte, 14+ipHeaderLen+chunk)
		copy(fragFrame[:14], frame[:14])
		copy(fragFrame[14:14+ipHeaderLen], newIP)
		copy(fragFrame[14+ipHeaderLen:], payload[offset:offset+chunk])
		frags = append(frags, fragFrame)
		offset += chunk
	}
	return frags, nil
}

// fmtError is a tiny helper to produce errors without importing fmt across file.
func fmtError(s string) error { return &simpleErr{s} }

// simpleErr is a minimal error implementation carrying a static message.
type simpleErr struct{ s string }

// Error implements the error interface.
func (e *simpleErr) Error() string { return e.s }

View File

@@ -0,0 +1,47 @@
//go:build !(linux && amd64)
// +build !linux !amd64
package intranet
import (
"errors"
"net"
)
// DSRProxy is a no-op placeholder for platforms other than linux/amd64,
// where the pcap/raw-socket based DSR (direct server return) proxy is not
// built. Every method succeeds without doing anything, except regonfigure
// which reports the operation as unsupported.
type DSRProxy struct {
}

// NewDSRProxy returns an inert DSRProxy stub.
func NewDSRProxy() *DSRProxy {
	return &DSRProxy{}
}

// WithVIP is a no-op on unsupported platforms.
func (d *DSRProxy) WithVIP(vip string, intf string) error {
	return nil
}

// WithBackend is a no-op on unsupported platforms.
func (d *DSRProxy) WithBackend(backendIP string, backendMAC string) error {
	return nil
}

// WithCalicoInterface is a no-op on unsupported platforms.
func (d *DSRProxy) WithCalicoInterface(intf string) error {
	return nil
}

// Close is a no-op on unsupported platforms.
func (d *DSRProxy) Close() {}

// Stop is a no-op on unsupported platforms.
func (d *DSRProxy) Stop() error {
	return nil
}

// start is a no-op on unsupported platforms.
func (d *DSRProxy) start() error { return nil }

// Start is a no-op on unsupported platforms.
func (d *DSRProxy) Start() error {
	return nil
}

// handleResponse processes response packets from backend, rewriting source IP back to VIP
func (d *DSRProxy) handleResponse(data []byte, conn net.PacketConn) {}

// regonfigure (sic — spelling matches the linux implementation and its
// callers) always fails here: reconfiguration is unsupported off linux/amd64.
func (d *DSRProxy) regonfigure() error {
	return errors.New("unsupported operation")
}

View File

@@ -0,0 +1,509 @@
//go:build linux && amd64
// +build linux,amd64
package intranet
import (
"fmt"
"syscall"
"bytes"
"encoding/binary"
"errors"
"log"
"net"
"sync"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/pcap"
"github.com/mdlayher/raw"
"k8s.io/klog/v2"
)
// DSRProxy captures DNS traffic addressed to a node VIP via pcap and
// forwards it at L2 to a backend pod (direct-server-return style),
// rewriting MAC/IP/port fields on the way in and fixing responses up
// on the way back.
type DSRProxy struct {
	vip             net.IP           // virtual IP the proxy intercepts traffic for (set by WithVIP)
	vipInterface    *net.Interface   // interface holding the VIP; pcap capture and response injection happen here
	backendIP       net.IP           // backend DNS pod IP requests are rewritten to (set by WithBackend)
	backendMAC      net.HardwareAddr // backend pod MAC used as the L2 destination for forwarded requests
	calicoInterface *net.Interface   // calico interface for backend IP; forwarded requests are injected here
	configChanged   bool             // set by the With* setters; consumed (and cleared) by regonfigure

	pcapHandle   *pcap.Handle // live capture on vipInterface (opened in regonfigure)
	responseConn *raw.Conn    // raw socket on vipInterface used to send responses back to clients
	backendConn  *raw.Conn    // raw socket on calicoInterface used to send requests to the backend
	closed       bool         // true after Close released the handle/sockets
	mu           sync.Mutex   // guards all configuration fields above

	stopCh chan struct{} // closed by Stop to terminate the start loop

	// requestPortMap records the UDP source ports of forwarded requests
	// (key: uint16 port, value unused) so NAT'd responses can be matched.
	requestPortMap *sync.Map
}
// NewDSRProxy returns a DSRProxy with its stop channel and request-port
// tracking map initialized; VIP, backend and calico settings are supplied
// later through the With* methods.
func NewDSRProxy() *DSRProxy {
	p := &DSRProxy{}
	p.stopCh = make(chan struct{})
	p.requestPortMap = &sync.Map{}
	return p
}
// WithVIP sets the virtual IP the proxy should intercept and the interface
// that carries it. It is a no-op when both values are unchanged; otherwise
// it marks the configuration dirty so a later regonfigure() rebuilds the
// capture plumbing. Validation happens before any field is mutated, so an
// invalid IP or interface name leaves the previous working config intact.
func (d *DSRProxy) WithVIP(vip string, intf string) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	if d.vip != nil && d.vip.String() == vip &&
		d.vipInterface != nil && d.vipInterface.Name == intf {
		return nil
	}

	// Validate both inputs before touching any state.
	ip := net.ParseIP(vip)
	if ip == nil {
		klog.Error("parse VIP failed: invalid IP ", vip)
		return fmt.Errorf("invalid VIP %q", vip)
	}
	iface, err := net.InterfaceByName(intf)
	if err != nil {
		klog.Error("parse VIP interface failed:", err)
		return err
	}

	d.configChanged = true
	d.vip = ip
	d.vipInterface = iface
	return nil
}
// WithBackend sets the backend DNS pod's IP and MAC address. It is a no-op
// when both values are unchanged; otherwise it marks the configuration
// dirty so a later regonfigure() rebuilds the capture plumbing. Validation
// happens before any field is mutated, so an invalid IP or MAC leaves the
// previous working config intact.
func (d *DSRProxy) WithBackend(backendIP string, backendMAC string) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	if d.backendIP != nil && d.backendIP.String() == backendIP &&
		d.backendMAC != nil && d.backendMAC.String() == backendMAC {
		return nil
	}

	// Validate both inputs before touching any state.
	ip := net.ParseIP(backendIP)
	if ip == nil {
		klog.Error("parse backend IP failed: invalid IP ", backendIP)
		return fmt.Errorf("invalid backend IP %q", backendIP)
	}
	mac, err := net.ParseMAC(backendMAC)
	if err != nil {
		klog.Error("parse backend MAC failed:", err)
		return err
	}

	d.configChanged = true
	d.backendIP = ip
	d.backendMAC = mac
	return nil
}
// WithCalicoInterface sets the calico (host-side) interface used to inject
// rewritten request frames toward the backend pod. It is a no-op when the
// interface is unchanged; otherwise it marks the configuration dirty so a
// later regonfigure() rebuilds the capture plumbing. The interface name is
// resolved before any field is mutated, so a bad name leaves the previous
// working config intact.
func (d *DSRProxy) WithCalicoInterface(intf string) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	if d.calicoInterface != nil && d.calicoInterface.Name == intf {
		return nil
	}

	// Validate before touching any state.
	iface, err := net.InterfaceByName(intf)
	if err != nil {
		klog.Error("parse calico interface failed:", err)
		return err
	}

	d.configChanged = true
	d.calicoInterface = iface
	return nil
}
// Close releases the pcap handle and both raw sockets, nil-ing each field
// so releasing twice is harmless, and marks the proxy closed.
//
// NOTE(review): Close does not take d.mu itself; its callers in this file
// (Stop and regonfigure) hold the lock when calling it — keep that
// invariant when adding new callers.
func (d *DSRProxy) Close() {
	if d.pcapHandle != nil {
		d.pcapHandle.Close()
		d.pcapHandle = nil
	}

	if d.responseConn != nil {
		d.responseConn.Close()
		d.responseConn = nil
	}

	if d.backendConn != nil {
		d.backendConn.Close()
		d.backendConn = nil
	}

	d.closed = true
}
// Stop shuts down the proxy: it releases the capture handle and raw
// sockets (unless Close already did) and signals the start loop to exit.
// Stop is idempotent — calling it twice must not panic.
func (d *DSRProxy) Stop() error {
	d.mu.Lock()
	defer d.mu.Unlock()

	if !d.closed {
		d.Close()
	}

	// close(d.stopCh) on an already-closed channel panics, so only close
	// it if it is still open. This check-then-close is race-free because
	// it runs under d.mu.
	select {
	case <-d.stopCh:
		// already stopped
	default:
		close(d.stopCh)
	}
	return nil
}
// start runs the capture/forward loop until the packet source fails or
// Stop() closes stopCh. regonfigure() must have run first (it opens the
// pcap handle and both raw sockets); otherwise start returns immediately.
//
// For each captured frame it decides whether the frame is a response
// (from the backend pod, direct or NAT'd) or a request to the VIP,
// rewrites the relevant MAC/IP/port fields, and re-injects the frame on
// the proper interface, fragmenting oversized IPv4 frames when needed.
func (d *DSRProxy) start() error {
	if err := func() error {
		d.mu.Lock()
		defer d.mu.Unlock()
		if d.pcapHandle == nil || d.responseConn == nil || d.backendConn == nil {
			return errors.New("dsr proxy not configured")
		}
		return nil
	}(); err != nil {
		return err
	}

	log.Printf("Will send requests via: %s, responses via: %s", d.calicoInterface.Name, d.vipInterface.Name)

	packetSource := gopacket.NewPacketSource(d.pcapHandle, d.pcapHandle.LinkType())
	packets := packetSource.Packets()
	log.Println("start dsr proxy on", d.vipInterface.Name, "vip", d.vip)
	for {
		select {
		case p, ok := <-packets:
			if !ok {
				klog.Error("read packets failed")
				return errors.New("read packets error")
			}

			// raw packet bytes
			data := p.Data()

			// safety
			if len(data) < 14 {
				continue
			}

			// Determine if this is a request (to VIP) or response (from backend)
			isResponse := false
			if len(data) >= 14+20 {
				ethType := binary.BigEndian.Uint16(data[12:14])
				if ethType == 0x0800 { // IPv4
					ipStart := 14
					srcIP := net.IP(data[ipStart+12 : ipStart+16])
					dstIP := net.IP(data[ipStart+16 : ipStart+20])
					protocol := data[ipStart+9]

					// Check if this is a response from backend (direct or NAT'd)
					// Case 1: Direct response from backend IP
					if srcIP.Equal(d.backendIP) {
						isResponse = true
						log.Printf("=== RESPONSE PACKET from backend %s (direct) ===", d.backendIP)
						// BUG FIX: the previous bare `break` here terminated
						// the enclosing select statement (per the Go spec),
						// skipping the handleResponse call below and silently
						// dropping every direct backend response. The checks
						// below are already guarded by !isResponse, so simply
						// falling through is correct.
					}

					// Case 2: NAT'd response from VIP with wrong source port
					// This is UDP from VIP but destination port is not 53
					if !isResponse && srcIP.Equal(d.vip) && protocol == 17 {
						// Check UDP header
						verIhl := data[ipStart]
						ihl := int(verIhl & 0x0f)
						ipHeaderLen := ihl * 4
						if len(data) >= ipStart+ipHeaderLen+8 {
							udpStart := ipStart + ipHeaderLen
							srcPort := binary.BigEndian.Uint16(data[udpStart : udpStart+2])
							dstPort := binary.BigEndian.Uint16(data[udpStart+2 : udpStart+4])

							// If source is VIP, it's UDP, and source port is NOT 53
							// but destination port suggests this is a DNS response (>1024)
							// This is likely a NAT'd DNS response that we need to fix
							if srcPort != 53 && dstPort > 1024 {
								if _, ok := d.requestPortMap.Load(dstPort); !ok {
									continue
								}
								d.requestPortMap.Delete(dstPort)
								isResponse = true
								log.Printf("=== RESPONSE PACKET from VIP (NAT'd, fixing port %d->53) ===", srcPort)
							}
						}
					}

					if !isResponse {
						if dstIP.Equal(d.vip) {
							log.Printf("=== REQUEST PACKET to VIP %s ===", d.vip)
						} else {
							continue
						}
					}
				}
			}

			// Handle response packets (from backend to client)
			if isResponse {
				d.handleResponse(data, d.responseConn)
				continue
			}

			// Skip packets that are already destined to backend MAC
			// (these are packets we've already modified and re-injected)
			// This prevents forwarding loops
			if bytes.Equal(data[0:6], d.backendMAC) {
				log.Printf("Skipping: packet already forwarded to backend MAC")
				continue
			}

			log.Printf("Intercepted packet: src=%s, dst=%s, len=%d",
				net.HardwareAddr(data[6:12]), net.HardwareAddr(data[0:6]), len(data))

			// Debug: Print original packet details
			if klog.V(8).Enabled() {
				if len(data) >= 14+20 {
					ethType := binary.BigEndian.Uint16(data[12:14])
					if ethType == 0x0800 {
						ipStart := 14
						srcIP := net.IP(data[ipStart+12 : ipStart+16])
						dstIP := net.IP(data[ipStart+16 : ipStart+20])
						oldChecksum := binary.BigEndian.Uint16(data[ipStart+10 : ipStart+12])
						log.Printf("BEFORE: src_ip=%s, dst_ip=%s, ip_checksum=0x%04x", srcIP, dstIP, oldChecksum)
						// Print first 20 bytes of IP header in hex
						log.Printf("BEFORE IP header (hex): % x", data[ipStart:ipStart+20])
					}
				}
			}

			// Rewrite ethernet header: set destination MAC to backend container MAC
			// Source MAC will be the send interface MAC (Calico veth host side)
			copy(data[0:6], d.backendMAC)                    // dst = container MAC
			copy(data[6:12], d.calicoInterface.HardwareAddr) // src = Calico veth host side MAC

			// rewrite IP destination address (critical for backend to accept the packet)
			if len(data) >= 14+20 {
				ethType := binary.BigEndian.Uint16(data[12:14])
				if ethType == 0x0800 { // IPv4
					ipStart := 14
					verIhl := data[ipStart]
					ihl := int(verIhl & 0x0f)
					ipHeaderLen := ihl * 4
					if ipHeaderLen >= 20 && len(data) >= ipStart+ipHeaderLen {
						// Get protocol
						protocol := data[ipStart+9]

						// Replace destination IP with backend IP
						oldDstIP := make([]byte, 4)
						copy(oldDstIP, data[ipStart+16:ipStart+20])
						srcIP := net.IP(data[ipStart+12 : ipStart+16])
						copy(data[ipStart+16:ipStart+20], d.backendIP.To4())

						log.Printf("Rewriting IP: src=%s, dst=%s->%s, proto=%d",
							srcIP, net.IP(oldDstIP), d.backendIP, protocol)

						// Recalculate IP checksum
						data[ipStart+10] = 0
						data[ipStart+11] = 0
						csum := ipv4Checksum(data[ipStart : ipStart+ipHeaderLen])
						binary.BigEndian.PutUint16(data[ipStart+10:ipStart+12], csum)
						log.Printf("New IP checksum: 0x%04x", csum)

						// For UDP (protocol 17), recalculate UDP checksum
						if protocol == 17 && len(data) >= ipStart+ipHeaderLen+8 {
							udpStart := ipStart + ipHeaderLen
							// UDP checksum is optional for IPv4, can be set to 0
							// But if present, we need to update it
							oldChecksum := binary.BigEndian.Uint16(data[udpStart+6 : udpStart+8])
							if oldChecksum != 0 {
								// For simplicity, set UDP checksum to 0 (valid for IPv4)
								data[udpStart+6] = 0
								data[udpStart+7] = 0
								log.Printf("UDP checksum set to 0 (was 0x%04x)", oldChecksum)
							}
						}
					}
				}
			}

			// Debug: Print modified packet details
			if klog.V(8).Enabled() {
				if len(data) >= 14+20 {
					ethType := binary.BigEndian.Uint16(data[12:14])
					if ethType == 0x0800 {
						ipStart := 14
						srcIP := net.IP(data[ipStart+12 : ipStart+16])
						dstIP := net.IP(data[ipStart+16 : ipStart+20])
						newChecksum := binary.BigEndian.Uint16(data[ipStart+10 : ipStart+12])
						log.Printf("AFTER: src_ip=%s, dst_ip=%s, ip_checksum=0x%04x", srcIP, dstIP, newChecksum)
						// Print first 20 bytes of IP header in hex
						log.Printf("AFTER IP header (hex): % x", data[ipStart:ipStart+20])
					}
				}
			}

			// Extract UDP source port for tracking
			if len(data) >= 14+20 {
				ethType := binary.BigEndian.Uint16(data[12:14])
				if ethType == 0x0800 { // IPv4
					ipStart := 14
					verIhl := data[ipStart]
					ihl := int(verIhl & 0x0f)
					ipHeaderLen := ihl * 4
					protocol := data[ipStart+9]

					// For UDP (protocol 17), extract source port
					if protocol == 17 && len(data) >= ipStart+ipHeaderLen+2 {
						udpStart := ipStart + ipHeaderLen
						srcPort := binary.BigEndian.Uint16(data[udpStart : udpStart+2])
						d.requestPortMap.Store(srcPort, 1)
					}
				}
			}

			// send modified frame
			log.Printf("Forwarding to backend: MAC=%s, IP=%s", d.backendMAC, d.backendIP)

			// If the frame is larger than the interface MTU + ethernet header,
			// attempt IPv4 fragmentation and send fragments. For non-IPv4
			// frames we can't fragment at L2, so skip them.
			maxFrame := d.vipInterface.MTU + 14 // interface MTU (IP payload + IP header must fit in MTU) + ethernet header
			if len(data) > maxFrame {
				frags, err := fragmentIPv4(data, d.vipInterface.MTU)
				if err != nil {
					log.Printf("fragment error: %v, skipping frame (len=%d, max=%d)", err, len(data), maxFrame)
					continue
				}
				addr := &raw.Addr{HardwareAddr: d.backendMAC}
				for _, f := range frags {
					if _, err := d.backendConn.WriteTo(f, addr); err != nil {
						log.Printf("writeto err: %v", err)
					}
				}
				continue
			}

			addr := &raw.Addr{HardwareAddr: d.backendMAC}
			if _, err := d.backendConn.WriteTo(data, addr); err != nil {
				log.Printf("writeto err: %v", err)
			}
		case <-d.stopCh:
			log.Println("stopping")
			return nil
		}
	}
}
// Start launches the proxy loop in a background goroutine. start() fails
// until regonfigure() has opened the pcap handle and raw sockets, so the
// loop retries every 10 seconds. The retry loop also exits once Stop()
// closes stopCh — the previous implementation kept retrying forever,
// leaking the goroutine after the proxy was stopped.
func (d *DSRProxy) Start() error {
	go func() {
		for {
			if err := d.start(); err == nil {
				return
			}
			select {
			case <-d.stopCh:
				// proxy stopped; give up retrying
				return
			case <-time.After(10 * time.Second):
			}
		}
	}()
	return nil
}
// handleResponse processes response packets from backend, rewriting source IP
// back to VIP so the client sees the reply coming from the address it
// queried. For UDP it also rewrites a non-53 source port back to 53
// (NAT'd replies) and zeroes the UDP checksum (legal for IPv4), then
// recomputes the IP header checksum and re-injects the frame via conn
// toward the MAC already present in the frame's Ethernet destination.
// Non-IPv4 or too-short frames are ignored.
func (d *DSRProxy) handleResponse(data []byte, conn net.PacketConn) {
	if len(data) < 14+20 {
		return
	}

	ethType := binary.BigEndian.Uint16(data[12:14])
	if ethType != 0x0800 { // Only handle IPv4
		return
	}

	ipStart := 14
	verIhl := data[ipStart]
	ihl := int(verIhl & 0x0f)
	ipHeaderLen := ihl * 4
	if ipHeaderLen < 20 || len(data) < ipStart+ipHeaderLen {
		return
	}

	srcIP := net.IP(data[ipStart+12 : ipStart+16])
	dstIP := net.IP(data[ipStart+16 : ipStart+20])
	protocol := data[ipStart+9]

	log.Printf("Response BEFORE: src_ip=%s, dst_ip=%s, proto=%d", srcIP, dstIP, protocol)

	// Rewrite source IP from backend IP to VIP (if needed)
	if !srcIP.Equal(d.vip) {
		copy(data[ipStart+12:ipStart+16], d.vip.To4())
		log.Printf("Response: Rewriting src_ip %s -> %s", srcIP, d.vip)
	}

	// Fix UDP source port if it's not 53
	if protocol == 17 && len(data) >= ipStart+ipHeaderLen+8 {
		udpStart := ipStart + ipHeaderLen
		srcPort := binary.BigEndian.Uint16(data[udpStart : udpStart+2])
		if srcPort != 53 {
			log.Printf("Response: Fixing UDP src_port %d -> 53", srcPort)
			binary.BigEndian.PutUint16(data[udpStart:udpStart+2], 53)
		}
		// Set UDP checksum to 0 (optional for IPv4)
		data[udpStart+6] = 0
		data[udpStart+7] = 0
	}

	log.Printf("Response AFTER: src_ip=%s, dst_ip=%s", d.vip, dstIP)

	// Recalculate IP checksum (header bytes were modified above)
	data[ipStart+10] = 0
	data[ipStart+11] = 0
	csum := ipv4Checksum(data[ipStart : ipStart+ipHeaderLen])
	binary.BigEndian.PutUint16(data[ipStart+10:ipStart+12], csum)

	// Get destination MAC from original packet (client's MAC)
	// The packet is already set up correctly for L2 routing back to client
	// Just send it via the main interface

	// Send back via main interface
	addr := &raw.Addr{HardwareAddr: net.HardwareAddr(data[0:6])}
	if _, err := conn.WriteTo(data, addr); err != nil {
		log.Printf("response writeto err: %v", err)
	} else {
		log.Printf("Response sent back to client MAC=%s", net.HardwareAddr(data[0:6]))
	}
}
// regonfigure (sic — the spelling is kept because the non-linux stub and
// callers in this package use the same name) rebuilds the capture and
// injection plumbing after any With* setter changed the configuration:
// it closes the existing handles, reopens pcap on the VIP interface with
// a BPF filter matching DNS requests to the VIP and UDP traffic from it,
// and opens raw ETH_P_ALL sockets on both the calico and VIP interfaces.
// It is a no-op when nothing changed since the last call.
func (d *DSRProxy) regonfigure() error {
	d.mu.Lock()
	defer d.mu.Unlock()

	if !d.configChanged {
		return nil
	}

	// Release the previous handles before reopening with the new config.
	if !d.closed {
		d.Close()
	}

	klog.Info("reconfigure DSR proxy")
	klog.Infof("VIP: %s on interface %s", d.vip.String(), d.vipInterface.Name)
	klog.Infof("Backend: %s with MAC %s", d.backendIP.String(), d.backendMAC.String())
	klog.Infof("Calico interface: %s", d.calicoInterface.Name)

	var err error
	d.pcapHandle, err = pcap.OpenLive(d.vipInterface.Name, 65536, false, pcap.BlockForever)
	if err != nil {
		klog.Error("pcap openlive failed:", err)
		return err
	}

	// Capture DNS requests addressed to the VIP plus any UDP sourced from
	// it (candidate NAT'd responses, filtered further in start()).
	bpf := fmt.Sprintf("(dst host %s and dst port 53) or (src host %s and udp)",
		d.vip.String(), d.vip.String())
	if err := d.pcapHandle.SetBPFFilter(bpf); err != nil {
		klog.Errorf("error: set bpf failed: %v", err)
		return err
	}

	d.backendConn, err = raw.ListenPacket(d.calicoInterface, syscall.ETH_P_ALL, nil)
	if err != nil {
		klog.Errorf("raw listen on send interface: %v", err)
		return err
	}

	d.responseConn, err = raw.ListenPacket(d.vipInterface, syscall.ETH_P_ALL, nil)
	if err != nil {
		klog.Errorf("raw listen on response interface: %v", err)
		return err
	}

	d.closed = false
	d.configChanged = false
	return nil
}

View File

@@ -7,6 +7,7 @@ import (
"net"
"net/http"
"net/url"
"os"
"strings"
"time"
@@ -24,6 +25,7 @@ var WSKey = key{}
type proxyServer struct {
proxy *echo.Echo
dnsServer string
stopped bool
}
func NewProxyServer() (*proxyServer, error) {
@@ -57,13 +59,28 @@ func (p *proxyServer) Start() error {
p.proxy.Use(
func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
if strings.HasSuffix(c.Request().Host, ".olares.local") {
if c.IsWebSocket() {
ctx := c.Request().Context()
ctx = context.WithValue(ctx, WSKey, true)
r := c.Request().WithContext(ctx)
c.SetRequest(r)
if strings.HasSuffix(c.Request().Host, ".olares.local") ||
strings.HasSuffix(c.Request().Host, "-olares.local") {
ctx := c.Request().Context()
clientIp := ""
if ra := c.Request().RemoteAddr; ra != "" {
if h, p, err := net.SplitHostPort(ra); err == nil {
klog.Info("Intranet request from ", h, ":", p)
ctx = context.WithValue(ctx, proxyInfoCtxKey, proxyInfo{
SrcIP: h,
SrcPort: p,
})
clientIp = h
}
}
if c.IsWebSocket() {
ctx = context.WithValue(ctx, WSKey, true)
}
r := c.Request().WithContext(ctx)
if clientIp != "" {
r.Header.Set("X-Forwarded-For", clientIp)
}
c.SetRequest(r)
return next(c)
}
@@ -76,9 +93,14 @@ func (p *proxyServer) Start() error {
p.proxy.Use(middleware.ProxyWithConfig(config))
go func() {
err := p.proxy.Start(":80")
if err != nil {
klog.Error(err)
for !p.stopped {
p.proxy.ListenerNetwork = "tcp4"
err := p.proxy.Start("0.0.0.0:80")
if err != nil {
klog.Error(err)
}
time.Sleep(10 * time.Second)
}
}()
@@ -89,6 +111,7 @@ func (p *proxyServer) Close() error {
if p.proxy != nil {
return p.proxy.Close()
}
p.stopped = true
return nil
}
@@ -103,7 +126,25 @@ func (p *proxyServer) Next(c echo.Context) *middleware.ProxyTarget {
if c.IsWebSocket() {
scheme = "wss://"
}
proxyPass, err := url.Parse(scheme + c.Request().Host + ":443")
var (
proxyPass *url.URL
err error
)
requestHost := c.Request().Host
if strings.HasSuffix(requestHost, "-olares.local") {
// intranet request, and host pattern is appid-<username>-olares.local for windows and linux client
tokens := strings.Split(requestHost, "-")
if len(tokens) < 3 {
klog.Error("invalid intranet request host, ", requestHost)
return nil
}
requestHost = strings.Join(tokens, ".")
c.Request().Host = requestHost
proxyPass, err = url.Parse(scheme + requestHost + ":444")
} else {
proxyPass, err = url.Parse(scheme + c.Request().Host + ":444")
}
if err != nil {
klog.Error("parse proxy target error, ", err)
return nil
@@ -133,28 +174,67 @@ func (p *proxyServer) initTransport() http.RoundTripper {
return transport
}
type ctxKey string
const proxyInfoCtxKey ctxKey = "proxy-info"
type proxyInfo struct {
SrcIP string
SrcPort string
}
func (p *proxyServer) customDialContext(d *net.Dialer) func(ctx context.Context, network, addr string) (net.Conn, error) {
return func(ctx context.Context, network, addr string) (net.Conn, error) {
_, port, _ := net.SplitHostPort(addr)
// Force proxying to localhost
klog.Info("addr: ", addr, " port: ", port, " network: ", network)
if port == "" {
port = "443"
port = "444"
}
newAddr := net.JoinHostPort("127.0.0.1", port)
hostname, err := os.Hostname()
if err != nil {
klog.Error("get hostname error, ", err)
hostname = "localhost"
} else {
hostname = hostname + ".cluster.local"
}
newAddr := net.JoinHostPort(hostname, port)
isWs := false
if v := ctx.Value(WSKey); v != nil {
isWs = v.(bool)
}
proxyDial := func(ctx context.Context, netDialer *net.Dialer, network, addr string) (net.Conn, error) {
conn, err := netDialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
if v := ctx.Value(proxyInfoCtxKey); v != nil {
if pi, ok := v.(proxyInfo); ok {
dstIP, dstPort := addrToIPPort(conn.RemoteAddr())
family := ipFamily(pi.SrcIP, dstIP) // TCP4 or TCP6
hdr := fmt.Sprintf("PROXY %s %s %s %s %s\r\n", family, pi.SrcIP, dstIP, pi.SrcPort, dstPort)
if _, werr := conn.Write([]byte(hdr)); werr != nil {
klog.Error("failed to write PROXY header: ", werr)
conn.Close()
return nil, werr
}
}
}
return conn, nil
}
if isWs {
klog.Info("WebSocket connection detected, using upgraded dialer")
return tlsDial(ctx, d, func(ctx context.Context, network, addr string) (net.Conn, error) {
return d.DialContext(ctx, network, newAddr)
return proxyDial(ctx, d, network, newAddr)
}, network, addr, &tls.Config{InsecureSkipVerify: true})
}
return d.DialContext(ctx, network, newAddr)
return proxyDial(ctx, d, network, newAddr)
}
}
@@ -210,3 +290,31 @@ func tlsDial(ctx context.Context, netDialer *net.Dialer, dialFunc func(ctx conte
}
return conn, nil
}
// addrToIPPort extracts ip and port strings from net.Addr (like "ip:port").
// Returns "0.0.0.0","0" on failure.
func addrToIPPort(a net.Addr) (string, string) {
if a == nil {
return "0.0.0.0", "0"
}
s := a.String()
if h, p, err := net.SplitHostPort(s); err == nil {
return h, p
}
// fallback: maybe already an IP
return s, "0"
}
// ipFamily returns "TCP4" if either IP is IPv4, else "TCP6".
// If parsing fails, default to TCP4 to maximize compatibility.
func ipFamily(a, b string) string {
ipa := net.ParseIP(strings.TrimSpace(a))
ipb := net.ParseIP(strings.TrimSpace(b))
if ipa != nil && ipa.To4() == nil {
return "TCP6"
}
if ipb != nil && ipb.To4() == nil {
return "TCP6"
}
return "TCP4"
}

View File

@@ -1,15 +1,25 @@
package intranet
import "k8s.io/klog/v2"
import (
"fmt"
"k8s.io/klog/v2"
)
type Server struct {
dnsServer *mDNSServer
proxyServer *proxyServer
dsrProxy *DSRProxy
started bool
}
type ServerOptions struct {
Hosts []DNSConfig
Hosts []DNSConfig
NodeIp string
NodeIface string
DnsPodIp string
DnsPodMac string
DnsPodCalicoIface string
}
func (s *Server) Close() {
@@ -25,6 +35,10 @@ func (s *Server) Close() {
s.proxyServer.Close()
}
if s.dsrProxy != nil {
s.dsrProxy.Stop()
}
s.started = false
klog.Info("Intranet server closed")
}
@@ -43,6 +57,7 @@ func NewServer() (*Server, error) {
return &Server{
dnsServer: dnsServer,
proxyServer: proxyServer,
dsrProxy: NewDSRProxy(),
}, nil
}
@@ -72,21 +87,66 @@ func (s *Server) Start(o *ServerOptions) error {
}
}
if s.dsrProxy != nil {
err := s.dsrProxy.Start()
if err != nil {
klog.Error("start intranet dsr proxy error, ", err)
return err
}
}
s.started = true
klog.Info("Intranet server started")
return nil
}
func (s *Server) Reload(o *ServerOptions) error {
var errs []error
if s.dnsServer != nil {
s.dnsServer.SetHosts(o.Hosts, false)
err := s.dnsServer.StartAll()
if err != nil {
klog.Error("reload intranet dns server error, ", err)
return err
errs = append(errs, err)
}
}
if s.dsrProxy != nil {
err := s.dsrProxy.WithBackend(o.DnsPodIp, o.DnsPodMac)
if err != nil {
klog.Error("reload dns dsr proxy error, ", err)
errs = append(errs, err)
}
if err == nil {
err = s.dsrProxy.WithCalicoInterface(o.DnsPodCalicoIface)
if err != nil {
klog.Error("reload dns dsr proxy backend interfaces error, ", err)
errs = append(errs, err)
}
}
if err == nil {
err = s.dsrProxy.WithVIP(o.NodeIp, o.NodeIface)
if err != nil {
klog.Error("reload dns dsr proxy vip interface error, ", err)
errs = append(errs, err)
}
}
if err == nil {
err = s.dsrProxy.regonfigure()
if err != nil {
klog.Error("reload dns dsr proxy regonfigure error, ", err)
errs = append(errs, err)
}
}
}
if len(errs) > 0 {
return fmt.Errorf("reload intranet server with %d errors", len(errs))
}
klog.Info("Intranet server reloaded")
return nil
}

View File

@@ -3,12 +3,17 @@ package intranet
import (
"context"
"fmt"
"os/exec"
"strings"
"github.com/beclab/Olares/daemon/internel/intranet"
"github.com/beclab/Olares/daemon/internel/watcher"
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/beclab/Olares/daemon/pkg/nets"
"github.com/beclab/Olares/daemon/pkg/utils"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
)
@@ -38,7 +43,7 @@ func (w *applicationWatcher) Watch(ctx context.Context) {
return
}
_, _, role, err := utils.GetThisNodeName(ctx, client)
_, nodeIp, role, err := utils.GetThisNodeName(ctx, client)
if err != nil {
klog.Error("failed to get this node role: ", err)
return
@@ -59,7 +64,7 @@ func (w *applicationWatcher) Watch(ctx context.Context) {
}
o, err := w.loadServerConfig(ctx)
o, err := w.loadServerConfig(ctx, nodeIp)
if err != nil {
klog.Error("load intranet server config error, ", err)
return
@@ -85,7 +90,7 @@ func (w *applicationWatcher) Watch(ctx context.Context) {
}
}
func (w *applicationWatcher) loadServerConfig(ctx context.Context) (*intranet.ServerOptions, error) {
func (w *applicationWatcher) loadServerConfig(ctx context.Context, nodeIp string) (*intranet.ServerOptions, error) {
if w.intranetServer == nil {
klog.Warning("intranet server is nil")
return nil, nil
@@ -154,10 +159,105 @@ func (w *applicationWatcher) loadServerConfig(ctx context.Context) (*intranet.Se
}
}
nodeIface, err := nets.GetInterfaceByIp(nodeIp)
if err != nil {
klog.Error("get node interface by ip error, ", err)
return nil, err
}
options := &intranet.ServerOptions{
Hosts: hosts,
Hosts: hosts,
NodeIp: nodeIp,
NodeIface: nodeIface.Name,
}
err = w.loadDnsPodConfig(ctx, options)
if err != nil {
klog.Error("load dns pod config error, ", err)
return nil, err
}
// reload intranet server config
return options, nil
}
// loadDnsPodConfig fills o with the DNS pod's IP, MAC address, and calico
// route interface. It prefers the AdGuard Home pod; if only the cluster
// core DNS (kube-dns) pod is found, that one is used instead. Finding no
// DNS pod at all is not an error — o is simply left unchanged.
func (w *applicationWatcher) loadDnsPodConfig(ctx context.Context, o *intranet.ServerOptions) error {
	// try to find adguard dns pod ip and mac
	k8sClient, err := utils.GetKubeClient()
	if err != nil {
		klog.Error("get kube client error, ", err)
		return err
	}
	// NOTE(review): this lists every pod in all namespaces on each reload;
	// a label selector would narrow the query — confirm before changing.
	dnsPods, err := k8sClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{})
	if err != nil {
		klog.Error("list pods error, ", err)
		return err
	}
	var dnsPodIp, dnsPodMac, calicoRouteIface string
	// Application-name label; value "adguardhome" marks the AdGuard pod.
	const adguardDnsAppLabel = "applications.app.bytetrade.io/name"
	for _, pod := range dnsPods.Items {
		switch {
		case pod.Labels[adguardDnsAppLabel] == "adguardhome", pod.Labels["k8s-app"] == "kube-dns":
			// Remember the most recently seen DNS-capable pod and resolve
			// its MAC / interface from the kernel neighbor table.
			dnsPodIp = pod.Status.PodIP
			dnsPodMac, calicoRouteIface, err = getPodNeighborInfo(dnsPodIp)
			if err != nil {
				klog.Error("get adguard dns pod mac by ip error, ", err)
				return err
			}
		}
		// AdGuard wins immediately; stop scanning once it is found.
		if pod.Labels[adguardDnsAppLabel] == "adguardhome" {
			o.DnsPodIp = dnsPodIp
			o.DnsPodMac = dnsPodMac
			o.DnsPodCalicoIface = calicoRouteIface
			return nil
		}
	}
	// not found adguard dns pod, but core dns pod exists
	if dnsPodIp != "" {
		o.DnsPodIp = dnsPodIp
		o.DnsPodMac = dnsPodMac
		o.DnsPodCalicoIface = calicoRouteIface
	}
	return nil
}
// getPodNeighborInfo looks up the pod's MAC address and the host-side link
// name from the kernel's IPv4 neighbor table. If the pod has no entry yet,
// it starts a background ping to populate the table for a later retry and
// returns an error for this attempt.
func getPodNeighborInfo(podIp string) (mac, iface string, err error) {
	// family: unix.AF_INET for IPv4, unix.AF_INET6 for IPv6
	neighs, err := netlink.NeighList(0, unix.AF_INET) // 0 => all links
	if err != nil {
		klog.Error("list neighbor error, ", err)
		return
	}
	for _, n := range neighs {
		if n.IP.String() == podIp {
			// Normalize a "<nil>" string form of the hardware address to an
			// empty MAC.
			mac = n.HardwareAddr.String()
			if mac == "<nil>" {
				mac = ""
			}
			// Best effort: a failed link lookup leaves iface empty rather
			// than failing the whole call.
			if link, err := netlink.LinkByIndex(n.LinkIndex); err == nil {
				iface = link.Attrs().Name
			}
			return
		}
	}
	// try to refresh neighbor table
	// NOTE(review): fire-and-forget goroutine with no cancellation; it only
	// warms the neighbor cache so a subsequent call can succeed.
	go func() {
		cmd := exec.Command("ping", "-c", "3", podIp)
		err := cmd.Run()
		if err != nil {
			klog.Error("ping pod ip to refresh neighbor table error, ", err)
			return
		}
	}()
	return "", "", fmt.Errorf("not found pod neighbor info for ip %s", podIp)
}

View File

@@ -8,11 +8,11 @@ import (
"github.com/beclab/Olares/daemon/internel/watcher"
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/beclab/Olares/daemon/pkg/commands"
"github.com/beclab/Olares/daemon/pkg/containerd"
"github.com/beclab/Olares/daemon/pkg/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/klog/v2"
)
@@ -52,12 +52,6 @@ func (w *systemEnvWatcher) Watch(ctx context.Context) {
return
}
dc, err := utils.GetDynamicClient()
if err != nil {
klog.V(4).Infof("systemenv watcher: dynamic client not ready: %v", err)
return
}
execCtx, cancel := context.WithCancel(ctx)
w.cancel = cancel
w.running = true
@@ -69,7 +63,7 @@ func (w *systemEnvWatcher) Watch(ctx context.Context) {
klog.V(4).Info("systemenv watcher exited")
}()
startSystemEnvWatch(execCtx, dc, func(eventType watch.EventType, obj map[string]any) {
startSystemEnvWatch(execCtx, func(eventType watch.EventType, obj map[string]any) {
klog.V(5).Infof("systemenv event: %s", eventType)
if eventType != watch.Added && eventType != watch.Modified {
@@ -77,7 +71,7 @@ func (w *systemEnvWatcher) Watch(ctx context.Context) {
}
envName, _ := obj["envName"].(string)
if envName != "OLARES_SYSTEM_CDN_SERVICE" && envName != "OLARES_SYSTEM_REMOTE_SERVICE" {
if envName != "OLARES_SYSTEM_CDN_SERVICE" && envName != "OLARES_SYSTEM_REMOTE_SERVICE" && envName != "OLARES_SYSTEM_DOCKERHUB_SERVICE" {
return
}
@@ -104,6 +98,19 @@ func (w *systemEnvWatcher) Watch(ctx context.Context) {
commands.OLARES_REMOTE_SERVICE = val
klog.Infof("updated OLARES_REMOTE_SERVICE: %s -> %s", old, val)
}
case "OLARES_SYSTEM_DOCKERHUB_SERVICE":
if val != "" {
go func(endpoint string) {
if updated, err := containerd.EnsureRegistryMirror(execCtx, containerd.DefaultRegistryName, endpoint); err != nil {
klog.Errorf("failed to ensure docker.io mirror endpoint %s: %v", endpoint, err)
return
} else if updated {
klog.Infof("ensured docker.io mirror endpoint: %s", endpoint)
} else {
klog.V(5).Infof("docker.io mirror endpoint already present: %s", endpoint)
}
}(val)
}
}
})
}()
@@ -115,9 +122,14 @@ var systemEnvGVR = schema.GroupVersionResource{
Resource: "systemenvs",
}
func startSystemEnvWatch(ctx context.Context, dc dynamic.Interface, handle func(watch.EventType, map[string]any)) {
func startSystemEnvWatch(ctx context.Context, handle func(watch.EventType, map[string]any)) {
for {
// 1) List existing resources to establish initial state
dc, err := utils.GetDynamicClient()
if err != nil {
klog.V(4).Infof("systemenv watcher: dynamic client not ready: %v", err)
return
}
list, err := dc.Resource(systemEnvGVR).List(ctx, metav1.ListOptions{})
if err != nil {
select {

View File

@@ -136,22 +136,21 @@ func CheckCurrentStatus(ctx context.Context) error {
osType, osInfo, osArch, osVersion, _, err := GetMachineInfo(ctx)
if err != nil {
klog.Error("get machine info from terminus cli error, ", err)
return err
}
diskSize, err := utils.GetDiskSize()
diskSize, err := utils.GetNodeFilesystemTotalSize()
if err != nil {
return err
klog.Error("get node filesystem total size error, ", err)
}
gpu, err := utils.GetGpuInfo()
if err != nil {
return err
klog.Error("get gpu info error, ", err)
}
hostname, err := os.Hostname()
if err != nil {
return err
klog.Error("get hostname error, ", err)
}
CurrentState.OsArch = osArch

View File

@@ -258,7 +258,7 @@ type IpChangingValidator struct{}
func (i IpChangingValidator) ValidateOp(op commands.Interface) error {
switch op.OperationName() {
case commands.Reboot, commands.Shutdown, commands.Uninstall, commands.SetSSHPassword:
case commands.Reboot, commands.Shutdown, commands.SetSSHPassword:
return nil
}

View File

@@ -1,7 +1,9 @@
package containerd
import (
"context"
"fmt"
"net/url"
"strings"
"github.com/containerd/containerd/reference"
@@ -48,6 +50,77 @@ func GetRegistryMirror(ctx *fiber.Ctx) (*Mirror, error) {
return &mirror, nil
}
// EnsureRegistryMirror guarantees that endpoint is the first mirror entry
// for the given registry (DefaultRegistryName when registry is empty).
// It returns updated=true when the configuration was changed, persisted,
// and containerd restarted; updated=false when the endpoint was already
// first. An invalid or empty endpoint, or any persistence/restart failure,
// yields an error.
func EnsureRegistryMirror(ctx context.Context, registry string, endpoint string) (bool, error) {
	if registry == "" {
		registry = DefaultRegistryName
	}
	endpoint = strings.TrimSpace(endpoint)
	if endpoint == "" {
		return false, fmt.Errorf("endpoint is required")
	}
	// Only absolute http(s) URLs with a host are acceptable mirrors.
	parsed, err := url.ParseRequestURI(endpoint)
	if err != nil || parsed == nil || parsed.Host == "" || (parsed.Scheme != "http" && parsed.Scheme != "https") {
		return false, fmt.Errorf("invalid mirror endpoint: %s", endpoint)
	}
	endpoint = parsed.String()
	cfg, err := getConfig()
	if err != nil {
		return false, err
	}
	criCfg, err := getCRIPluginConfig(cfg)
	if err != nil {
		return false, err
	}
	if criCfg.Registry.Mirrors == nil {
		criCfg.Registry.Mirrors = make(map[string]Mirror)
	}
	mirror := criCfg.Registry.Mirrors[registry]
	current := mirror.Endpoints
	// Desired order: the ensured endpoint first, then the remaining entries
	// in their original order with duplicates of it removed.
	desired := make([]string, 0, len(current)+1)
	desired = append(desired, endpoint)
	for _, ep := range current {
		if ep != endpoint {
			desired = append(desired, ep)
		}
	}
	// Nothing to persist when the list is already in the desired shape.
	same := len(current) == len(desired)
	for i := 0; same && i < len(current); i++ {
		same = current[i] == desired[i]
	}
	if same {
		return false, nil
	}
	mirror.Endpoints = desired
	criCfg.Registry.Mirrors[registry] = mirror
	if err := updateCRIPluginConfig(cfg, criCfg); err != nil {
		return false, err
	}
	// The new mirror only takes effect after containerd reloads its config.
	if err := restartContainerd(ctx); err != nil {
		klog.Errorf("failed to restart containerd: %v", err)
		return false, err
	}
	return true, nil
}
func UpdateRegistryMirror(ctx *fiber.Ctx) (*Mirror, error) {
registry := ctx.Params(ParamRegistryName)
if registry == "" {

View File

@@ -1,20 +1,20 @@
package utils
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
syscall "golang.org/x/sys/unix"
"k8s.io/klog/v2"
)
func GetDiskSize() (uint64, error) {
fs := syscall.Statfs_t{}
err := syscall.Statfs("/", &fs)
if err != nil {
klog.Error("get disk space size error, ", err)
return 0, err
}
size := fs.Blocks * uint64(fs.Bsize)
return size, nil
return GetDiskTotalBytesForPath("/")
}
func GetDiskAvailableSpace(path string) (uint64, error) {
@@ -28,3 +28,128 @@ func GetDiskAvailableSpace(path string) (uint64, error) {
available := fs.Bavail * uint64(fs.Bsize)
return available, nil
}
// deviceForPath returns the source device of the filesystem containing the
// given absolute path, by scanning /proc/mounts and choosing the deepest
// (longest) mount point that actually contains the path.
//
// Fixes over the previous version: a bare prefix match treated /datafoo as
// living under the /data mount; matching now requires a path-component
// boundary. The scanner's error is also checked after the loop.
func deviceForPath(path string) (string, error) {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return "", err
	}
	defer f.Close()
	var bestDevice, bestMount string
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// /proc/mounts format: device mountpoint fstype options dump pass
		// (space-separated; the mount point may carry \040-style escapes).
		fields := strings.Fields(scanner.Text())
		if len(fields) < 2 {
			continue
		}
		device := fields[0]
		// Undo the fstab-style escaping of spaces in the mount point.
		mount := strings.ReplaceAll(fields[1], "\\040", " ")
		// Accept the mount only when path lies on or under it, then keep
		// the deepest such mount (prevents nested mounts from shadowing).
		if mount == "/" || path == mount || strings.HasPrefix(path, mount+"/") {
			if len(mount) > len(bestMount) {
				bestMount = mount
				bestDevice = device
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}
	if bestDevice == "" {
		return "", fmt.Errorf("no device found for path %s", path)
	}
	return bestDevice, nil
}
// topBlockDeviceName maps a device path (e.g. /dev/sda1) to its top-level
// block device name (e.g. sda) via /sys/class/block.
//
// Fix: for a whole-disk device (e.g. /dev/sda) the resolved sysfs path is
// .../block/sda, whose parent directory is "block" — the old code returned
// "block" and the subsequent sysfs size lookup failed. Such devices are now
// recognized as already top-level.
func topBlockDeviceName(devPath string) (string, error) {
	name := filepath.Base(devPath) // e.g. sda1, nvme0n1p1, dm-0
	// Handle LVM devices specially - they may not exist in /sys/class/block
	if strings.HasPrefix(devPath, "/dev/mapper/") {
		// If the mapper path is a symlink to a dm-* device, use that name.
		if realPath, err := filepath.EvalSymlinks(devPath); err == nil {
			if strings.HasPrefix(realPath, "/dev/dm-") {
				return filepath.Base(realPath), nil
			}
		}
		// Unresolvable mapper device: return the basename and let
		// diskSizeBySysfs fail gracefully for it.
		return name, nil
	}
	sysPath := filepath.Join("/sys/class/block", name)
	real, err := filepath.EvalSymlinks(sysPath)
	if err != nil {
		// Non-/dev device paths (e.g. UUID paths) may not resolve; report
		// the lookup failure to the caller.
		return "", err
	}
	// For a partition, real looks like .../block/sda/sda1 and the parent
	// directory name is the whole disk. For a whole disk, the parent is the
	// "block" class directory itself, meaning name is already top-level.
	parent := filepath.Base(filepath.Dir(real))
	if parent == "" || parent == "block" {
		parent = name
	}
	return parent, nil
}
// diskSizeBySysfs reads /sys/class/block/<dev>/size — a sector count — and
// converts it to bytes. sysfs reports sizes in fixed 512-byte sectors.
func diskSizeBySysfs(topDev string) (uint64, error) {
	devDir := filepath.Join("/sys/class/block", topDev)
	// Bail out early when the device is absent from sysfs (e.g. an
	// unresolved device-mapper name).
	if _, err := os.Stat(devDir); err != nil {
		klog.V(4).Infof("Block device %s not found in /sys/class/block, skipping size calculation", topDev)
		return 0, fmt.Errorf("block device %s not accessible: %w", topDev, err)
	}
	raw, err := ioutil.ReadFile(filepath.Join(devDir, "size"))
	if err != nil {
		return 0, err
	}
	sectors, err := strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64)
	if err != nil {
		return 0, err
	}
	const sectorSize = 512
	return sectors * sectorSize, nil
}
// GetDiskTotalBytesForPath resolves the physical block device backing the
// given path (a mount point or any file path) and returns that device's
// total size in bytes. When the sysfs lookup fails — common for LVM /
// device-mapper setups — it falls back to statfs on the path, which yields
// the filesystem size instead of the raw device size.
func GetDiskTotalBytesForPath(path string) (uint64, error) {
	abs, err := filepath.Abs(path)
	if err != nil {
		return 0, err
	}
	dev, err := deviceForPath(abs)
	if err != nil {
		return 0, err
	}
	top, err := topBlockDeviceName(dev)
	if err != nil {
		return 0, err
	}
	size, sysErr := diskSizeBySysfs(top)
	if sysErr == nil {
		return size, nil
	}
	// sysfs did not work; try the statfs fallback before giving up.
	klog.V(4).Infof("Failed to get disk size via sysfs for %s, trying alternative method: %v", top, sysErr)
	fs := syscall.Statfs_t{}
	if statErr := syscall.Statfs(abs, &fs); statErr == nil {
		total := fs.Blocks * uint64(fs.Bsize)
		klog.V(4).Infof("Using statfs fallback for %s: %d bytes", abs, total)
		return total, nil
	}
	// Both methods failed; surface the original sysfs error.
	return 0, fmt.Errorf("failed to get disk size for device %s: %w", dev, sysErr)
}

View File

@@ -0,0 +1,156 @@
package utils
import (
"bufio"
"errors"
"fmt"
"os"
"regexp"
"strings"
unix "golang.org/x/sys/unix"
"k8s.io/klog/v2"
)
// FilesystemStat represents capacity and inode stats for a mounted filesystem.
type FilesystemStat struct {
	Device     string // mount source device as listed in mountinfo (e.g. /dev/sda1)
	MountPoint string // mount point path, with fstab-style escapes decoded
	FSType     string // filesystem type reported by the kernel (e.g. ext4)
	SizeBytes  uint64 // total filesystem size in bytes (Blocks * Bsize)
	FreeBytes  uint64 // free bytes (Bfree * Bsize), including root-reserved space
	AvailBytes uint64 // bytes available to unprivileged users (Bavail * Bsize)
	Files      uint64 // total number of inodes
	FilesFree  uint64 // number of free inodes
	ReadOnly   bool   // true when the per-mount options include "ro"
}
const (
	// ignoredMountPointsPattern matches pseudo and per-container mount
	// points (/dev, /proc, /sys, and docker/containerd/kubelet internals)
	// that must not count toward node storage capacity.
	ignoredMountPointsPattern = "^/(dev|proc|sys|var/lib/docker/.+|var/lib/containerd/.+|var/lib/kubelet/.+)($|/)"
	// ignoredFSTypesPattern matches virtual filesystem types that do not
	// represent real storage.
	ignoredFSTypesPattern = "^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$"
)
// GetNodeFilesystemTotalSize sums the sizes of all real, /dev-backed
// filesystems on the node, skipping loop devices and counting each device
// only once even when it is mounted in several places.
func GetNodeFilesystemTotalSize() (uint64, error) {
	stats, err := GetFilesystemStats()
	if err != nil {
		return 0, fmt.Errorf("unable to get filesystem stats: %w", err)
	}
	seen := make(map[string]struct{})
	var total uint64
	for _, fsStat := range stats {
		// Only physical-looking devices; loop devices are excluded.
		if !strings.HasPrefix(fsStat.Device, "/dev") || strings.HasPrefix(fsStat.Device, "/dev/loop") {
			continue
		}
		// A device with multiple mounts contributes its first-seen size only.
		if _, dup := seen[fsStat.Device]; dup {
			continue
		}
		seen[fsStat.Device] = struct{}{}
		total += fsStat.SizeBytes
	}
	return total, nil
}
// GetFilesystemStats returns filesystem stats for all mounts, filtered by built-in regex patterns.
// Mount points matching ignoredMountPointsPattern and filesystem types
// matching ignoredFSTypesPattern are skipped; mounts that cannot be
// statfs'd are logged and skipped rather than failing the whole call.
func GetFilesystemStats() ([]FilesystemStat, error) {
	mounts, err := readMountInfo()
	if err != nil {
		return nil, fmt.Errorf("unable to read mount info: %w", err)
	}
	// The patterns are package constants; Compile failure here would mean a
	// programming error in the constant.
	mpFilter, err := regexp.Compile(ignoredMountPointsPattern)
	if err != nil {
		return nil, fmt.Errorf("invalid built-in mount points regex: %w", err)
	}
	fsFilter, err := regexp.Compile(ignoredFSTypesPattern)
	if err != nil {
		return nil, fmt.Errorf("invalid built-in fs types regex: %w", err)
	}
	var out []FilesystemStat
	for _, m := range mounts {
		if mpFilter.MatchString(m.mountPoint) {
			continue
		}
		if fsFilter.MatchString(m.fsType) {
			continue
		}
		// A mount is read-only when "ro" appears among its options.
		var readOnly bool
		for _, opt := range strings.Split(m.options, ",") {
			if opt == "ro" {
				readOnly = true
				break
			}
		}
		buf := new(unix.Statfs_t)
		if err := unix.Statfs(m.mountPoint, buf); err != nil {
			// Best effort: an unreachable mount (stale NFS, permissions)
			// is skipped, not fatal.
			klog.Warningf("unable to statfs mount point %q: %v", m.mountPoint, err)
			continue
		}
		// Statfs reports counts in filesystem blocks; convert to bytes.
		bsize := uint64(buf.Bsize)
		out = append(out, FilesystemStat{
			Device:     m.device,
			MountPoint: m.mountPoint,
			FSType:     m.fsType,
			SizeBytes:  uint64(buf.Blocks) * bsize,
			FreeBytes:  uint64(buf.Bfree) * bsize,
			AvailBytes: uint64(buf.Bavail) * bsize,
			Files:      uint64(buf.Files),
			FilesFree:  uint64(buf.Ffree),
			ReadOnly:   readOnly,
		})
	}
	return out, nil
}
// mountInfo holds the subset of a /proc/[pid]/mountinfo entry that the
// filesystem stat helpers need.
type mountInfo struct {
	device     string // mount source device
	mountPoint string // mount point path (fstab escapes decoded)
	fsType     string // filesystem type
	options    string // per-mount options (field 6 of mountinfo)
}
// readMountInfo parses /proc/1/mountinfo (falling back to the current
// process's mountinfo when PID 1's is absent) and returns the essential
// fields of every mount entry.
//
// Fix: the scan for the "-" optional-fields separator previously indexed
// parts[m+1] without a bound and could panic on a malformed line; both the
// separator scan and the post-separator field accesses are now bounded.
func readMountInfo() ([]mountInfo, error) {
	file, err := os.Open("/proc/1/mountinfo")
	if errors.Is(err, os.ErrNotExist) {
		file, err = os.Open("/proc/self/mountinfo")
	}
	if err != nil {
		return nil, err
	}
	defer file.Close()
	var mounts []mountInfo
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		if len(parts) < 10 {
			return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text())
		}
		// Optional fields (parts[6:]) end at a literal "-" separator; scan
		// forward to it without running off the end of the line.
		m := 5
		for m+1 < len(parts) && parts[m+1] != "-" {
			m++
		}
		// After the separator we need the fstype (m+2) and source (m+3).
		if m+3 >= len(parts) {
			return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text())
		}
		// Unescape as per fstab: \040 (space), \011 (tab).
		mountPoint := strings.ReplaceAll(parts[4], "\\040", " ")
		mountPoint = strings.ReplaceAll(mountPoint, "\\011", "\t")
		mounts = append(mounts, mountInfo{
			device:     parts[m+3],
			mountPoint: mountPoint,
			fsType:     parts[m+2],
			options:    parts[5],
		})
	}
	return mounts, scanner.Err()
}

View File

@@ -4,6 +4,8 @@ import (
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/beclab/Olares/cli/pkg/web5/jws"
"github.com/beclab/Olares/daemon/pkg/commands"
@@ -21,6 +23,9 @@ func ValidateJWS(token string) (bool, string, error) {
// Validate the JWS token with a 20-minute expiration time
checkJWS, err := jws.CheckJWS(token, 20*60*1000)
if err != nil {
if strings.HasPrefix(err.Error(), "timestamp") {
err = fmt.Errorf("%v, server time: %s", err, time.Now().UTC().Format(time.RFC3339))
}
klog.Errorf("failed to check JWS: %v, on %s", err, jws.DIDGateURL)
return false, "", err
}

Some files were not shown because too many files have changed in this diff Show More