Compare commits
365 Commits
daemon/fix
...
fix/kvrock
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
573d60c071 | ||
|
|
0b5f927034 | ||
|
|
605b862937 | ||
|
|
0110413528 | ||
|
|
0726d70b58 | ||
|
|
8abf6d8b65 | ||
|
|
b0f495c37a | ||
|
|
4e9b8d840d | ||
|
|
57579813de | ||
|
|
97dd238c44 | ||
|
|
3095530d0d | ||
|
|
3e8120baf6 | ||
|
|
0685c4326b | ||
|
|
af9e1993d1 | ||
|
|
ba8868d771 | ||
|
|
7ee1d7cae1 | ||
|
|
cb17633f57 | ||
|
|
18e94af22b | ||
|
|
b81665afe1 | ||
|
|
acb0fae406 | ||
|
|
e5fef95f4e | ||
|
|
55fe22ed4c | ||
|
|
fee742d756 | ||
|
|
36b4e792f6 | ||
|
|
8810a7657e | ||
|
|
59d87c860b | ||
|
|
8cda14a78c | ||
|
|
a4c0161cb1 | ||
|
|
505a438fa3 | ||
|
|
1a794c9fc4 | ||
|
|
03e8dd0ac7 | ||
|
|
eea2dfb67a | ||
|
|
316ffe4f35 | ||
|
|
08a380df61 | ||
|
|
58e869604a | ||
|
|
a61dff75b9 | ||
|
|
0b9c1a09b9 | ||
|
|
3178e06349 | ||
|
|
69c341060b | ||
|
|
d56daad3f0 | ||
|
|
2b239284b3 | ||
|
|
e2e8b84eef | ||
|
|
7afb59cd3a | ||
|
|
6474487e75 | ||
|
|
3fd15d418b | ||
|
|
243ad15e66 | ||
|
|
56367c964e | ||
|
|
8911b33d3e | ||
|
|
f7c7939493 | ||
|
|
8eee97f779 | ||
|
|
d3c1a37378 | ||
|
|
4a8303d050 | ||
|
|
61df0056ba | ||
|
|
75c48ef5ee | ||
|
|
4fed6bd618 | ||
|
|
581e252f30 | ||
|
|
f1d479cf1d | ||
|
|
d070e53480 | ||
|
|
89719a8d48 | ||
|
|
085bef64b5 | ||
|
|
963ca8ab48 | ||
|
|
59922bc5cf | ||
|
|
1f4b3f94ca | ||
|
|
aa9e89c0c9 | ||
|
|
760aef5521 | ||
|
|
ca1d7ebd09 | ||
|
|
a282878cfe | ||
|
|
95ad815142 | ||
|
|
984582c520 | ||
|
|
d10e6f0e20 | ||
|
|
0db6227f98 | ||
|
|
46aa153989 | ||
|
|
3cfd619d9d | ||
|
|
82e3d7d2d4 | ||
|
|
9188718cb6 | ||
|
|
7f27a03e84 | ||
|
|
202a17dd6f | ||
|
|
fe6817ff78 | ||
|
|
3991bc2e08 | ||
|
|
c84e4deded | ||
|
|
3a19d380f3 | ||
|
|
21cf7466ee | ||
|
|
9a0db453d3 | ||
|
|
3021a88e70 | ||
|
|
232c277412 | ||
|
|
d5e0523c6a | ||
|
|
03641fb388 | ||
|
|
023208603c | ||
|
|
21d10c37b3 | ||
|
|
5be2c61091 | ||
|
|
da12178933 | ||
|
|
b6484e1a19 | ||
|
|
206c946408 | ||
|
|
c57c67db24 | ||
|
|
1ed26c8264 | ||
|
|
18ece294ce | ||
|
|
2f44ae273f | ||
|
|
a6457f0a2a | ||
|
|
3f6bc2bf36 | ||
|
|
f7248a1c74 | ||
|
|
54fc939ea3 | ||
|
|
420bb1d805 | ||
|
|
39c0d2c777 | ||
|
|
d8e3a64b61 | ||
|
|
78dbda300b | ||
|
|
16440bc3c5 | ||
|
|
f5b8d226c9 | ||
|
|
a80142cdd7 | ||
|
|
e69364d329 | ||
|
|
6facfd93ee | ||
|
|
7e9b0bcdc5 | ||
|
|
bb461e8573 | ||
|
|
926058cbd0 | ||
|
|
44d56f64e1 | ||
|
|
8074e7dee9 | ||
|
|
67af7ee3fa | ||
|
|
e6b3624bae | ||
|
|
c27c8a61f1 | ||
|
|
79e6d4b6e6 | ||
|
|
ea15f6d04b | ||
|
|
dffcafbfd2 | ||
|
|
e30afb517b | ||
|
|
97a701c7e4 | ||
|
|
24c68ada0b | ||
|
|
ec5358f9b0 | ||
|
|
03bb1ab2b8 | ||
|
|
d5754b8977 | ||
|
|
8017975124 | ||
|
|
66b77ed5a1 | ||
|
|
b990d50b01 | ||
|
|
f1890e304b | ||
|
|
587ea07a61 | ||
|
|
e185931214 | ||
|
|
78fe2b29d2 | ||
|
|
9fc92b4f32 | ||
|
|
d33a8b7d31 | ||
|
|
825a05b02f | ||
|
|
6aa9b08b63 | ||
|
|
dcb2505c8e | ||
|
|
4917a2d2ab | ||
|
|
aba1d3336d | ||
|
|
7c2c68e03b | ||
|
|
ff30a31748 | ||
|
|
3d8d351996 | ||
|
|
eea8f607fa | ||
|
|
d3f357eb13 | ||
|
|
e19ef85071 | ||
|
|
1e7cc5b6ad | ||
|
|
6e4c27136a | ||
|
|
afb1e5b9f7 | ||
|
|
ed90b16fd3 | ||
|
|
2901fcfd24 | ||
|
|
c918459a8e | ||
|
|
9d3c560648 | ||
|
|
c901c54716 | ||
|
|
d925999a70 | ||
|
|
aa5aa78677 | ||
|
|
fd37490fcd | ||
|
|
d55fb76a71 | ||
|
|
ba3954dc0f | ||
|
|
faf20cdf0b | ||
|
|
6321909582 | ||
|
|
355f7c4e69 | ||
|
|
2c3c949bc9 | ||
|
|
babf756bd5 | ||
|
|
c341e22f76 | ||
|
|
0a0e52dd3d | ||
|
|
081b4064a1 | ||
|
|
9a224ea780 | ||
|
|
ab3a6ba34e | ||
|
|
2ec8300663 | ||
|
|
8762f26c04 | ||
|
|
65e50afd27 | ||
|
|
aff0b38c0b | ||
|
|
fefd635f6c | ||
|
|
a8b410a0da | ||
|
|
841b5229e6 | ||
|
|
89421058bc | ||
|
|
4d5f69e9dc | ||
|
|
8cb7ee6aad | ||
|
|
ab62c06d07 | ||
|
|
d85c81ff57 | ||
|
|
94d07adf9c | ||
|
|
3eeefb18c2 | ||
|
|
34b58757ec | ||
|
|
0df243184c | ||
|
|
99420a8a48 | ||
|
|
b013bf6ea9 | ||
|
|
1bedb4d182 | ||
|
|
f844d1221e | ||
|
|
7950d1be7d | ||
|
|
ffdeb91dcd | ||
|
|
a356b13d5a | ||
|
|
db61f05fb6 | ||
|
|
26937ab505 | ||
|
|
3dc2132e72 | ||
|
|
b50f2bbf6c | ||
|
|
16a0a5556d | ||
|
|
32166687ec | ||
|
|
db3498e0a0 | ||
|
|
2dc70ede78 | ||
|
|
694f385d2b | ||
|
|
407c126419 | ||
|
|
18746c917e | ||
|
|
01324970b4 | ||
|
|
b068669c3c | ||
|
|
bc134283d9 | ||
|
|
9f3a0f3c32 | ||
|
|
ca1ab3fef9 | ||
|
|
b6394cc39c | ||
|
|
36915f5f03 | ||
|
|
1ad305f874 | ||
|
|
58cdd7de69 | ||
|
|
4cee006a1e | ||
|
|
7bbc53bef9 | ||
|
|
1432168ec0 | ||
|
|
534ae8dd3a | ||
|
|
0a25611cf5 | ||
|
|
17990b3558 | ||
|
|
cb80d04265 | ||
|
|
0194a493ab | ||
|
|
06e49cb638 | ||
|
|
93dea60906 | ||
|
|
177f955a6b | ||
|
|
324a0b4071 | ||
|
|
132d6432cc | ||
|
|
4c51efb0b7 | ||
|
|
8f0f2e5844 | ||
|
|
0ae1524682 | ||
|
|
b24ba06794 | ||
|
|
ec6ce88e08 | ||
|
|
7839bed160 | ||
|
|
39d3689d01 | ||
|
|
ef347ff8ef | ||
|
|
908629dd9a | ||
|
|
4cea6ab238 | ||
|
|
a0e8a69848 | ||
|
|
df2b5b4274 | ||
|
|
f18d3af3b4 | ||
|
|
b4a447b596 | ||
|
|
d329630509 | ||
|
|
1af84b046d | ||
|
|
84e8543309 | ||
|
|
09f7ecd295 | ||
|
|
1a8dbf0f2c | ||
|
|
3f1e695581 | ||
|
|
8881503ca6 | ||
|
|
317da8a13e | ||
|
|
316d719d64 | ||
|
|
01e1b79674 | ||
|
|
9b7ff997b9 | ||
|
|
6d5c2a5e2b | ||
|
|
d0185a484f | ||
|
|
aadacbf729 | ||
|
|
86290d1ce9 | ||
|
|
d5ddd59997 | ||
|
|
64883f1752 | ||
|
|
ef0b8d3180 | ||
|
|
101379e6ba | ||
|
|
80947af962 | ||
|
|
9ebb80a111 | ||
|
|
37e99b977c | ||
|
|
dcbc505e7a | ||
|
|
9f518d6c4b | ||
|
|
6f88df0570 | ||
|
|
f97c9521f3 | ||
|
|
61aa638be9 | ||
|
|
6285359f31 | ||
|
|
f72987d55f | ||
|
|
33292988bb | ||
|
|
261cd45535 | ||
|
|
f9994e7e88 | ||
|
|
b0ecfefa09 | ||
|
|
e1e4528db6 | ||
|
|
6eecd514e4 | ||
|
|
5b4464533b | ||
|
|
62233642ad | ||
|
|
26910b80b9 | ||
|
|
306c7a2480 | ||
|
|
d26f4f1ac2 | ||
|
|
1509ab6435 | ||
|
|
df0fcb1801 | ||
|
|
359a269e88 | ||
|
|
f621aeef54 | ||
|
|
10ce9b44fc | ||
|
|
6d5e66b73b | ||
|
|
2f701510e0 | ||
|
|
ec38cbd285 | ||
|
|
640d8c1bf4 | ||
|
|
c570cf8fc2 | ||
|
|
9e18f11822 | ||
|
|
121482528b | ||
|
|
ac482bceae | ||
|
|
3692f5ed7d | ||
|
|
ce32e32433 | ||
|
|
fdeea2f4a1 | ||
|
|
837aa2037f | ||
|
|
45065b03e3 | ||
|
|
195f8c6ec7 | ||
|
|
20202d1cdb | ||
|
|
e4d31241da | ||
|
|
83dc24df94 | ||
|
|
890eb8ea46 | ||
|
|
d57f01f88b | ||
|
|
3297f3088e | ||
|
|
f34ab4d5ce | ||
|
|
2f775e098e | ||
|
|
56600420f1 | ||
|
|
4e579bc934 | ||
|
|
8571da9761 | ||
|
|
0a591f7a3c | ||
|
|
84dec294da | ||
|
|
e3cb3e5a54 | ||
|
|
9fb31d52b7 | ||
|
|
5a7c8f539a | ||
|
|
9305b09717 | ||
|
|
25b2ff91af | ||
|
|
7f6091afb1 | ||
|
|
fe3acf669e | ||
|
|
18950cc43b | ||
|
|
d25bde12c3 | ||
|
|
f0542c3ea5 | ||
|
|
70185da4a7 | ||
|
|
1dc859f225 | ||
|
|
7a84a51940 | ||
|
|
d5122fac17 | ||
|
|
36167790df | ||
|
|
ad5e1328c5 | ||
|
|
e2b8cf1cf2 | ||
|
|
6f8d9f15b2 | ||
|
|
64215b478f | ||
|
|
f8faecdc36 | ||
|
|
656894e46a | ||
|
|
3caaa6b63b | ||
|
|
ad5acdbf1d | ||
|
|
24ef743d24 | ||
|
|
0e3e61afe3 | ||
|
|
de254bee66 | ||
|
|
96f2aa5b30 | ||
|
|
f86c4e5e52 | ||
|
|
05c2fe8c35 | ||
|
|
dcd8413dcf | ||
|
|
b4b13b0aa9 | ||
|
|
d8d4b6d9f9 | ||
|
|
2ebc4dc700 | ||
|
|
910334101c | ||
|
|
b53dc23d80 | ||
|
|
0325f41617 | ||
|
|
99176209ea | ||
|
|
694f349e10 | ||
|
|
ea872ca156 | ||
|
|
e5bdfa2840 | ||
|
|
0a474797a6 | ||
|
|
6215da6cc0 | ||
|
|
4c3cf83106 | ||
|
|
6d7c963898 | ||
|
|
1cf8dcda3b | ||
|
|
38c6f29023 | ||
|
|
fd08ef8816 | ||
|
|
a176a5dc7a | ||
|
|
e02fd1b2de | ||
|
|
1305ffe910 | ||
|
|
5a434b5b50 | ||
|
|
d8db9c458c | ||
|
|
861c5812b3 |
26
.github/workflows/check.yaml
vendored
26
.github/workflows/check.yaml
vendored
@@ -3,12 +3,28 @@ name: Lint and Test Charts
|
||||
on:
|
||||
push:
|
||||
branches: [ "main", "release-*" ]
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
paths:
|
||||
- '!docs/**'
|
||||
- 'apps/.olares/**'
|
||||
- 'build/**'
|
||||
- 'cli/**'
|
||||
- 'daemon/**'
|
||||
- 'framework/**/.olares/**'
|
||||
- 'infrastructure/**/.olares/**'
|
||||
- 'platform/**/.olares/**'
|
||||
- 'vendor/**'
|
||||
pull_request_target:
|
||||
branches: [ "main", "release-*" ]
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
paths:
|
||||
- '!docs/**'
|
||||
- 'apps/.olares/**'
|
||||
- 'build/**'
|
||||
- 'cli/**'
|
||||
- 'daemon/**'
|
||||
- 'framework/**/.olares/**'
|
||||
- 'infrastructure/**/.olares/**'
|
||||
- 'platform/**/.olares/**'
|
||||
- 'vendor/**'
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
@@ -59,7 +75,7 @@ jobs:
|
||||
steps:
|
||||
- id: generate
|
||||
run: |
|
||||
v=1.12.2-$(echo $RANDOM$RANDOM)
|
||||
v=1.12.3-$(echo $RANDOM$RANDOM)
|
||||
echo "version=$v" >> "$GITHUB_OUTPUT"
|
||||
|
||||
upload-cli:
|
||||
|
||||
32
.github/workflows/module_appservice_build_main.yaml
vendored
Normal file
32
.github/workflows/module_appservice_build_main.yaml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: App-Service Build test
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "module-appservice"
|
||||
paths:
|
||||
- 'framework/app-service/**'
|
||||
- '!framework/app-service/.olares/**'
|
||||
- '!framework/app-service/README.md'
|
||||
- '!framework/app-service/PROJECT'
|
||||
pull_request:
|
||||
branches:
|
||||
- "module-appservice"
|
||||
paths:
|
||||
- 'framework/app-service/**'
|
||||
- '!framework/app-service/.olares/**'
|
||||
- '!framework/app-service/README.md'
|
||||
- '!framework/app-service/PROJECT'
|
||||
jobs:
|
||||
build0-main:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y btrfs-progs libbtrfs-dev
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.24.6'
|
||||
- run: make build
|
||||
working-directory: framework/app-service
|
||||
62
.github/workflows/module_appservice_publish_docker.yaml
vendored
Normal file
62
.github/workflows/module_appservice_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
name: Publish app-service to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push amd64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/app-service:${{ github.event.inputs.tags }}-amd64
|
||||
context: framework/app-service
|
||||
file: framework/app-service/Dockerfile
|
||||
platforms: linux/amd64
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push arm64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/app-service:${{ github.event.inputs.tags }}-arm64
|
||||
context: framework/app-service
|
||||
file: framework/app-service/Dockerfile
|
||||
platforms: linux/arm64
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create beclab/app-service:${{ github.event.inputs.tags }} --amend beclab/app-service:${{ github.event.inputs.tags }}-amd64 --amend beclab/app-service:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push beclab/app-service:${{ github.event.inputs.tags }}
|
||||
63
.github/workflows/module_appservice_publish_imageservice.yaml
vendored
Normal file
63
.github/workflows/module_appservice_publish_imageservice.yaml
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
name: Publish image-service to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push amd64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/image-service:${{ github.event.inputs.tags }}-amd64
|
||||
context: framework/app-service
|
||||
file: framework/app-service/Dockerfile.image
|
||||
platforms: linux/amd64
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push arm64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/image-service:${{ github.event.inputs.tags }}-arm64
|
||||
context: framework/app-service
|
||||
file: framework/app-service/Dockerfile.image
|
||||
platforms: linux/arm64
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create beclab/image-service:${{ github.event.inputs.tags }} --amend beclab/image-service:${{ github.event.inputs.tags }}-amd64 --amend beclab/image-service:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push beclab/image-service:${{ github.event.inputs.tags }}
|
||||
|
||||
4
.github/workflows/release-daemon.yaml
vendored
4
.github/workflows/release-daemon.yaml
vendored
@@ -44,9 +44,9 @@ jobs:
|
||||
with:
|
||||
go-version: 1.22.1
|
||||
|
||||
- name: install udev-devel
|
||||
- name: install udev-devel and pcap-devel
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y libudev-dev
|
||||
sudo apt update && sudo apt install -y libudev-dev libpcap-dev
|
||||
|
||||
- name: Install x86_64 cross-compiler
|
||||
run: sudo apt-get update && sudo apt-get install -y build-essential
|
||||
|
||||
2
.github/workflows/release-daily.yaml
vendored
2
.github/workflows/release-daily.yaml
vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
steps:
|
||||
- id: generate
|
||||
run: |
|
||||
v=1.12.2-$(date +"%Y%m%d")
|
||||
v=1.12.3-$(date +"%Y%m%d")
|
||||
echo "version=$v" >> "$GITHUB_OUTPUT"
|
||||
|
||||
release-id:
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -37,4 +37,6 @@ docs/.vitepress/dist/
|
||||
docs/.vitepress/cache/
|
||||
node_modules
|
||||
.idea/
|
||||
cli/olares-cli*
|
||||
cli/olares-cli*
|
||||
|
||||
framework/app-service/bin
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
[](https://discord.gg/olares)
|
||||
[](https://github.com/beclab/olares/blob/main/LICENSE)
|
||||
|
||||
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
<p>
|
||||
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
|
||||
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
|
||||
@@ -21,7 +23,7 @@
|
||||
<p align="center">
|
||||
<a href="https://olares.com">Website</a> ·
|
||||
<a href="https://docs.olares.com">Documentation</a> ·
|
||||
<a href="https://larepass.olares.com">Download LarePass</a> ·
|
||||
<a href="https://www.olares.com/larepass">Download LarePass</a> ·
|
||||
<a href="https://github.com/beclab/apps">Olares Apps</a> ·
|
||||
<a href="https://space.olares.com">Olares Space</a>
|
||||
</p>
|
||||
@@ -33,7 +35,7 @@
|
||||

|
||||
We believe you have a fundamental right to control your digital life. The most effective way to uphold this right is by hosting your data locally, on your own hardware.
|
||||
|
||||
Olares is an **open-source personal cloud operating system** designed to empower you to own and manage your digital assets locally. Instead of relying on public cloud services, you can deploy powerful open-source alternatives locally on Olares, such as Ollama for hosting LLMs, SD WebUI for image generation, and Mastodon for building censor free social space. Imagine the power of the cloud, but with you in complete command.
|
||||
Olares is an **open-source personal cloud operating system** designed to empower you to own and manage your digital assets locally. Instead of relying on public cloud services, you can deploy powerful open-source alternatives locally on Olares, such as Ollama for hosting LLMs, ComfyUI for image generation, and Perplexica for private, AI-driven search and reasoning. Imagine the power of the cloud, but with you in complete command.
|
||||
|
||||
> 🌟 *Star us to receive instant notifications about new releases and updates.*
|
||||
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
[](https://discord.gg/olares)
|
||||
[](https://github.com/beclab/olares/blob/main/LICENSE)
|
||||
|
||||
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
<p>
|
||||
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
|
||||
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
|
||||
@@ -21,7 +23,7 @@
|
||||
<p align="center">
|
||||
<a href="https://olares.com">网站</a> ·
|
||||
<a href="https://docs.olares.com">文档</a> ·
|
||||
<a href="https://larepass.olares.com">下载 LarePass</a> ·
|
||||
<a href="https://www.olares.cn/larepass">下载 LarePass</a> ·
|
||||
<a href="https://github.com/beclab/apps">Olares 应用</a> ·
|
||||
<a href="https://space.olares.com">Olares Space</a>
|
||||
</p>
|
||||
@@ -34,7 +36,7 @@
|
||||
|
||||
我们坚信,**您拥有掌控自己数字生活的基本权利**。维护这一权利最有效的方式,就是将您的数据托管在本地,在您自己的硬件上。
|
||||
|
||||
Olares 是一款开源个人云操作系统,旨在让您能够轻松在本地拥有并管理自己的数字资产。您无需再依赖公有云服务,而可以在 Olares 上本地部署强大的开源平替服务或应用,例如可以使用 Ollama 托管大语言模型,使用 SD WebUI 用于图像生成,以及使用 Mastodon 构建不受审查的社交空间。Olares 让你坐拥云计算的强大威力,又能完全将其置于自己掌控之下。
|
||||
Olares 是一款开源个人云操作系统,旨在让您能够轻松在本地拥有并管理自己的数字资产。您无需再依赖公有云服务,而可以在 Olares 上本地部署强大的开源平替服务或应用,例如可以使用 Ollama 托管大语言模型,使用 ComfyUI 生成图像,以及使用 Perplexica 打造本地化、注重隐私的 AI 搜索与问答体验。Olares 让您坐拥云计算的强大威力,又能完全将其置于自己掌控之下。
|
||||
|
||||
> 为 Olares 点亮 🌟 以及时获取新版本和更新的通知。
|
||||
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
[](https://discord.gg/olares)
|
||||
[](https://github.com/beclab/olares/blob/main/LICENSE)
|
||||
|
||||
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
<p>
|
||||
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
|
||||
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
|
||||
@@ -21,7 +23,7 @@
|
||||
<p align="center">
|
||||
<a href="https://olares.com">ウェブサイト</a> ·
|
||||
<a href="https://docs.olares.com">ドキュメント</a> ·
|
||||
<a href="https://larepass.olares.com">LarePassをダウンロード</a> ·
|
||||
<a href="https://www.olares.com/larepass">LarePassをダウンロード</a> ·
|
||||
<a href="https://github.com/beclab/apps">Olaresアプリ</a> ·
|
||||
<a href="https://space.olares.com">Olares Space</a>
|
||||
</p>
|
||||
@@ -34,8 +36,7 @@
|
||||
|
||||
私たちは、あなたが自身のデジタルライフをコントロールする基本的な権利を有すると確信しています。この権利を守る最も効果的な方法は、あなたのデータをローカルの、あなた自身のハードウェア上でホストすることです。
|
||||
|
||||
Olaresは、あなたが自身のデジタル資産をローカルで容易に所有し管理できるよう設計された、オープンソースのパーソナルクラウドOSです。もはやパブリッククラウドサービスに依存する必要はありません。Olares上で、例えばOllamaを利用した大規模言語モデルのホスティング、SD WebUIによる画像生成、Mastodonを用いた検閲のないソーシャルスペースの構築など、強力なオープンソースの代替サービスやアプリケーションをローカルにデプロイできます。Olaresは、クラウドコンピューティングの絶大な力を活用しつつ、それを完全に自身のコントロール下に置くことを可能にします。
|
||||
|
||||
Olaresは、あなたが自身のデジタル資産をローカルで所有し管理できるように設計された、オープンソースのパーソナルクラウドOSです。パブリッククラウドサービスに依存する代わりに、Olares上で強力なオープンソースの代替をローカルにデプロイできます。例えば、LLMのホスティングにはOllama、画像生成にはComfyUI、そしてプライバシーを重視したAI駆動の検索と推論にはPerplexicaを利用できます。クラウドの力をそのままに、主導権は常にあなたの手に。
|
||||
> 🌟 *新しいリリースや更新についての通知を受け取るために、スターを付けてください。*
|
||||
|
||||
## アーキテクチャ
|
||||
@@ -44,7 +45,7 @@ Olaresは、あなたが自身のデジタル資産をローカルで容易に
|
||||
|
||||

|
||||
|
||||
各コンポーネントの詳細については、[Olares アーキテクチャ](https://docs.olares.com/manual/concepts/system-architecture.html)(英語版)をご参照ください。
|
||||
各コンポーネントの詳細については、[Olares アーキテクチャ](https://docs.olares.com/developer/concepts/system-architecture.html)(英語版)をご参照ください。
|
||||
|
||||
> 🔍**OlaresとNASの違いは何ですか?**
|
||||
>
|
||||
|
||||
@@ -51,6 +51,8 @@ rules:
|
||||
- "/provider/get_dataset_folder_status"
|
||||
- "/provider/update_dataset_folder_paths"
|
||||
- "/seahub/api/*"
|
||||
- "/system/configuration/encoding"
|
||||
- "/api/search/get_directory/"
|
||||
verbs: ["*"]
|
||||
|
||||
---
|
||||
|
||||
@@ -209,6 +209,21 @@ spec:
|
||||
port: 80
|
||||
targetPort: 91
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: share-fe-service
|
||||
namespace: user-space-{{ .Values.bfl.username }}
|
||||
spec:
|
||||
selector:
|
||||
app: olares-app
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: share
|
||||
port: 80
|
||||
targetPort: 92
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
@@ -220,12 +235,12 @@ metadata:
|
||||
applications.app.bytetrade.io/owner: '{{ .Values.bfl.username }}'
|
||||
applications.app.bytetrade.io/author: bytetrade.io
|
||||
annotations:
|
||||
applications.app.bytetrade.io/default-thirdlevel-domains: '[{"appName": "olares-app","entranceName":"dashboard","thirdLevelDomain":"dashboard"},{"appName":"olares-app","entranceName":"control-hub","thirdLevelDomain":"control-hub"},{"appName":"olares-app","entranceName":"files","thirdLevelDomain":"files"},{"appName": "olares-app","entranceName":"vault","thirdLevelDomain":"vault"},{"appName":"olares-app","entranceName":"headscale","thirdLevelDomain":"headscale"},{"appName":"olares-app","entranceName":"settings","thirdLevelDomain":"settings"},{"appName": "olares-app","entranceName":"market","thirdLevelDomain":"market"},{"appName":"olares-app","entranceName":"profile","thirdLevelDomain":"profile"}]'
|
||||
applications.app.bytetrade.io/default-thirdlevel-domains: '[{"appName": "olares-app","entranceName":"dashboard","thirdLevelDomain":"dashboard"},{"appName":"olares-app","entranceName":"control-hub","thirdLevelDomain":"control-hub"},{"appName":"olares-app","entranceName":"files","thirdLevelDomain":"files"},{"appName":"olares-app","entranceName":"share","thirdLevelDomain":"share"},{"appName": "olares-app","entranceName":"vault","thirdLevelDomain":"vault"},{"appName":"olares-app","entranceName":"headscale","thirdLevelDomain":"headscale"},{"appName":"olares-app","entranceName":"settings","thirdLevelDomain":"settings"},{"appName": "olares-app","entranceName":"market","thirdLevelDomain":"market"},{"appName":"olares-app","entranceName":"profile","thirdLevelDomain":"profile"}]'
|
||||
applications.app.bytetrade.io/icon: https://app.cdn.olares.com/appstore/olaresapps/icon.png
|
||||
applications.app.bytetrade.io/title: 'Olares Apps'
|
||||
applications.app.bytetrade.io/version: '0.0.1'
|
||||
applications.app.bytetrade.io/policies: '{"policies":[{"entranceName":"dashboard","uriRegex":"/js/script.js", "level":"public"},{"entranceName":"dashboard","uriRegex":"/js/api/send", "level":"public"}]}'
|
||||
applications.app.bytetrade.io/entrances: '[{"name":"files", "host":"files-fe-service", "port":80,"title":"Files","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png"},{"name":"vault", "host":"vault-service", "port":80,"title":"Vault","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/vault/icon.png"},{"name":"market", "host":"appstore-fe-service", "port":80,"title":"Market","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/appstore/icon.png"},{"name":"settings", "host":"settings-service", "port":80,"title":"Settings","icon":"https://app.cdn.olares.com/appstore/settings/icon.png"},{"name":"profile", "host":"profile-service", "port":80,"title":"Profile","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/profile/icon.png"},{"name":"dashboard","host":"dashboard-service","port":80,"title":"Dashboard","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/dashboard/icon.png"},{"name":"control-hub","host":"control-hub-service","port":80,"title":"Control Hub","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/control-hub/icon.png"},{"name":"headscale", "host":"headscale-svc", "port":80,"title":"Headscale","invisible": true,"icon":"https://app.cdn.olares.com/appstore/headscale/icon.png"}]'
|
||||
applications.app.bytetrade.io/entrances: '[{"name":"files", "host":"files-fe-service", "port":80,"title":"Files","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png"},{"name":"share","authLevel":"public", "host":"share-fe-service", "port":80,"title":"Share","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png","invisible":true},{"name":"vault", "host":"vault-service", "port":80,"title":"Vault","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/vault/icon.png"},{"name":"market", "host":"appstore-fe-service", "port":80,"title":"Market","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/appstore/icon.png"},{"name":"settings", "host":"settings-service", "port":80,"title":"Settings","icon":"https://app.cdn.olares.com/appstore/settings/icon.png"},{"name":"profile", "host":"profile-service", "port":80,"title":"Profile","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/profile/icon.png"},{"name":"dashboard","host":"dashboard-service","port":80,"title":"Dashboard","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/dashboard/icon.png"},{"name":"control-hub","host":"control-hub-service","port":80,"title":"Control Hub","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/control-hub/icon.png"},{"name":"headscale", "host":"headscale-svc", "port":80,"title":"Headscale","invisible": true,"icon":"https://app.cdn.olares.com/appstore/headscale/icon.png"}]'
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
@@ -253,7 +268,7 @@ spec:
|
||||
image: owncloudci/wait-for:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: check-auth
|
||||
- name: terminus-sidecar-init
|
||||
- name: olares-sidecar-init
|
||||
image: openservicemesh/init:v1.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
@@ -303,7 +318,7 @@ spec:
|
||||
chown -R 1000:1000 /uploadstemp && \
|
||||
chown -R 1000:1000 /appdata
|
||||
- name: olares-app-init
|
||||
image: beclab/system-frontend:v1.5.9
|
||||
image: beclab/system-frontend:v1.6.16
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
@@ -315,7 +330,7 @@ spec:
|
||||
name: www-dir
|
||||
|
||||
containers:
|
||||
- name: terminus-envoy-sidecar
|
||||
- name: olares-envoy-sidecar
|
||||
image: bytetrade/envoy:v1.25.11
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
@@ -329,7 +344,7 @@ spec:
|
||||
- name: tapr
|
||||
containerPort: 15080
|
||||
volumeMounts:
|
||||
- name: terminus-sidecar-config
|
||||
- name: olares-sidecar-config
|
||||
readOnly: true
|
||||
mountPath: /etc/envoy/envoy.yaml
|
||||
subPath: envoy.yaml
|
||||
@@ -352,6 +367,7 @@ spec:
|
||||
- containerPort: 89
|
||||
- containerPort: 90
|
||||
- containerPort: 91
|
||||
- containerPort: 92
|
||||
- containerPort: 8090
|
||||
command:
|
||||
- /bin/sh
|
||||
@@ -361,7 +377,7 @@ spec:
|
||||
cp -r /www/nginxs/* /etc/nginx/conf.d/
|
||||
nginx -g 'daemon off;'
|
||||
volumeMounts:
|
||||
- name: terminus-sidecar-config
|
||||
- name: olares-sidecar-config
|
||||
readOnly: true
|
||||
mountPath: /etc/envoy/envoy.yaml
|
||||
subPath: envoy.yaml
|
||||
@@ -424,7 +440,7 @@ spec:
|
||||
- name: NATS_SUBJECT_VAULT
|
||||
value: os.vault.{{ .Values.bfl.username}}
|
||||
- name: user-service
|
||||
image: beclab/user-service:v0.0.61
|
||||
image: beclab/user-service:v0.0.73
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
@@ -500,7 +516,7 @@ spec:
|
||||
hostPath:
|
||||
type: Directory
|
||||
path: '{{ .Values.userspace.userData }}'
|
||||
- name: terminus-sidecar-config
|
||||
- name: olares-sidecar-config
|
||||
configMap:
|
||||
name: user-service-sidecar-ws-configs
|
||||
items:
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ .Values.bfl.username }}:prometheus-k8s
|
||||
annotations:
|
||||
provider-registry-ref: {{ .Values.bfl.username }}/4ae9f19e
|
||||
provider-service-ref: http://prometheus-k8s.kubesphere-monitoring-system:9090
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "*"
|
||||
verbs: ["*"]
|
||||
@@ -9,4 +9,7 @@ metadata:
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "/document/search*"
|
||||
- "/task/*"
|
||||
- "/search/*"
|
||||
- "/monitorsetting/*"
|
||||
verbs: ["*"]
|
||||
@@ -15,6 +15,7 @@ rules:
|
||||
- "/api/account/all"
|
||||
- "/api/cookie/retrieve"
|
||||
- "/api/cookie"
|
||||
- "/api/abilities"
|
||||
verbs: ["*"]
|
||||
|
||||
---
|
||||
@@ -56,4 +57,16 @@ metadata:
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "/server/intent/send"
|
||||
verbs: ["*"]
|
||||
verbs: ["*"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ .Values.bfl.username }}:dashboard
|
||||
annotations:
|
||||
provider-registry-ref: {{ .Values.bfl.username }}/dashboard
|
||||
provider-service-ref: prometheus-k8s.kubesphere-monitoring-system:9090
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "*"
|
||||
verbs: ["*"]
|
||||
|
||||
@@ -29,7 +29,7 @@ spec:
|
||||
|
||||
containers:
|
||||
- name: wizard
|
||||
image: beclab/wizard:v1.5.7
|
||||
image: beclab/wizard:v1.6.5
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 80
|
||||
|
||||
@@ -7,10 +7,18 @@ function command_exists() {
|
||||
command -v "$@" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
if [[ x"$REPO_PATH" == x"" ]]; then
|
||||
export REPO_PATH="#__REPO_PATH__"
|
||||
fi
|
||||
|
||||
if [[ "x${REPO_PATH:3}" == "xREPO_PATH__" ]]; then
|
||||
export REPO_PATH="/"
|
||||
fi
|
||||
|
||||
if [[ x"$VERSION" == x"" ]]; then
|
||||
if [[ "$LOCAL_RELEASE" == "1" ]]; then
|
||||
ts=$(date +%Y%m%d%H%M%S)
|
||||
export VERSION="1.12.2-$ts"
|
||||
export VERSION="1.12.3-$ts"
|
||||
echo "will build and use a local release of Olares with version: $VERSION"
|
||||
echo ""
|
||||
else
|
||||
@@ -20,7 +28,7 @@ fi
|
||||
|
||||
if [[ "x${VERSION}" == "x" || "x${VERSION:3}" == "xVERSION__" ]]; then
|
||||
echo "error: Olares version is unspecified, please set the VERSION env var and rerun this script."
|
||||
echo "for example: VERSION=1.12.2-20241124 bash $0"
|
||||
echo "for example: VERSION=1.12.3-20241124 bash $0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -92,13 +100,17 @@ if [[ "$LOCAL_RELEASE" == "1" ]]; then
|
||||
fi
|
||||
INSTALL_OLARES_CLI=$(which olares-cli)
|
||||
else
|
||||
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]]; then
|
||||
expected_vendor="main"
|
||||
if [[ "$(basename "$REPO_PATH")" == "olares-one" ]]; then
|
||||
expected_vendor="OlaresOne"
|
||||
fi
|
||||
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]] && [[ "$(olares-cli --vendor)" == "$expected_vendor" ]]; then
|
||||
INSTALL_OLARES_CLI=$(which olares-cli)
|
||||
echo "olares-cli already installed and is the expected version"
|
||||
echo ""
|
||||
else
|
||||
if [[ ! -f ${CLI_FILE} ]]; then
|
||||
CLI_URL="${cdn_url}/${CLI_FILE}"
|
||||
CLI_URL="${cdn_url}${REPO_PATH}${CLI_FILE}"
|
||||
|
||||
echo "downloading Olares installer from ${CLI_URL} ..."
|
||||
echo ""
|
||||
|
||||
@@ -7,6 +7,15 @@ function command_exists() {
|
||||
command -v "$@" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
if [[ x"$REPO_PATH" == x"" ]]; then
|
||||
export REPO_PATH="#__REPO_PATH__"
|
||||
fi
|
||||
|
||||
|
||||
if [[ "x${REPO_PATH:3}" == "xREPO_PATH__" ]]; then
|
||||
export REPO_PATH="/"
|
||||
fi
|
||||
|
||||
function read_tty() {
|
||||
echo -n $1
|
||||
read $2 < /dev/tty
|
||||
@@ -149,7 +158,7 @@ export VERSION="#__VERSION__"
|
||||
|
||||
if [[ "x${VERSION}" == "x" || "x${VERSION:3}" == "xVERSION__" ]]; then
|
||||
echo "error: Olares version is unspecified, please set the VERSION env var and rerun this script."
|
||||
echo "for example: VERSION=1.12.2-20241124 bash $0"
|
||||
echo "for example: VERSION=1.12.3-20241124 bash $0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -172,15 +181,17 @@ else
|
||||
RELEASE_ID_SUFFIX=".$RELEASE_ID"
|
||||
fi
|
||||
CLI_FILE="olares-cli-v${VERSION}_linux_${ARCH}${RELEASE_ID_SUFFIX}.tar.gz"
|
||||
|
||||
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]]; then
|
||||
expected_vendor="main"
|
||||
if [[ "$(basename "$REPO_PATH")" == "olares-one" ]]; then
|
||||
expected_vendor="OlaresOne"
|
||||
fi
|
||||
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]] && [[ "$(olares-cli --vendor)" == "$expected_vendor" ]]; then
|
||||
INSTALL_OLARES_CLI=$(which olares-cli)
|
||||
echo "olares-cli already installed and is the expected version"
|
||||
echo ""
|
||||
else
|
||||
if [[ ! -f ${CLI_FILE} ]]; then
|
||||
CLI_URL="${cdn_url}/${CLI_FILE}"
|
||||
|
||||
CLI_URL="${cdn_url}${REPO_PATH}${CLI_FILE}"
|
||||
echo "downloading Olares installer from ${CLI_URL} ..."
|
||||
echo ""
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ metadata:
|
||||
kubesphere.io/creator: '{{ .Values.user.name }}'
|
||||
labels:
|
||||
kubesphere.io/workspace: system-workspace
|
||||
openpolicyagent.org/webhook: ignore
|
||||
name: os-platform
|
||||
|
||||
---
|
||||
@@ -27,6 +28,7 @@ metadata:
|
||||
kubesphere.io/creator: '{{ .Values.user.name }}'
|
||||
labels:
|
||||
kubesphere.io/workspace: system-workspace
|
||||
openpolicyagent.org/webhook: ignore
|
||||
name: os-framework
|
||||
|
||||
---
|
||||
@@ -37,6 +39,7 @@ metadata:
|
||||
kubesphere.io/creator: '{{ .Values.user.name }}'
|
||||
labels:
|
||||
kubesphere.io/workspace: system-workspace
|
||||
openpolicyagent.org/webhook: ignore
|
||||
name: os-protected
|
||||
|
||||
|
||||
|
||||
@@ -66,6 +66,12 @@ if [ ! -z $RELEASE_ID ]; then
|
||||
sh -c "$SED 's/#__RELEASE_ID__/${RELEASE_ID}/' joincluster.sh"
|
||||
fi
|
||||
|
||||
# replace repo path placeholder in scripts if provided
|
||||
if [ ! -z "$REPO_PATH" ]; then
|
||||
sh -c "$SED 's|#__REPO_PATH__|${REPO_PATH}|g' install.sh"
|
||||
sh -c "$SED 's|#__REPO_PATH__|${REPO_PATH}|g' joincluster.sh"
|
||||
fi
|
||||
|
||||
$TAR --exclude=wizard/tools --exclude=.git -zcvf ${BASE_DIR}/../install-wizard-${VERSION}.tar.gz .
|
||||
|
||||
popd
|
||||
|
||||
@@ -21,6 +21,11 @@ systemEnvs:
|
||||
type: url
|
||||
editable: true
|
||||
required: true
|
||||
# docker hub mirror endpoint for docker.io registry
|
||||
- envName: OLARES_SYSTEM_DOCKERHUB_SERVICE
|
||||
type: url
|
||||
editable: true
|
||||
required: false
|
||||
# the legacy OLARES_ROOT_DIR
|
||||
- envName: OLARES_SYSTEM_ROOT_PATH
|
||||
default: /olares
|
||||
|
||||
@@ -12,8 +12,11 @@ userEnvs:
|
||||
- envName: OLARES_USER_TIMEZONE
|
||||
type: string
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_ENABLED
|
||||
type: bool
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_SERVER
|
||||
type: url
|
||||
type: domain
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_PORT
|
||||
type: number
|
||||
@@ -30,13 +33,14 @@ userEnvs:
|
||||
- envName: OLARES_USER_SMTP_SECURE
|
||||
type: bool
|
||||
editable: true
|
||||
default: "true"
|
||||
- envName: OLARES_USER_SMTP_USE_TLS
|
||||
type: bool
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_USE_SSL
|
||||
type: bool
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_SMTP_SECURITY_PROTOCOLS
|
||||
- envName: OLARES_USER_SMTP_SECURITY_PROTOCOLS
|
||||
type: string
|
||||
editable: true
|
||||
- envName: OLARES_USER_OPENAI_APIKEY
|
||||
@@ -51,15 +55,18 @@ userEnvs:
|
||||
- envName: OLARES_USER_HUGGINGFACE_SERVICE
|
||||
type: url
|
||||
editable: true
|
||||
default: "https://huggingface.co/"
|
||||
- envName: OLARES_USER_HUGGINGFACE_TOKEN
|
||||
type: password
|
||||
editable: true
|
||||
- envName: OLARES_USER_PYPI_SERVICE
|
||||
type: url
|
||||
editable: true
|
||||
default: "https://pypi.org/simple/"
|
||||
- envName: OLARES_USER_GITHUB_SERVICE
|
||||
type: url
|
||||
editable: true
|
||||
default: "https://github.com/"
|
||||
- envName: OLARES_USER_GITHUB_TOKEN
|
||||
type: password
|
||||
editable: true
|
||||
|
||||
445
cli/cmd/ctl/disk/extend.go
Normal file
445
cli/cmd/ctl/disk/extend.go
Normal file
@@ -0,0 +1,445 @@
|
||||
package disk
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
"github.com/beclab/Olares/cli/pkg/utils/lvm"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const defaultOlaresVGName = "olares-vg"
|
||||
|
||||
func NewExtendDiskCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "extend",
|
||||
Short: "extend disk operations",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
// early return if no unmounted disks found
|
||||
unmountedDevices, err := lvm.FindUnmountedDevices()
|
||||
if err != nil {
|
||||
log.Fatalf("Error finding unmounted devices: %v\n", err)
|
||||
}
|
||||
|
||||
if len(unmountedDevices) == 0 {
|
||||
log.Println("No unmounted disks found to extend.")
|
||||
return
|
||||
}
|
||||
|
||||
// select volume group to extend
|
||||
currentVgs, err := lvm.FindCurrentLVM()
|
||||
if err != nil {
|
||||
log.Fatalf("Error finding current LVM: %v\n", err)
|
||||
}
|
||||
|
||||
if len(currentVgs) == 0 {
|
||||
log.Println("No valid volume groups found to extend.")
|
||||
return
|
||||
}
|
||||
|
||||
selectedVg, err := selectExtendingVG(currentVgs)
|
||||
if err != nil {
|
||||
log.Fatalf("Error selecting volume group: %v\n", err)
|
||||
}
|
||||
log.Printf("Selected volume group to extend: %s\n", selectedVg)
|
||||
|
||||
// select logical volume to extend
|
||||
lvInVg, err := lvm.FindLvByVgName(selectedVg)
|
||||
if err != nil {
|
||||
log.Fatalf("Error finding logical volumes in volume group %s: %v\n", selectedVg, err)
|
||||
}
|
||||
|
||||
if len(lvInVg) == 0 {
|
||||
log.Printf("No logical volumes found in volume group %s to extend.\n", selectedVg)
|
||||
return
|
||||
}
|
||||
|
||||
selectedLv, err := selectExtendingLV(selectedVg, lvInVg)
|
||||
if err != nil {
|
||||
log.Fatalf("Error selecting logical volume: %v\n", err)
|
||||
}
|
||||
log.Printf("Selected logical volume to extend: %s\n", selectedLv)
|
||||
|
||||
// select unmounted devices to create physical volume
|
||||
selectedDevice, err := selectExtendingDevices(unmountedDevices)
|
||||
if err != nil {
|
||||
log.Fatalf("Error selecting unmounted device: %v\n", err)
|
||||
}
|
||||
log.Printf("Selected unmounted device to use: %s\n", selectedDevice)
|
||||
|
||||
options := &LvmExtendOptions{
|
||||
VgName: selectedVg,
|
||||
DevicePath: selectedDevice,
|
||||
LvName: selectedLv,
|
||||
DeviceBlk: unmountedDevices[selectedDevice],
|
||||
}
|
||||
|
||||
log.Printf("Extending logical volume %s in volume group %s using device %s\n", options.LvName, options.VgName, options.DevicePath)
|
||||
cleanupNeeded, err := options.cleanupDiskParts()
|
||||
if err != nil {
|
||||
log.Fatalf("Error during disk partition cleanup check: %v\n", err)
|
||||
}
|
||||
|
||||
if cleanupNeeded {
|
||||
do, err := options.destroyWarning()
|
||||
if err != nil {
|
||||
log.Fatalf("Error during partition cleanup confirmation: %v\n", err)
|
||||
}
|
||||
if !do {
|
||||
log.Println("Operation aborted by user.")
|
||||
return
|
||||
}
|
||||
|
||||
err = options.deleteDevicePartitions()
|
||||
if err != nil {
|
||||
log.Fatalf("Error deleting device partitions: %v\n", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
do, err := options.makeDecision()
|
||||
if err != nil {
|
||||
log.Fatalf("Error during extension confirmation: %v\n", err)
|
||||
}
|
||||
if !do {
|
||||
log.Println("Operation aborted by user.")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = options.extendLVM()
|
||||
if err != nil {
|
||||
log.Fatalf("Error extending LVM: %v\n", err)
|
||||
}
|
||||
|
||||
log.Println("Disk extension completed successfully.")
|
||||
|
||||
// end of command run, and show result
|
||||
// show the result of the extension
|
||||
lvInVg, err = lvm.FindLvByVgName(selectedVg)
|
||||
if err != nil {
|
||||
log.Fatalf("Error finding logical volumes in volume group %s: %v\n", selectedVg, err)
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "id\tLV\tVG\tLSize\tMountpoints\n")
|
||||
for idx, lv := range lvInVg {
|
||||
fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\n", idx+1, lv.LvName, lv.VgName, lv.LvSize, strings.Join(lv.Mountpoints, ","))
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
type LvmExtendOptions struct {
|
||||
VgName string
|
||||
DevicePath string
|
||||
LvName string
|
||||
DeviceBlk *lvm.BlkPart
|
||||
}
|
||||
|
||||
func selectExtendingVG(vgs []*lvm.VgItem) (string, error) {
|
||||
// if only one vg, return it directly
|
||||
if len(vgs) == 1 {
|
||||
return vgs[0].VgName, nil
|
||||
}
|
||||
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Println("Multiple volume groups found. Please select one to extend:")
|
||||
fmt.Println("")
|
||||
// print header
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "id\tVG\tVSize\tVFree\n")
|
||||
for idx, vg := range vgs {
|
||||
fmt.Fprintf(w, "%d\t%s\t%s\t%s\n", idx+1, vg.VgName, vg.VgSize, vg.VgFree)
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
LOOP:
|
||||
fmt.Printf("\nEnter the volume group id to extend: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return "", errors.Wrap(errors.WithStack(err), "read volume group id failed")
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
fmt.Printf("\ninvalid volume group id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
selectedIdx, err := strconv.Atoi(input)
|
||||
if err != nil || selectedIdx < 1 || selectedIdx > len(vgs) {
|
||||
fmt.Printf("\ninvalid volume group id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
return vgs[selectedIdx-1].VgName, nil
|
||||
}
|
||||
|
||||
func selectExtendingLV(vgName string, lvs []*lvm.LvItem) (string, error) {
|
||||
if len(lvs) == 1 {
|
||||
return lvs[0].LvName, nil
|
||||
}
|
||||
|
||||
if vgName == defaultOlaresVGName {
|
||||
selectedLv := ""
|
||||
for _, lv := range lvs {
|
||||
if lv.LvName == "root" {
|
||||
selectedLv = lv.LvName
|
||||
continue
|
||||
}
|
||||
|
||||
if lv.LvName == "data" {
|
||||
selectedLv = lv.LvName
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if selectedLv != "" {
|
||||
return selectedLv, nil
|
||||
}
|
||||
}
|
||||
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Println("Multiple logical volumes found. Please select one to extend:")
|
||||
fmt.Println("")
|
||||
// print header
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "id\tLV\tVG\tLSize\tMountpoints\n")
|
||||
for idx, lv := range lvs {
|
||||
fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\n", idx+1, lv.LvName, lv.VgName, lv.LvSize, strings.Join(lv.Mountpoints, ","))
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
LOOP:
|
||||
fmt.Printf("\nEnter the logical volume id to extend: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return "", errors.Wrap(errors.WithStack(err), "read logical volume id failed")
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
fmt.Printf("\ninvalid logical volume id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
selectedIdx, err := strconv.Atoi(input)
|
||||
if err != nil || selectedIdx < 1 || selectedIdx > len(lvs) {
|
||||
fmt.Printf("\ninvalid logical volume id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
return lvs[selectedIdx-1].LvName, nil
|
||||
}
|
||||
|
||||
func selectExtendingDevices(unmountedDevices map[string]*lvm.BlkPart) (string, error) {
|
||||
if len(unmountedDevices) == 0 {
|
||||
return "", errors.New("no unmounted devices available for selection")
|
||||
}
|
||||
|
||||
if len(unmountedDevices) == 1 {
|
||||
for path := range unmountedDevices {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Println("Multiple unmounted devices found. Please select one to use:")
|
||||
fmt.Println("")
|
||||
// print header
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "id\tDevice\tSize\n")
|
||||
idx := 1
|
||||
devicePaths := make([]string, 0, len(unmountedDevices))
|
||||
for path, device := range unmountedDevices {
|
||||
fmt.Fprintf(w, "%d\t%s\t%s\n", idx, path, device.Size)
|
||||
devicePaths = append(devicePaths, path)
|
||||
idx++
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
LOOP:
|
||||
fmt.Printf("\nEnter the device id to use: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return "", errors.Wrap(errors.WithStack(err), "read device id failed")
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
fmt.Printf("\ninvalid device id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
selectedIdx, err := strconv.Atoi(input)
|
||||
if err != nil || selectedIdx < 1 || selectedIdx > len(devicePaths) {
|
||||
fmt.Printf("\ninvalid device id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
return devicePaths[selectedIdx-1], nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) destroyWarning() (bool, error) {
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Printf("WARNING: This will DESTROY all data on %s\n", o.DevicePath)
|
||||
LOOP:
|
||||
fmt.Printf("Type 'YES' to continue, CTRL+C to abort: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return false, errors.Wrap(errors.WithStack(err), "read confirmation input failed")
|
||||
}
|
||||
input = strings.ToUpper(strings.TrimSpace(input))
|
||||
if input != "YES" {
|
||||
goto LOOP
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) makeDecision() (bool, error) {
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Printf("NOTICE: Extending LVM will begin on device %s\n", o.DevicePath)
|
||||
LOOP:
|
||||
fmt.Printf("Type 'YES' to continue, CTRL+C to abort: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return false, errors.Wrap(errors.WithStack(err), "read confirmation input failed")
|
||||
}
|
||||
input = strings.ToUpper(strings.TrimSpace(input))
|
||||
if input != "YES" {
|
||||
goto LOOP
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) cleanupDiskParts() (bool, error) {
|
||||
if o.DeviceBlk == nil {
|
||||
return false, errors.New("device block is nil")
|
||||
}
|
||||
|
||||
if len(o.DeviceBlk.Children) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) deleteDevicePartitions() error {
|
||||
log.Printf("Selected device %s has existing partitions. Cleaning up...\n", o.DevicePath)
|
||||
if o.DeviceBlk == nil {
|
||||
return errors.New("device block is nil")
|
||||
}
|
||||
|
||||
if len(o.DeviceBlk.Children) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf("Deleting existing partitions on device %s...\n", o.DevicePath)
|
||||
var partitions []string
|
||||
for _, part := range o.DeviceBlk.Children {
|
||||
partitions = append(partitions, "/dev/"+part.Name)
|
||||
}
|
||||
|
||||
vgs, err := lvm.FindVgsOnDevice(partitions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to find volume groups on device partitions")
|
||||
}
|
||||
|
||||
if len(vgs) > 0 {
|
||||
log.Println("existing volume group on device, delete it first")
|
||||
for _, vg := range vgs {
|
||||
lvs, err := lvm.FindLvByVgName(vg.VgName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to find logical volumes in volume group %s", vg.VgName)
|
||||
}
|
||||
|
||||
err = lvm.DeactivateLv(vg.VgName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to deactivate volume group %s", vg.VgName)
|
||||
}
|
||||
|
||||
for _, lv := range lvs {
|
||||
err = lvm.RemoveLv(lv.LvPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to remove logical volume %s", lv.LvPath)
|
||||
}
|
||||
}
|
||||
|
||||
err = lvm.RemoveVg(vg.VgName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to remove volume group %s", vg)
|
||||
}
|
||||
|
||||
err = lvm.RemovePv(vg.PvName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to remove physical volume %s", vg.PvName)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
log.Printf("Deleting partitions on device %s...\n", o.DevicePath)
|
||||
err = lvm.DeleteDevicePartitions(o.DevicePath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to delete partitions on device %s", o.DevicePath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) extendLVM() error {
|
||||
log.Printf("Creating partition on device %s...\n", o.DevicePath)
|
||||
err := lvm.MakePartOnDevice(o.DevicePath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create partition on device %s", o.DevicePath)
|
||||
}
|
||||
|
||||
log.Printf("Creating physical volume on device %s...\n", o.DevicePath)
|
||||
err = lvm.AddNewPV(o.DevicePath, o.VgName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create physical volume on device %s", o.DevicePath)
|
||||
}
|
||||
|
||||
log.Printf("Extending volume group %s with logic volume %s on device %s...\n", o.VgName, o.LvName, o.DevicePath)
|
||||
err = lvm.ExtendLv(o.VgName, o.LvName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to extend logical volume %s in volume group %s", o.LvName, o.VgName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
34
cli/cmd/ctl/disk/listumounted.go
Normal file
34
cli/cmd/ctl/disk/listumounted.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package disk
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/utils/lvm"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewListUnmountedDisksCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "list-unmounted",
|
||||
Short: "List unmounted disks",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
unmountedDevices, err := lvm.FindUnmountedDevices()
|
||||
if err != nil {
|
||||
log.Fatalf("Error finding unmounted devices: %v\n", err)
|
||||
}
|
||||
|
||||
// print header
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "Device\tSize\n")
|
||||
for path, device := range unmountedDevices {
|
||||
fmt.Fprintf(w, "%s\t%s\n", path, device.Size)
|
||||
}
|
||||
w.Flush()
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
15
cli/cmd/ctl/disk/root.go
Normal file
15
cli/cmd/ctl/disk/root.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package disk
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
func NewDiskCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "disk",
|
||||
Short: "disk management operations",
|
||||
}
|
||||
|
||||
cmd.AddCommand(NewListUnmountedDisksCommand())
|
||||
cmd.AddCommand(NewExtendDiskCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
23
cli/cmd/ctl/gpu/disable_nouveau.go
Normal file
23
cli/cmd/ctl/gpu/disable_nouveau.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/pipelines"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewCmdDisableNouveau() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "disable-nouveau",
|
||||
Short: "Blacklist and disable the nouveau kernel module",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := pipelines.DisableNouveau(); err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ func NewCmdGpu() *cobra.Command {
|
||||
rootGpuCmd.AddCommand(NewCmdUninstallpu())
|
||||
rootGpuCmd.AddCommand(NewCmdEnableGpu())
|
||||
rootGpuCmd.AddCommand(NewCmdDisableGpu())
|
||||
rootGpuCmd.AddCommand(NewCmdUpgradeGpu())
|
||||
rootGpuCmd.AddCommand(NewCmdGpuStatus())
|
||||
rootGpuCmd.AddCommand(NewCmdDisableNouveau())
|
||||
return rootGpuCmd
|
||||
}
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/options"
|
||||
"github.com/beclab/Olares/cli/pkg/pipelines"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewCmdUpgradeGpu() *cobra.Command {
|
||||
o := options.NewInstallGpuOptions()
|
||||
cmd := &cobra.Command{
|
||||
Use: "upgrade",
|
||||
Short: "upgrade GPU drivers for Olares",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := pipelines.UpgradeGpuDrivers(o); err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
return cmd
|
||||
}
|
||||
@@ -49,7 +49,7 @@ func NewCmdRelease() *cobra.Command {
|
||||
}
|
||||
|
||||
if version == "" {
|
||||
version = fmt.Sprintf("1.12.2-%s", time.Now().Format("20060102150405"))
|
||||
version = fmt.Sprintf("1.12.3-%s", time.Now().Format("20060102150405"))
|
||||
fmt.Printf("--version unspecified, using: %s\n", version)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package ctl
|
||||
|
||||
import (
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/disk"
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/gpu"
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/node"
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/os"
|
||||
@@ -33,6 +34,7 @@ func NewDefaultCommand() *cobra.Command {
|
||||
cmds.AddCommand(node.NewNodeCommand())
|
||||
cmds.AddCommand(gpu.NewCmdGpu())
|
||||
cmds.AddCommand(user.NewUserCommand())
|
||||
cmds.AddCommand(disk.NewDiskCommand())
|
||||
|
||||
return cmds
|
||||
}
|
||||
|
||||
122
cli/cmd/ctl/user/activate.go
Normal file
122
cli/cmd/ctl/user/activate.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package user
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/wizard"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type activateUserOptions struct {
|
||||
Mnemonic string
|
||||
BflUrl string
|
||||
VaultUrl string
|
||||
Password string
|
||||
OlaresId string
|
||||
ResetPassword string
|
||||
|
||||
Location string
|
||||
Language string
|
||||
EnableTunnel bool
|
||||
Host string
|
||||
Jws string
|
||||
}
|
||||
|
||||
func NewCmdActivateUser() *cobra.Command {
|
||||
o := &activateUserOptions{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "activate {Olares ID (e.g., user@example.com)}",
|
||||
Short: "activate a new user",
|
||||
Args: cobra.ExactArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o.OlaresId = args[0]
|
||||
if err := o.Validate(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := o.Run(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (o *activateUserOptions) AddFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().StringVar(&o.Mnemonic, "mnemonic", "", "12-word mnemonic phrase, required for activation")
|
||||
cmd.Flags().StringVar(&o.BflUrl, "bfl", "http://127.0.0.1:30180", "Bfl URL (e.g., https://example.com, default: http://127.0.0.1:30180)")
|
||||
cmd.Flags().StringVar(&o.VaultUrl, "vault", "http://127.0.0.1:30180", "Vault URL (e.g., https://example.com, default: http://127.0.0.1:30181)")
|
||||
cmd.Flags().StringVarP(&o.Password, "password", "p", "", "OS password for authentication, required for activation")
|
||||
cmd.Flags().StringVar(&o.Location, "location", "Asia/Shanghai", "Timezone location (default: Asia/Shanghai)")
|
||||
cmd.Flags().StringVar(&o.Language, "language", "en-US", "System language (default: en-US)")
|
||||
cmd.Flags().BoolVar(&o.EnableTunnel, "enable-tunnel", false, "Enable tunnel mode (default: false)")
|
||||
cmd.Flags().StringVar(&o.Host, "host", "", "FRP host (only used when tunnel is enabled)")
|
||||
cmd.Flags().StringVar(&o.Jws, "jws", "", "FRP JWS token (only used when tunnel is enabled)")
|
||||
cmd.Flags().StringVar(&o.ResetPassword, "reset-password", "", "New password for resetting (required for password reset)")
|
||||
}
|
||||
|
||||
func (o *activateUserOptions) Validate() error {
|
||||
if o.OlaresId == "" {
|
||||
return fmt.Errorf("Olares ID is required")
|
||||
}
|
||||
if o.Password == "" {
|
||||
return fmt.Errorf("Password is required")
|
||||
}
|
||||
if o.Mnemonic == "" {
|
||||
return fmt.Errorf("Mnemonic is required")
|
||||
}
|
||||
if o.ResetPassword == "" {
|
||||
return fmt.Errorf("Reset password is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *activateUserOptions) Run() error {
|
||||
log.Println("=== TermiPass CLI - User Bind Terminus ===")
|
||||
|
||||
localName := c.OlaresId
|
||||
if strings.Contains(c.OlaresId, "@") {
|
||||
localName = strings.Split(c.OlaresId, "@")[0]
|
||||
}
|
||||
|
||||
log.Printf("Parameters:")
|
||||
log.Printf(" BflUrl: %s", c.BflUrl)
|
||||
log.Printf(" VaultUrl: %s", c.VaultUrl)
|
||||
log.Printf(" Terminus Name: %s", c.OlaresId)
|
||||
log.Printf(" Local Name: %s", localName)
|
||||
|
||||
log.Printf("Initializing global stores with mnemonic...")
|
||||
err := wizard.InitializeGlobalStores(c.Mnemonic, c.OlaresId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize global stores: %v", err)
|
||||
}
|
||||
|
||||
accessToken, err := wizard.UserBindTerminus(c.Mnemonic, c.BflUrl, c.VaultUrl, c.Password, c.OlaresId, localName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("user bind failed: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("✅ Vault activation completed successfully!")
|
||||
log.Printf("🚀 Starting system activation wizard...")
|
||||
|
||||
wizardConfig := wizard.CustomWizardConfig(c.Location, c.Language, c.EnableTunnel, c.Host, c.Jws, c.Password, c.ResetPassword)
|
||||
|
||||
log.Printf("Wizard configuration:")
|
||||
log.Printf(" Location: %s", wizardConfig.System.Location)
|
||||
log.Printf(" Language: %s", wizardConfig.System.Language)
|
||||
log.Printf(" Enable Tunnel: %t", c.EnableTunnel)
|
||||
if c.EnableTunnel && wizardConfig.System.FRP != nil {
|
||||
log.Printf(" FRP Host: %s", wizardConfig.System.FRP.Host)
|
||||
log.Printf(" FRP JWS: %s", wizardConfig.System.FRP.Jws)
|
||||
}
|
||||
|
||||
err = wizard.RunActivationWizard(c.BflUrl, accessToken, wizardConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("activation wizard failed: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("🎉 Complete Terminus activation finished successfully!")
|
||||
return nil
|
||||
}
|
||||
@@ -11,6 +11,7 @@ func NewUserCommand() *cobra.Command {
|
||||
cmd.AddCommand(NewCmdDeleteUser())
|
||||
cmd.AddCommand(NewCmdListUsers())
|
||||
cmd.AddCommand(NewCmdGetUser())
|
||||
cmd.AddCommand(NewCmdActivateUser())
|
||||
// cmd.AddCommand(NewCmdUpdateUserLimits())
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ go 1.24.2
|
||||
toolchain go1.24.6
|
||||
|
||||
replace (
|
||||
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.10
|
||||
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.41
|
||||
bytetrade.io/web3os/backups-sdk => github.com/Above-Os/backups-sdk v0.1.17
|
||||
github.com/containers/image/v5 => github.com/containers/image/v5 v5.21.1
|
||||
github.com/containers/storage => github.com/containers/storage v1.40.0
|
||||
@@ -46,6 +46,7 @@ require (
|
||||
github.com/spf13/pflag v1.0.7
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/tyler-smith/go-bip39 v1.1.0
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/crypto v0.41.0
|
||||
golang.org/x/sys v0.35.0
|
||||
@@ -55,6 +56,7 @@ require (
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
helm.sh/helm/v3 v3.18.6
|
||||
k8s.io/api v0.34.0
|
||||
k8s.io/apiextensions-apiserver v0.34.0
|
||||
k8s.io/apimachinery v0.34.0
|
||||
k8s.io/client-go v0.34.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
@@ -214,7 +216,6 @@ require (
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.34.0 // indirect
|
||||
k8s.io/apiserver v0.34.0 // indirect
|
||||
k8s.io/cli-runtime v0.34.0 // indirect
|
||||
k8s.io/component-base v0.34.0 // indirect
|
||||
|
||||
@@ -45,8 +45,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/beclab/api v0.0.2 h1:aD5RcMie2uqa/FZI7aQBa1F4yVEib8/x3IIZSLiHkBM=
|
||||
github.com/beclab/api v0.0.2/go.mod h1:ESZLe8cf4934QFkU6cqbskKfiTyNk67i1qbv/ctS6js=
|
||||
github.com/beclab/app-service v0.4.10 h1:0CT8sl5K+qwQsrKO6FYxbUFNXcRJVkkErw3sB7V7OQw=
|
||||
github.com/beclab/app-service v0.4.10/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
|
||||
github.com/beclab/app-service v0.4.41 h1:WSIXEqHSAepHweBooPkc+pedVaGGn335RugNwixkciY=
|
||||
github.com/beclab/app-service v0.4.41/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
@@ -468,6 +468,8 @@ github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8O
|
||||
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
|
||||
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
|
||||
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
|
||||
|
||||
@@ -447,6 +447,7 @@ var (
|
||||
"/etc/kubekey",
|
||||
"/etc/kke/version",
|
||||
"/etc/systemd/system/olares-swap.service",
|
||||
"/tmp/vgpulock",
|
||||
}
|
||||
|
||||
networkResetCmds = []string{
|
||||
|
||||
@@ -78,6 +78,8 @@ func (m *RunPrechecksModule) Init() {
|
||||
new(SystemdCheck),
|
||||
new(RequiredPortsCheck),
|
||||
new(ConflictingContainerdCheck),
|
||||
new(NvidiaCardArchChecker),
|
||||
new(NouveauChecker),
|
||||
new(CudaChecker),
|
||||
}
|
||||
runPreChecks := &task.LocalTask{
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -114,7 +115,7 @@ func (t *RequiredPortsCheck) Check(runtime connector.Runtime) error {
|
||||
defer l.Close()
|
||||
}
|
||||
if len(unbindablePorts) > 0 {
|
||||
return fmt.Errorf("port %v required by Olares cannot be bound", unbindablePorts)
|
||||
return fmt.Errorf("port %v required by Olares cannot be bound, you can check which process using the command `sudo netstat -tlnp`", unbindablePorts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -133,13 +134,15 @@ func (t *ConflictingContainerdCheck) Check(runtime connector.Runtime) error {
|
||||
if kubeRuntime.Arg.IsCloudInstance {
|
||||
return nil
|
||||
}
|
||||
fixMSG := "\nIf it is installed as a component of Docker, it should be uninstalled per the official doc https://docs.docker.com/engine/install/ubuntu/#uninstall-old-versions"
|
||||
fixMSG += "\nIf it is left over from a previous installation of Olares, clean it up using the command `sudo olares-cli uninstall --all`"
|
||||
containerdBin, err := util.GetCommand("containerd")
|
||||
if err == nil && containerdBin != "" {
|
||||
return fmt.Errorf("found existing containerd binary: %s, a containerd managed by Olares is required to ensure normal function", containerdBin)
|
||||
return fmt.Errorf("found existing containerd binary: %s, a containerd managed by Olares is required to ensure normal function%s", containerdBin, fixMSG)
|
||||
}
|
||||
containerdSocket := "/run/containerd/containerd.sock"
|
||||
if util.IsExist(containerdSocket) {
|
||||
return fmt.Errorf("found existing containerd socket: %s, a containerd managed by Olares is required to ensure normal function", containerdSocket)
|
||||
return fmt.Errorf("found existing containerd socket: %s, a containerd managed by Olares is required to ensure normal function%s", containerdSocket, fixMSG)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -269,20 +272,104 @@ func (t *ValidResolvConfCheck) Check(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type CudaChecker struct {
|
||||
CudaCheckTask
|
||||
type NvidiaCardArchChecker struct{}
|
||||
|
||||
func (t *NvidiaCardArchChecker) Name() string {
|
||||
return "NvidiaCardArch"
|
||||
}
|
||||
|
||||
func (c *CudaChecker) Check(runtime connector.Runtime) error {
|
||||
err := c.CudaCheckTask.Execute(runtime)
|
||||
func (t *NvidiaCardArchChecker) Check(runtime connector.Runtime) error {
|
||||
supportedArchs := []string{"Blackwell", "Hopper", "Ada Lovelace", "Ampere", "Turing"}
|
||||
model, arch, err := utils.DetectNvidiaModelAndArch(runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.TrimSpace(model) == "" {
|
||||
return nil
|
||||
}
|
||||
if !slices.Contains(supportedArchs, arch) {
|
||||
return fmt.Errorf("unsupported NVIDIA card %s of architecture: %s, Olares only supports the following architectures: %s", model, arch, strings.Join(supportedArchs, ", "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// the command `precheck` will check the cuda version,
|
||||
// only if the cuda is installed and the current version is not supported, it will return an error
|
||||
if err == ErrCudaInstalled {
|
||||
// NouveauChecker checks whether nouveau is loaded and has modeset=1 or -1.
|
||||
// This check only runs when an NVIDIA GPU is present.
|
||||
type NouveauChecker struct{}
|
||||
|
||||
func (n *NouveauChecker) Name() string {
|
||||
return "NouveauKernelModule"
|
||||
}
|
||||
|
||||
func (n *NouveauChecker) Check(runtime connector.Runtime) error {
|
||||
if !runtime.GetSystemInfo().IsLinux() {
|
||||
return nil
|
||||
}
|
||||
model, _, err := utils.DetectNvidiaModelAndArch(runtime)
|
||||
if err != nil {
|
||||
fmt.Println("Error detecting NVIDIA card:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if strings.TrimSpace(model) == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
if !util.IsExist("/sys/module/nouveau") {
|
||||
return nil
|
||||
}
|
||||
|
||||
const modesetPath = "/sys/module/nouveau/parameters/modeset"
|
||||
data, err := os.ReadFile(modesetPath)
|
||||
if err != nil {
|
||||
fmt.Printf("Error reading modeset parameter of nouveau kernel module by reading file %s: %v", modesetPath, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
val := strings.TrimSpace(string(data))
|
||||
if val == "1" || val == "-1" {
|
||||
return fmt.Errorf("detected nouveau kernel module loaded with modeset=%s; this conflicts with the NVIDIA driver that Olares will install, please disable it by running `sudo olares-cli gpu disable-nouveau`, REBOOT your machine, and try again", val)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CudaChecker struct{}
|
||||
|
||||
func (c *CudaChecker) Name() string {
|
||||
return "CUDA"
|
||||
}
|
||||
|
||||
func (c *CudaChecker) Check(runtime connector.Runtime) error {
|
||||
if !runtime.GetSystemInfo().IsLinux() {
|
||||
return nil
|
||||
}
|
||||
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if st == nil || !st.Installed {
|
||||
if st != nil && st.Running {
|
||||
return ErrKernelDriverUninstalledButRunning
|
||||
}
|
||||
logger.Info("NVIDIA driver is not installed")
|
||||
return nil
|
||||
}
|
||||
if st.Mismatch {
|
||||
return ErrDriverLibraryVersionMismatch
|
||||
}
|
||||
if st.InstallMethod != utils.GPUDriverInstallMethodRunfile && !runtime.GetSystemInfo().IsWsl() {
|
||||
return ErrNotInstalledByRunfile
|
||||
}
|
||||
logger.Infof("NVIDIA driver is installed, version: %s, cuda version: %s", st.DriverVersion, st.CudaVersion)
|
||||
oldestVer := semver.MustParse(supportedCudaVersions[0])
|
||||
newestVer := semver.MustParse(supportedCudaVersions[len(supportedCudaVersions)-1])
|
||||
currentVer := semver.MustParse(st.CudaVersion)
|
||||
if oldestVer.GreaterThan(currentVer) {
|
||||
return ErrUnsupportedCudaVersion
|
||||
}
|
||||
if newestVer.LessThan(currentVer) {
|
||||
logger.Info("CUDA version is too new, there might be compatibility issues with some applications, use at your own risk")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////
|
||||
@@ -474,44 +561,8 @@ func (t *RemoveWSLChattr) Execute(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var ErrUnsupportedCudaVersion = errors.New("unsupported cuda version, please uninstall it, REBOOT your machine, and try again")
|
||||
var ErrCudaInstalled = errors.New("cuda is installed")
|
||||
var supportedCudaVersions = []string{"12.8", common.CurrentVerifiedCudaVersion}
|
||||
|
||||
// CudaCheckTask checks the cuda version, if the current version is not supported, it will return an error
|
||||
// before executing the command `olares-cli gpu install`, we need to check the cuda version
|
||||
// if the cuda if not installed, it will return nil and the command can be executed.
|
||||
// if the cuda is installed and the version is unsupported, the command can not be executed,
|
||||
// or the cuda version is supported, executing the command is unnecessary.
|
||||
type CudaCheckTask struct{}
|
||||
|
||||
func (t *CudaCheckTask) Name() string {
|
||||
return "Cuda"
|
||||
}
|
||||
|
||||
func (t *CudaCheckTask) Execute(runtime connector.Runtime) error {
|
||||
if !runtime.GetSystemInfo().IsLinux() {
|
||||
return nil
|
||||
}
|
||||
|
||||
info, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case !installed:
|
||||
logger.Info("NVIDIA driver is not installed")
|
||||
return nil
|
||||
default:
|
||||
logger.Infof("NVIDIA driver is installed, version: %s, cuda version: %s", info.DriverVersion, info.CudaVersion)
|
||||
oldestVer := semver.MustParse(supportedCudaVersions[0])
|
||||
newestVer := semver.MustParse(supportedCudaVersions[len(supportedCudaVersions)-1])
|
||||
currentVer := semver.MustParse(info.CudaVersion)
|
||||
if oldestVer.GreaterThan(currentVer) {
|
||||
return ErrUnsupportedCudaVersion
|
||||
}
|
||||
if newestVer.LessThan(currentVer) {
|
||||
logger.Info("CUDA version is too new, there might be compatibility issues with some applications, use at your own risk")
|
||||
}
|
||||
return ErrCudaInstalled
|
||||
}
|
||||
}
|
||||
var ErrUnsupportedCudaVersion = errors.New("unsupported cuda version, please uninstall it using the command `sudo olares-cli gpu uninstall`, REBOOT your machine, and try again")
|
||||
var ErrKernelDriverUninstalledButRunning = errors.New("NVIDIA driver is uninstalled, but the kernel driver is still running, please REBOOT your machine, and try again")
|
||||
var ErrNotInstalledByRunfile = errors.New("NVIDIA driver is installed, but not installed by runfile, please uninstall it using the command `sudo olares-cli gpu uninstall`, REBOOT your machine, and try again")
|
||||
var ErrDriverLibraryVersionMismatch = errors.New("NVIDIA driver is installed, but the library version with the running version is mismatched, please REBOOT your machine, and try again")
|
||||
var supportedCudaVersions = []string{common.CurrentVerifiedCudaVersion}
|
||||
|
||||
@@ -26,7 +26,7 @@ const (
|
||||
DefaultKubernetesVersion = ""
|
||||
DefaultKubeSphereVersion = "v3.3.0"
|
||||
DefaultTokenMaxAge = 31536000
|
||||
CurrentVerifiedCudaVersion = "12.9"
|
||||
CurrentVerifiedCudaVersion = "13.0"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -279,30 +279,29 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
ENV_OLARES_BASE_DIR = "OLARES_BASE_DIR"
|
||||
ENV_OLARES_VERSION = "OLARES_VERSION"
|
||||
ENV_TERMINUS_IS_CLOUD_VERSION = "TERMINUS_IS_CLOUD_VERSION"
|
||||
ENV_KUBE_TYPE = "KUBE_TYPE"
|
||||
ENV_REGISTRY_MIRRORS = "REGISTRY_MIRRORS"
|
||||
ENV_NVIDIA_CONTAINER_REPO_MIRROR = "NVIDIA_CONTAINER_REPO_MIRROR"
|
||||
ENV_OLARES_CDN_SERVICE = "OLARES_SYSTEM_CDN_SERVICE"
|
||||
ENV_STORAGE = "STORAGE"
|
||||
ENV_S3_BUCKET = "S3_BUCKET"
|
||||
ENV_LOCAL_GPU_ENABLE = "LOCAL_GPU_ENABLE"
|
||||
ENV_AWS_ACCESS_KEY_ID_SETUP = "AWS_ACCESS_KEY_ID_SETUP"
|
||||
ENV_AWS_SECRET_ACCESS_KEY_SETUP = "AWS_SECRET_ACCESS_KEY_SETUP"
|
||||
ENV_AWS_SESSION_TOKEN_SETUP = "AWS_SESSION_TOKEN_SETUP"
|
||||
ENV_BACKUP_KEY_PREFIX = "BACKUP_KEY_PREFIX"
|
||||
ENV_BACKUP_SECRET = "BACKUP_SECRET"
|
||||
ENV_CLUSTER_ID = "CLUSTER_ID"
|
||||
ENV_BACKUP_CLUSTER_BUCKET = "BACKUP_CLUSTER_BUCKET"
|
||||
ENV_TOKEN_MAX_AGE = "TOKEN_MAX_AGE"
|
||||
ENV_HOST_IP = "HOST_IP"
|
||||
ENV_PREINSTALL = "PREINSTALL"
|
||||
ENV_DISABLE_HOST_IP_PROMPT = "DISABLE_HOST_IP_PROMPT"
|
||||
ENV_AUTO_ADD_FIREWALL_RULES = "AUTO_ADD_FIREWALL_RULES"
|
||||
ENV_TERMINUS_OS_DOMAINNAME = "TERMINUS_OS_DOMAINNAME"
|
||||
ENV_DEFAULT_WSL_DISTRO_LOCATION = "DEFAULT_WSL_DISTRO_LOCATION" // If set to 1, the default WSL distro storage will be used.
|
||||
ENV_OLARES_BASE_DIR = "OLARES_BASE_DIR"
|
||||
ENV_OLARES_VERSION = "OLARES_VERSION"
|
||||
ENV_TERMINUS_IS_CLOUD_VERSION = "TERMINUS_IS_CLOUD_VERSION"
|
||||
ENV_KUBE_TYPE = "KUBE_TYPE"
|
||||
ENV_REGISTRY_MIRRORS = "REGISTRY_MIRRORS"
|
||||
ENV_OLARES_CDN_SERVICE = "OLARES_SYSTEM_CDN_SERVICE"
|
||||
ENV_STORAGE = "STORAGE"
|
||||
ENV_S3_BUCKET = "S3_BUCKET"
|
||||
ENV_LOCAL_GPU_ENABLE = "LOCAL_GPU_ENABLE"
|
||||
ENV_AWS_ACCESS_KEY_ID_SETUP = "AWS_ACCESS_KEY_ID_SETUP"
|
||||
ENV_AWS_SECRET_ACCESS_KEY_SETUP = "AWS_SECRET_ACCESS_KEY_SETUP"
|
||||
ENV_AWS_SESSION_TOKEN_SETUP = "AWS_SESSION_TOKEN_SETUP"
|
||||
ENV_BACKUP_KEY_PREFIX = "BACKUP_KEY_PREFIX"
|
||||
ENV_BACKUP_SECRET = "BACKUP_SECRET"
|
||||
ENV_CLUSTER_ID = "CLUSTER_ID"
|
||||
ENV_BACKUP_CLUSTER_BUCKET = "BACKUP_CLUSTER_BUCKET"
|
||||
ENV_TOKEN_MAX_AGE = "TOKEN_MAX_AGE"
|
||||
ENV_HOST_IP = "HOST_IP"
|
||||
ENV_PREINSTALL = "PREINSTALL"
|
||||
ENV_DISABLE_HOST_IP_PROMPT = "DISABLE_HOST_IP_PROMPT"
|
||||
ENV_AUTO_ADD_FIREWALL_RULES = "AUTO_ADD_FIREWALL_RULES"
|
||||
ENV_TERMINUS_OS_DOMAINNAME = "TERMINUS_OS_DOMAINNAME"
|
||||
ENV_DEFAULT_WSL_DISTRO_LOCATION = "DEFAULT_WSL_DISTRO_LOCATION" // If set to 1, the default WSL distro storage will be used.
|
||||
|
||||
ENV_CONTAINER = "container"
|
||||
ENV_CONTAINER_MODE = "CONTAINER_MODE" // running in docker container
|
||||
|
||||
@@ -58,7 +58,7 @@ func NewLocalRuntime(debug, ingoreErr bool) (LocalRuntime, error) {
|
||||
host.Address = ""
|
||||
host.InternalAddress = ""
|
||||
host.Port = 22
|
||||
host.User = u.Name
|
||||
host.User = u.Username
|
||||
host.Password = ""
|
||||
host.PrivateKeyPath = fmt.Sprintf("%s/.ssh/id_rsa", u.HomeDir)
|
||||
host.Arch = ""
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"github.com/beclab/Olares/cli/pkg/container"
|
||||
"time"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/container"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/prepare"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
@@ -20,11 +21,6 @@ type InstallDriversModule struct {
|
||||
// 1. no card is found (which skips the driver installation)
|
||||
// 2. no driver is found (which skips the container toolkit installation)
|
||||
FailOnNoInstallation bool
|
||||
|
||||
// currently, this is only used to skip the nvidia-smi check after driver upgrade
|
||||
// because the nvidia-smi will not work after upgrade (Failed to initialize NVML: Driver/library version mismatch)
|
||||
// otherwise, always check the driver is running properly after installation to fail early and avoid other issues
|
||||
SkipNVMLCheckAfterInstall bool
|
||||
}
|
||||
|
||||
func (m *InstallDriversModule) IsSkip() bool {
|
||||
@@ -34,14 +30,14 @@ func (m *InstallDriversModule) IsSkip() bool {
|
||||
func (m *InstallDriversModule) Init() {
|
||||
m.Name = "InstallGPUDriver"
|
||||
|
||||
installCudaDeps := &task.RemoteTask{
|
||||
Name: "InstallCudaKeyRing",
|
||||
installCudaDriver := &task.RemoteTask{ // not for WSL
|
||||
Name: "InstallNvidiaDriver",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(CudaNotInstalled),
|
||||
&NvidiaGraphicsCard{ExitOnNotFound: m.FailOnNoInstallation},
|
||||
},
|
||||
Action: &InstallCudaDeps{
|
||||
Action: &InstallCudaDriver{
|
||||
ManifestAction: manifest.ManifestAction{
|
||||
Manifest: m.Manifest,
|
||||
BaseDir: m.BaseDir,
|
||||
@@ -51,20 +47,7 @@ func (m *InstallDriversModule) Init() {
|
||||
Retry: 1,
|
||||
}
|
||||
|
||||
installCudaDriver := &task.RemoteTask{ // not for WSL
|
||||
Name: "InstallNvidiaDriver",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(CudaNotInstalled),
|
||||
&NvidiaGraphicsCard{ExitOnNotFound: m.FailOnNoInstallation},
|
||||
},
|
||||
Action: &InstallCudaDriver{SkipNVMLCheckAfterInstall: m.SkipNVMLCheckAfterInstall},
|
||||
Parallel: false,
|
||||
Retry: 1,
|
||||
}
|
||||
|
||||
m.Tasks = []task.Interface{
|
||||
installCudaDeps,
|
||||
installCudaDriver,
|
||||
}
|
||||
}
|
||||
@@ -364,13 +347,20 @@ func (l *UninstallCudaModule) Init() {
|
||||
|
||||
}
|
||||
|
||||
type ExitIfNoDriverUpgradeNeededModule struct {
|
||||
type DisableNouveauModule struct {
|
||||
common.KubeModule
|
||||
}
|
||||
|
||||
func (l *ExitIfNoDriverUpgradeNeededModule) Init() {
|
||||
l.Tasks = append(l.Tasks, &task.LocalTask{
|
||||
Action: new(ExitIfNoDriverUpgradeNeeded),
|
||||
})
|
||||
func (m *DisableNouveauModule) Init() {
|
||||
m.Name = "DisableNouveau"
|
||||
|
||||
writeBlacklist := &task.LocalTask{
|
||||
Name: "WriteNouveauBlacklist",
|
||||
Action: new(WriteNouveauBlacklist),
|
||||
Retry: 1,
|
||||
}
|
||||
|
||||
m.Tasks = []task.Interface{
|
||||
writeBlacklist,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,12 +3,14 @@ package gpu
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
|
||||
"github.com/beclab/Olares/cli/pkg/clientset"
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/logger"
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -32,35 +34,33 @@ func (p *GPUEnablePrepare) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
|
||||
type CudaInstalled struct {
|
||||
common.KubePrepare
|
||||
precheck.CudaCheckTask
|
||||
FailOnNoInstallation bool
|
||||
}
|
||||
|
||||
func (p *CudaInstalled) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
err := p.CudaCheckTask.Execute(runtime)
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
if err == precheck.ErrCudaInstalled {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
if st == nil || !st.Installed {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type CudaNotInstalled struct {
|
||||
common.KubePrepare
|
||||
precheck.CudaCheckTask
|
||||
}
|
||||
|
||||
func (p *CudaNotInstalled) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
err := p.CudaCheckTask.Execute(runtime)
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
if err == precheck.ErrCudaInstalled {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
if st == nil || !st.Installed {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
type K8sNodeInstalled struct {
|
||||
@@ -97,9 +97,6 @@ type NvidiaGraphicsCard struct {
|
||||
}
|
||||
|
||||
func (p *NvidiaGraphicsCard) PreCheck(runtime connector.Runtime) (found bool, err error) {
|
||||
if runtime.RemoteHost().GetOs() == common.Darwin {
|
||||
return false, nil
|
||||
}
|
||||
defer func() {
|
||||
if !p.ExitOnNotFound {
|
||||
return
|
||||
@@ -109,20 +106,15 @@ func (p *NvidiaGraphicsCard) PreCheck(runtime connector.Runtime) (found bool, er
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
output, err := runtime.GetRunner().SudoCmd(
|
||||
"lspci | grep -i -e vga -e 3d | grep -i nvidia", false, false)
|
||||
// an empty grep also results in the exit code to be 1
|
||||
// and thus a non-nil err
|
||||
model, _, err := utils.DetectNvidiaModelAndArch(runtime)
|
||||
if err != nil {
|
||||
logger.Debug("try to find nvidia graphics card error ", err)
|
||||
logger.Debug("ignore card driver installation")
|
||||
logger.Debugf("detect NVIDIA GPU error: %v", err)
|
||||
}
|
||||
if strings.TrimSpace(model) == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
logger.Info("found nvidia graphics card: ", output)
|
||||
}
|
||||
return output != "", nil
|
||||
logger.Infof("found NVIDIA GPU: %s", model)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type ContainerdInstalled struct {
|
||||
|
||||
@@ -10,11 +10,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/apis/kubekey/v1alpha2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
|
||||
"github.com/beclab/Olares/cli/pkg/clientset"
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
cc "github.com/beclab/Olares/cli/pkg/core/common"
|
||||
@@ -39,10 +36,7 @@ type CheckWslGPU struct {
|
||||
|
||||
func (t *CheckWslGPU) CheckNvidiaSmiFileExists() bool {
|
||||
var nvidiaSmiFile = "/usr/lib/wsl/lib/nvidia-smi"
|
||||
if !util.IsExist(nvidiaSmiFile) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return util.IsExist(nvidiaSmiFile)
|
||||
}
|
||||
|
||||
func (t *CheckWslGPU) Execute(runtime *common.KubeRuntime) {
|
||||
@@ -66,88 +60,41 @@ func (t *CheckWslGPU) Execute(runtime *common.KubeRuntime) {
|
||||
runtime.Arg.SetGPU(true)
|
||||
}
|
||||
|
||||
type InstallCudaDeps struct {
|
||||
type InstallCudaDriver struct {
|
||||
common.KubeAction
|
||||
manifest.ManifestAction
|
||||
}
|
||||
|
||||
func (t *InstallCudaDeps) Execute(runtime connector.Runtime) error {
|
||||
var systemInfo = runtime.GetSystemInfo()
|
||||
var cudaKeyringVersion string
|
||||
var osVersion string
|
||||
switch {
|
||||
case systemInfo.IsUbuntu():
|
||||
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_0
|
||||
if systemInfo.IsUbuntuVersionEqual(connector.Ubuntu24) || systemInfo.IsUbuntuVersionEqual(connector.Ubuntu25) {
|
||||
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_1
|
||||
osVersion = "24.04"
|
||||
} else if systemInfo.IsUbuntuVersionEqual(connector.Ubuntu22) {
|
||||
osVersion = "22.04"
|
||||
} else {
|
||||
osVersion = "20.04"
|
||||
}
|
||||
case systemInfo.IsDebian():
|
||||
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_1
|
||||
if systemInfo.IsDebianVersionEqual(connector.Debian12) {
|
||||
osVersion = connector.Debian12.String()
|
||||
} else {
|
||||
osVersion = connector.Debian11.String()
|
||||
}
|
||||
func (t *InstallCudaDriver) Execute(runtime connector.Runtime) error {
|
||||
_, _ = runtime.GetRunner().SudoCmd("apt-get update", false, true)
|
||||
// install build deps for dkms
|
||||
if _, err := runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends dkms build-essential linux-headers-$(uname -r)", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to install kernel build dependencies for nvidia runfile")
|
||||
}
|
||||
var fileId = fmt.Sprintf("%s-%s_cuda-keyring_%s-1",
|
||||
strings.ToLower(systemInfo.GetOsPlatformFamily()), osVersion, cudaKeyringVersion)
|
||||
|
||||
cudakeyring, err := t.Manifest.Get(fileId)
|
||||
// fetch runfile from manifest
|
||||
item, err := t.Manifest.Get("cuda-driver")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := cudakeyring.FilePath(t.BaseDir)
|
||||
var exists = util.IsExist(path)
|
||||
if !exists {
|
||||
return fmt.Errorf("Failed to find %s binary in %s", cudakeyring.Filename, path)
|
||||
runfile := item.FilePath(t.BaseDir)
|
||||
if !util.IsExist(runfile) {
|
||||
return fmt.Errorf("failed to find %s binary in %s", item.Filename, runfile)
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("dpkg -i --force all %s", path), false, true); err != nil {
|
||||
return err
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("chmod +x %s", runfile), false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to chmod +x runfile")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type InstallCudaDriver struct {
|
||||
common.KubeAction
|
||||
|
||||
SkipNVMLCheckAfterInstall bool
|
||||
}
|
||||
|
||||
func (t *InstallCudaDriver) Execute(runtime connector.Runtime) error {
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get update", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get update")
|
||||
}
|
||||
|
||||
if runtime.GetSystemInfo().IsDebian() {
|
||||
_, err := runtime.GetRunner().SudoCmd("apt-get -y install nvidia-open", false, true)
|
||||
return errors.Wrap(err, "failed to apt-get install nvidia-open")
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get -y install nvidia-kernel-open-575", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get install nvidia-kernel-open-575")
|
||||
}
|
||||
|
||||
if t.SkipNVMLCheckAfterInstall {
|
||||
return nil
|
||||
// execute runfile with required flags
|
||||
cmd := fmt.Sprintf("sh %s -z --no-x-check --allow-installation-with-running-driver --no-check-for-alternate-installs --dkms --rebuild-initramfs -s", runfile)
|
||||
if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to install nvidia driver via runfile")
|
||||
}
|
||||
|
||||
// now that the nvidia driver is installed,
|
||||
// the nvidia-smi should work correctly,
|
||||
// if not, a manual reboot is needed by the user
|
||||
_, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check nvidia driver status by executing nvidia-smi: %v", err)
|
||||
}
|
||||
|
||||
if !installed {
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil || st == nil || !st.Installed || st.Mismatch {
|
||||
logger.Error("ERROR: nvidia driver has been installed, but is not running properly, please reboot the machine and try again")
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -170,7 +117,7 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
|
||||
keyPath := gpgkey.FilePath(t.BaseDir)
|
||||
|
||||
if !util.IsExist(keyPath) {
|
||||
return fmt.Errorf("Failed to find %s binary in %s", gpgkey.Filename, keyPath)
|
||||
return fmt.Errorf("failed to find %s binary in %s", gpgkey.Filename, keyPath)
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("install -d -m 0755 /usr/share/keyrings", false, true); err != nil {
|
||||
@@ -190,7 +137,7 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
|
||||
libPath := libnvidia.FilePath(t.BaseDir)
|
||||
|
||||
if !util.IsExist(libPath) {
|
||||
return fmt.Errorf("Failed to find %s binary in %s", libnvidia.Filename, libPath)
|
||||
return fmt.Errorf("failed to find %s binary in %s", libnvidia.Filename, libPath)
|
||||
}
|
||||
|
||||
// remove any conflicting libnvidia-container.list
|
||||
@@ -209,19 +156,30 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
|
||||
return err
|
||||
}
|
||||
|
||||
mirrorRepo := os.Getenv(common.ENV_NVIDIA_CONTAINER_REPO_MIRROR)
|
||||
if mirrorRepo == "" {
|
||||
// decide mirror based on OLARES_SYSTEM_CDN_SERVICE
|
||||
var mirrorHost string
|
||||
cdnService := os.Getenv(common.ENV_OLARES_CDN_SERVICE)
|
||||
if cdnService != "" {
|
||||
cdnRaw := cdnService
|
||||
if !strings.HasPrefix(cdnRaw, "http") {
|
||||
cdnRaw = "https://" + cdnRaw
|
||||
}
|
||||
if cdnURL, err := url.Parse(cdnRaw); err == nil {
|
||||
host := cdnURL.Host
|
||||
if host == "" {
|
||||
host = cdnService
|
||||
}
|
||||
if strings.HasSuffix(host, "olares.cn") {
|
||||
mirrorHost = "mirrors.ustc.edu.cn"
|
||||
}
|
||||
} else if strings.HasSuffix(cdnService, "olares.cn") {
|
||||
mirrorHost = "mirrors.ustc.edu.cn"
|
||||
}
|
||||
}
|
||||
if mirrorHost == "" {
|
||||
return nil
|
||||
}
|
||||
mirrorRepoRawURL := mirrorRepo
|
||||
if !strings.HasPrefix(mirrorRepoRawURL, "http") {
|
||||
mirrorRepoRawURL = "https://" + mirrorRepoRawURL
|
||||
}
|
||||
mirrorRepoURL, err := url.Parse(mirrorRepoRawURL)
|
||||
if err != nil || mirrorRepoURL.Host == "" {
|
||||
return fmt.Errorf("invalid mirror for nvidia container: %s", mirrorRepo)
|
||||
}
|
||||
cmd = fmt.Sprintf("sed -i 's#nvidia.github.io#%s#g' %s", mirrorRepoURL.Host, dstPath)
|
||||
cmd = fmt.Sprintf("sed -i 's#nvidia.github.io#%s#g' %s", mirrorHost, dstPath)
|
||||
if _, err := runtime.GetRunner().SudoCmd(cmd, false, false); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to switch nvidia container repo to mirror site")
|
||||
}
|
||||
@@ -233,9 +191,21 @@ type InstallNvidiaContainerToolkit struct {
|
||||
}
|
||||
|
||||
func (t *InstallNvidiaContainerToolkit) Execute(runtime connector.Runtime) error {
|
||||
containerdDropInDir := "/etc/containerd/config.d"
|
||||
containerdConfigFile := "/etc/containerd/config.toml"
|
||||
if util.IsExist(containerdDropInDir) {
|
||||
if err := os.RemoveAll(containerdDropInDir); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to remove containerd drop-in directory")
|
||||
}
|
||||
}
|
||||
if util.IsExist(containerdConfigFile) {
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("sed -i '/^import/d' %s", containerdConfigFile), false, false); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to remove import section from containerd config file")
|
||||
}
|
||||
}
|
||||
logger.Debugf("install nvidia-container-toolkit")
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get update && sudo apt-get install -y nvidia-container-toolkit jq", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get install nvidia-container-toolkit")
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get update && sudo apt-get install -y --allow-downgrades nvidia-container-toolkit=1.17.9-1 nvidia-container-toolkit-base=1.17.9-1 jq", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to apt-get install nvidia-container-toolkit")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -382,7 +352,7 @@ func (g *GetCudaVersion) Execute(runtime connector.Runtime) error {
|
||||
|
||||
lines := strings.Split(res, "\n")
|
||||
|
||||
if lines == nil || len(lines) == 0 {
|
||||
if len(lines) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, line := range lines {
|
||||
@@ -403,7 +373,6 @@ func (g *GetCudaVersion) Execute(runtime connector.Runtime) error {
|
||||
|
||||
type UpdateNodeLabels struct {
|
||||
common.KubeAction
|
||||
precheck.CudaCheckTask
|
||||
}
|
||||
|
||||
func (u *UpdateNodeLabels) Execute(runtime connector.Runtime) error {
|
||||
@@ -412,32 +381,26 @@ func (u *UpdateNodeLabels) Execute(runtime connector.Runtime) error {
|
||||
return errors.Wrap(errors.WithStack(err), "kubeclient create error")
|
||||
}
|
||||
|
||||
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !installed {
|
||||
logger.Info("nvidia-smi not exists")
|
||||
if st == nil || !st.Installed {
|
||||
logger.Info("NVIDIA driver is not installed")
|
||||
return nil
|
||||
}
|
||||
|
||||
supported := "false"
|
||||
|
||||
err = u.CudaCheckTask.Execute(runtime)
|
||||
switch {
|
||||
case err == precheck.ErrCudaInstalled:
|
||||
if st.Installed {
|
||||
supported = "true"
|
||||
case err == precheck.ErrUnsupportedCudaVersion:
|
||||
// bypass
|
||||
case err != nil:
|
||||
return err
|
||||
case err == nil:
|
||||
// impossible
|
||||
logger.Warn("check impossible")
|
||||
}
|
||||
|
||||
return UpdateNodeGpuLabel(context.Background(), client.Kubernetes(), &gpuInfo.DriverVersion, &gpuInfo.CudaVersion, &supported)
|
||||
driverVersion := st.DriverVersion
|
||||
if st.Mismatch && st.LibraryVersion != "" {
|
||||
driverVersion = st.LibraryVersion
|
||||
}
|
||||
|
||||
return UpdateNodeGpuLabel(context.Background(), client.Kubernetes(), &driverVersion, &st.CudaVersion, &supported)
|
||||
}
|
||||
|
||||
type RemoveNodeLabels struct {
|
||||
@@ -586,16 +549,44 @@ type UninstallNvidiaDrivers struct {
|
||||
}
|
||||
|
||||
func (t *UninstallNvidiaDrivers) Execute(runtime connector.Runtime) error {
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get -y remove nvidia*", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get remove nvidia*")
|
||||
_, _ = runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get -y autoremove --purge", false, true)
|
||||
_, _ = runtime.GetRunner().SudoCmd("dpkg --configure -a || true", false, true)
|
||||
listCmd := "dpkg -l | awk '/^(ii|i[UuFHWt]|rc|..R)/ {print $2}' | grep nvidia | grep -v container"
|
||||
pkgs, _ := runtime.GetRunner().SudoCmd(listCmd, false, false)
|
||||
pkgs = strings.ReplaceAll(pkgs, "\n", " ")
|
||||
pkgs = strings.TrimSpace(pkgs)
|
||||
if pkgs != "" {
|
||||
removeCmd := fmt.Sprintf("DEBIAN_FRONTEND=noninteractive apt-get -y --auto-remove --purge remove %s", pkgs)
|
||||
if _, err := runtime.GetRunner().SudoCmd(removeCmd, false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to remove nvidia packages via apt-get")
|
||||
}
|
||||
_, _ = runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get -y autoremove --purge", false, true)
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get -y remove libnvidia*", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get remove libnvidia*")
|
||||
// also try to uninstall runfile-installed drivers if present
|
||||
if out, _ := runtime.GetRunner().SudoCmd("test -x /usr/bin/nvidia-uninstall && echo yes || true", false, false); strings.TrimSpace(out) == "yes" {
|
||||
if _, err := runtime.GetRunner().SudoCmd("/usr/bin/nvidia-uninstall -s", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to uninstall NVIDIA driver via nvidia-uninstall")
|
||||
}
|
||||
} else if out2, _ := runtime.GetRunner().SudoCmd("test -x /usr/bin/nvidia-installer && echo yes || true", false, false); strings.TrimSpace(out2) == "yes" {
|
||||
if _, err := runtime.GetRunner().SudoCmd("/usr/bin/nvidia-installer --uninstall -s", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to uninstall NVIDIA driver via nvidia-installer --uninstall")
|
||||
}
|
||||
}
|
||||
|
||||
// clean up any leftover dkms-installed kernel modules for nvidia if present
|
||||
// only remove .ko files under updates/dkms to avoid removing other modules
|
||||
checkLeftoverCmd := "sh -c 'test -d /lib/modules/$(uname -r)/updates/dkms && find /lib/modules/$(uname -r)/updates/dkms -maxdepth 1 -type f -name \"nvidia*.ko\" -print -quit | grep -q . && echo yes || true'"
|
||||
if out, _ := runtime.GetRunner().SudoCmd(checkLeftoverCmd, false, false); strings.TrimSpace(out) == "yes" {
|
||||
if _, err := runtime.GetRunner().SudoCmd("find /lib/modules/$(uname -r)/updates/dkms -maxdepth 1 -type f -name 'nvidia*.ko' -print -delete", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to remove leftover nvidia dkms kernel modules")
|
||||
}
|
||||
// refresh module dependency maps
|
||||
if _, err := runtime.GetRunner().SudoCmd("depmod -a $(uname -r)", false, true); err != nil {
|
||||
logger.Error("Failed to refresh module dependency maps: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
logger.Infof("uninstall nvidia drivers success, please reboot the system to take effect if you reinstall the new nvidia drivers")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -604,19 +595,43 @@ type PrintGpuStatus struct {
|
||||
}
|
||||
|
||||
func (t *PrintGpuStatus) Execute(runtime connector.Runtime) error {
|
||||
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !installed {
|
||||
logger.Info("cuda not exists")
|
||||
if st == nil {
|
||||
logger.Info("no NVIDIA GPU status available")
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Infof("GPU Driver Version: %s", gpuInfo.DriverVersion)
|
||||
logger.Infof("CUDA Version: %s", gpuInfo.CudaVersion)
|
||||
|
||||
// basic status
|
||||
logger.Infof("Installed: %t", st.Installed)
|
||||
if st.Installed {
|
||||
logger.Infof("Install method: %s", st.InstallMethod)
|
||||
}
|
||||
logger.Infof("Running: %t", st.Running)
|
||||
// running (kernel) driver version
|
||||
if st.Running && strings.TrimSpace(st.DriverVersion) != "" {
|
||||
logger.Infof("Running driver version (kernel): %s", st.DriverVersion)
|
||||
}
|
||||
// userland info from nvidia-smi (when available)
|
||||
if st.Installed {
|
||||
if st.Info != nil && strings.TrimSpace(st.Info.DriverVersion) != "" {
|
||||
logger.Infof("Installed driver version (nvidia-smi): %s", st.Info.DriverVersion)
|
||||
}
|
||||
if strings.TrimSpace(st.CudaVersion) != "" {
|
||||
logger.Infof("CUDA version (nvidia-smi): %s", st.CudaVersion)
|
||||
}
|
||||
if st.Mismatch {
|
||||
if strings.TrimSpace(st.LibraryVersion) != "" {
|
||||
logger.Warnf("Driver/library version mismatch, NVML library version: %s", st.LibraryVersion)
|
||||
} else {
|
||||
logger.Warn("Driver/library version mismatch detected")
|
||||
}
|
||||
}
|
||||
}
|
||||
if !st.Installed && !st.Running {
|
||||
logger.Info("no NVIDIA driver detected (neither installed nor running)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -688,31 +703,35 @@ func (t *RestartPlugin) Execute(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExitIfNoDriverUpgradeNeeded struct {
|
||||
type WriteNouveauBlacklist struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *ExitIfNoDriverUpgradeNeeded) Execute(runtime connector.Runtime) error {
|
||||
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
if err != nil {
|
||||
logger.Warn("error checking whether the GPU need upgrade:")
|
||||
logger.Warn(err.Error())
|
||||
logger.Warn("assuming an upgrade is needed and continue upgrading")
|
||||
func (t *WriteNouveauBlacklist) Execute(runtime connector.Runtime) error {
|
||||
if !runtime.GetSystemInfo().IsLinux() {
|
||||
return nil
|
||||
}
|
||||
if !installed {
|
||||
logger.Info("GPU driver not installed, will just install it")
|
||||
return nil
|
||||
const dir = "/usr/lib/modprobe.d"
|
||||
const dst = "/usr/lib/modprobe.d/olares-disable-nouveau.conf"
|
||||
const content = "blacklist nouveau\nblacklist lbm-nouveau\nalias nouveau off\nalias lbm-nouveau off\n"
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("install -d -m 0755 "+dir, false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to ensure /usr/lib/modprobe.d exists")
|
||||
}
|
||||
installedVersion, err := semver.NewVersion(gpuInfo.CudaVersion)
|
||||
if err != nil {
|
||||
logger.Warn("error parsing the current CUDA version of GPU driver \"%s\": %v", gpuInfo.CudaVersion, err)
|
||||
logger.Warn("assuming an upgrade is needed and continue installing")
|
||||
return nil
|
||||
|
||||
tmpPath := path.Join(runtime.GetBaseDir(), cc.PackageCacheDir, "gpu", "olares-disable-nouveau.conf")
|
||||
if err := os.MkdirAll(path.Dir(tmpPath), 0755); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to create temp dir for nouveau blacklist")
|
||||
}
|
||||
targetVersion, _ := semver.NewVersion(common.CurrentVerifiedCudaVersion)
|
||||
if !targetVersion.GreaterThan(installedVersion) {
|
||||
logger.Info("current GPU driver version is up to date, no need to upgrade")
|
||||
if err := util.WriteFile(tmpPath, []byte(content), 0644); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to write temp nouveau blacklist file")
|
||||
}
|
||||
if err := runtime.GetRunner().SudoScp(tmpPath, dst); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to install nouveau blacklist file")
|
||||
}
|
||||
|
||||
if out, _ := runtime.GetRunner().SudoCmd("test -d /sys/module/nouveau && echo loaded || true", false, false); strings.TrimSpace(out) == "loaded" {
|
||||
logger.Infof("the disable file for nouveau kernel module has been written, but the nouveau kernel module is currently loaded. Please REBOOT your machine to make the disabling effective.")
|
||||
os.Exit(0)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -202,12 +202,17 @@ func (g *GenerateK3sService) Execute(runtime connector.Runtime) error {
|
||||
"runtime-request-timeout": "5m",
|
||||
"image-gc-high-threshold": "91",
|
||||
"image-gc-low-threshold": "90",
|
||||
"housekeeping_interval": "5s",
|
||||
}
|
||||
defaultKubeProxyArgs := map[string]string{
|
||||
"proxy-mode": "ipvs",
|
||||
}
|
||||
|
||||
kubeApiserverArgs, _ := util.GetArgs(map[string]string{}, g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
|
||||
defaultKubeApiServerArgs := map[string]string{
|
||||
"service-node-port-range": "445-32767",
|
||||
}
|
||||
|
||||
kubeApiserverArgs, _ := util.GetArgs(defaultKubeApiServerArgs, g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
|
||||
kubeControllerManager, _ := util.GetArgs(map[string]string{
|
||||
"terminated-pod-gc-threshold": "1",
|
||||
}, g.KubeConf.Cluster.Kubernetes.ControllerManagerArgs)
|
||||
|
||||
@@ -162,17 +162,19 @@ var (
|
||||
}
|
||||
|
||||
ApiServerArgs = map[string]string{
|
||||
"bind-address": "0.0.0.0",
|
||||
"audit-log-maxage": "30",
|
||||
"audit-log-maxbackup": "10",
|
||||
"audit-log-maxsize": "100",
|
||||
"bind-address": "0.0.0.0",
|
||||
"audit-log-maxage": "30",
|
||||
"audit-log-maxbackup": "10",
|
||||
"audit-log-maxsize": "100",
|
||||
"service-node-port-range": "445-32767",
|
||||
}
|
||||
ApiServerSecurityArgs = map[string]string{
|
||||
"bind-address": "0.0.0.0",
|
||||
"audit-log-maxage": "30",
|
||||
"audit-log-maxbackup": "10",
|
||||
"audit-log-maxsize": "100",
|
||||
"authorization-mode": "Node,RBAC",
|
||||
"bind-address": "0.0.0.0",
|
||||
"audit-log-maxage": "30",
|
||||
"audit-log-maxbackup": "10",
|
||||
"audit-log-maxsize": "100",
|
||||
"service-node-port-range": "445-32767",
|
||||
"authorization-mode": "Node,RBAC",
|
||||
// --enable-admission-plugins=EventRateLimit must have a configuration file
|
||||
"enable-admission-plugins": "AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity",
|
||||
// "audit-log-path": "/var/log/apiserver/audit.log", // need audit policy
|
||||
|
||||
@@ -44,14 +44,14 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/ks-config/templates": &vfsgen۰DirInfo{
|
||||
name: "templates",
|
||||
modTime: time.Date(2025, 7, 31, 8, 50, 53, 949729535, time.UTC),
|
||||
modTime: time.Date(2025, 11, 17, 9, 8, 56, 269518280, time.UTC),
|
||||
},
|
||||
"/build/ks-config/templates/kubesphere-config.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "kubesphere-config.yaml",
|
||||
modTime: time.Date(2025, 7, 31, 8, 50, 53, 949762785, time.UTC),
|
||||
uncompressedSize: 418,
|
||||
modTime: time.Date(2025, 11, 17, 9, 8, 56, 269634566, time.UTC),
|
||||
uncompressedSize: 419,
|
||||
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\x84\x90\xb1\x6a\xc3\x30\x10\x86\x77\x3d\xc5\xbd\x80\x1d\x87\x94\x82\x6f\xed\xd0\x29\xd0\xa5\xdd\x2f\xf6\xd9\x3e\x22\x9d\x84\x74\x36\x04\xfa\xf0\xa5\xc6\xa9\xd3\xa1\x74\x14\xdf\xaf\xff\xfb\x39\x4a\xf2\xc1\xb9\x48\x54\x84\xe5\xe8\xae\xa2\x3d\xc2\x4b\xd4\x41\xc6\x33\x25\x17\xd8\xa8\x27\x23\x74\x00\x4a\x81\x11\xae\xf3\x85\x4b\x9a\x38\x73\xd5\xad\xb1\x8d\x94\x44\xdd\x6f\x5c\x6e\xc5\x38\xb8\xfb\xf7\x9d\xd4\x37\x0a\x1e\xe1\xd3\x01\x00\x84\xa8\x62\x31\x8b\x8e\xb8\xbe\x01\x58\xfb\x14\x45\x0d\x61\x32\x4b\x78\x38\xa4\x1c\x03\xdb\xc4\x73\xa9\x62\xe2\x4c\xc6\x7d\xfd\xe0\xd9\x1b\x36\x65\x5d\x96\x0e\xdb\xa6\x6d\x7e\x0a\xe9\xe2\xf9\xf5\xed\xfd\xbc\xbb\x60\x20\x5f\x78\x0d\x68\x34\x19\xa4\x23\xfb\x3e\xc2\x5f\x1b\x1e\x43\x55\x20\xa5\x91\x73\x55\x96\xee\xff\x21\xc7\xb6\x69\x4f\x6b\xad\x71\x0e\xa2\xe4\xef\x12\x09\x34\x32\x02\xf9\x24\xca\x78\xaa\x8f\x4f\x1b\x30\x09\x1c\x67\x43\x78\x6e\x1a\xf7\x15\x00\x00\xff\xff\xa8\x81\xab\xba\xa2\x01\x00\x00"),
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\x84\x90\xc1\x6a\x83\x40\x10\x86\xef\xfb\x14\xf3\x02\x1a\x6d\x0a\xc5\xb9\xf6\xd0\x53\xa0\x97\xf6\x3e\xd1\x51\x87\xb8\xb3\xcb\xee\x28\x04\xfa\xf0\xa5\x62\x6a\x7a\x28\x39\x2e\xdf\xbf\xff\xf7\x33\x14\xe5\x93\x53\x96\xa0\x08\x4b\xed\x2e\xa2\x1d\xc2\x6b\xd0\x5e\x86\x13\x45\xe7\xd9\xa8\x23\x23\x74\x00\x4a\x9e\x11\x2e\xf3\x99\x73\x1c\x39\x71\xd1\xae\xb1\x8d\xe4\x48\xed\x5f\x9c\xaf\xd9\xd8\xbb\xdb\xf7\x9d\x94\x57\xf2\x13\xc2\x97\x03\x00\xf0\x41\xc5\x42\x12\x1d\x70\x7d\x03\xb0\x76\x31\x88\x1a\xc2\x68\x16\xf1\x70\x88\x29\x78\xb6\x91\xe7\x5c\x84\xc8\x89\x8c\xbb\xf2\xce\xb3\x37\x6c\xca\x32\x2f\x2d\x36\x55\x53\xfd\x16\xd2\x79\xe2\xb7\xf7\x8f\xd3\xee\x82\x9e\xa6\xcc\x6b\x40\x83\x49\x2f\x2d\xd9\xcf\x11\xfe\xdb\x70\x1f\x2a\x3c\x29\x0d\x9c\x8a\xbc\xb4\x8f\x87\xd4\x4d\xd5\x1c\xd7\x5a\xe3\xe4\x45\x69\xba\x49\xc4\xd3\xc0\x08\x34\x45\x51\xc6\x63\x59\x3f\x6f\xc0\xc4\x73\x98\x0d\xe1\xe5\xa9\xaa\xdc\x77\x00\x00\x00\xff\xff\x8a\xb9\xa0\x58\xa3\x01\x00\x00"),
|
||||
},
|
||||
"/build/ks-config/values.yaml": &vfsgen۰FileInfo{
|
||||
name: "values.yaml",
|
||||
@@ -78,7 +78,7 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/ks-core/templates": &vfsgen۰DirInfo{
|
||||
name: "templates",
|
||||
modTime: time.Date(2025, 9, 4, 11, 58, 15, 814111089, time.UTC),
|
||||
modTime: time.Date(2025, 11, 20, 6, 44, 5, 165722497, time.UTC),
|
||||
},
|
||||
"/build/ks-core/templates/NOTES.txt": &vfsgen۰FileInfo{
|
||||
name: "NOTES.txt",
|
||||
@@ -94,10 +94,10 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/ks-core/templates/ks-apiserver.yml": &vfsgen۰CompressedFileInfo{
|
||||
name: "ks-apiserver.yml",
|
||||
modTime: time.Date(2025, 9, 4, 11, 58, 15, 814831582, time.UTC),
|
||||
uncompressedSize: 3060,
|
||||
modTime: time.Date(2025, 11, 20, 6, 44, 5, 165886081, time.UTC),
|
||||
uncompressedSize: 3111,
|
||||
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xbc\x56\x4d\x6f\xe3\x36\x13\xbe\xeb\x57\x0c\xf2\x9e\x25\x3b\xc1\xbe\x45\x96\x40\x0f\x41\xb2\x68\x83\x36\xad\x11\xa7\x01\x7a\xa4\xc9\xb1\x44\x98\x22\x59\x72\xe4\x46\x70\xf7\xbf\x17\xb4\x6c\x99\xb2\x94\xd4\xc5\x02\xd5\x49\x98\xe1\x7c\x3d\x33\xf3\x90\xdc\xa9\x57\xf4\x41\x59\xc3\x80\x3b\x17\x66\xdb\xeb\x6c\xa3\x8c\x64\xf0\x80\x4e\xdb\xb6\x46\x43\x59\x8d\xc4\x25\x27\xce\x32\x00\xcd\x57\xa8\x43\xfc\x83\x68\xc0\x60\x13\x72\xee\x54\x40\xbf\x45\xbf\x97\x92\x42\xcf\x60\xc5\xc5\x06\x8d\xdc\x4b\xb6\xc7\x08\xbb\x1d\x14\xf7\x15\xf7\x54\xdc\x39\x77\x88\x0b\x5f\xbf\x66\x00\x86\xd7\x78\xe6\x2b\x38\x14\x31\x4e\x20\xcf\x09\xcb\xb6\x8b\xe9\xad\xd6\xca\x94\xbf\x39\xc9\x09\x3b\x11\x40\xcd\xdf\x96\x8d\x2f\x91\xc1\xbc\x4b\xa1\x75\xc8\xe0\x39\x3d\x9a\x01\x78\x74\x5a\x09\x1e\xba\x3c\x9e\x51\x23\x0f\x58\x3c\x77\xd2\x7b\xdb\x18\xea\x72\x09\xa8\x51\x90\xf5\x9d\xf7\x9a\x93\xa8\x7e\x4e\xaa\x7e\xaf\xee\xa9\xca\x01\xfe\x77\x49\xf5\x84\xb5\xd3\x7d\x3d\x29\xdc\xf1\xd3\x83\xe0\xef\x87\x9f\x4e\xe0\xc2\x14\x00\x8e\x78\xc7\x6f\xb7\xcb\xe1\x4f\x45\x15\x14\xaf\x5c\x37\x18\x0a\x55\xf3\x12\x17\x8d\xd6\x4b\x14\x1e\x29\x1c\x8d\x00\xce\x35\xa7\x3c\xa3\x13\xb2\xbf\xf3\x5a\x43\x01\x7f\x81\x51\x46\xa2\x21\xb8\x3d\xd9\xc6\x13\x68\xe4\x49\x20\xac\x21\xae\x0c\xfa\xde\x4d\x0e\xc2\xd6\x35\x37\xf2\xe4\x37\x9f\x2e\x3e\x87\x3c\xd7\xb6\x24\x1b\x48\xa2\xf7\xdf\x93\x6f\xb0\x57\xee\xb3\x64\xb0\x42\xa1\xf9\x6a\x96\xda\xb3\x79\x31\x2f\x6e\x3e\x0d\x4f\xc6\x7a\x16\x56\x2b\xd1\x76\x98\xa5\x28\x14\xae\x57\x9e\x12\x9f\x9c\xe0\xa3\xca\x59\x9f\xe2\x92\x9f\xca\x5c\x58\x4f\x0c\x3e\xcf\x3f\xcf\x7b\x2d\x80\xf3\x96\xac\xb0\x9a\xc1\xcb\xfd\xa2\x97\x7b\x0c\xb6\xf1\x02\x13\x47\x43\x88\x0f\x39\xf6\xe1\x8b\xde\x22\x01\xff\xfa\x26\xcd\x79\x6b\x75\x53\xe3\x53\x1c\xfd\x41\x7e\x75\x94\x2c\x38\x55\x0c\x66\x48\x62\xb6\x69\x56\x18\x5c\x85\x1e\x67\x49\xf0\x43\xc5\xbd\x2e\x17\xd6\xac\x55\xf9\x81\x1f\x6d\x05\xd7\xa4\x6a\x1c\x79\xa9\x6c\xa0\xfc\x4c\xe3\x91\xcb\x5f\x8d\x6e\x19\x0c\x5a\x19\
x6b\x56\xeb\x89\x7a\xf1\x8d\x3c\x7f\x4d\x4a\x4a\x4b\xfd\x07\xac\xc6\xb6\x93\x03\x3b\x31\xb2\x00\x5a\x6d\xd1\x60\x08\x0b\x6f\x57\x98\x76\x67\xcd\x95\x6e\x3c\xbe\x54\x1e\x43\x65\xb5\x64\x70\x9b\x68\x2b\x22\xf7\x03\x52\x6a\x00\xe0\x3a\xb0\x36\x31\xb3\xd9\x61\x6d\x87\x07\xa6\x26\x06\x20\x88\x0a\x23\x8e\x3f\xbe\xbc\x2c\x12\x85\x32\x8a\x14\xd7\x0f\xa8\x79\xbb\x44\x61\x8d\x0c\x0c\xae\xff\x9f\x9c\x88\x98\xdb\x86\xc6\xca\x08\x8c\x12\x78\x27\x44\xc4\xe3\x97\x7d\x97\x76\x3b\x50\x46\xe8\x46\x22\x5c\x6d\x42\x2e\xac\xc7\x62\x7c\xee\x0a\x8a\xe1\x8a\x0f\x98\x84\xac\x46\xcf\x49\x59\x93\xf4\x27\x11\x7e\x13\x7f\x8c\xa2\x19\x2b\x71\x79\xe0\xf3\xd3\xb1\x54\xfa\x4d\xf1\xf8\x7a\x1d\x21\x6e\x59\xb2\xeb\xf2\xce\x90\xba\x1b\x29\xe2\x3c\xff\xd1\x28\x8f\xf2\xa1\xf1\xca\x94\x4b\x51\xa1\x6c\xe2\x05\xf5\x58\x1a\xdb\x8b\xbf\xbc\xa1\x68\x22\x10\xa9\x65\xcc\xcb\x59\x6d\xcb\xf6\x27\x6c\xbb\x95\xf3\x06\x29\x32\x92\x9d\xc5\xd5\x89\x4b\x34\x98\x87\xfd\x9d\x31\x2e\xf1\x78\x5d\x92\xa8\xbe\xbc\x39\x8f\x21\x0c\x21\xef\x09\x36\x86\xe1\xce\x9d\x29\x00\xac\x8b\x7d\xb2\x9e\xc1\xa3\x19\x29\xb7\x7b\xcc\xd9\x48\xfe\x0e\x61\x1f\xb7\x3f\x38\x2e\xce\xcd\xf2\x94\x57\x42\x1b\x08\xeb\x6c\xb2\xb9\x47\xa0\x07\x54\x9c\xc8\x59\xb6\xdb\xa5\x5d\x3d\xf2\xe0\x3c\x1a\x8c\xfa\xd9\x11\xe2\xe0\xf2\x89\x94\xf6\xc4\x5d\x9a\x9e\xc4\x35\x6f\x34\x3d\x59\x89\x0c\x3e\xdd\xcc\xff\x05\x27\x7e\xac\xcf\xf7\x3c\xb8\x27\xcc\x6c\xc4\x09\xef\x11\x68\xf7\xd8\xb9\xba\xca\x3e\x66\xd4\x4b\x68\x33\x0c\x99\xee\x22\xba\x4c\x99\xf2\xbb\xc9\x55\xc9\xf3\x3c\xcb\xd2\x87\x66\xff\xc6\x5c\x76\xdc\x31\x78\x60\x72\x63\x2c\xa5\x54\x30\x9c\x76\xe1\x91\x13\xca\x7c\xd5\xa6\x30\x46\xcd\xd9\x8c\xfd\xf7\x0f\xd5\xfe\x9a\xcf\x0f\x2c\x7d\xdb\x4d\xc6\xf8\x3e\x27\xee\x4b\xa4\xc1\xe5\x3f\x7c\x74\x5e\x9a\xf0\x65\xaf\xcb\xfd\x80\xdc\xeb\x26\x10\xfa\xc7\x45\xf6\x77\x00\x00\x00\xff\xff\x30\xa7\xa6\x88\xf4\x0b\x00\x00"),
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xbc\x56\xd1\x6e\xeb\x36\x0f\xbe\xf7\x53\x10\xf9\xaf\xed\xa4\xc5\xf9\x87\x1e\x03\xbb\x28\xd2\x83\xad\xd8\xba\x05\x4d\x57\x60\x97\x8c\xcc\xc4\x42\x64\x49\x93\xe8\xac\x46\x76\xde\x7d\x50\x9c\x38\x72\xec\x76\x19\x0e\x30\x5f\x19\xa4\x28\x92\x1f\xc9\x8f\x42\x2b\x5f\xc9\x79\x69\x74\x0e\x68\xad\x9f\xee\x6e\x92\xad\xd4\x45\x0e\x0f\x64\x95\x69\x2a\xd2\x9c\x54\xc4\x58\x20\x63\x9e\x00\x28\x5c\x91\xf2\xe1\x0f\x82\x41\x0e\x5b\x9f\xa2\x95\x9e\xdc\x8e\xdc\x41\xca\x92\x5c\x0e\x2b\x14\x5b\xd2\xc5\x41\xb2\x3b\x79\xd8\xef\x21\x9b\x97\xe8\x38\xbb\xb7\xf6\xe8\x17\xbe\x7e\x4d\x00\x34\x56\x74\x71\x97\xb7\x24\x82\x1f\xcf\x0e\x99\x36\x4d\xeb\xd3\x19\xa5\xa4\xde\xfc\x66\x0b\x64\x6a\x45\x00\x15\xbe\x2d\x6b\xb7\xa1\x1c\x66\x6d\x08\x8d\xa5\x1c\x9e\xe3\xa3\x09\x80\x23\xab\xa4\x40\xdf\xc6\xf1\x4c\x8a\xd0\x53\xf6\xdc\x4a\xe7\xa6\xd6\xdc\xc6\xe2\x49\x91\x60\xe3\xda\xdb\x2b\x64\x51\xfe\x1c\x65\xfd\x5e\xde\x63\x99\x03\xfc\xef\x9a\xec\x99\x2a\xab\xba\x7c\x62\xb8\xc3\xa7\x7a\xce\xdf\x77\x3f\x1e\xc0\x95\x21\x00\x9c\xf0\x0e\xdf\x7e\x9f\xc2\x9f\x92\x4b\xc8\x5e\x51\xd5\xe4\x33\x59\xe1\x86\x16\xb5\x52\x4b\x12\x8e\xd8\x9f\x8c\x00\x2e\x35\xe7\x38\xc3\x25\x6c\x7e\xc7\x4a\x41\x06\x7f\x81\x96\xba\x20\xcd\x70\x77\xb6\x0d\x27\x48\x17\x67\x81\x75\xd2\x38\xc9\xcd\x5c\xa1\xf7\xbf\x1c\x7a\x62\xe2\x1b\xcf\x54\xa5\x42\xd5\x9e\xc9\xa5\xc2\x49\x96\x02\xd5\xe4\x68\x22\x8c\x66\x94\x9a\x5c\xe7\x39\x05\x61\xaa\x0a\x75\x71\x0e\x25\x1d\xc7\x2b\x85\x34\x55\x66\xc3\xc6\x73\x41\xce\x7d\xcf\xae\xa6\x4e\x79\x48\x2c\x87\x15\x09\x85\xab\x69\x6c\x9f\xcf\xb2\x59\x76\xfb\xa9\x7f\x32\x40\xb0\x30\x4a\x8a\xa6\x85\x39\x06\x2e\xb3\x9d\xf2\x9c\xeb\x68\xd3\x9f\x54\xd6\xb8\x18\xca\xf4\x9c\xe6\xc2\x38\xce\xe1\xf3\xec\xf3\xac\xd3\x06\xdc\x0c\x1b\x61\x54\x0e\x2f\xf3\x45\x27\x77\xe4\x4d\xed\x04\x45\x17\xf5\xab\x72\x8c\xb1\x73\x9f\x75\x16\x51\xbd\x6e\x6e\xe3\x98\x77\x46\xd5\x15\x3d\x85\x69\xe9\xc5\x57\x05\xc9\x02\xb9\xcc\x61\x4a\x2c\xa6\xdb\x7a\x45\xde\x96\xe4\x68\x1a\x39\x3f\x66\xdc\
xe9\x52\x61\xf4\x5a\x6e\x3e\xb8\x47\x19\x81\x8a\x65\x45\x83\x5b\x4a\xe3\x39\xbd\xd0\x38\xc2\xe2\x57\xad\x9a\x1c\x7a\xa5\x0c\x39\xcb\xf5\x48\xbe\xf4\xc6\x0e\x5f\xa3\x94\xe2\x54\xff\x01\xab\xa1\xed\x68\x8f\x8f\x74\x39\x80\x92\x3b\xd2\xe4\xfd\xc2\x99\x15\xc5\xd5\x59\xa3\x54\xb5\xa3\x97\xd2\x91\x2f\x8d\x2a\x72\xb8\x8b\xb4\x25\xb3\xfd\x81\x38\x36\x00\xb0\x2d\x58\xdb\x10\xd9\xf4\x38\xe9\xfd\x03\x63\x1d\x03\xe0\x45\x49\x01\xc7\x1f\x5f\x5e\x16\x91\x42\x6a\xc9\x12\xd5\x03\x29\x6c\x96\x24\x8c\x2e\x7c\x0e\x37\xff\x8f\x4e\x04\xcc\x4d\xcd\x43\x65\x00\x46\x0a\xba\x17\x22\xe0\xd1\x8e\xef\x7e\x0f\x52\x0b\x55\x17\x04\x93\xad\x4f\x85\x71\x94\x0d\xcf\x4d\x20\xeb\xb3\x42\x8f\x7c\xd8\x28\x72\xc8\xd2\xe8\xa8\x3e\x91\xf0\x9b\x28\x67\xe0\x4d\x9b\x82\x96\xc7\x15\x70\x3e\x16\x4b\xbf\xc9\x1f\xae\xd7\x01\xe2\x26\x8f\x66\xbd\xb8\xd7\x2c\xef\x07\x8a\xd0\xcf\x7f\xd4\xd2\x51\xf1\x50\x3b\xa9\x37\x4b\x51\x52\x51\x87\x9d\xf6\xb8\xd1\xa6\x13\x7f\x79\x23\x51\x07\x20\x62\xcb\x10\x97\x35\xca\x6c\x9a\x9f\xa8\x69\x47\xce\x69\xe2\xc0\x48\x66\x1a\x46\x27\x0c\x51\xaf\x1f\x0e\x6b\x66\x98\xe2\x69\xc3\xb2\x28\xbf\xbc\x59\x47\xde\xf7\x21\xef\x08\x36\xb8\x41\x6b\x2f\x14\x00\xc6\x86\x3a\x19\x97\xc3\xa3\x1e\x28\x77\x07\xcc\xf3\x81\xfc\x1d\xc2\x3e\x4d\xbf\xb7\x28\x2e\xcd\xd2\x98\x57\xda\xa5\x91\x8c\x16\xf7\x04\x74\x8f\x8a\x23\x79\x9e\xec\xf7\x71\x55\x4f\x3c\x38\x0b\x06\x83\x7a\xb6\x84\xd8\x5b\x3e\x81\xd2\x9e\xd0\xc6\xe1\x15\xb4\xc6\x5a\xf1\x93\x29\x28\x87\x4f\xb7\xb3\x7f\xc1\x89\x1f\xeb\xd3\x03\x0f\x1e\x08\x33\x19\x70\xc2\x7b\x04\xda\xbe\x8f\x26\x93\xe4\x63\x46\xbd\x86\x36\x7d\x9f\xe9\xae\xa2\xcb\x98\x29\xbf\x1b\x1d\x95\x34\x4d\x93\x24\x7e\x9b\x76\xcf\xd2\x65\xcb\x1d\xbd\x37\x29\x6a\x6d\x38\xa6\x82\x7e\xb7\x0b\x47\xc8\x54\xa4\xab\x26\x86\x31\x68\x2e\x7a\xec\xbf\x7f\xdb\x76\x6b\x3e\x3d\xb2\xf4\x5d\xdb\x19\xc3\x7d\xce\xe8\x36\xc4\xbd\xe5\xdf\x7f\xa7\x5e\x1b\xf0\x75\x0f\xd2\x43\x83\xcc\xdb\x27\xd7\xe3\x22\xf9\x3b\x00\x00\xff\xff\x06\x39\x6d\x22\x27\x0c\x00\x00"),
|
||||
},
|
||||
"/build/ks-core/values.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "values.yaml",
|
||||
@@ -239,7 +239,7 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/prometheus/kube-state-metrics": &vfsgen۰DirInfo{
|
||||
name: "kube-state-metrics",
|
||||
modTime: time.Date(2025, 9, 30, 6, 56, 49, 643530930, time.UTC),
|
||||
modTime: time.Date(2025, 11, 20, 6, 44, 5, 166070457, time.UTC),
|
||||
},
|
||||
"/build/prometheus/kube-state-metrics/kube-state-metrics-clusterRole.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "kube-state-metrics-clusterRole.yaml",
|
||||
@@ -257,10 +257,10 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/prometheus/kube-state-metrics/kube-state-metrics-deployment.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "kube-state-metrics-deployment.yaml",
|
||||
modTime: time.Date(2025, 9, 30, 6, 56, 49, 643763923, time.UTC),
|
||||
uncompressedSize: 4124,
|
||||
modTime: time.Date(2025, 11, 20, 6, 44, 5, 166198083, time.UTC),
|
||||
uncompressedSize: 4175,
|
||||
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x57\x5b\x6f\xdb\x46\x13\x7d\xd7\xaf\x58\xf8\xc3\x07\xc8\x75\x48\x5d\x12\xa7\x0e\x01\xa1\x70\x15\x23\x0e\x10\xbb\x82\x15\xb7\x0f\x45\x41\xac\x96\x23\x69\xeb\xbd\x75\x77\x28\x8b\x80\x7e\x7c\xb1\xbc\x8a\xb6\x24\x47\x46\x80\x3e\xb4\x7a\x12\x67\x66\xcf\xce\x9e\x99\x33\x4b\x52\xc3\x7f\x05\xeb\xb8\x56\x11\xa1\xc6\xb8\xde\x6a\xd0\x79\xe0\x2a\x89\xc8\x47\x30\x42\x67\x12\x14\x76\x24\x20\x4d\x28\xd2\xa8\x43\x88\xa0\x33\x10\xce\xff\x23\x7e\x41\xf8\x90\xce\xc0\x2a\x40\x70\x21\xd7\x3d\xa6\xa5\xd1\x0a\x14\x46\x04\xd6\x46\x5b\x04\xbb\x27\x52\x51\x09\x11\xf1\xb6\xc0\x21\x45\x08\x24\xa0\xe5\xcc\xed\x09\x37\xd4\x62\xa0\xe7\xe5\x0a\x63\xb5\x04\x5c\x42\xba\x2f\x7c\x55\x9d\x69\x18\xbe\x0d\xfb\x01\xac\x31\x1c\x90\xff\x11\xd4\x89\xee\x10\x72\x60\x6f\xef\x72\x86\xb2\xd2\xef\xcc\x12\x2c\x04\x52\x2b\x8e\xda\x72\xb5\x08\x5c\xe6\x10\x64\xc7\x19\x60\x9e\x04\x0b\x46\x70\x46\x5d\x44\x06\x1d\x42\x1c\x08\x60\xa8\x6d\x41\x8f\xa4\xc8\x96\x5f\xb6\xf8\x3a\x86\xb1\xa3\x39\x3b\x92\x35\x87\x96\x22\x2c\xb2\xa8\xa4\x85\xfc\x94\xa3\x58\x2d\x04\x57\x8b\x7b\x93\x50\x84\x2a\x6b\x49\xd7\xd3\xd4\x2e\x20\x22\xc3\xf3\xff\x37\xb6\x7b\x45\x57\x94\x0b\x3a\x13\x5b\x1e\xcc\x0c\x44\xe4\x6e\x1b\xa6\x43\x08\x82\x34\xa2\x46\xdc\xee\xa7\x3c\x71\xa5\x34\x52\xe4\x5a\xd5\x44\x91\x3c\x65\x86\xe2\xc9\x89\x12\x98\xd3\x54\x60\xc0\xb4\x42\xca\x15\xd8\x03\x7c\x88\x16\xf5\xc7\x91\xff\x0a\xfa\x8f\x6e\xdb\xd7\xb4\x2e\x21\x55\xe7\xf9\xdf\x4a\x8b\x54\x42\x7d\xc4\xa0\xec\xec\xa5\x76\x18\x20\x97\x50\x6f\xe3\x2d\x13\x8a\xcb\x86\x0c\x42\x8c\x7f\x26\x3d\x40\xd6\x13\x9a\x51\xd1\x5a\x50\x15\xf2\xe4\xa4\x34\xd5\x84\x6f\xed\x46\xed\x62\x8b\xde\x80\x04\x81\xdf\x67\x34\x18\xfe\x18\xf6\xc3\x7e\x38\x68\xb9\x3c\xbd\xa3\x8b\xfe\x45\xdb\x8a\x20\xc0\xd3\x98\x1d\x5a\xda\x04\x55\x20\xc3\x96\xbf\xa8\x43\x90\x80\xca\x04\x77\x38\xf2\x84\xc6\x46\x27\x71\x9d\x74\xec\x0b\x96\xba\x38\xfc\x01\xc1\x4a\xae\x28\x42\x12\x5b\xa0\x4e\xab\x37\x79\x74\x78\x16\x97\xc4\xd7\xcf\xcc\x82\x0f\x2b\x9e\x93\x7a\
x1a\xc6\x5d\x5f\x81\xd8\xd0\xd4\x41\xb2\xc9\xff\x57\x5a\x8a\x4b\xf5\xa4\x79\xdb\xc7\xe1\xd9\x69\xb1\x18\x54\x62\x34\xf7\x4b\xb9\x9a\xeb\x0d\x4d\x12\x0b\xce\x35\xfe\x3f\xf5\xac\x74\xe9\x47\x05\xb6\x00\xed\x1a\x6a\xa9\x10\x20\xb8\x93\x1b\xca\x90\xaf\x7c\x16\x34\x11\x5c\x41\xec\x80\x69\x95\xb8\xd3\x4d\x79\xae\x6e\x11\xb0\x09\xcf\x62\x5f\xc6\xd3\x12\x98\x59\xad\x1a\xf0\x8a\x83\xb3\x62\x83\x7a\xfb\x7a\xe6\xc5\xdd\x32\xc4\x2c\xa9\x83\xd2\x6b\x3c\x2d\x0e\x41\x61\xd1\x6d\x25\x18\xa3\x86\x32\x8e\x59\x03\xf3\x34\x90\x09\xca\x65\xdc\xb5\xe0\x74\x6a\x99\xa7\x63\x43\x19\x6b\x1d\xdc\x01\xb3\x80\x71\xd7\xf7\x5a\x6d\xb2\x2b\x9e\xa7\x52\xe4\xd8\x64\x5d\x06\x70\xb5\xc8\xd9\x2b\xd2\xf0\x4d\xbc\x41\xe1\x4a\x67\x35\x8e\x3d\xe8\xb3\xe3\x16\xec\x56\xd9\xea\x24\xe1\xce\xa6\xc6\x8f\x9d\x59\x9a\x2c\x00\xeb\x26\x39\x6b\x81\xf9\x00\xdf\x48\xbe\xb8\x60\x6b\xaf\xd2\x89\x4f\x66\xae\x8b\xc7\xee\xd2\xd0\x4d\xb3\xfd\x66\xe7\xe2\x53\xdf\x57\x0b\x50\x60\x73\xc7\xae\x26\x2e\x86\x56\x40\x85\xd0\x8f\x79\x33\xd7\xd5\x71\xa3\xdf\x9b\x2b\xc9\x4f\x8a\x47\x6d\x1f\x72\xcf\x9b\x59\x86\x80\x96\x26\xb9\x59\xb9\x20\x3f\xe8\x1f\x35\x3c\x97\xd4\x0f\xef\x99\x2f\xc9\xac\xf7\x7c\x80\x45\xab\xad\x59\xd3\x5e\x35\x49\x85\x98\x68\xc1\x59\x16\x91\xcf\xf3\x5b\x8d\x13\x0b\xce\xbf\x13\x54\x51\x2f\x0e\xc5\xaa\xfe\x6e\x7b\xf6\x08\x2e\x39\xb6\x2c\x84\x30\x93\x46\xe4\x64\x70\xd2\x32\x4a\x90\xda\x66\x11\xb9\xf8\xc4\xb7\xec\x16\xfe\x4a\xc1\xed\x04\x18\xf4\xfb\x72\x27\xc2\xe0\xbc\x7f\xd3\x60\x38\x60\xa9\xe5\x98\x8d\xb5\x42\x58\xe3\x36\x90\x4d\xd5\xa5\xbb\x77\xfe\x6a\x79\x7f\x7e\xfe\xf6\x5d\xed\x2a\xda\xfa\x46\xa7\x0a\x5b\x63\x4f\x7a\xcb\xe4\xf0\x34\xdd\x37\x98\xfd\x59\x68\xf2\x8b\x12\x59\x44\xd0\xa6\x70\x60\xb6\x0a\xbd\x40\xed\x30\x01\x6b\x5b\xf6\xfc\x28\x10\x88\x5c\x7b\x41\x39\x5c\x46\xd1\xc5\xbb\x77\x6f\xdb\x53\x54\xb8\x80\x71\xdf\x3d\x81\x4b\x39\x82\x1b\x7d\xfd\x32\x8d\xaf\xc6\x1f\xaf\xaf\xe2\xbb\xe9\x65\xfc\xdb\xe7\xaf\xd7\xf1\xe5\xd5\x34\x1e\x0c\x2f\xe2\x4f\xe3\x9b\x78\x7a\x7d\x39\x3c\x7f\xff\xa6\x89\xba\x1a\x7f\x7c\x21\xee\x19\
xce\xf8\xe7\xf1\x37\xe1\xec\x8c\x3b\x80\xd6\x3a\x59\x6a\x1c\x5a\xa0\x72\xb4\x44\x34\x51\xaf\x57\xdf\x22\x91\xbf\x6f\x7a\x87\x74\x60\x67\x94\xf9\x7b\x79\x9d\x45\xfd\x70\xf0\x21\xec\xbf\xbe\xfd\x1b\xa8\x40\x52\xde\x88\xdb\xdf\x59\xad\x52\xd6\xb7\xd2\x44\x5b\x8c\x48\xab\x50\x75\xa7\x20\x1a\xd7\xc6\xf9\x3e\x42\x1a\xf4\xb7\x65\xf0\x82\x94\x76\x2b\x69\x78\x8c\x90\x3e\x59\x9d\x9a\x42\x49\xc3\xa7\xbe\x5b\xad\xee\xb4\xc6\x56\xe7\xef\x50\xe0\xf0\xfb\x89\xe2\xc3\x7f\xa2\xc8\x45\x31\xfc\x07\x44\xe1\x40\xcc\xbf\x59\x14\x1f\xf6\x8b\xa2\x85\xf3\xaf\x17\x85\x7f\x0d\x99\xb6\x3e\x3b\xab\x8f\xa7\xe6\x93\x42\xbb\x88\x08\xae\xd2\x75\xa7\x4a\x2f\x7f\xc7\xba\x64\xcc\xdf\x5c\xb7\xfb\x6e\xf0\xbf\x03\x00\x00\xff\xff\x22\x0f\x2a\x59\x1c\x10\x00\x00"),
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x57\x6f\x6f\xdb\xb6\x13\x7e\xef\x4f\x41\xe4\x87\x1f\xe0\x2c\x95\xfc\xa7\x4d\x97\x0a\x30\x86\x2c\x0d\xda\x02\x6d\x16\x34\xed\xf6\x62\x18\x04\x9a\xba\xd8\x5c\x29\x1e\xc7\x3b\xb9\x11\xe0\x0f\x3f\x50\x7f\xad\xd6\x71\x9b\xa2\xc0\x5e\x6c\x7e\x65\xdd\x1d\x1f\x1e\x9f\xbb\xe7\x28\x49\xa7\x7f\x05\x4f\x1a\x6d\x22\xa4\x73\x34\xd9\xcc\x46\x1f\xb4\xcd\x12\xf1\x1c\x9c\xc1\x32\x07\xcb\xa3\x1c\x58\x66\x92\x65\x32\x12\xc2\xc8\x25\x18\x0a\xff\x44\x58\x10\x7f\x28\x96\xe0\x2d\x30\x50\xac\x71\xa2\x30\x77\x68\xc1\x72\x22\xe0\xce\xa1\x67\xf0\xf7\x44\x5a\x99\x43\x22\x82\x2d\x22\x96\x0c\x51\x0e\xec\xb5\xa2\x7b\xc2\x9d\xf4\x1c\xe1\x6d\xb3\xc2\x79\xcc\x81\xd7\x50\xdc\x17\xbe\x69\xcf\x34\x8f\x1f\xc7\xd3\x08\xee\x38\x9e\x89\xff\x09\xc6\x0c\x47\x42\x1c\xd8\x3b\xb8\xc8\x49\xd5\xf8\xc9\xad\xc1\x43\x94\xa3\xd5\x8c\x5e\xdb\x55\x44\x25\x31\xe4\x23\x72\xa0\x02\x09\x1e\x9c\xd1\x4a\x52\x22\x66\x23\x21\x08\x0c\x28\x46\x5f\xd3\x93\x4b\x56\xeb\xd7\x3b\x7c\x3d\x84\xb1\x07\x73\xf6\x40\xd6\x88\xbd\x64\x58\x95\x49\x43\x8b\xf8\xa9\x42\xf1\x68\x8c\xb6\xab\xf7\x2e\x93\x0c\x6d\xd6\xb9\xbc\xbb\x29\xfc\x0a\x12\x31\x3f\xfd\x7f\x6f\x7b\x6f\xe5\x46\x6a\x23\x97\x66\xc7\xc3\xa5\x83\x44\xbc\xdd\x85\x19\x09\xc1\x90\x3b\xd3\x21\xee\xf6\x53\x95\xb8\xb5\xc8\x92\x35\xda\x8e\x28\x51\xa5\xac\xd8\x7c\x72\xa2\x0c\x6e\x65\x61\x38\x52\x68\x59\x6a\x0b\xfe\x00\x1f\x66\x40\xfd\xc3\xc8\xff\x06\xfa\x1f\xdc\xb6\xdf\xd2\xba\x42\xb4\x9d\x17\x7e\x1b\x34\x45\x0e\xdd\x11\xa3\xa6\xb3\xd7\x48\x1c\xb1\xce\xa1\xdb\x26\x58\xae\x25\xaf\x7b\x32\x84\x70\xe1\x59\x4c\x80\xd5\xc4\xa0\x92\x66\xb0\xa0\x2d\xe4\xd1\x51\x63\x72\x5e\xa3\xd7\x5c\x5e\x18\x49\x74\x55\x6d\x73\x54\x6b\x21\x52\xa6\x20\x06\x1f\x29\xaf\x59\x2b\x69\xda\x25\x5d\x8d\x76\x12\x94\x7e\xb5\x53\x91\x48\x44\x51\x48\x6d\x31\x9b\xff\x18\x4f\xe3\x69\x3c\x1b\xb8\x42\x45\x16\x67\xd3\xb3\xa1\x95\xc1\x40\x60\xbe\x3c\xb4\xb4\x0f\x6a\x41\xe6\x03\x7f\x5d\xba\x28\x03\x5b\x1a\x4d\xbc\x08\x35\x48\x1d\x66\x69\x97\x74\x1a\x6a\x5c\x50\x1a\
xff\xc0\xe0\x73\x6d\x25\x43\x96\x7a\x90\x84\xf6\x51\x15\x1d\x9f\xa4\x4d\xad\xba\x67\xe5\x21\x84\xd5\xcf\x59\x37\x40\xd3\x71\x28\x5a\xea\x64\x41\x90\x6d\xab\xff\xad\xfc\xd2\x46\x70\x45\xa5\x94\x34\x3e\x39\xae\x17\x83\xcd\x1c\xea\xb0\x54\xdb\x5b\xdc\xca\x2c\xf3\x40\xd4\xfb\xff\xc4\x65\xe3\xc2\x8f\x16\x7c\x0d\x3a\x76\xd2\x4b\x63\xc0\x68\xca\xb7\x52\xb1\xde\x84\x2c\x64\x66\xb4\x85\x94\x40\xa1\xcd\xe8\x78\xdb\x9c\x6b\x5c\x07\x6c\xe3\x93\x34\x54\xfe\xb8\x01\x56\x1e\x6d\x0f\xde\x72\x70\x52\x6f\xd0\x6d\xdf\x8d\xc9\x74\xdc\x84\xb8\xb5\x24\x68\xbc\x2e\xd0\x42\x0c\x96\xeb\x06\x6d\xc0\x94\x74\x52\x69\x2e\x7b\x98\x4f\x03\x95\x91\x3a\x4f\xc7\x1e\x08\x0b\xaf\x02\x1d\x5b\xa9\xd4\xe0\xe0\x04\xca\x03\xa7\xe3\xd0\x9e\x9d\xc9\x6f\x74\x95\x4a\x9d\x63\x9f\x75\x13\xa0\xed\xaa\x62\xaf\x4e\x23\xf4\xfd\x96\x0d\x35\xce\x76\x82\x07\xd0\xcf\x8e\x5b\xb3\xdb\x66\x8b\x59\xa6\xc9\x17\x2e\x4c\xaa\x65\x91\xad\x80\xbb\x26\x39\x19\x80\x85\x80\xd0\x48\xa1\xb8\xe0\x3b\xaf\xc5\x2c\x24\x73\x8b\xf5\xe3\x78\xed\xe4\xb6\xdf\x7e\xbb\x77\xf1\x71\xe8\xab\x15\x58\xf0\x95\x63\x5f\x13\xd7\x73\x2e\x92\xc6\xe0\xc7\xaa\x99\xbb\xea\xd0\xe2\xf7\xfe\x16\x0b\xc3\xe5\x23\xfa\x0f\x95\xe7\xd1\xb2\x64\x60\x2f\xb3\xca\x6c\x29\xaa\x0e\xfa\x47\x07\xaf\x73\x19\xe6\xfd\x32\x94\x64\x39\xf9\x7c\xe6\x25\x9b\x9d\xf1\x34\x5c\x75\x5d\x18\x73\x8d\x46\xab\x32\x11\xaf\x6e\xaf\x90\xaf\x3d\x50\x78\x8d\x68\xa3\xbe\x38\x47\xdb\xfa\xd3\xee\xb8\x32\x3a\xd7\x3c\xb0\x08\xa1\x5c\x91\x88\xa3\xd9\xd1\xc0\x98\x43\x8e\xbe\x4c\xc4\xd9\x0b\xbd\x63\xf7\xf0\x57\x01\xb4\x17\x60\x36\x9d\xe6\x7b\x11\x66\xa7\xd3\x37\x3d\x06\x81\x2a\xaa\x09\x88\x96\xe1\x8e\x77\x81\x7c\x61\xcf\xe9\x3d\x85\xdb\xe8\xe9\xe9\xe9\xe3\x27\x9d\xab\x6e\xeb\x37\x58\x58\x1e\x8c\xbd\x3c\x58\xae\x0f\x0f\xe0\xfb\x66\x79\x38\x8b\xcc\x7e\xb1\xa6\x4c\x04\xfb\x02\x0e\xcc\x56\x83\x2b\x46\xe2\x0c\xbc\x1f\xd8\xab\xa3\x40\x64\x2a\xed\x45\xcd\x70\x59\x24\x67\x4f\x9e\x3c\x1e\x4e\x51\x43\x91\xd2\xa1\x7b\x22\x2a\x34\x03\x2d\xde\xbd\xbe\x49\x2f\x2f\x9e\xbf\xbc\x4c\xdf\xde\x9c\xa7\xbf\xbd\x7a\xf7\x32\x3d\
xbf\xbc\x49\x67\xf3\xb3\xf4\xc5\xc5\x9b\xf4\xe6\xe5\xf9\xfc\xf4\xe9\xa3\x3e\xea\xf2\xe2\xf9\x17\xe2\x3e\xc3\xb9\xf8\xf9\xe2\xab\x70\xf6\xc6\x1d\x40\x1b\x9c\xac\x70\xc4\x1e\x64\xbe\x58\x33\xbb\x64\x32\xe9\x6e\x91\x24\xdc\x37\x93\x43\x3a\xf0\x4b\xa9\xc2\x55\x7e\x57\x26\xd3\x78\xf6\x2c\x9e\x7e\x7b\xfb\xf7\x50\x51\x2e\x75\x2f\xee\x70\x67\x0d\x4a\xd9\xdd\x4a\xd7\xe8\x39\x11\x83\x42\x75\x9d\xc2\xec\x68\x88\xf3\x7d\x84\x34\x9b\xee\xca\xe0\x0b\x52\xda\xaf\xa4\xf9\x43\x84\xf4\xc2\x63\xe1\x6a\x25\xcd\x3f\xf5\x5d\xa1\x7d\x8b\xc8\x83\xce\xdf\xa3\xc0\xf9\xf7\x13\xc5\xb3\xff\x44\x51\x89\x62\xfe\x0f\x88\x82\xc0\xdc\x7e\xb5\x28\x9e\xdd\x2f\x8a\x01\xce\xbf\x5e\x14\xe1\x35\xe4\x66\xf0\xa5\xda\x7e\x6f\xf5\x5f\x21\x48\x89\x30\xda\x16\x77\xa3\x36\xbd\xea\x1d\xeb\x5c\xa9\x70\x73\x5d\xdd\x77\x83\xff\x1d\x00\x00\xff\xff\x16\xa9\x3b\xd7\x4f\x10\x00\x00"),
|
||||
},
|
||||
"/build/prometheus/kube-state-metrics/kube-state-metrics-prometheusRule.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "kube-state-metrics-prometheusRule.yaml",
|
||||
@@ -292,7 +292,7 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/prometheus/kubernetes": &vfsgen۰DirInfo{
|
||||
name: "kubernetes",
|
||||
modTime: time.Date(2025, 7, 31, 8, 50, 53, 953644484, time.UTC),
|
||||
modTime: time.Date(2025, 11, 17, 9, 8, 56, 269747477, time.UTC),
|
||||
},
|
||||
"/build/prometheus/kubernetes/kubernetes-prometheusRule.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "kubernetes-prometheusRule.yaml",
|
||||
@@ -345,14 +345,14 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/prometheus/kubernetes/kubernetes-serviceMonitorKubelet.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "kubernetes-serviceMonitorKubelet.yaml",
|
||||
modTime: time.Date(2025, 7, 31, 8, 50, 53, 953683110, time.UTC),
|
||||
uncompressedSize: 1717,
|
||||
modTime: time.Date(2025, 11, 17, 9, 8, 56, 269943343, time.UTC),
|
||||
uncompressedSize: 1718,
|
||||
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xe4\x55\xb1\x6e\xdb\x30\x10\xdd\xfd\x15\x1c\xdb\x04\x96\x90\x55\x6b\x81\x4e\x69\x87\xa6\xc8\x4a\xd0\xd4\xb3\xc4\x5a\xe2\x11\x77\x27\xb7\x06\xf2\xf1\x05\x29\xd9\x71\xec\x18\x45\xc7\xa2\x9b\xfd\xee\xf1\xf8\xee\xdd\x03\xe5\x52\x78\x06\x4b\xa0\xd8\x98\x91\x62\x50\xe2\x10\xbb\xca\x13\x83\xa4\xf2\x34\xd6\xfb\x87\xd5\x2e\xc4\xb6\x31\x4f\xe0\x7d\xf0\xf8\x32\xb3\x56\x23\xd4\xb5\x4e\x5d\xb3\x32\x66\x70\x1b\x0c\x92\x7f\x19\xe3\x52\xaa\x76\xd3\x06\x1c\xa1\x90\x2a\x50\x1d\xdd\x88\xc6\x64\x6c\x80\xde\xe0\x24\xc7\xba\xa6\xed\x4c\x5b\x27\xa6\x11\xda\x63\x92\x1b\xf4\x3d\x62\x4b\x3c\xb3\x25\xf5\x60\xac\x8c\xb9\xbc\x27\xff\x97\xe4\x3c\xce\x79\xeb\xd7\x29\xd7\x72\x10\xc5\xb8\x92\x04\x9f\xb5\x23\xb6\x89\x42\xd4\x32\xc8\xda\x6c\xe0\x18\xfc\x9d\x76\x88\x9f\xc3\x80\xc6\xd4\x7b\xc7\x35\x4f\xb1\x16\x78\x86\x4a\xfd\x56\x93\xcc\xfe\x38\xef\x69\x8a\x5a\x6b\x3e\x58\xe4\xf7\x14\x89\x1f\x67\x8b\x8c\xf2\x84\x82\x86\xa8\xe0\xbd\x1b\x1a\xf3\x30\x16\x60\x84\x72\xf0\xdf\x50\xcc\x0c\xb1\x5b\xfc\x5c\x1b\xe7\xb5\xec\x67\x07\xa4\x02\x19\xc3\xe8\xf0\xeb\x34\xab\x8d\xd4\xc2\xe6\x71\x5f\x8e\x08\x4f\x31\x86\xd8\x59\x4f\x51\x5d\x88\x60\x5b\x54\x5d\xd5\x13\xb5\x17\x95\x3d\x0d\xd3\x08\x2b\xea\x54\xaa\xbb\x13\x9c\x06\x74\x96\x31\x04\x51\xdb\x4e\xec\xb2\x24\x2b\xf0\x14\x5b\xb1\xd5\xfd\xa2\x4b\x68\x62\x8f\xc7\xb3\x38\xe4\x01\x6c\xd1\x66\x6d\x01\x12\xb1\x36\xa6\x57\x4d\xb2\x9e\x47\x9e\x97\xcc\xd7\x83\xdf\xea\xb6\x1c\xb3\xc9\x69\xbf\x74\x35\x46\x1d\x77\xd0\x42\x6e\xcc\x39\xe3\xc2\xc5\x72\x4b\xcb\x74\x61\xe5\x87\x65\x7d\x2f\xc7\x14\x7c\x2c\x75\xf1\x3d\x72\xaa\x8a\xde\x82\xe8\x20\x9f\x28\x6e\x43\x77\x54\x14\xa2\xc0\x4f\x8c\xa7\x5d\x48\xcf\xe0\xb0\x3d\x9c\xb6\xfc\x4f\x84\xe8\x2c\x22\x69\xb2\x93\xb8\x0e\xa7\xc5\x2a\xa9\x1b\x5e\x5e\x19\x23\x46\xe2\xc3\x42\xda\x1c\x14\x72\x5d\xf4\xce\xf7\x38\x83\x23\xf4\x27\xf1\xce\x56\xf7\xf3\x89\x5b\x4d\x33\x29\x47\x52\xa0\x57\xad\xb3\x32\xbf\x15\x5b\xdd\x25\x70\xa0\x77\x94\x25\x26\x0f\x11\xe4\xcc\xbe\xa2\xda\x33\x5c\x2b\
xd5\xdd\x5f\xe4\xd3\x69\xdf\x98\x7a\x49\x50\xed\x5d\xbb\x0f\x42\xfc\xdf\x65\xf7\x07\x6d\x16\x41\xef\xbf\xe8\xe7\x0f\xec\x13\x06\x78\x25\x9e\xdb\x8e\x4e\x7d\xff\x35\xd7\x8e\x66\x94\x47\x7d\x79\x6d\x8d\x91\x6b\xf6\x5b\x9f\xfe\xf4\x09\xf9\x1d\x00\x00\xff\xff\xff\x8d\xbb\xd9\xb5\x06\x00\x00"),
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xe4\x55\xb1\x8e\xdb\x3a\x10\xec\xfd\x15\x2c\xdf\xbb\x83\xa5\x77\xad\xda\x07\xa4\xba\xa4\xc8\x05\xd7\x12\x34\x35\x96\x18\x4b\x5c\x62\x77\xe5\xc4\xc0\x7d\x7c\x40\x4a\xf6\xf9\xec\x33\x82\xa4\x48\x93\xce\x9e\x1d\x2e\x67\x67\x07\x94\x4b\xe1\x19\x2c\x81\x62\x63\x46\x8a\x41\x89\x43\xec\x2a\x4f\x0c\x92\xca\xd3\x58\xef\x1f\x56\xbb\x10\xdb\xc6\x3c\x81\xf7\xc1\xe3\xe3\xcc\x5a\x8d\x50\xd7\x3a\x75\xcd\xca\x98\xc1\x6d\x30\x48\xfe\x65\x8c\x4b\xa9\xda\x4d\x1b\x70\x84\x42\xaa\x40\x75\x74\x23\x1a\x93\xb1\x01\x7a\x83\x93\x1c\xeb\x9a\xb6\x33\x6d\x9d\x98\x46\x68\x8f\x49\x6e\xd0\xf7\x88\x2d\xf1\xcc\x96\xd4\x83\xb1\x32\xe6\xf2\x9e\xfc\x5f\x92\xf3\x38\xe7\xad\x5f\xa7\x5c\xcb\x41\x14\xe3\x4a\x12\x7c\xd6\x8e\xd8\x26\x0a\x51\xcb\x20\x6b\xb3\x81\x63\xf0\x17\xda\x21\x7e\x08\x03\x1a\x53\xef\x1d\xd7\x3c\xc5\x5a\xe0\x19\x2a\xf5\x5b\x4d\x32\xfb\xe3\xbc\xa7\x29\x6a\xad\xf9\x60\x91\xdf\x53\x24\x7e\x9c\x2d\x32\xca\x13\x0a\x1a\xa2\x82\xf7\x6e\x68\xcc\xc3\x58\x80\x11\xca\xc1\x7f\x46\x31\x33\xc4\x6e\xf1\x73\x6d\x9c\xd7\xb2\x9f\x1d\x90\x0a\x64\x0c\xa3\xc3\xf7\xd3\xac\x36\x52\x0b\x9b\xc7\x7d\x39\x22\x3c\xc5\x18\x62\x67\x3d\x45\x75\x21\x82\x6d\x51\x75\x55\x4f\xd4\x5e\x54\xf6\x34\x4c\x23\xac\xa8\x53\xa9\xee\x4e\x70\x1a\xd0\x59\xc6\x10\x44\x6d\x3b\xb1\xcb\x92\xac\xc0\x53\x6c\xc5\x56\xf7\x8b\x2e\xa1\x89\x3d\x1e\xcf\xe2\x90\x07\xb0\x45\x9b\xb5\x05\x48\xc4\xda\x98\x5e\x35\xc9\x7a\x1e\x79\x5e\x32\x5f\x0f\x7e\xab\xdb\x72\xcc\x26\xa7\xfd\xd2\xd5\x18\x75\xdc\x41\x0b\xb9\x31\xe7\x8c\x0b\x17\xcb\x2d\x2d\xd3\x85\x95\xff\x2c\xeb\x7b\x39\xa6\xe0\xdf\x52\x17\xdf\x23\xa7\xaa\xe8\x2d\x88\x0e\xf2\x3f\xc5\x6d\xe8\x8e\x8a\x42\x14\xf8\x89\xf1\xb4\x0b\xe9\x19\x1c\xb6\x87\xd3\x96\xff\x64\x88\xfe\x93\xdf\x4d\xd1\x59\x46\xd2\x64\x27\x71\x1d\x4e\x9b\x55\x52\x37\xbc\xbc\x32\x46\x8c\xc4\x87\x85\xb4\x39\x28\xe4\xba\xe8\x9d\xef\x71\x06\x47\xe8\x37\xe2\x9d\xad\xee\xe7\x13\xb7\x9a\x66\x52\xce\xa4\x40\xaf\x5a\x67\x65\x7e\x2b\xb6\xba\x4b\xe0\x40\xef\x28\x4b\x4c\x1e\x22\xc8\
xa1\x7d\x45\xb5\x67\xb8\x56\xaa\xbb\x5f\x08\xa8\xd3\xbe\x31\xf5\x12\xa1\xda\xbb\x76\x1f\x84\xf8\xaf\x0b\xef\x57\xda\x2c\x82\xde\x7f\xd2\xcf\x5f\xd8\x27\x0c\xf0\x4a\x3c\xb7\x1d\x9d\xfa\xfe\x53\xae\x1d\xcd\x28\xaf\xfa\xf2\xdc\x1a\x23\xd7\xec\xb7\x3e\xfd\xec\x1b\xf2\x23\x00\x00\xff\xff\xa4\x9b\xbc\x26\xb6\x06\x00\x00"),
|
||||
},
|
||||
"/build/prometheus/node-exporter": &vfsgen۰DirInfo{
|
||||
name: "node-exporter",
|
||||
modTime: time.Date(2025, 9, 30, 6, 56, 49, 643873836, time.UTC),
|
||||
modTime: time.Date(2025, 12, 3, 13, 36, 55, 20776255, time.UTC),
|
||||
},
|
||||
"/build/prometheus/node-exporter/node-exporter-clusterRole.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "node-exporter-clusterRole.yaml",
|
||||
@@ -370,10 +370,10 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/prometheus/node-exporter/node-exporter-daemonset.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "node-exporter-daemonset.yaml",
|
||||
modTime: time.Date(2025, 9, 30, 6, 56, 49, 644065414, time.UTC),
|
||||
uncompressedSize: 3536,
|
||||
modTime: time.Date(2025, 12, 3, 13, 36, 55, 20659302, time.UTC),
|
||||
uncompressedSize: 3587,
|
||||
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x56\x51\x6f\xdb\x38\x12\x7e\xcf\xaf\x20\x8a\x3e\x24\xb8\x93\x6c\xa7\x4d\xaf\x15\x90\x87\x5c\xe2\x6b\x02\xb4\x39\xa3\xce\xee\x3e\x2c\xb6\x06\x4d\x8d\x6c\x22\x14\x87\xe5\x0c\x5d\x1b\xd0\x8f\x5f\xd0\x8a\x1d\x49\x76\xdc\x18\xd8\x97\x58\x19\x0e\x3f\x72\x66\xbe\xf9\x86\xd2\xe9\xdf\xc1\x93\x46\x9b\x09\xe9\x1c\xf5\x16\x83\x93\x47\x6d\xf3\x4c\xdc\x48\x28\xd1\x8e\x81\x4f\x4a\x60\x99\x4b\x96\xd9\x89\x10\x46\x4e\xc1\x50\xfc\x12\xd1\x3f\x7d\x0c\x53\xf0\x16\x18\x28\xd5\xd8\x53\x58\x3a\xb4\x60\x39\x13\xb0\x74\xe8\x19\xfc\x0b\x9e\x56\x96\x90\x09\x8b\x39\x24\xbf\xf0\x74\xd2\x73\x82\x45\x26\xa2\x39\x71\x1e\x4b\xe0\x39\x04\x7a\xc1\x7d\xb1\x89\x66\x90\xbe\x4b\x07\x27\x42\xec\x3f\x29\x5a\xc9\x49\x05\x35\x2e\xb9\x39\x78\x48\x4a\xb4\x9a\xd1\x6b\x3b\x4b\x68\x45\x0c\xe5\x09\x39\x50\x31\x5a\x02\x03\x8a\xd1\xd7\x91\x97\x92\xd5\xfc\x4b\x23\x15\xc7\x24\xe3\x98\x74\x1c\x99\x10\x86\xd2\x19\xc9\xf0\x74\xcb\x46\xdd\xd6\x50\xd6\x22\x4b\xd6\x68\xb7\xb7\x16\x6b\x10\xc5\xa6\x73\x46\x0e\x85\x0c\x86\x13\x85\x96\xa5\xb6\xe0\xf7\x5f\xce\xb4\x52\x70\x5c\x12\x8e\x4b\xc3\xd1\xcc\x78\x25\x3b\x84\xd8\x54\x78\xbd\xa1\x28\xb4\xd5\xbc\x7a\x8e\x28\xde\xe5\x6a\xc7\x2a\x84\x87\x1f\x41\x7b\xc8\x6f\x42\x64\xcb\x58\xcd\x21\x0f\x46\xdb\xd9\xdd\xcc\xe2\xd6\x3c\x5c\x82\x0a\x31\xdf\xcd\x9d\x35\xe6\xf8\x89\x4f\x0f\xe0\x4b\x6a\x2f\x27\x35\xbd\x86\x4b\xe7\x81\xa8\x5d\xad\x8d\xc7\x23\xac\x9e\xf2\xe4\xd1\x40\x27\x48\xc8\x67\xd0\xd9\x21\x04\x3a\xf0\x32\x12\x58\xdc\x20\xd0\x3d\xf2\x70\xa9\x89\x9f\xdc\xb6\x65\xde\x1e\x95\x08\xe9\x67\x8d\x83\x13\x91\x24\x3f\x61\x9a\x1a\x4d\x0c\x36\x91\x79\x1e\x2f\x77\x39\x38\xff\x4f\xda\x4f\xfb\xe9\x20\xfb\x34\xe8\xf7\x5b\xde\x4e\xf2\x3c\xa5\x15\x15\x74\xd9\x9b\x23\x71\x8f\x56\xb4\xeb\xe0\x11\x79\xeb\x11\xff\x69\xb9\x58\x4c\x14\x9a\x3a\x53\xe9\x4f\x5d\xe8\xd6\xea\xf3\x92\x72\x21\xd5\xb6\xc0\x17\x96\x2d\xf0\x4f\xf4\x8f\x13\x8f\x81\xe1\x65\x9f\x1c\x16\xe9\x53\x60\xc9\x01\xb4\x42\x1b\xa8\xd5\x21\xd5\x75\xb5\x93\x12\x83\xe5\xc4\xa1\
xb6\x4c\x97\xdf\x7b\xa7\x39\x2c\x2a\xe7\x51\x55\xb4\xa2\x6a\x21\x7d\xcf\xe8\x69\x2f\x47\xf5\x08\xbe\x97\xfe\xeb\xec\xf4\x6d\xd5\x3b\x7b\x3d\x7a\x41\x09\xaf\x1c\xd0\xe5\xf7\x53\x19\x18\x0b\xaa\xa6\xda\x16\x25\x4f\x4a\x4d\xaa\x52\x33\x8f\xc1\x55\x0a\x6d\xa1\x67\x05\x55\x39\x4c\x43\xfd\xbb\x70\xbc\xfe\xe1\xd2\x15\x54\x15\x81\x62\xa3\x57\xf3\x30\x03\x36\xd3\x82\xaa\xf2\x47\x80\x00\x15\x2e\xc0\x1b\xb9\xaa\x2f\x1c\xff\x14\x54\x39\x62\xf4\x50\x79\xa7\x26\x4e\x3b\x28\xa8\x22\x50\xc1\x6b\x5e\xc5\xcf\x58\xd4\x8a\xbd\x54\x50\xd0\xd9\xdb\x6d\x20\xba\x94\x33\xc8\xc4\x14\x94\x91\xd3\x5e\xab\x8b\xb3\x48\x92\xf7\xcf\x5d\x75\xa0\xcf\x37\x07\x5d\xa3\x65\x58\x72\x93\xfb\xce\xeb\x85\x36\x30\x83\x3c\x13\xec\x43\x93\xe4\x3e\xd8\x2b\xfa\x8d\xa2\x50\x3d\xd3\xd0\x03\x61\xf0\x0a\x5a\x0d\x64\x74\xa9\xb9\xd3\x52\xca\x85\x4c\xbc\x19\xbc\x69\x19\x4b\x28\xd1\xaf\x32\x71\xd1\xef\x7f\xd5\x9d\xd6\x07\xda\x0b\x31\xe8\x9f\x97\x7b\x31\x06\x1f\x9b\x18\x0b\x34\xa1\x84\xaf\x91\x34\xad\x16\x5b\xd3\x68\x24\x79\x9e\x89\xba\x21\x62\x35\x1a\x78\x75\xda\x3a\x46\x0f\x32\xff\xbf\x35\xab\x4e\x4a\xf6\xa0\x35\x1b\x70\x03\xd6\xb6\xbd\x1e\xab\xd5\xaa\xe2\x69\xdd\xa3\x93\xb3\xf5\x80\xc9\xc4\x2d\x12\x3f\xe0\xf5\x46\x58\x76\x0e\xee\x00\xec\x3d\x79\x9f\x08\x19\x9c\x31\x12\xe7\xe0\x7d\xcb\xbe\xa6\x0d\x24\x1d\x7d\xfa\xf3\xed\xe9\xdd\xe8\xec\xaf\x5d\x75\x62\x43\x89\xd2\x71\xea\x27\x14\x34\x03\x5d\x3e\x7c\x19\x4f\x86\xd7\x37\xb7\xc3\xc9\xb7\xf1\xd5\xe4\x8f\xbb\x87\xdb\xc9\xd5\x70\x3c\x19\x9c\x7f\x9c\x7c\xbe\xfe\x3a\x19\xdf\x5e\x9d\x5f\x7c\xf8\xf7\xb3\xd7\xf0\xfa\xe6\x17\x7e\x3b\x38\xd7\xff\xbd\x7e\x15\xce\x5e\xbf\x03\x68\xad\xc8\x82\x23\xf6\x20\xcb\xcb\x39\xb3\xcb\x7a\xbd\xb6\x44\xf7\xb6\xbe\x60\x17\xcd\xcc\xd6\x55\xb9\x1b\x35\x6a\xb2\x90\x26\xc0\xff\x3c\x96\x6d\xa6\x17\x1a\x4c\xfe\x0d\x8a\xee\x54\x5a\xdb\x6b\x8e\x10\x4b\x0e\x94\x3a\xcc\x1b\x80\x6d\x79\x58\xcf\x6c\x3f\x95\x2a\x0e\xee\xe5\x2a\xeb\xa7\x83\x4f\x69\xbf\xed\x3c\x0a\xc6\x8c\xd0\x68\xb5\xca\xc4\x5d\x71\x8f\x3c\xf2\x40\x60\xb9\xa3\x23\x1d\xa8\xed\x6a\x14\x96\x16\x7b\
xb6\x63\x6e\x84\x9e\x33\xd1\x22\x85\x10\x91\xd7\x7b\x17\xea\x53\x62\x3a\xe9\x1f\x96\x96\xc1\x31\xd2\xb2\x5f\x59\xce\x9b\x08\x07\xb4\x73\x2d\x90\x9f\xe3\xa8\xc8\xc4\x87\x8b\x8b\x77\xe7\xdd\xb5\x7b\xb4\xdf\x10\xf9\x90\xb2\x36\xf7\xc5\x6c\xdd\xd7\x73\xb5\xb5\x65\x9d\xc5\xbb\x9b\x96\xad\xf9\xe0\x69\xbf\x3c\x9f\x1f\x2d\x48\x99\x30\xda\x86\xe5\xc9\x2f\x42\xe9\xdc\xe7\xfd\xd6\xdf\x2f\xb4\x82\x2b\xa5\xa2\x14\xdd\xbf\x38\x60\x18\x4d\x7c\x08\x35\x9f\x55\x49\xe3\x71\xb4\x7e\x15\x6d\xca\x5c\xeb\x74\xc3\x6f\x1d\x5c\x64\x78\x73\x28\xd5\xaa\xd8\x52\xe5\x1d\xa1\x3e\xb8\xb5\xa9\xc1\x5d\x55\x3e\xb8\xf1\x64\x57\x52\xff\x0e\x00\x00\xff\xff\x5d\xda\x6d\x1d\xd0\x0d\x00\x00"),
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x56\x51\x6f\xdb\x36\x10\x7e\xcf\xaf\x20\x82\x3e\x24\xd8\x24\xdb\xe9\xd2\xb5\x02\xf2\x90\x39\x59\x13\xa0\xcd\x82\x26\xdb\x1e\x86\xd5\xa0\xa9\x93\x4d\x84\xe2\xb1\xbc\xa3\x1b\x03\xfa\xf1\x03\xa5\xd8\x91\x64\xc7\x8d\x81\xbd\xc4\xca\xf1\xe3\x47\xde\xf1\xbb\x8f\x94\x4e\xff\x05\x9e\x34\xda\x4c\x48\xe7\x68\xb0\x18\x1d\x3c\x68\x9b\x67\xe2\x42\x42\x89\xf6\x0e\xf8\xa0\x04\x96\xb9\x64\x99\x1d\x08\x61\xe4\x14\x0c\xc5\x2f\x11\xf1\xe9\x43\x98\x82\xb7\xc0\x40\xa9\xc6\x81\xc2\xd2\xa1\x05\xcb\x99\x80\x47\x87\x9e\xc1\xbf\x80\xb4\xb2\x84\x4c\x58\xcc\x21\xf9\x01\xd2\x49\xcf\x09\x16\x99\x88\xe1\xc4\x79\x2c\x81\xe7\x10\xe8\x05\xf8\x62\x95\xcd\x28\x7d\x9b\x8e\x0e\x84\xd8\xbe\x52\x8c\x92\x93\x0a\x1a\x5e\x72\x73\xf0\x90\x94\x68\x35\xa3\xd7\x76\x96\xd0\x92\x18\xca\x03\x72\xa0\x62\xb6\x04\x06\x14\xa3\x6f\x32\x2f\x25\xab\xf9\xa7\x56\x29\xf6\x29\xc6\x3e\xe5\xd8\xb3\x20\x0c\xa5\x33\x92\xe1\x69\x97\xad\x73\xab\xa9\xac\x45\x96\xac\xd1\xae\x77\x2d\x6a\x12\xc5\xa6\xb7\x46\x0e\x85\x0c\x86\x13\x85\x96\xa5\xb6\xe0\xb7\x6f\xce\x74\x4a\xb0\x5f\x11\xf6\x2b\xc3\xde\xca\x78\xa5\x3a\x84\x58\x9d\x70\x3d\xa1\x28\xb4\xd5\xbc\x7c\xce\x28\xee\xe5\x7c\x23\x2a\x84\x87\x6f\x41\x7b\xc8\x2f\x42\x54\xcb\x9d\x9a\x43\x1e\x8c\xb6\xb3\xeb\x99\xc5\x75\xf8\xf2\x11\x54\x88\xf5\x6e\xcf\x6c\x38\xef\x9e\xf4\x74\x0f\xbe\xa4\xee\x70\xd2\xc8\xeb\xf2\xd1\x79\x20\xea\x9e\xd6\x0a\xf1\x00\xcb\xa7\x3a\x79\x34\xd0\x4b\x12\xf2\x19\xf4\x66\x08\x81\x0e\xbc\x8c\x02\x16\x17\x08\x74\x83\x7c\xf9\xa8\x89\x9f\x60\xce\x6b\xf4\x9a\x97\x63\x23\x89\x6e\xea\x43\x38\x6c\x1a\x20\x51\x26\x10\x83\x4f\x94\xd7\xac\x95\x34\x87\x4f\x53\xd6\xca\x58\xef\x2e\x11\xd2\xcf\x5a\x7b\x4d\x44\x92\x7c\x87\x69\x6a\x34\x31\xd8\x44\xe6\x79\xcc\xe7\x6c\x74\xf2\x6b\x3a\x4c\x87\xe9\x28\xfb\x30\x1a\x0e\x3b\x68\x27\x79\x9e\xd2\x92\x0a\x3a\x1b\xcc\x91\x78\x40\x4b\xda\x04\x78\x44\x5e\x23\xe2\x3f\x1d\x88\xc5\x44\xa1\x69\x8a\x9b\x7e\xd7\x85\xee\x8c\x3e\x0f\x29\x17\x52\x6d\x0b\x7c\x61\xd8\x02\x7f\x47\xff\x30\xf1\
x18\x18\x5e\xc6\xe4\xb0\x48\x9f\x12\x4b\x76\xb0\x15\xda\x40\x53\xcf\x54\x37\x02\x49\x4a\x0c\x96\x13\x87\xda\x32\x9d\x7d\x1d\x1c\xe5\xb0\xa8\x9c\x47\x55\xd1\x92\xaa\x85\xf4\x03\xa3\xa7\x83\x1c\xd5\x03\xf8\x41\xfa\xd3\xf1\xd1\x9b\x6a\x70\xfc\x7a\xf6\x82\x12\x5e\x3a\xa0\xb3\xaf\x47\x32\x30\x16\x54\x4d\xb5\x2d\x4a\x9e\x94\x9a\x54\xa5\x66\x1e\x83\xab\x14\xda\x42\xcf\x0a\xaa\x72\x98\x86\xe6\x77\xe1\xb8\xfe\xe1\xd2\x15\x54\x15\x81\xa2\x37\x54\xf3\x30\x03\x36\xd3\x82\xaa\xf2\x5b\x80\x00\x15\x2e\xc0\x1b\xb9\x6c\x36\x1c\xff\x14\x54\x39\x62\xf4\x50\x79\xa7\x26\x4e\x3b\x28\xa8\x22\x50\x21\xaa\x2a\x7e\xc6\x43\xad\xd8\x4b\x05\x05\x1d\xbf\x59\x27\xa2\x4b\x39\x83\x4c\x4c\x41\x19\x39\x1d\x74\x1a\x3f\x8b\x22\x39\x7d\x6e\xc4\x1d\xd6\xb0\x5a\x68\x8c\x96\xe1\x91\xdb\xed\xe2\xbc\x5e\x68\x03\x33\xc8\x33\xc1\x3e\xb4\xfb\xc2\x07\x7b\x4e\x7f\x52\xf4\xb6\x67\x19\x7a\x20\x0c\x5e\x41\xa7\xe7\x8c\x2e\x35\xf7\xba\x50\xb9\x90\x89\xc3\xd1\x61\x27\x58\x42\x89\x7e\x99\x89\xd3\xe1\xf0\xb3\xee\xb9\x05\xd0\x56\x8a\xd1\xf0\xa4\xdc\xca\x31\x7a\xdf\xe6\x58\xa0\x09\x25\x7c\x8e\xa2\xe9\xb4\x58\x2d\xa3\x5b\xc9\xf3\x4c\x34\x0d\x11\x4f\xa3\xc5\xd7\x94\xad\x17\xf4\x20\xf3\x3f\xac\x59\xf6\x4a\xb2\x85\xad\xdd\x80\x2b\xb2\x6e\xec\xf5\x5c\x9d\x56\x15\x4f\xe3\x1e\x9d\x9c\xd5\x77\x52\x26\xae\x90\xf8\x1e\xc7\x2b\x63\xd9\x58\xb8\x47\xb0\x75\xe5\x6d\x26\x64\x70\xc6\x48\x9c\x83\xf7\x9d\x78\x2d\x1b\x48\x7a\xfe\xf4\xcf\x9b\xa3\xeb\xdb\xe3\x7f\x37\xdd\x89\x0d\x25\x4a\xc7\x87\x42\x42\x41\x33\xd0\xd9\xfd\xa7\xbb\xc9\xe5\xf8\xe2\xea\x72\xf2\xe5\xee\x7c\xf2\xf7\xf5\xfd\xd5\xe4\xfc\xf2\x6e\x32\x3a\x79\x3f\xf9\x38\xfe\x3c\xb9\xbb\x3a\x3f\x39\x7d\xf7\xf3\x33\xea\x72\x7c\xf1\x03\xdc\x06\xcf\xf8\xb7\xf1\xab\x78\xb6\xe2\x76\xb0\x75\x32\x0b\x8e\xd8\x83\x2c\xcf\xe6\xcc\x2e\x1b\x0c\xba\x16\x3d\x58\x63\xc1\x2e\xda\x95\x6d\x4e\xe5\xfa\xb6\x75\x26\x0b\x69\x02\xfc\xee\xb1\xec\x2a\xbd\xd0\x60\xf2\x2f\x50\xf4\x2f\xb2\x3a\xde\x68\x84\x58\x72\xa0\xd4\x61\xde\x22\xec\xda\x43\x7d\xcd\xfb\xa9\x54\xf1\xae\x7f\x5c\x66\xc3\x74\xf4\x21\x1d\x76\xc1\xb7\
xc1\x98\x5b\x34\x5a\x2d\x33\x71\x5d\xdc\x20\xdf\x7a\x20\xb0\xdc\xf3\x91\x1e\xd5\x7a\x34\x1a\x4b\x47\x3d\xeb\x6b\xee\x16\x3d\x67\xa2\x23\x0a\x21\xa2\xae\xb7\x0e\x34\xab\xc4\x72\xd2\xff\x6c\x2d\xa3\x7d\xac\x65\xbb\xb3\x9c\xb4\x19\x76\x78\x67\x6d\x90\x1f\xe3\x55\x91\x89\x77\xa7\xa7\x6f\x4f\xfa\x63\x37\x68\xbf\x20\xf2\x2e\x67\x6d\xcf\x8b\xd5\xba\x69\xee\xd5\xce\x94\xba\x8a\xd7\x17\x9d\x58\xfb\x8d\xd4\x7d\xac\x3e\xbf\x73\x90\x32\x61\xb4\x0d\x8f\x07\x3f\x48\xa5\xb7\x9f\x5f\xd6\x78\xbf\xd0\x0a\xce\x95\x8a\x56\x74\xf3\xe2\x05\xc3\x68\xe2\xdb\xa9\xfd\x12\x4b\x5a\xef\xa9\xfa\x21\xb5\x3a\xe6\xc6\xa7\x5b\xb8\x3a\xb9\xa8\xf0\xf6\xa5\xd4\xb8\x62\xc7\x95\x37\x8c\x7a\xe7\xd4\xb6\x07\xf7\x5d\x79\xe7\xc4\x83\x4d\x4b\xfd\x2f\x00\x00\xff\xff\x16\xe8\xae\x25\x03\x0e\x00\x00"),
|
||||
},
|
||||
"/build/prometheus/node-exporter/node-exporter-prometheusRule.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "node-exporter-prometheusRule.yaml",
|
||||
@@ -405,7 +405,7 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/prometheus/prometheus": &vfsgen۰DirInfo{
|
||||
name: "prometheus",
|
||||
modTime: time.Date(2025, 7, 31, 8, 50, 53, 956818053, time.UTC),
|
||||
modTime: time.Date(2025, 11, 20, 6, 44, 5, 168147675, time.UTC),
|
||||
},
|
||||
"/build/prometheus/prometheus/prometheus-clusterRole.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "prometheus-clusterRole.yaml",
|
||||
@@ -430,10 +430,10 @@ var assets = func() http.FileSystem {
|
||||
},
|
||||
"/build/prometheus/prometheus/prometheus-prometheus.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "prometheus-prometheus.yaml",
|
||||
modTime: time.Date(2025, 7, 31, 8, 50, 53, 956443425, time.UTC),
|
||||
uncompressedSize: 2390,
|
||||
modTime: time.Date(2025, 11, 20, 6, 44, 5, 168271717, time.UTC),
|
||||
uncompressedSize: 2437,
|
||||
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x95\x5f\x6f\xe3\x36\x0c\xc0\xdf\xfd\x29\x88\xdb\xb3\xd3\xa4\x3b\x60\x83\xde\x8a\xae\x38\x1c\xb6\x2b\x86\xdd\x6d\xef\x8a\xcc\x24\x42\x25\x51\xa5\xa8\x5c\x82\x61\xdf\x7d\x90\xed\xc4\x7f\xd6\xa6\x29\x30\x3f\xd9\x14\x49\xf3\xcf\x8f\x94\x8e\xf6\x2f\xe4\x64\x29\x28\xf0\x14\xac\x10\xdb\xb0\x5d\x18\x62\xa4\xb4\x30\xe4\x6f\xf6\xab\xea\xc9\x86\x46\xc1\xef\x4c\x1e\x65\x87\x39\x55\x1e\x45\x37\x5a\xb4\xaa\x00\x9c\x5e\xa3\x4b\xe5\x0d\x40\xc7\xb8\x78\xca\x6b\xe4\x80\x82\x69\x61\xe9\xc6\x90\x8f\x14\x30\x88\x82\x38\xd8\xbf\xac\x6b\x43\x12\x1d\x0c\x2a\x78\xfa\xf9\x35\x9d\xa0\x3d\x5e\xe1\x2a\x6a\x96\x9a\x36\x0a\x8a\xb8\x7e\x53\x7d\x7f\x2a\xc1\xed\xe2\xc7\x8f\x8b\x65\x05\xd0\xfd\xa7\x8b\xa3\xbc\xa7\xa8\xdb\xc0\xf2\x1a\x53\xdc\x21\x63\x3d\x54\xab\x4e\xc7\x24\xe8\xab\x14\xd1\x94\x3a\xe8\xcd\xc6\x06\x2b\xc7\xae\x26\x81\x1a\xbc\x9b\x48\x00\x22\xe3\x06\x99\xb1\xf9\x25\x17\x07\x5f\xcd\x0e\x9b\xec\x6c\xd8\x7e\xde\x06\x3a\x8b\x1f\x0e\x68\xb2\x94\xb8\x7a\xb3\xba\x37\xc4\x52\xa4\x5e\x56\x1e\xaf\xc5\xec\x1e\x0e\x91\x31\x95\x34\xd2\xf8\xac\x86\x27\x3c\xaa\x36\x8a\x9a\xc9\xe1\x2c\xf3\x21\x8b\x91\x0d\x00\x45\x64\x2d\xc4\x0a\x1e\x0e\x36\x49\x3a\x1f\x7e\x47\xbb\xdd\x89\x82\xd5\x72\xd9\xca\x22\x35\x77\x41\xec\xff\x97\x20\x35\x27\x5f\xdf\x90\xfd\x38\x93\x16\xb4\xaf\xe8\xd0\x94\xc0\x26\xe1\xb6\x05\xf8\x6d\x04\xe2\xf0\xbc\x07\xc9\x4b\x56\xff\x85\xf3\x92\xf6\x8b\x98\x5e\x32\xb8\x08\x6c\xf7\x9c\x31\x9c\xf7\xf7\x12\x93\x83\xa2\x50\x24\x47\xdb\xe3\xaf\x05\x87\xe9\xcf\x77\x94\xa4\x78\x7f\xa5\xcb\xda\x21\x8b\x0d\xdb\x7e\xc6\xcb\x97\xd7\x41\x6f\x91\xfb\x48\x6a\x18\xaf\x91\xfd\x6d\x35\x04\xac\x26\xfa\xb5\xd7\x36\x54\xb3\x74\xd4\x35\x19\x44\x62\x51\xf0\x1d\xd7\x15\x00\xee\xb5\xcb\xba\x70\xf3\x39\x08\xf2\x5e\x3b\x05\xab\xa2\x88\x07\x41\x0e\xda\xf5\x24\xc0\xdf\xff\x54\x00\xd6\xeb\x6d\xdf\x8b\x9b\xa1\xae\x6a\x3f\x8c\x3a\x35\x38\xc5\x6a\x5a\x1d\x4a\x0a\x9c\x0d\xf9\x50\xb5\xb8\x7f\x19\xed\xbe\xe9\xf6\x7b\x2f\x6c\xd7\x41\x76\x35\x5c\xef\x86\xea\x9a\
x3d\x28\xe4\xca\x26\x38\x6d\x95\x1a\x70\xb3\x41\x23\x0a\x1e\xa9\x1f\xea\x8e\x9a\x76\xc9\x34\xd8\x58\xa3\x05\x9b\x6a\xb6\x43\x9e\xb3\x76\xad\xac\x74\x0e\xc7\x37\x4d\x5f\xd3\xee\xfb\xf1\x84\xc4\xb9\x1d\x5d\x0b\x07\x8d\xf9\x01\xd3\x1a\x5f\xb5\x2a\x87\x33\xd9\x73\x46\xee\x97\x94\xd7\x87\x7b\x0a\x26\x73\x59\xa7\xc7\x96\xf5\x92\x30\x63\x74\xd6\xe8\xa4\x60\x05\x3f\x80\x50\x43\xad\x30\x51\xe6\xf3\xe4\x39\xeb\xad\x9c\x9b\x6e\x62\x56\xf0\x61\xf5\xa1\xff\xf4\xe8\x89\x8f\x0a\x3e\x7e\xb2\xad\x84\xf1\x39\x63\x9a\xa9\xdf\x2e\x97\x7e\xae\xbf\x5c\x7e\x29\x16\x9c\xdd\xab\x29\x95\xb3\x29\xaa\x2f\x6c\xbe\x11\xe4\x23\x8c\xca\xee\xef\x87\xb1\x2e\x6e\x8a\x3c\x19\xd6\x11\x67\x43\x94\xd0\x64\xb6\x72\xbc\xa7\x20\x78\x90\xce\xeb\x26\x7d\x62\xca\x51\x41\xb7\xf5\x39\x87\xbb\xf4\x48\xe1\x0f\x22\x51\xb0\xd1\x2e\xe1\x20\xff\x33\x21\x77\x8a\x09\x79\x6f\x0d\xde\x19\x43\x39\xc8\xe3\x0c\xdb\xba\x0b\xae\x57\x7a\x03\x81\xa9\xd6\xbc\x2c\x28\x18\xda\x7b\x04\x7e\x2a\xec\x25\x21\x2e\x43\xdf\x21\x47\x2e\x7b\xbc\x77\xda\xfa\x6f\xe8\xa3\xd3\x72\xbe\x3b\x4f\x17\x76\x5f\xa1\x69\x93\x4f\xc2\x69\xf7\x7a\xc3\xfe\x07\x70\xbb\x6c\xbb\x3c\x9f\x9b\x7f\x03\x00\x00\xff\xff\xf1\x72\x17\xdb\x56\x09\x00\x00"),
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x55\x4d\x6f\xe3\x36\x10\xbd\xeb\x57\x0c\xd2\xb3\x1c\x27\x5d\xa0\x05\x6f\x41\x1a\x2c\x16\xed\x1a\x45\x77\xdb\x3b\x4d\x8d\x6d\x22\x24\x87\x19\x0e\xbd\x36\x8a\xfe\xf7\x82\x92\x6c\x7d\x34\xf1\x3a\x40\x75\x12\x87\x6f\x86\xf3\xf1\xf8\xa8\xa3\xfd\x0b\x39\x59\x0a\x0a\x3c\x05\x2b\xc4\x36\x6c\x17\x86\x18\x29\x2d\x0c\xf9\xdb\xfd\x5d\xf5\x6c\x43\xa3\xe0\x77\x26\x8f\xb2\xc3\x9c\x2a\x8f\xa2\x1b\x2d\x5a\x55\x00\x4e\xaf\xd1\xa5\xf2\x07\xa0\x63\x5c\x3c\xe7\x35\x72\x40\xc1\xb4\xb0\x74\x6b\xc8\x47\x0a\x18\x44\x41\x1c\xfc\x5f\xc7\xda\x90\x44\x07\x83\x0a\x9e\x7f\x7e\x0b\x13\xb4\xc7\x2b\x42\x45\xcd\x52\xd3\x46\x41\x31\xd7\xdf\x85\xef\x4f\x2d\xb8\x5f\xfc\xf8\x61\xb1\xac\x00\xba\x73\xba\x3c\xca\x7f\x8a\xba\x4d\x2c\xaf\x31\xc5\x1d\x32\xd6\x43\xb7\xea\x74\x4c\x82\xbe\x4a\x11\x4d\xe9\x43\x64\x4b\x6c\xe5\xf8\xe8\x74\x4a\xab\x36\xd0\x4d\x07\xa9\x8d\xcb\x49\x90\x6b\xc3\x56\xac\xd1\xee\xa6\x02\xd0\x9b\x8d\x0d\x56\x8e\x5d\x0b\x03\x35\xf8\x30\xb1\x94\x80\xb8\x41\x66\x6c\x7e\xc9\xe5\xbc\x2f\x66\x87\x4d\x76\x36\x6c\x3f\x6d\x03\x9d\xcd\x4f\x07\x34\x59\x4a\x19\xbd\x5b\xdd\x3b\x62\xe9\x69\x6f\x2b\x9f\xd7\x62\x76\x4f\x87\xc8\x98\x4a\xd5\x69\xbc\x57\xc3\x33\x1e\x55\x9b\x45\xcd\xe4\x70\xd6\xa8\xa1\xe8\x91\x0f\x00\x45\x64\x2d\xc4\x0a\x9e\x0e\x36\x49\x3a\x6f\x7e\x43\xbb\xdd\x89\x82\xbb\xe5\xb2\xb5\x45\x6a\x1e\x82\xd8\xff\xaf\x40\x6a\x4e\xb1\xbe\x22\xfb\x71\x25\x2d\x2f\xbf\xa0\x43\x53\x12\x9b\xa4\xdb\x36\xe0\xb7\x11\x6f\x87\xef\x3d\x0c\xbe\xe4\xf5\x5f\x2e\x5f\x42\xbf\xca\xea\x4b\x0e\x17\xf9\xdd\x7d\x67\xd6\xce\xe7\x7b\x89\xc2\x03\x50\x28\x92\xa3\xed\xf1\xd7\x42\x87\xe9\xe1\x3b\x4a\x52\xa2\xbf\x31\x65\xed\x90\xc5\x86\x6d\x2f\x09\x65\xe5\x75\xd0\x5b\xe4\x3e\x93\x1a\xc6\xaa\xb3\xbf\xaf\x86\x84\xd5\x04\x5f\x7b\x6d\x43\x35\x2b\x47\x5d\x53\x41\x24\x16\x05\xdf\x70\x5d\x01\xe0\x5e\xbb\xac\x0b\x6f\x3e\x05\x41\xde\x6b\xa7\xe0\xae\x00\xf1\x20\xc8\x41\xbb\x9e\x09\xf0\xf7\x3f\x15\x80\xf5\x7a\xdb\xcf\xe2\x76\xe8\xab\xda\x0f\xca\x40\x0d\x4e\x69\x35\xed\
x0e\x25\x05\xce\x86\x7c\xa8\x5a\xba\x7f\x1e\x49\xe5\x54\x2c\xdf\x4b\xb6\xeb\x48\x76\x35\xb9\xde\x4d\xaa\x6b\x64\x53\xc8\x15\x25\x38\xa9\x4a\x0d\xb8\xd9\xa0\x11\x05\x2b\xea\x2f\x75\xc7\x9a\x56\x64\x1a\x6c\xac\xd1\x82\x4d\x35\xd3\x90\x97\xac\x5d\x6b\x2b\x93\xc3\xf1\xc3\xd4\xf7\xb4\x5b\xaf\x4e\x94\x38\x8f\xa3\x1b\xe1\x80\x98\x6f\x30\xad\xf1\x4d\xaf\xb2\x39\xb3\xbd\x64\xe4\x5e\xa4\xbc\x3e\x3c\x52\x30\x99\x8b\x9c\x1e\x5b\xae\x97\x82\x19\xa3\xb3\x46\x27\x05\x77\xf0\x03\x08\x35\xd4\x1a\x13\x65\x3e\xdf\x3c\x67\xbd\x95\xf3\xd0\x4d\xcc\x0a\x6e\xee\x6e\xfa\xa5\x47\x4f\x7c\x54\xf0\xe1\xa3\x6d\x2d\x8c\x2f\x19\xd3\x0c\x7e\xbf\x5c\xfa\x39\x7e\xb9\xfc\x5c\x3c\x38\xbb\x37\x4b\x2a\x7b\x53\xaa\xbe\xa2\x7c\x23\x92\x8f\x68\x54\xb4\xbf\xbf\x8c\x75\x09\x53\xec\xc9\xb0\x8e\x38\xbb\x44\x09\x4d\x6e\x1f\x3b\x0a\x82\x07\xe9\xa2\x6e\xd2\x47\xa6\x1c\x15\x74\xaa\xcf\x39\x3c\xa4\x15\x85\x3f\x88\x44\xc1\x46\xbb\x84\x83\xfd\xcf\x84\xdc\x01\x13\xf2\xde\x1a\x7c\x30\x86\x72\x90\xd5\x8c\xb6\x75\x97\x5c\x0f\xfa\x0e\x05\xa6\xa8\x79\x5b\x50\x30\xb4\xef\x08\xfc\x54\xb8\x97\x84\xb8\x5c\xfa\x8e\x72\xe4\xb2\xc7\x47\xa7\xad\xff\x8a\x3e\x3a\x2d\xe7\xb7\xf3\xf4\xbe\xf7\x1d\x9a\x0e\xf9\x64\x9c\x4e\xaf\x77\xec\x0f\x80\xfb\x65\x3b\xe5\xf9\xbd\xf9\x37\x00\x00\xff\xff\xf9\x77\x43\xb8\x85\x09\x00\x00"),
|
||||
},
|
||||
"/build/prometheus/prometheus/prometheus-prometheusRule.yaml": &vfsgen۰CompressedFileInfo{
|
||||
name: "prometheus-prometheusRule.yaml",
|
||||
|
||||
@@ -11,5 +11,5 @@ data:
|
||||
notification:
|
||||
endpoint: http://notification-manager-svc.kubesphere-monitoring-system.svc:19093
|
||||
terminal:
|
||||
image: alpine:3.14
|
||||
timeout: 600
|
||||
image: beclab/alpine:3.14
|
||||
timeout: 7200
|
||||
|
||||
@@ -28,6 +28,7 @@ spec:
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
priorityClassName: "system-cluster-critical"
|
||||
containers:
|
||||
- command:
|
||||
- ks-apiserver
|
||||
|
||||
@@ -35,6 +35,7 @@ spec:
|
||||
hostPath:
|
||||
path: /etc/localtime
|
||||
type: ""
|
||||
priorityClassName: "system-cluster-critical"
|
||||
containers:
|
||||
- args:
|
||||
- --host=127.0.0.1
|
||||
|
||||
@@ -29,7 +29,7 @@ spec:
|
||||
insecureSkipVerify: true
|
||||
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
honorLabels: true
|
||||
interval: 1m
|
||||
interval: 10s
|
||||
metricRelabelings:
|
||||
- action: keep
|
||||
regex: container_cpu_usage_seconds_total|container_memory_usage_bytes|container_memory_cache|container_network_.+_bytes_total|container_memory_working_set_bytes|container_cpu_cfs_.*periods_total|container_processes.*|container_threads.*
|
||||
|
||||
@@ -31,6 +31,7 @@ spec:
|
||||
- matchExpressions:
|
||||
- key: node-role.kubernetes.io/edge
|
||||
operator: DoesNotExist
|
||||
priorityClassName: "system-cluster-critical"
|
||||
containers:
|
||||
- args:
|
||||
- --web.listen-address=127.0.0.1:9100
|
||||
@@ -42,7 +43,7 @@ spec:
|
||||
- --collector.netdev.address-info
|
||||
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
|
||||
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
|
||||
image: beclab/node-exporter:0.0.4
|
||||
image: beclab/node-exporter:0.0.5
|
||||
name: node-exporter
|
||||
securityContext:
|
||||
privileged: true
|
||||
|
||||
@@ -10,6 +10,7 @@ metadata:
|
||||
name: k8s
|
||||
namespace: kubesphere-monitoring-system
|
||||
spec:
|
||||
priorityClassName: "system-cluster-critical"
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
|
||||
41
cli/pkg/kubesphere/plugins/ks_config.go
Normal file
41
cli/pkg/kubesphere/plugins/ks_config.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package plugins
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
cc "github.com/beclab/Olares/cli/pkg/core/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/logger"
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
|
||||
type ApplyKsConfigManifests struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *ApplyKsConfigManifests) Execute(runtime connector.Runtime) error {
|
||||
config, err := ctrl.GetConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var appKsConfigName = common.ChartNameKsConfig
|
||||
var appPath = path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, appKsConfigName)
|
||||
|
||||
actionConfig, settings, err := utils.InitConfig(config, common.NamespaceKubesphereSystem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var values = make(map[string]interface{})
|
||||
if err := utils.UpgradeCharts(context.Background(), actionConfig, settings, appKsConfigName,
|
||||
appPath, "", common.NamespaceKubesphereSystem, values, false); err != nil {
|
||||
logger.Errorf("failed to install %s chart: %v", appKsConfigName, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
30
cli/pkg/pipelines/gpu_disable_nouveau.go
Normal file
30
cli/pkg/pipelines/gpu_disable_nouveau.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package pipelines
|
||||
|
||||
import (
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/module"
|
||||
"github.com/beclab/Olares/cli/pkg/core/pipeline"
|
||||
"github.com/beclab/Olares/cli/pkg/gpu"
|
||||
)
|
||||
|
||||
func DisableNouveau() error {
|
||||
arg := common.NewArgument()
|
||||
arg.SetConsoleLog("gpudisable-nouveau.log", true)
|
||||
|
||||
runtime, err := common.NewKubeRuntime(common.AllInOne, *arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p := &pipeline.Pipeline{
|
||||
Name: "DisableNouveau",
|
||||
Modules: []module.Module{
|
||||
&gpu.DisableNouveauModule{},
|
||||
},
|
||||
Runtime: runtime,
|
||||
}
|
||||
|
||||
return p.Start()
|
||||
}
|
||||
|
||||
|
||||
@@ -1,103 +0,0 @@
|
||||
package pipelines
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/options"
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/logger"
|
||||
"github.com/beclab/Olares/cli/pkg/core/module"
|
||||
"github.com/beclab/Olares/cli/pkg/core/pipeline"
|
||||
"github.com/beclab/Olares/cli/pkg/gpu"
|
||||
"github.com/beclab/Olares/cli/pkg/manifest"
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
)
|
||||
|
||||
func UpgradeGpuDrivers(opt *options.InstallGpuOptions) error {
|
||||
arg := common.NewArgument()
|
||||
arg.SetOlaresVersion(opt.Version)
|
||||
arg.SetCudaVersion(opt.Cuda)
|
||||
arg.SetBaseDir(opt.BaseDir)
|
||||
arg.SetConsoleLog("gpuupgrade.log", true)
|
||||
runtime, err := common.NewKubeRuntime(common.AllInOne, *arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifestFile := path.Join(runtime.GetInstallerDir(), "installation.manifest")
|
||||
|
||||
runtime.Arg.SetManifest(manifestFile)
|
||||
|
||||
manifestMap, err := manifest.ReadAll(runtime.Arg.Manifest)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
||||
p := &pipeline.Pipeline{
|
||||
Name: "UpgradeGpuDrivers",
|
||||
Modules: []module.Module{
|
||||
&gpu.ExitIfNoDriverUpgradeNeededModule{},
|
||||
&gpu.UninstallCudaModule{},
|
||||
&gpu.InstallDriversModule{
|
||||
ManifestModule: manifest.ManifestModule{
|
||||
Manifest: manifestMap,
|
||||
BaseDir: runtime.Arg.BaseDir,
|
||||
},
|
||||
FailOnNoInstallation: true,
|
||||
SkipNVMLCheckAfterInstall: true,
|
||||
},
|
||||
&gpu.InstallContainerToolkitModule{
|
||||
ManifestModule: manifest.ManifestModule{
|
||||
Manifest: manifestMap,
|
||||
BaseDir: runtime.Arg.BaseDir,
|
||||
},
|
||||
// when nvidia driver is just upgraded
|
||||
// nvidia-smi will fail to execute
|
||||
SkipCudaCheck: true,
|
||||
},
|
||||
&gpu.RestartContainerdModule{},
|
||||
&gpu.NodeLabelingModule{},
|
||||
},
|
||||
Runtime: runtime,
|
||||
}
|
||||
|
||||
if err := p.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("The GPU driver has been upgraded, for it to work properly, the machine needs to be rebooted.")
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
for {
|
||||
fmt.Printf("Reboot now? [yes/no]: ")
|
||||
input, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed to read user input for reboot confirmation: %v", err)
|
||||
}
|
||||
input = strings.ToLower(strings.TrimSpace(input))
|
||||
if input == "" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix("yes", input) {
|
||||
output, err := exec.Command("reboot").CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to reboot: %v", err)
|
||||
}
|
||||
fmt.Println(string(output))
|
||||
return nil
|
||||
} else if strings.HasPrefix("no", input) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -675,7 +675,7 @@ func (m *ChangeIPModule) addKubernetesTasks() {
|
||||
},
|
||||
&task.LocalTask{
|
||||
Name: "RegenerateK8sFilesWithKubeadm",
|
||||
Action: new(RegenerateFilesForK8sIPChange),
|
||||
Action: new(RegenerateFilesForK8s),
|
||||
},
|
||||
&task.LocalTask{
|
||||
Name: "CopyNewKubeConfig",
|
||||
|
||||
@@ -366,11 +366,11 @@ func (a *ApplySystemEnv) Execute(runtime connector.Runtime) error {
|
||||
envItem.Default = procVal
|
||||
}
|
||||
|
||||
err = apputils.CheckEnvValueByType(envItem.Value, envItem.Type)
|
||||
err = envItem.ValidateValue(envItem.Value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid system env value: %s", envItem.Value)
|
||||
}
|
||||
err = apputils.CheckEnvValueByType(envItem.Default, envItem.Type)
|
||||
err = envItem.ValidateValue(envItem.Default)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid system env default value: %s", envItem.Value)
|
||||
}
|
||||
|
||||
@@ -447,11 +447,11 @@ func (a *PrepareFilesForK8sIPChange) Execute(runtime connector.Runtime) error {
|
||||
})
|
||||
}
|
||||
|
||||
type RegenerateFilesForK8sIPChange struct {
|
||||
type RegenerateFilesForK8s struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (a *RegenerateFilesForK8sIPChange) Execute(runtime connector.Runtime) error {
|
||||
func (a *RegenerateFilesForK8s) Execute(runtime connector.Runtime) error {
|
||||
initCmd := "/usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --skip-phases=preflight,mark-control-plane,bootstrap-token,addon,show-join-command"
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd(initCmd, false, false); err != nil {
|
||||
|
||||
110
cli/pkg/upgrade/1_12_2.go
Normal file
110
cli/pkg/upgrade/1_12_2.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
"github.com/beclab/Olares/cli/pkg/gpu"
|
||||
"github.com/beclab/Olares/cli/version"
|
||||
)
|
||||
|
||||
var version_1_12_2 = semver.MustParse("1.12.2")
|
||||
|
||||
type upgrader_1_12_2 struct {
|
||||
breakingUpgraderBase
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_2) Version() *semver.Version {
|
||||
cliVersion, err := semver.NewVersion(version.VERSION)
|
||||
// tolerate local dev version
|
||||
if err != nil {
|
||||
return version_1_12_2
|
||||
}
|
||||
if samePatchLevelVersion(version_1_12_2, cliVersion) && getReleaseLineOfVersion(cliVersion) == mainLine {
|
||||
return cliVersion
|
||||
}
|
||||
return version_1_12_2
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_2) AddedBreakingChange() bool {
|
||||
if u.Version().Equal(version_1_12_2) {
|
||||
// if this version introduced breaking change
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func nvidiactkNeedsMigration() (bool, error) {
|
||||
_, err := exec.LookPath("nvidia-ctk")
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
out, err := exec.Command("nvidia-ctk", "-v").Output()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
lines := strings.Split(string(out), "\n")
|
||||
var version *semver.Version
|
||||
for _, line := range lines {
|
||||
var versionStr string
|
||||
if n, err := fmt.Sscanf(line, "NVIDIA Container Toolkit CLI version %s", &versionStr); n == 1 && err == nil {
|
||||
versionStr = strings.TrimSpace(versionStr)
|
||||
version, err = semver.NewVersion(versionStr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if version == nil {
|
||||
return false, fmt.Errorf("failed to parse nvidia-ctk version")
|
||||
}
|
||||
minVer := semver.MustParse("1.18.0")
|
||||
if version.GreaterThanEqual(minVer) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_2) PrepareForUpgrade() []task.Interface {
|
||||
var preTasks []task.Interface
|
||||
needsMigration, err := nvidiactkNeedsMigration()
|
||||
if err != nil || needsMigration {
|
||||
preTasks = append(preTasks,
|
||||
&task.LocalTask{
|
||||
Name: "InstallNvidiaContainerToolkit",
|
||||
Action: new(gpu.InstallNvidiaContainerToolkit),
|
||||
Retry: 5,
|
||||
Delay: 10 * time.Second,
|
||||
},
|
||||
&task.LocalTask{
|
||||
Name: "ConfigureContainerdRuntime",
|
||||
Action: new(gpu.ConfigureContainerdRuntime),
|
||||
Retry: 5,
|
||||
Delay: 10 * time.Second,
|
||||
},
|
||||
)
|
||||
}
|
||||
preTasks = append(preTasks, u.upgraderBase.PrepareForUpgrade()...)
|
||||
return preTasks
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_2) UpgradeSystemComponents() []task.Interface {
|
||||
var preTasks []task.Interface
|
||||
preTasks = append(preTasks,
|
||||
&task.LocalTask{
|
||||
Name: "UpgradeL4",
|
||||
Action: new(upgradeL4BFLProxy),
|
||||
Retry: 5,
|
||||
Delay: 10 * time.Second,
|
||||
})
|
||||
return append(preTasks, u.upgraderBase.UpgradeSystemComponents()...)
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerMainUpgrader(upgrader_1_12_2{})
|
||||
}
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/logger"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apixclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -46,7 +45,7 @@ func (u upgrader_1_12_2_20251020) UpgradeSystemComponents() []task.Interface {
|
||||
},
|
||||
&task.LocalTask{
|
||||
Name: "UpgradeL4BflProxy",
|
||||
Action: new(upgradeL4),
|
||||
Action: &upgradeL4BFLProxy{Tag: "v0.3.6"},
|
||||
Retry: 3,
|
||||
Delay: 5 * time.Second,
|
||||
},
|
||||
@@ -147,20 +146,6 @@ func (d *deleteUserEnvsIfExists) Execute(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type upgradeL4 struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (u *upgradeL4) Execute(runtime connector.Runtime) error {
|
||||
if _, err := runtime.GetRunner().SudoCmd(
|
||||
"/usr/local/bin/kubectl set image deployment/l4-bfl-proxy proxy=beclab/l4-bfl-proxy:v0.3.6 -n os-network", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to upgrade L4 network proxy")
|
||||
}
|
||||
|
||||
logger.Infof("L4 upgrade to version v0.3.5 completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerDailyUpgrader(upgrader_1_12_2_20251020{})
|
||||
}
|
||||
|
||||
22
cli/pkg/upgrade/1_12_3_20251112.go
Normal file
22
cli/pkg/upgrade/1_12_3_20251112.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
)
|
||||
|
||||
type upgrader_1_12_3_20251112 struct {
|
||||
breakingUpgraderBase
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251112) Version() *semver.Version {
|
||||
return semver.MustParse("1.12.3-20251112")
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251112) PrepareForUpgrade() []task.Interface {
|
||||
return append(regenerateKubeFiles(), u.upgraderBase.PrepareForUpgrade()...)
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerDailyUpgrader(upgrader_1_12_3_20251112{})
|
||||
}
|
||||
32
cli/pkg/upgrade/1_12_3_20251114.go
Normal file
32
cli/pkg/upgrade/1_12_3_20251114.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
)
|
||||
|
||||
type upgrader_1_12_3_20251114 struct {
|
||||
breakingUpgraderBase
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251114) Version() *semver.Version {
|
||||
return semver.MustParse("1.12.3-20251114")
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251114) UpgradeSystemComponents() []task.Interface {
|
||||
pre := []task.Interface{
|
||||
&task.LocalTask{
|
||||
Name: "UpgradeL4BFLProxy",
|
||||
Action: &upgradeL4BFLProxy{Tag: "v0.3.8"},
|
||||
Retry: 3,
|
||||
Delay: 5 * time.Second,
|
||||
},
|
||||
}
|
||||
return append(pre, u.upgraderBase.UpgradeSystemComponents()...)
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerDailyUpgrader(upgrader_1_12_3_20251114{})
|
||||
}
|
||||
32
cli/pkg/upgrade/1_12_3_20251118.go
Normal file
32
cli/pkg/upgrade/1_12_3_20251118.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
)
|
||||
|
||||
type upgrader_1_12_3_20251118 struct {
|
||||
breakingUpgraderBase
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251118) Version() *semver.Version {
|
||||
return semver.MustParse("1.12.3-20251118")
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251118) UpgradeSystemComponents() []task.Interface {
|
||||
pre := []task.Interface{
|
||||
&task.LocalTask{
|
||||
Name: "UpgradeL4BFLProxy",
|
||||
Action: &upgradeL4BFLProxy{Tag: "v0.3.9"},
|
||||
Retry: 3,
|
||||
Delay: 5 * time.Second,
|
||||
},
|
||||
}
|
||||
return append(pre, u.upgraderBase.UpgradeSystemComponents()...)
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerDailyUpgrader(upgrader_1_12_3_20251118{})
|
||||
}
|
||||
29
cli/pkg/upgrade/1_12_3_20251126.go
Normal file
29
cli/pkg/upgrade/1_12_3_20251126.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
)
|
||||
|
||||
type upgrader_1_12_3_20251126 struct {
|
||||
breakingUpgraderBase
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251126) Version() *semver.Version {
|
||||
return semver.MustParse("1.12.3-20251126")
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251126) PrepareForUpgrade() []task.Interface {
|
||||
tasks := make([]task.Interface, 0)
|
||||
tasks = append(tasks, upgradeKsConfig()...)
|
||||
tasks = append(tasks, upgradePrometheusServiceMonitorKubelet()...)
|
||||
tasks = append(tasks, upgradeKSCore()...)
|
||||
tasks = append(tasks, regenerateKubeFiles()...)
|
||||
|
||||
tasks = append(tasks, u.upgraderBase.PrepareForUpgrade()...)
|
||||
return tasks
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerDailyUpgrader(upgrader_1_12_3_20251126{})
|
||||
}
|
||||
41
cli/pkg/upgrade/1_12_3_20251127.go
Normal file
41
cli/pkg/upgrade/1_12_3_20251127.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
)
|
||||
|
||||
type upgrader_1_12_3_20251127 struct {
|
||||
breakingUpgraderBase
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251127) Version() *semver.Version {
|
||||
return semver.MustParse("1.12.3-20251127")
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251127) NeedRestart() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// put GPU driver upgrade step at the very end right before updating the version
|
||||
func (u upgrader_1_12_3_20251127) UpdateOlaresVersion() []task.Interface {
|
||||
var tasks []task.Interface
|
||||
tasks = append(tasks,
|
||||
&task.LocalTask{
|
||||
Name: "UpgradeGPUDriver",
|
||||
Action: new(upgradeGPUDriverIfNeeded),
|
||||
},
|
||||
)
|
||||
tasks = append(tasks, u.upgraderBase.UpdateOlaresVersion()...)
|
||||
tasks = append(tasks,
|
||||
&task.LocalTask{
|
||||
Name: "RebootIfNeeded",
|
||||
Action: new(rebootIfNeeded),
|
||||
},
|
||||
)
|
||||
return tasks
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerDailyUpgrader(upgrader_1_12_3_20251127{})
|
||||
}
|
||||
25
cli/pkg/upgrade/1_12_3_20251203.go
Normal file
25
cli/pkg/upgrade/1_12_3_20251203.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
)
|
||||
|
||||
type upgrader_1_12_3_20251203 struct {
|
||||
breakingUpgraderBase
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251203) Version() *semver.Version {
|
||||
return semver.MustParse("1.12.3-20251203")
|
||||
}
|
||||
|
||||
func (u upgrader_1_12_3_20251203) PrepareForUpgrade() []task.Interface {
|
||||
tasks := make([]task.Interface, 0)
|
||||
tasks = append(tasks, upgradeNodeExporter()...)
|
||||
tasks = append(tasks, u.upgraderBase.PrepareForUpgrade()...)
|
||||
return tasks
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerDailyUpgrader(upgrader_1_12_3_20251203{})
|
||||
}
|
||||
@@ -42,6 +42,10 @@ func (u upgraderBase) AddedBreakingChange() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (u upgraderBase) NeedRestart() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (u upgraderBase) PrepareForUpgrade() []task.Interface {
|
||||
var tasks []task.Interface
|
||||
tasks = append(tasks, upgradeKSCore()...)
|
||||
|
||||
@@ -16,6 +16,7 @@ type upgrader interface {
|
||||
UpdateOlaresVersion() []task.Interface
|
||||
PostUpgrade() []task.Interface
|
||||
AddedBreakingChange() bool
|
||||
NeedRestart() bool
|
||||
}
|
||||
|
||||
type breakingUpgrader interface {
|
||||
|
||||
@@ -1,16 +1,38 @@
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
|
||||
"github.com/beclab/Olares/cli/pkg/clientset"
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/container"
|
||||
cc "github.com/beclab/Olares/cli/pkg/core/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/logger"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
"github.com/beclab/Olares/cli/pkg/core/util"
|
||||
"github.com/beclab/Olares/cli/pkg/gpu"
|
||||
"github.com/beclab/Olares/cli/pkg/k3s"
|
||||
k3stemplates "github.com/beclab/Olares/cli/pkg/k3s/templates"
|
||||
"github.com/beclab/Olares/cli/pkg/kubernetes"
|
||||
"github.com/beclab/Olares/cli/pkg/kubesphere"
|
||||
"github.com/beclab/Olares/cli/pkg/kubesphere/plugins"
|
||||
"github.com/beclab/Olares/cli/pkg/manifest"
|
||||
"time"
|
||||
"github.com/beclab/Olares/cli/pkg/phase"
|
||||
"github.com/beclab/Olares/cli/pkg/terminus"
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
const cacheRebootNeeded = "reboot.needed"
|
||||
|
||||
type upgradeContainerdAction struct {
|
||||
common.KubeAction
|
||||
}
|
||||
@@ -63,3 +85,251 @@ func upgradeKSCore() []task.Interface {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func upgradePrometheusServiceMonitorKubelet() []task.Interface {
|
||||
return []task.Interface{
|
||||
// prometheus kubelet ServiceMonitor
|
||||
&task.LocalTask{
|
||||
Name: "ApplyKubeletServiceMonitor",
|
||||
Action: new(applyKubeletServiceMonitorAction),
|
||||
Retry: 5,
|
||||
Delay: 5 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func upgradeKsConfig() []task.Interface {
|
||||
return []task.Interface{
|
||||
&task.LocalTask{
|
||||
Name: "CopyEmbeddedKSManifests",
|
||||
Action: new(plugins.CopyEmbedFiles),
|
||||
},
|
||||
&task.LocalTask{
|
||||
Name: "ApplyKsConfigManifests",
|
||||
Action: new(plugins.ApplyKsConfigManifests),
|
||||
Retry: 5,
|
||||
Delay: 5 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// applyKubeletServiceMonitorAction applies embedded prometheus kubelet ServiceMonitor
|
||||
type applyKubeletServiceMonitorAction struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (a *applyKubeletServiceMonitorAction) Execute(runtime connector.Runtime) error {
|
||||
kubectlpath, err := util.GetCommand(common.CommandKubectl)
|
||||
if err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "kubectl not found")
|
||||
}
|
||||
manifest := path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, "prometheus", "kubernetes", "kubernetes-serviceMonitorKubelet.yaml")
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("%s apply -f %s", kubectlpath, manifest), false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "apply kubelet ServiceMonitor failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyNodeExporterAction applies embedded node-exporter
|
||||
type applyNodeExporterAction struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (a *applyNodeExporterAction) Execute(runtime connector.Runtime) error {
|
||||
kubectlpath, err := util.GetCommand(common.CommandKubectl)
|
||||
if err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "kubectl not found")
|
||||
}
|
||||
manifest := path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, "prometheus", "node-exporter", "node-exporter-daemonset.yaml")
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("%s apply -f %s", kubectlpath, manifest), false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "apply node-exporter failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func upgradeNodeExporter() []task.Interface {
|
||||
return []task.Interface{
|
||||
&task.LocalTask{
|
||||
Name: "CopyEmbeddedKSManifests",
|
||||
Action: new(plugins.CopyEmbedFiles),
|
||||
},
|
||||
&task.LocalTask{
|
||||
Name: "applyNodeExporterManifests",
|
||||
Action: new(applyNodeExporterAction),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func regenerateKubeFiles() []task.Interface {
|
||||
var tasks []task.Interface
|
||||
kubeType := phase.GetKubeType()
|
||||
if kubeType == common.K3s {
|
||||
tasks = append(tasks,
|
||||
&task.LocalTask{
|
||||
Name: "RegenerateK3sService",
|
||||
Action: new(k3s.GenerateK3sService),
|
||||
},
|
||||
&task.LocalTask{
|
||||
Name: "RestartK3sService",
|
||||
Action: &terminus.SystemctlCommand{
|
||||
Command: "restart",
|
||||
UnitNames: []string{k3stemplates.K3sService.Name()},
|
||||
DaemonReloadPreExec: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
} else {
|
||||
tasks = append(tasks,
|
||||
&task.LocalTask{
|
||||
Name: "RegenerateKubeadmConfig",
|
||||
Action: &kubernetes.GenerateKubeadmConfig{
|
||||
IsInitConfiguration: true,
|
||||
},
|
||||
},
|
||||
&task.LocalTask{
|
||||
Name: "RegenerateK8sFilesWithKubeadm",
|
||||
Action: new(terminus.RegenerateFilesForK8s),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
tasks = append(tasks,
|
||||
&task.LocalTask{
|
||||
Name: "WaitForKubeAPIServerUp",
|
||||
Action: new(precheck.GetKubernetesNodesStatus),
|
||||
Retry: 10,
|
||||
Delay: 10,
|
||||
},
|
||||
)
|
||||
return tasks
|
||||
}
|
||||
|
||||
type upgradeL4BFLProxy struct {
|
||||
common.KubeAction
|
||||
Tag string
|
||||
}
|
||||
|
||||
func (u *upgradeL4BFLProxy) Execute(runtime connector.Runtime) error {
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(
|
||||
"/usr/local/bin/kubectl set image deployment/l4-bfl-proxy proxy=beclab/l4-bfl-proxy:%s -n os-network", u.Tag), false, true); err != nil {
|
||||
return errors.Wrapf(errors.WithStack(err), "failed to upgrade L4 network proxy to version %s", u.Tag)
|
||||
}
|
||||
|
||||
logger.Infof("L4 upgrade to version %s completed successfully", u.Tag)
|
||||
return nil
|
||||
}
|
||||
|
||||
type upgradeGPUDriverIfNeeded struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (a *upgradeGPUDriverIfNeeded) Execute(runtime connector.Runtime) error {
|
||||
sys := runtime.GetSystemInfo()
|
||||
if sys.IsWsl() {
|
||||
return nil
|
||||
}
|
||||
if !(sys.IsUbuntu() || sys.IsDebian()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
model, _, err := utils.DetectNvidiaModelAndArch(runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.TrimSpace(model) == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
m, err := manifest.ReadAll(a.KubeConf.Arg.Manifest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
item, err := m.Get("cuda-driver")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var targetDriverVersionStr string
|
||||
if parts := strings.Split(item.Filename, "-"); len(parts) >= 3 {
|
||||
targetDriverVersionStr = strings.TrimSuffix(parts[len(parts)-1], ".run")
|
||||
}
|
||||
if targetDriverVersionStr == "" {
|
||||
return fmt.Errorf("failed to parse target CUDA driver version from %s", item.Filename)
|
||||
}
|
||||
targetVersion, err := semver.NewVersion(targetDriverVersionStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid target driver version '%s': %v", targetDriverVersionStr, err)
|
||||
}
|
||||
|
||||
var needUpgrade bool
|
||||
|
||||
status, derr := utils.GetNvidiaStatus(runtime)
|
||||
// for now, consider it as not installed if error occurs
|
||||
// and continue to upgrade
|
||||
if derr != nil {
|
||||
logger.Warnf("failed to detect NVIDIA driver status, assuming upgrade is needed: %v", derr)
|
||||
needUpgrade = true
|
||||
}
|
||||
|
||||
if status != nil && status.Installed {
|
||||
currentStr := status.DriverVersion
|
||||
if status.Mismatch && status.LibraryVersion != "" {
|
||||
currentStr = status.LibraryVersion
|
||||
}
|
||||
if v, perr := semver.NewVersion(currentStr); perr == nil {
|
||||
needUpgrade = targetVersion.GreaterThan(v)
|
||||
} else {
|
||||
// cannot parse current version, assume upgrade needed
|
||||
needUpgrade = true
|
||||
}
|
||||
} else {
|
||||
needUpgrade = true
|
||||
}
|
||||
|
||||
changed := false
|
||||
if needUpgrade {
|
||||
// if apt-installed, uninstall apt nvidia packages but keep toolkit
|
||||
if status != nil && status.InstallMethod != utils.GPUDriverInstallMethodRunfile {
|
||||
if err := new(gpu.UninstallNvidiaDrivers).Execute(runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
_, _ = runtime.GetRunner().SudoCmd("apt-get update", false, true)
|
||||
if _, err := runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends dkms build-essential linux-headers-$(uname -r)", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to install kernel build dependencies for NVIDIA runfile")
|
||||
}
|
||||
// install runfile
|
||||
runfile := item.FilePath(runtime.GetBaseDir())
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("chmod +x %s", runfile), false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to chmod +x runfile")
|
||||
}
|
||||
cmd := fmt.Sprintf("sh %s -z --no-x-check --allow-installation-with-running-driver --no-check-for-alternate-installs --dkms --rebuild-initramfs -s", runfile)
|
||||
if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to install NVIDIA driver via runfile")
|
||||
}
|
||||
client, err := clientset.NewKubeClient()
|
||||
if err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "kubeclient create error")
|
||||
}
|
||||
err = gpu.UpdateNodeGpuLabel(context.Background(), client.Kubernetes(), &targetDriverVersionStr, ptr.To(common.CurrentVerifiedCudaVersion), ptr.To("true"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
changed = true
|
||||
}
|
||||
|
||||
needReboot := changed || (status != nil && status.Mismatch)
|
||||
a.PipelineCache.Set(cacheRebootNeeded, needReboot)
|
||||
return nil
|
||||
}
|
||||
|
||||
type rebootIfNeeded struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (r *rebootIfNeeded) Execute(runtime connector.Runtime) error {
|
||||
val, ok := r.PipelineCache.GetMustBool(cacheRebootNeeded)
|
||||
if ok && val {
|
||||
_, _ = runtime.GetRunner().SudoCmd("reboot now", false, false)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ type VersionSpec struct {
|
||||
ReleaseNum int `json:"releaseNum"`
|
||||
PreRelease bool `json:"prerelease"`
|
||||
AddedBreakingChange bool `json:"addedBreakingChange"`
|
||||
NeedRestart bool `json:"needRestart"`
|
||||
MinimumUpgradableVersions MinimumVersionConstraints `json:"minimumUpgradableVersions"`
|
||||
}
|
||||
|
||||
@@ -178,6 +179,7 @@ func CurrentVersionSpec() (spec *VersionSpec, err error) {
|
||||
}
|
||||
u := getUpgraderByVersion(v)
|
||||
spec.AddedBreakingChange = u.AddedBreakingChange()
|
||||
spec.NeedRestart = u.NeedRestart()
|
||||
if spec.ReleaseType == releaseTypeDaily {
|
||||
lastBreakingVersion := getLastBreakingVersion(dailyUpgraders, v)
|
||||
if lastBreakingVersion == nil {
|
||||
|
||||
@@ -3,15 +3,29 @@ package utils
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/util"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// NVIDIA driver install method constants
|
||||
GPUDriverInstallMethodUnknown = "unknown"
|
||||
GPUDriverInstallMethodApt = "apt"
|
||||
GPUDriverInstallMethodRunfile = "runfile"
|
||||
|
||||
// GPU status/message constants parsed from nvidia-smi outputs
|
||||
GPUStatusDriverLibraryMismatch = "Driver/library version mismatch"
|
||||
GPUStatusCouldntCommunicateWithDrv = "couldn't communicate with the NVIDIA driver"
|
||||
GPUStatusNvmlLibraryVersionPrefix = "NVML library version:"
|
||||
)
|
||||
|
||||
type GPU struct {
|
||||
ID string `xml:"id,attr" json:"id"`
|
||||
ProductName string `xml:"product_name" json:"product_name"`
|
||||
@@ -165,7 +179,19 @@ type NvidiaGpuInfo struct {
|
||||
GPUS []GPU `xml:"gpu" json:"gpus"`
|
||||
}
|
||||
|
||||
func ExecNvidiaSmi(execRuntime connector.Runtime) (gpuInfo *NvidiaGpuInfo, installed bool, err error) {
|
||||
// NvidiaStatus is the unified GPU/driver status, combining nvidia-smi XML info and driver health.
|
||||
type NvidiaStatus struct {
|
||||
Installed bool
|
||||
Running bool // whether kernel driver module is loaded
|
||||
Info *NvidiaGpuInfo
|
||||
DriverVersion string
|
||||
CudaVersion string
|
||||
LibraryVersion string // NVML library version when mismatch occurs
|
||||
Mismatch bool // whether nvidia-smi reports Driver/library version mismatch
|
||||
InstallMethod string // apt | runfile | unknown
|
||||
}
|
||||
|
||||
func findNvidiaSmiPath() (string, error) {
|
||||
cmd := "nvidia-smi"
|
||||
if runtime.GOOS == "windows" {
|
||||
cmd += ".exe"
|
||||
@@ -179,32 +205,149 @@ func ExecNvidiaSmi(execRuntime connector.Runtime) (gpuInfo *NvidiaGpuInfo, insta
|
||||
_, e := os.Stat(nvidiaSmiFile)
|
||||
if e != nil {
|
||||
if os.IsNotExist(e) {
|
||||
return nil, false, nil
|
||||
return "", exec.ErrNotFound
|
||||
}
|
||||
return nil, false, err
|
||||
return "", e
|
||||
}
|
||||
|
||||
cmdPath = nvidiaSmiFile
|
||||
} else {
|
||||
return nil, false, err
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
out, err := execRuntime.GetRunner().SudoCmd(cmdPath+" -q -x", false, false)
|
||||
if err != nil {
|
||||
// when nvidia-smi command is installed but cuda is not installed
|
||||
if strings.Contains(out, "couldn't communicate with the NVIDIA driver") {
|
||||
return nil, false, nil
|
||||
}
|
||||
klog.Error("Error running nvidia-smi:", err)
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
var data NvidiaGpuInfo
|
||||
if err := xml.Unmarshal([]byte(out), &data); nil != err {
|
||||
klog.Error("Error unmarshalling from XML:", err)
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
return &data, true, nil
|
||||
return cmdPath, nil
|
||||
}
|
||||
|
||||
func GetNvidiaStatus(execRuntime connector.Runtime) (*NvidiaStatus, error) {
|
||||
status := &NvidiaStatus{InstallMethod: GPUDriverInstallMethodUnknown}
|
||||
|
||||
if out, _ := execRuntime.GetRunner().SudoCmd("dpkg -l | awk '/^(ii|i[UuFHWt]|rc|..R)/ {print $2}' | grep -i nvidia-driver", false, false); strings.TrimSpace(out) != "" {
|
||||
status.InstallMethod = GPUDriverInstallMethodApt
|
||||
} else {
|
||||
if util.IsExist("/usr/bin/nvidia-uninstall") || util.IsExist("/usr/bin/nvidia-installer") {
|
||||
status.InstallMethod = GPUDriverInstallMethodRunfile
|
||||
}
|
||||
}
|
||||
|
||||
// detect whether any NVIDIA kernel module is loaded (driver running)
|
||||
// this is a seperate status besides the installed status
|
||||
if out, _ := execRuntime.GetRunner().SudoCmd("lsmod | grep -i nvidia 2>/dev/null", false, false); strings.TrimSpace(out) != "" {
|
||||
status.Running = true
|
||||
}
|
||||
// read running kernel driver version from sysfs if available
|
||||
var kernelDriverVersion string
|
||||
if status.Running {
|
||||
if v, _ := execRuntime.GetRunner().SudoCmd("cat /sys/module/nvidia/version 2>/dev/null", false, false); strings.TrimSpace(v) != "" {
|
||||
kernelDriverVersion = strings.TrimSpace(v)
|
||||
}
|
||||
}
|
||||
|
||||
cmdPath, pathErr := findNvidiaSmiPath()
|
||||
if pathErr == nil {
|
||||
out, err := execRuntime.GetRunner().SudoCmd(cmdPath+" -q -x", false, false)
|
||||
if err == nil {
|
||||
var data NvidiaGpuInfo
|
||||
uerr := xml.Unmarshal([]byte(out), &data)
|
||||
if uerr == nil {
|
||||
status.Installed = true
|
||||
// nvidia-smi works => kernel driver is active
|
||||
status.Running = true
|
||||
status.Info = &data
|
||||
status.DriverVersion = data.DriverVersion
|
||||
status.CudaVersion = data.CudaVersion
|
||||
return status, nil
|
||||
}
|
||||
return status, fmt.Errorf("failed to unmarshal nvidia-smi XML: %v", uerr)
|
||||
}
|
||||
if strings.Contains(out, GPUStatusDriverLibraryMismatch) {
|
||||
status.Installed = true
|
||||
status.Mismatch = true
|
||||
status.LibraryVersion = parseNvmlLibraryVersion(out)
|
||||
// kernel may still be running; prefer kernel driver version if available
|
||||
if kernelDriverVersion != "" {
|
||||
status.DriverVersion = kernelDriverVersion
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
// for now, consider as not installed
|
||||
if strings.Contains(out, GPUStatusCouldntCommunicateWithDrv) {
|
||||
// even if userland not communicating, kernel may be running
|
||||
if kernelDriverVersion != "" {
|
||||
status.DriverVersion = kernelDriverVersion
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
return status, fmt.Errorf("failed to get NVIDIA driver status: %v", out)
|
||||
}
|
||||
// consider as not installed
|
||||
// if kernel is running after uninstall (without reboot), reflect the running version
|
||||
if kernelDriverVersion != "" {
|
||||
status.DriverVersion = kernelDriverVersion
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func parseNvmlLibraryVersion(out string) string {
|
||||
lines := strings.Split(out, "\n")
|
||||
for _, l := range lines {
|
||||
l = strings.TrimSpace(l)
|
||||
// handle token like "NVML library version:575.57"
|
||||
if idx := strings.Index(l, GPUStatusNvmlLibraryVersionPrefix); idx >= 0 {
|
||||
v := strings.TrimSpace(strings.TrimPrefix(l, GPUStatusNvmlLibraryVersionPrefix))
|
||||
// in case there are trailing characters
|
||||
v = strings.FieldsFunc(v, func(r rune) bool { return r == ' ' || r == '\t' || r == '\r' || r == ')' || r == '(' })[0]
|
||||
return v
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func DetectNvidiaModelAndArch(execRuntime connector.Runtime) (model string, architecture string, err error) {
|
||||
if execRuntime.GetSystemInfo().IsDarwin() {
|
||||
return "", "", nil
|
||||
}
|
||||
out, e := execRuntime.GetRunner().SudoCmd("lspci | grep -i -e vga -e 3d | grep -i nvidia || true", false, false)
|
||||
if e != nil {
|
||||
klog.Error("Error running lspci:", e)
|
||||
return "", "", e
|
||||
}
|
||||
out = strings.TrimSpace(out)
|
||||
if out == "" {
|
||||
return "", "", nil
|
||||
}
|
||||
// try to extract codename in square brackets e.g. "AD106 [GeForce RTX 4060 Ti]"
|
||||
// examples: "NVIDIA Corporation AD106 [GeForce RTX 4060 Ti]"
|
||||
model = out
|
||||
architecture = "Unknown"
|
||||
upper := strings.ToUpper(out)
|
||||
// codename appears as two letters followed by digits, within the line, often right before '['
|
||||
// detect common prefixes: AD(Ada), GB(Blackwell), GH(Hopper), GA(Ampere), TU(Turing), GV(Volta), GP(Pascal), GM(Maxwell), GK(Kepler), GF(Fermi)
|
||||
codePrefixes := []struct {
|
||||
Prefix string
|
||||
Arch string
|
||||
}{
|
||||
{"AD", "Ada Lovelace"},
|
||||
{"GB", "Blackwell"},
|
||||
{"GH", "Hopper"},
|
||||
{"GA", "Ampere"},
|
||||
{"TU", "Turing"},
|
||||
{"GV", "Volta"},
|
||||
{"GP", "Pascal"},
|
||||
{"GM", "Maxwell"},
|
||||
{"GK", "Kepler"},
|
||||
{"GF", "Fermi"},
|
||||
}
|
||||
for _, p := range codePrefixes {
|
||||
if strings.Contains(upper, p.Prefix) {
|
||||
architecture = p.Arch
|
||||
break
|
||||
}
|
||||
}
|
||||
// get bracket part as model if present
|
||||
if i := strings.Index(out, "["); i >= 0 {
|
||||
if j := strings.Index(out[i:], "]"); j > 0 {
|
||||
model = strings.TrimSpace(out[i+1 : i+j])
|
||||
}
|
||||
}
|
||||
return model, architecture, nil
|
||||
}
|
||||
|
||||
57
cli/pkg/utils/lvm/command.go
Normal file
57
cli/pkg/utils/lvm/command.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package lvm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"log"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// command wraps an external CLI binary whose stdout is decoded into a T.
type command[T any] struct {
	// cmd is the absolute path of the binary; empty when it was not found on PATH.
	cmd string
	// defaultArgs are always passed before any caller-supplied arguments.
	defaultArgs []string
	// format decodes the command's stdout into a T.
	format func(data []byte) (T, error)
}
|
||||
|
||||
func (c *command[T]) Run(args ...string) (*T, string, error) {
|
||||
if c.cmd == "" {
|
||||
return nil, "", errors.ErrUnsupported
|
||||
}
|
||||
|
||||
allArgs := append(c.defaultArgs, args...)
|
||||
o, e, err := runCommandSplit(c.cmd, allArgs...)
|
||||
if err != nil {
|
||||
return nil, string(e), err
|
||||
}
|
||||
|
||||
result, err := c.format(o)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return &result, "", nil
|
||||
}
|
||||
|
||||
// runCommandSplit executes command with args and returns stdout and stderr
// separately, along with the error from Run (non-nil on non-zero exit or
// failure to start).
func runCommandSplit(command string, args ...string) ([]byte, []byte, error) {
	var stdout, stderr bytes.Buffer

	cmd := exec.Command(command, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	runErr := cmd.Run()
	return stdout.Bytes(), stderr.Bytes(), runErr
}
|
||||
|
||||
// findCmd resolves cmd on PATH, returning its absolute path, or "" (after
// logging) when the binary cannot be found.
func findCmd(cmd string) string {
	resolved, err := exec.LookPath(cmd)
	if err == nil {
		return resolved
	}
	log.Printf("failed to find command %s: %v\n", cmd, err)
	return ""
}
|
||||
179
cli/pkg/utils/lvm/disk.go
Normal file
179
cli/pkg/utils/lvm/disk.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package lvm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
/*
|
||||
lsblk -J
|
||||
|
||||
{
|
||||
"blockdevices": [
|
||||
{
|
||||
"name": "nvme0n1",
|
||||
"maj:min": "259:0",
|
||||
"rm": false,
|
||||
"size": "1.9T",
|
||||
"ro": false,
|
||||
"type": "disk",
|
||||
"mountpoints": [
|
||||
null
|
||||
],
|
||||
"children": [
|
||||
{
|
||||
"name": "nvme0n1p1",
|
||||
"maj:min": "259:1",
|
||||
"rm": false,
|
||||
"size": "512M",
|
||||
"ro": false,
|
||||
"type": "part",
|
||||
"mountpoints": [
|
||||
"/boot/efi"
|
||||
]
|
||||
},{
|
||||
"name": "nvme0n1p2",
|
||||
"maj:min": "259:2",
|
||||
"rm": false,
|
||||
"size": "1.9T",
|
||||
"ro": false,
|
||||
"type": "part",
|
||||
"mountpoints": [
|
||||
null
|
||||
],
|
||||
"children": [
|
||||
{
|
||||
"name": "olares--vg-swap",
|
||||
"maj:min": "252:0",
|
||||
"rm": false,
|
||||
"size": "1G",
|
||||
"ro": false,
|
||||
"type": "lvm",
|
||||
"mountpoints": [
|
||||
"[SWAP]"
|
||||
]
|
||||
},{
|
||||
"name": "olares--vg-root",
|
||||
"maj:min": "252:1",
|
||||
"rm": false,
|
||||
"size": "100G",
|
||||
"ro": false,
|
||||
"type": "lvm",
|
||||
"mountpoints": [
|
||||
"/"
|
||||
]
|
||||
},{
|
||||
"name": "olares--vg-data",
|
||||
"maj:min": "252:2",
|
||||
"rm": false,
|
||||
"size": "1.8T",
|
||||
"ro": false,
|
||||
"type": "lvm",
|
||||
"mountpoints": [
|
||||
"/olares", "/var"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
*/
|
||||
// LBLK is the name of the lsblk binary, resolved via findCmd.
const LBLK = "lsblk"

// BlkPart is one node (disk, partition, or LVM volume) in the device tree
// produced by `lsblk -J`.
type BlkPart struct {
	// Name is the kernel device name, e.g. "nvme0n1" or "nvme0n1p1".
	Name string `json:"name"`
	// MajMin is the "major:minor" device number pair.
	MajMin string `json:"maj:min"`
	// Rm reports whether the device is removable.
	Rm bool `json:"rm"`
	// Size is lsblk's human-readable size string, e.g. "1.9T".
	Size string `json:"size"`
	// Ro reports whether the device is read-only.
	Ro bool `json:"ro"`
	// Type is the device type reported by lsblk: "disk", "part", or "lvm".
	Type string `json:"type"`
	// Mountpoints lists where the device is mounted; JSON nulls are dropped
	// during decoding (see BlkList.UnmarshalJSON).
	Mountpoints BlkList[string] `json:"mountpoints"`
	// Children holds nested partitions / LVM volumes, if any.
	Children BlkList[BlkPart] `json:"children,omitempty"`
}

// BlkList is a slice with null-tolerant JSON decoding: null elements are
// skipped instead of becoming zero values.
type BlkList[T any] []T

// BlkResult is the top-level document produced by `lsblk -J`.
type BlkResult struct {
	Blockdevices BlkList[BlkPart] `json:"blockdevices"`
}
|
||||
|
||||
func CommandLBLK() *command[BlkResult] {
|
||||
cmd := findCmd(LBLK)
|
||||
return &command[BlkResult]{
|
||||
cmd: cmd,
|
||||
defaultArgs: []string{"-J"},
|
||||
|
||||
format: func(data []byte) (BlkResult, error) {
|
||||
var res BlkResult
|
||||
err := json.Unmarshal(data, &res)
|
||||
return res, err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BlkList[T]) UnmarshalJSON(b []byte) error {
|
||||
b = bytes.TrimSpace(b)
|
||||
if bytes.Equal(b, []byte("null")) {
|
||||
*s = nil
|
||||
return nil
|
||||
}
|
||||
var raws []json.RawMessage
|
||||
if err := json.Unmarshal(b, &raws); err != nil {
|
||||
return err
|
||||
}
|
||||
var out []T
|
||||
for _, r := range raws {
|
||||
if bytes.Equal(bytes.TrimSpace(r), []byte("null")) {
|
||||
continue
|
||||
}
|
||||
var v T
|
||||
if err := json.Unmarshal(r, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
out = append(out, v)
|
||||
}
|
||||
*s = out
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
findmnt -n -J --target /olares
|
||||
|
||||
{
|
||||
"filesystems": [
|
||||
{
|
||||
"target": "/olares",
|
||||
"source": "/dev/mapper/olares--vg-data[/olares]",
|
||||
"fstype": "ext4",
|
||||
"options": "rw,relatime"
|
||||
}
|
||||
]
|
||||
}
|
||||
*/
|
||||
// Filesystem is one mount entry from `findmnt -J` output.
type Filesystem struct {
	// Target is the mountpoint path, e.g. "/olares".
	Target string `json:"target"`
	// Source is the backing device, possibly with a bind subpath suffix
	// such as "/dev/mapper/olares--vg-data[/olares]".
	Source string `json:"source"`
	// Fstype is the filesystem type, e.g. "ext4".
	Fstype string `json:"fstype"`
	// Options is the comma-separated mount options string.
	Options string `json:"options"`
}

// FindMntResult is the top-level document produced by `findmnt -J`.
type FindMntResult struct {
	Filesystems []Filesystem `json:"filesystems"`
}

// FINDMNT is the name of the findmnt binary, resolved via findCmd.
const FINDMNT = "findmnt"
|
||||
|
||||
func CommandFindMnt() *command[FindMntResult] {
|
||||
cmd := findCmd(FINDMNT)
|
||||
return &command[FindMntResult]{
|
||||
cmd: cmd,
|
||||
defaultArgs: []string{"-J"},
|
||||
format: func(data []byte) (FindMntResult, error) {
|
||||
var res FindMntResult
|
||||
err := json.Unmarshal(data, &res)
|
||||
return res, err
|
||||
},
|
||||
}
|
||||
}
|
||||
147
cli/pkg/utils/lvm/lvm.go
Normal file
147
cli/pkg/utils/lvm/lvm.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package lvm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// Names of the LVM reporting binaries, resolved to absolute paths via findCmd.
const (
	LVS = "lvs"
	VGS = "vgs"
	PVS = "pvs"
)
|
||||
|
||||
/*
|
||||
{
|
||||
"report": [
|
||||
{
|
||||
"lv": [
|
||||
{"lv_name":"data", "vg_name":"olares-vg", "lv_attr":"-wi-ao----", "lv_size":"1.76t", "pool_lv":"", "origin":"", "data_percent":"", "metadata_percent":"", "move_pv":"", "mirror_log":"", "copy_percent":"", "convert_lv":""},
|
||||
{"lv_name":"root", "vg_name":"olares-vg", "lv_attr":"-wi-ao----", "lv_size":"100.00g", "pool_lv":"", "origin":"", "data_percent":"", "metadata_percent":"", "move_pv":"", "mirror_log":"", "copy_percent":"", "convert_lv":""},
|
||||
{"lv_name":"swap", "vg_name":"olares-vg", "lv_attr":"-wi-ao----", "lv_size":"1.00g", "pool_lv":"", "origin":"", "data_percent":"", "metadata_percent":"", "move_pv":"", "mirror_log":"", "copy_percent":"", "convert_lv":""}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
*/
|
||||
// LvItem is one logical volume row from `lvs --reportformat json`.
// Field values are strings exactly as lvs reports them (sizes like "100.00g").
type LvItem struct {
	LvName string `json:"lv_name"`
	VgName string `json:"vg_name"`
	LvAttr string `json:"lv_attr"`
	LvSize string `json:"lv_size"`
	PoolLv string `json:"pool_lv"`
	Origin string `json:"origin"`
	DataPercent string `json:"data_percent"`
	MetadataPercent string `json:"metadata_percent"`
	MovePv string `json:"move_pv"`
	MirrorLog string `json:"mirror_log"`
	CopyPercent string `json:"copy_percent"`
	ConvertLv string `json:"convert_lv"`
	// LvPath / LvDmPath are only populated when requested explicitly via
	// `-o +lv_dm_path,lv_path` (see FindLvByVgName).
	LvPath string `json:"lv_path"`
	LvDmPath string `json:"lv_dm_path"`
	// Mountpoints is not part of lvs output; it is filled in afterwards
	// from findmnt (see FindMountpointsByLvDmPath).
	Mountpoints []string `json:"mountpoints"`
}

// LvsResult is the top-level document produced by `lvs --reportformat json`.
type LvsResult struct {
	Report []struct {
		Lv []LvItem `json:"lv"`
	} `json:"report"`
}
|
||||
|
||||
func CommandLVS() *command[LvsResult] {
|
||||
cmd := findCmd(LVS)
|
||||
|
||||
return &command[LvsResult]{
|
||||
cmd: cmd,
|
||||
defaultArgs: []string{"--reportformat", "json"},
|
||||
|
||||
format: func(data []byte) (LvsResult, error) {
|
||||
var res LvsResult
|
||||
err := json.Unmarshal(data, &res)
|
||||
return res, err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
{
|
||||
"report": [
|
||||
{
|
||||
"vg": [
|
||||
{"vg_name":"olares-vg", "pv_count":"1", "lv_count":"3", "snap_count":"0", "vg_attr":"wz--n-", "vg_size":"1.86t", "vg_free":"0 "}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
*/
|
||||
// VgItem is one volume group row from `vgs --reportformat json`.
// Counts and sizes are strings exactly as vgs reports them.
type VgItem struct {
	VgName string `json:"vg_name"`
	PvCount string `json:"pv_count"`
	LvCount string `json:"lv_count"`
	SnapCount string `json:"snap_count"`
	VgAttr string `json:"vg_attr"`
	VgSize string `json:"vg_size"`
	VgFree string `json:"vg_free"`
	// PvName is only populated when requested explicitly via `-o +pv_name`
	// (see FindVgsOnDevice).
	PvName string `json:"pv_name"`
}

// VgsResult is the top-level document produced by `vgs --reportformat json`.
type VgsResult struct {
	Report []struct {
		Vg []VgItem `json:"vg"`
	} `json:"report"`
}
|
||||
|
||||
func CommandVGS() *command[VgsResult] {
|
||||
cmd := findCmd(VGS)
|
||||
|
||||
return &command[VgsResult]{
|
||||
cmd: cmd,
|
||||
defaultArgs: []string{"--reportformat", "json"},
|
||||
|
||||
format: func(data []byte) (VgsResult, error) {
|
||||
var res VgsResult
|
||||
err := json.Unmarshal(data, &res)
|
||||
return res, err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
{
|
||||
"report": [
|
||||
{
|
||||
"pv": [
|
||||
{"pv_name":"/dev/nvme0n1p2", "vg_name":"olares-vg", "pv_fmt":"lvm2", "pv_attr":"a--", "pv_size":"1.86t", "pv_free":"0 "}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
*/
|
||||
// PvItem is one physical volume row from `pvs --reportformat json`.
// Sizes are strings exactly as pvs reports them.
type PvItem struct {
	PvName string `json:"pv_name"`
	VgName string `json:"vg_name"`
	PvFmt string `json:"pv_fmt"`
	PvAttr string `json:"pv_attr"`
	PvSize string `json:"pv_size"`
	PvFree string `json:"pv_free"`
}

// PvsResult is the top-level document produced by `pvs --reportformat json`.
type PvsResult struct {
	Report []struct {
		Pv []PvItem `json:"pv"`
	} `json:"report"`
}
|
||||
|
||||
func CommandPVS() *command[PvsResult] {
|
||||
cmd := findCmd(PVS)
|
||||
|
||||
return &command[PvsResult]{
|
||||
cmd: cmd,
|
||||
defaultArgs: []string{"--reportformat", "json"},
|
||||
|
||||
format: func(data []byte) (PvsResult, error) {
|
||||
var res PvsResult
|
||||
err := json.Unmarshal(data, &res)
|
||||
return res, err
|
||||
},
|
||||
}
|
||||
}
|
||||
272
cli/pkg/utils/lvm/tools.go
Normal file
272
cli/pkg/utils/lvm/tools.go
Normal file
@@ -0,0 +1,272 @@
|
||||
package lvm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"slices"
|
||||
)
|
||||
|
||||
func FindCurrentLVM() ([]*VgItem, error) {
|
||||
VG := CommandVGS()
|
||||
result, errmsg, err := VG.Run()
|
||||
if err != nil {
|
||||
log.Printf("failed to run vgs command: %s \n%s\n", err, errmsg)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(result.Report) == 0 || len(result.Report[0].Vg) == 0 {
|
||||
err = errors.New("no volume groups found")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var vgs []*VgItem
|
||||
for _, vg := range result.Report[0].Vg {
|
||||
if vg.PvCount == "0" || vg.LvCount == "0" {
|
||||
continue
|
||||
}
|
||||
vgs = append(vgs, &vg)
|
||||
}
|
||||
|
||||
if len(vgs) == 0 {
|
||||
err = errors.New("no valid volume groups found")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return vgs, nil
|
||||
}
|
||||
|
||||
func FindUnmountedDevices() (map[string]*BlkPart, error) {
|
||||
lblkCmd := CommandLBLK()
|
||||
result, errmsg, err := lblkCmd.Run()
|
||||
if err != nil {
|
||||
log.Printf("failed to run lsblk command: %s \n%s\n", err, errmsg)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var unmountedDevices map[string]*BlkPart = make(map[string]*BlkPart)
|
||||
var unmountedPart func(part BlkPart) bool
|
||||
unmountedPart = func(part BlkPart) bool {
|
||||
if len(part.Mountpoints) > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(part.Mountpoints) == 0 && len(part.Children) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, child := range part.Children {
|
||||
if !unmountedPart(child) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
for _, dev := range result.Blockdevices {
|
||||
if dev.Type != "disk" {
|
||||
continue
|
||||
}
|
||||
|
||||
if unmountedPart(dev) {
|
||||
unmountedDevices["/dev/"+dev.Name] = &dev
|
||||
}
|
||||
}
|
||||
|
||||
return unmountedDevices, nil
|
||||
}
|
||||
|
||||
func FindLvByVgName(vgName string) ([]*LvItem, error) {
|
||||
LV := CommandLVS()
|
||||
result, errmsg, err := LV.Run("-o", "+lv_dm_path,lv_path")
|
||||
if err != nil {
|
||||
log.Printf("failed to run lvs command: %s \n%s\n", err, errmsg)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(result.Report) == 0 || len(result.Report[0].Lv) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var lvs []*LvItem
|
||||
for _, lv := range result.Report[0].Lv {
|
||||
if lv.VgName == vgName {
|
||||
mountpoints, err := FindMountpointsByLvDmPath(lv.LvDmPath)
|
||||
if err == nil {
|
||||
lv.Mountpoints = mountpoints
|
||||
}
|
||||
lvs = append(lvs, &lv)
|
||||
}
|
||||
}
|
||||
|
||||
return lvs, nil
|
||||
}
|
||||
|
||||
// FindMountpointsByLvDmPath returns all mountpoints of the given device-mapper
// path as reported by findmnt. A nil slice with nil error means the volume is
// not currently mounted.
func FindMountpointsByLvDmPath(lvDmPath string) ([]string, error) {
	// NOTE: the local FINDMNT shadows the package-level FINDMNT constant.
	FINDMNT := CommandFindMnt()
	result, errmsg, err := FINDMNT.Run(lvDmPath)
	// findmnt exits non-zero when the target is simply not mounted, so a
	// failure is only treated as a real error when stderr is non-empty.
	// NOTE(review): an error with empty stderr falls through and is reported
	// below as "not mounted" — presumably intentional; confirm.
	if err != nil && errmsg != "" {
		log.Printf("failed to run findmnt command: %s \n%s\n", err, errmsg)
		return nil, err
	}

	// result can be nil when Run failed with empty stderr above.
	if result == nil || len(result.Filesystems) == 0 {
		return nil, nil
	}

	var mountpoints []string
	for _, fs := range result.Filesystems {
		mountpoints = append(mountpoints, fs.Target)
	}

	return mountpoints, nil
}
|
||||
|
||||
// DeleteDevicePartitions erases all filesystem and partition-table signatures
// on the device via `wipefs -a`.
//
// A follow-up `sgdisk --zap-all <device>` step exists upstream but is
// currently disabled.
func DeleteDevicePartitions(devicePath string) error {
	out, err := exec.Command("wipefs", "-a", devicePath).CombinedOutput()
	if err == nil {
		return nil
	}
	log.Printf("failed to wipe device %s: %s\n", devicePath, out)
	return err
}
|
||||
|
||||
// MakePartOnDevice writes a fresh GPT label on the device and creates a single
// primary partition spanning from 1MiB to 100% of the disk.
//
// Equivalent to:
//
//	parted -s /dev/sdX mklabel gpt
//	parted -s -a optimal /dev/sdX mkpart primary 1MiB 100%
func MakePartOnDevice(devicePath string) error {
	c, err := exec.Command("parted", "-s", devicePath, "mklabel", "gpt").CombinedOutput()
	if err != nil {
		log.Printf("failed to make partition table on device %s: %s\n", devicePath, c)
		return err
	}

	// -s (script mode) is required here too: without it parted may block
	// waiting for interactive confirmation.
	c, err = exec.Command("parted", "-s", "-a", "optimal", devicePath, "mkpart", "primary", "1MiB", "100%").CombinedOutput()
	if err != nil {
		log.Printf("failed to make partition on device %s: %s\n", devicePath, c)
		return err
	}

	return nil
}
|
||||
|
||||
// AddNewPV creates an LVM physical volume on the first partition of the given
// device and extends the volume group vg with it.
//
// Equivalent to:
//
//	pvcreate -f <partition>
//	vgextend <vg> <partition>
func AddNewPV(devicePath string, vg string) error {
	// Partition naming depends on the device type: names ending in a digit
	// (nvme0n1, mmcblk0) take a "p" separator ("nvme0n1p1"), while others
	// (sda) do not ("sda1"). Unconditionally appending "p1" would break
	// SATA/SCSI disks.
	partition := devicePath + "1"
	if n := len(devicePath); n > 0 && devicePath[n-1] >= '0' && devicePath[n-1] <= '9' {
		partition = devicePath + "p1"
	}

	c, err := exec.Command("pvcreate", "-f", partition).CombinedOutput()
	if err != nil {
		log.Printf("failed to create physical volume on device %s: %s\n", partition, c)
		return err
	}

	c, err = exec.Command("vgextend", vg, partition).CombinedOutput()
	if err != nil {
		log.Printf("failed to extend volume group %s with device %s: %s\n", vg, partition, c)
		return err
	}

	return nil
}
|
||||
|
||||
// ExtendLv grows the logical volume lv in volume group vg to consume all free
// extents, then resizes its ext filesystem to match.
//
// Equivalent to:
//
//	lvextend -l +100%FREE /dev/<vg>/<lv>
//	resize2fs /dev/<vg>/<lv>
func ExtendLv(vg, lv string) error {
	lvPath := "/dev/" + vg + "/" + lv

	if out, err := exec.Command("lvextend", "-l", "+100%FREE", lvPath).CombinedOutput(); err != nil {
		log.Printf("failed to extend logical volume %s in volume group %s: %s\n", lv, vg, out)
		return err
	}

	if out, err := exec.Command("resize2fs", lvPath).CombinedOutput(); err != nil {
		log.Printf("failed to resize filesystem on logical volume %s in volume group %s: %s\n", lv, vg, out)
		return err
	}

	return nil
}
|
||||
|
||||
// DeactivateLv deactivates the logical volumes of the given volume group via
// `lvchange -an`.
func DeactivateLv(vg string) error {
	out, err := exec.Command("lvchange", "-an", vg).CombinedOutput()
	if err == nil {
		return nil
	}
	log.Printf("failed to deactivate logical volume in volume group %s: %s\n", vg, out)
	return err
}
|
||||
|
||||
// RemoveLv force-removes the logical volume at lvpath via `lvremove -f`.
// A path that no longer exists is treated as success.
func RemoveLv(lvpath string) error {
	if _, err := os.Stat(lvpath); err != nil {
		if os.IsNotExist(err) {
			// Already gone — nothing to do.
			return nil
		}
		log.Printf("failed to stat logical volume %s: %s\n", lvpath, err)
		return err
	}

	out, err := exec.Command("lvremove", "-f", lvpath).CombinedOutput()
	if err == nil {
		return nil
	}
	log.Printf("failed to remove logical volume %s: %s\n", lvpath, out)
	return err
}
|
||||
|
||||
// RemoveVg force-removes the given volume group via `vgremove -f`.
func RemoveVg(vg string) error {
	out, err := exec.Command("vgremove", "-f", vg).CombinedOutput()
	if err == nil {
		return nil
	}
	log.Printf("failed to remove volume group %s: %s\n", vg, out)
	return err
}
|
||||
|
||||
// RemovePv force-removes the given physical volume via `pvremove -f`.
func RemovePv(pv string) error {
	out, err := exec.Command("pvremove", "-f", pv).CombinedOutput()
	if err == nil {
		return nil
	}
	log.Printf("failed to remove physical volume %s: %s\n", pv, out)
	return err
}
|
||||
|
||||
func FindVgsOnDevice(devicePaths []string) ([]*VgItem, error) {
|
||||
VG := CommandVGS()
|
||||
result, errmsg, err := VG.Run("-o", "+pv_name")
|
||||
if err != nil {
|
||||
log.Printf("failed to run vgs command: %s \n%s\n", err, errmsg)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var vgs []*VgItem
|
||||
for _, vg := range result.Report[0].Vg {
|
||||
if slices.Contains(devicePaths, vg.PvName) {
|
||||
vgs = append(vgs, &vg)
|
||||
}
|
||||
}
|
||||
|
||||
return vgs, nil
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
)
|
||||
|
||||
func TestA(t *testing.T) {
|
||||
var a = "/home/ubuntu/.terminus/versions/v1.8.0-20240928/wizard/config/apps/argo"
|
||||
var b = filepath.Base(a)
|
||||
fmt.Println("---b---", b)
|
||||
}
|
||||
|
||||
func TestExecNvidiaSmi(t *testing.T) {
|
||||
|
||||
runtime := common.LocalRuntime{}
|
||||
|
||||
info, installed, err := ExecNvidiaSmi(&runtime)
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
t.Fail()
|
||||
return
|
||||
}
|
||||
|
||||
t.Log(installed)
|
||||
t.Log(info)
|
||||
}
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/web5/crypto/dsa"
|
||||
@@ -24,10 +25,18 @@ var (
|
||||
)
|
||||
|
||||
var (
|
||||
db *leveldb.DB
|
||||
db *leveldb.DB
|
||||
dbOnce sync.Once
|
||||
)
|
||||
|
||||
func init() {
|
||||
func getDB() *leveldb.DB {
|
||||
dbOnce.Do(func() {
|
||||
initDB()
|
||||
})
|
||||
return db
|
||||
}
|
||||
|
||||
func initDB() {
|
||||
var (
|
||||
err error
|
||||
info os.FileInfo
|
||||
@@ -84,7 +93,7 @@ type CheckJWSResult struct {
|
||||
func ResolveOlaresName(olares_id string) (*didcore.ResolutionResult, error) {
|
||||
name := strings.Replace(olares_id, "@", ".", -1)
|
||||
// Try to get from cache first
|
||||
cached, err := db.Get([]byte(name), nil)
|
||||
cached, err := getDB().Get([]byte(name), nil)
|
||||
if err == nil {
|
||||
var result didcore.ResolutionResult
|
||||
if err := json.Unmarshal(cached, &result); err == nil {
|
||||
@@ -117,7 +126,7 @@ func ResolveOlaresName(olares_id string) (*didcore.ResolutionResult, error) {
|
||||
}
|
||||
|
||||
// Cache the result
|
||||
if err := db.Put([]byte(name), body, nil); err != nil {
|
||||
if err := getDB().Put([]byte(name), body, nil); err != nil {
|
||||
// Log error but don't fail
|
||||
fmt.Printf("failed to cache DID document: %v\n", err)
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func (i *InstallAppxPackage) Execute(runtime connector.Runtime) error {
|
||||
wslAppxPackage := wslAppxPackageObj.(*files.KubeBinary)
|
||||
|
||||
var ps = &utils.PowerShellCommandExecutor{
|
||||
Commands: []string{fmt.Sprintf("Add-AppxPackage %s -ForceUpdateFromAnyVersion", wslAppxPackage.Path())},
|
||||
Commands: []string{fmt.Sprintf("Add-AppxPackage \"%s\" -ForceUpdateFromAnyVersion", wslAppxPackage.Path())},
|
||||
}
|
||||
|
||||
if _, err := ps.Run(); err != nil {
|
||||
@@ -216,7 +216,7 @@ func (i *InstallWSLDistro) Execute(runtime connector.Runtime) error {
|
||||
logger.Infof("%s path: %s", ubuntuTool, installerPath)
|
||||
|
||||
var checkInstallerPs = &utils.PowerShellCommandExecutor{
|
||||
Commands: []string{fmt.Sprintf("Test-Path %s", installerPath)},
|
||||
Commands: []string{fmt.Sprintf("Test-Path \"%s\"", installerPath)},
|
||||
}
|
||||
installerExists, err := checkInstallerPs.Run()
|
||||
if err != nil {
|
||||
@@ -314,7 +314,7 @@ func (c *ConfigWslConf) Execute(runtime connector.Runtime) error {
|
||||
}
|
||||
|
||||
cmd = &utils.DefaultCommandExecutor{
|
||||
Commands: []string{"--shutdown", distro},
|
||||
Commands: []string{"-t", distro},
|
||||
}
|
||||
if _, err := cmd.RunCmd("wsl", utils.DEFAULT); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("shutdown wsl %s failed", distro))
|
||||
@@ -493,7 +493,6 @@ func (i *InstallTerminus) Execute(runtime connector.Runtime) error {
|
||||
fmt.Sprintf("export %s=%s", common.ENV_HOST_IP, systemInfo.GetLocalIp()),
|
||||
fmt.Sprintf("export %s=%s", common.ENV_DISABLE_HOST_IP_PROMPT, os.Getenv(common.ENV_DISABLE_HOST_IP_PROMPT)),
|
||||
fmt.Sprintf("export %s=%s", common.ENV_OLARES_CDN_SERVICE, i.KubeConf.Arg.OlaresCDNService),
|
||||
fmt.Sprintf("export %s=%s", common.ENV_NVIDIA_CONTAINER_REPO_MIRROR, os.Getenv(common.ENV_NVIDIA_CONTAINER_REPO_MIRROR)),
|
||||
}
|
||||
|
||||
var bashUrl = fmt.Sprintf("https://%s", cc.DefaultBashUrl)
|
||||
|
||||
726
cli/pkg/wizard/app.go
Normal file
726
cli/pkg/wizard/app.go
Normal file
@@ -0,0 +1,726 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
)
|
||||
|
||||
// App is a simplified port of the original TypeScript App class for backend
// CLI use. It owns the API client used for signup/login flows.
type App struct {
	// Version is the app protocol version string reported to the server.
	Version string `json:"version"`
	// API is the RPC client (Client from client.go); excluded from JSON.
	API *Client `json:"-"` // Uses Client from client.go
}
|
||||
|
||||
// NewApp constructor - initializes with Client (corresponds to original TypeScript constructor)
|
||||
func NewApp(sender Sender) *App {
|
||||
// Create simplified client state (backend CLI doesn't need complex state management)
|
||||
state := &SimpleClientState{}
|
||||
|
||||
// Initialize Client (corresponds to original TypeScript's new Client(this.state, sender, hook))
|
||||
client := NewClient(state, sender)
|
||||
|
||||
return &App{
|
||||
Version: "3.0",
|
||||
API: client,
|
||||
}
|
||||
}
|
||||
|
||||
// NewAppWithBaseURL creates App with base URL (convenience function)
|
||||
func NewAppWithBaseURL(baseURL string) *App {
|
||||
// Create HTTP Sender
|
||||
sender := NewHTTPSender(baseURL)
|
||||
|
||||
// Create App with HTTP Sender
|
||||
return NewApp(sender)
|
||||
}
|
||||
|
||||
// SimpleClientState is a minimal in-memory client state for the backend CLI:
// it holds the current session, account, and device info with no persistence.
type SimpleClientState struct {
	// session is the active server session, nil until login completes.
	session *Session
	// account is the logged-in account, nil until login completes.
	account *Account
	// device is created lazily by GetDevice.
	device *DeviceInfo
}

// GetSession returns the current session, or nil if not logged in.
func (s *SimpleClientState) GetSession() *Session {
	return s.session
}

// SetSession stores the current session.
func (s *SimpleClientState) SetSession(session *Session) {
	s.session = session
}

// GetAccount returns the current account, or nil if not logged in.
func (s *SimpleClientState) GetAccount() *Account {
	return s.account
}

// SetAccount stores the current account.
func (s *SimpleClientState) SetAccount(account *Account) {
	s.account = account
}

// GetDevice returns the device info, lazily creating one with a random
// "cli-device-" ID and a "go-cli" platform on first use.
func (s *SimpleClientState) GetDevice() *DeviceInfo {
	if s.device == nil {
		s.device = &DeviceInfo{
			ID: "cli-device-" + generateUUID(),
			Platform: "go-cli",
		}
	}
	return s.device
}
|
||||
|
||||
// Signup creates a new account on the server, logs into it, and activates it.
// Based on the original TypeScript signup method (ref: app.ts).
//
// The flow is: build the account locally, derive the SRP verifier from the
// master password, create the account server-side, log in, then activate.
// Activation failure is logged but does not fail the signup.
func (a *App) Signup(params SignupParams) (*CreateAccountResponse, error) {
	log.Printf("Starting signup process for DID: %s", params.DID)

	// 1. Initialize account object (ref: app.ts line 954-959)
	account := &Account{
		ID: generateUUID(),
		DID: params.DID,
		Name: params.BFLUser, // Use BFLUser as account name
		Local: false,
		Created: getCurrentTimeISO(),
		Updated: getCurrentTimeISO(),
		MainVault: MainVault{
			ID: "", // Will be set on server side
		},
		Orgs: []OrgInfo{}, // Initialize as empty array to prevent undefined
		Settings: AccountSettings{},
		Version: "3.0.14",
	}

	// Initialize account with master password (ref: account.ts line 182-190)
	err := a.initializeAccount(account, params.MasterPassword)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize account: %v", err)
	}

	log.Printf("Account initialized: ID=%s, DID=%s, Name=%s", account.ID, account.DID, account.Name)

	// 2. Initialize auth object (ref: app.ts line 964-970)
	auth := NewAuth(params.DID)
	authKey, err := auth.GetAuthKey(params.MasterPassword)
	if err != nil {
		return nil, fmt.Errorf("failed to get auth key: %v", err)
	}

	// Calculate verifier (ref: app.ts line 968-970)
	srpClient := NewSRPClient(SRPGroup4096)
	err = srpClient.Initialize(authKey)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize SRP client: %v", err)
	}

	auth.Verifier = srpClient.GetV()
	// Only the first 8 bytes are logged, not the full verifier.
	log.Printf("SRP verifier generated: %x...", auth.Verifier[:8])

	// 3. Send create account request to server (ref: app.ts line 973-987)
	createParams := CreateAccountParams{
		Account: *account,
		Auth: *auth,
		AuthToken: params.AuthToken,
		BFLToken: params.BFLToken,
		SessionID: params.SessionID,
		BFLUser: params.BFLUser,
		JWS: params.JWS,
	}

	response, err := a.API.CreateAccount(createParams)
	if err != nil {
		return nil, fmt.Errorf("failed to create account on server: %v", err)
	}

	log.Printf("Account created on server successfully")
	log.Printf("MFA token received: %s", response.MFA)

	// 4. Login to newly created account (ref: app.ts line 991)
	loginParams := LoginParams{
		DID: params.DID,
		Password: params.MasterPassword,
	}

	err = a.Login(loginParams)
	if err != nil {
		return nil, fmt.Errorf("failed to login after signup: %v", err)
	}

	log.Printf("Login after signup successful")

	// 5. Initialize main vault and create TOTP item (ref: app.ts line 1003-1038)
	// NOTE(review): vault/TOTP initialization is currently disabled.
	// err = a.initializeMainVaultWithTOTP(response.MFA)
	// if err != nil {
	// 	log.Printf("Warning: Failed to initialize main vault with TOTP: %v", err)
	// 	// Don't return error as account creation was successful
	// } else {
	// 	log.Printf("Main vault initialized with TOTP item successfully")
	// }

	// 6. Activate account (ref: app.ts line 1039-1046)
	activeParams := ActiveAccountParams{
		ID: a.API.State.GetAccount().ID, // Use logged-in account ID
		BFLToken: params.BFLToken,
		BFLUser: params.BFLUser,
		JWS: params.JWS,
	}

	err = a.API.ActiveAccount(activeParams)
	if err != nil {
		log.Printf("Warning: Failed to activate account: %v", err)
		// Don't return error as account creation was successful
	} else {
		log.Printf("Account activated successfully")
	}

	log.Printf("Signup completed successfully for DID: %s", params.DID)
	return response, nil
}
|
||||
|
||||
// Login establishes an authenticated session for the given DID using the SRP
// protocol (simplified version of the original TypeScript login flow).
//
// It starts a session, derives the auth key from the password via PBKDF2
// using the server-provided salt/iterations, completes the SRP exchange, then
// stores the session and a minimal account object in client state.
func (a *App) Login(params LoginParams) error {
	log.Printf("Starting login process for DID: %s", params.DID)

	// 1. Start creating session
	startParams := StartCreateSessionParams{
		DID: params.DID,
		AuthToken: params.AuthToken,
		AsAdmin: params.AsAdmin,
	}

	startResponse, err := a.API.StartCreateSession(startParams)
	if err != nil {
		return fmt.Errorf("failed to start create session: %v", err)
	}

	log.Printf("Session creation started for Account ID: %s", startResponse.AccountID)

	// 2. Use SRP for authentication: derive a 32-byte auth key from the
	// password with the server-provided PBKDF2 parameters.
	authKey, err := deriveKeyPBKDF2(
		[]byte(params.Password),
		startResponse.KeyParams.Salt.Bytes(),
		startResponse.KeyParams.Iterations,
		32,
	)
	if err != nil {
		return fmt.Errorf("failed to derive auth key: %v", err)
	}

	// 3. SRP client negotiation
	srpClient := NewSRPClient(SRPGroup4096)
	err = srpClient.Initialize(authKey)
	if err != nil {
		return fmt.Errorf("failed to initialize SRP client: %v", err)
	}

	err = srpClient.SetB(startResponse.B.Bytes())
	if err != nil {
		return fmt.Errorf("failed to set B value: %v", err)
	}

	log.Printf("SRP negotiation completed")

	// 4. Complete session creation
	completeParams := CompleteCreateSessionParams{
		SRPId: startResponse.SRPId,
		AccountID: startResponse.AccountID,
		A: Base64Bytes(srpClient.GetA()),
		M: Base64Bytes(srpClient.GetM1()),
		AddTrustedDevice: false, // Don't add trusted device by default
		Kind: "oe", // Based on server logs, kind should be "oe"
		Version: "4.0.0", // Based on server logs, version should be "4.0.0"
	}

	session, err := a.API.CompleteCreateSession(completeParams)
	if err != nil {
		return fmt.Errorf("failed to complete create session: %v", err)
	}

	// 5. Set session key
	sessionKey := srpClient.GetK()
	session.Key = sessionKey
	a.API.State.SetSession(session)

	log.Printf("Session created: %s", session.ID)
	log.Printf("Session key length: %d bytes", len(sessionKey))
	// NOTE(review): logging the raw session key leaks a secret into logs —
	// consider removing before production use.
	log.Printf("Session key (hex): %x", sessionKey)

	// Create a simplified account object for subsequent operations
	// account, err := a.API.GetAccount()
	// if err != nil {
	// 	return fmt.Errorf("failed to get account: %v", err)
	// }

	account := &Account{
		ID: startResponse.AccountID,
		DID: params.DID,
		Name: params.DID,
	}

	a.API.State.SetAccount(account)

	log.Printf("Login completed successfully for DID: %s (skipped GetAccount due to signature issue)", params.DID)
	return nil
}
|
||||
|
||||
// Parameter structures

// SignupParams carries everything needed by App.Signup to create, log into,
// and activate a new account.
type SignupParams struct {
	// DID is the decentralized identifier of the new account.
	DID string `json:"did"`
	// MasterPassword drives both account initialization and SRP key derivation.
	MasterPassword string `json:"masterPassword"`
	Name string `json:"name"`
	// AuthToken, SessionID, BFLToken, BFLUser, and JWS are forwarded to the
	// server with the create/activate requests.
	AuthToken string `json:"authToken"`
	SessionID string `json:"sessionId"`
	BFLToken string `json:"bflToken"`
	// BFLUser is also used as the new account's display name.
	BFLUser string `json:"bflUser"`
	JWS string `json:"jws"`
}

// LoginParams carries the credentials for the SRP login flow in App.Login.
type LoginParams struct {
	DID string `json:"did"`
	Password string `json:"password"`
	// AuthToken and AsAdmin are optional; nil values are omitted from JSON.
	AuthToken *string `json:"authToken,omitempty"`
	AsAdmin *bool `json:"asAdmin,omitempty"`
}
|
||||
|
||||
// Extend Client interface to support App-required methods
|
||||
func (c *Client) CreateAccount(params CreateAccountParams) (*CreateAccountResponse, error) {
|
||||
requestParams := []interface{}{params}
|
||||
response, err := c.call("createAccount", requestParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result CreateAccountResponse
|
||||
if err := c.parseResponse(response.Result, &result); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse CreateAccount response: %v", err)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (c *Client) ActiveAccount(params ActiveAccountParams) error {
|
||||
requestParams := []interface{}{params}
|
||||
_, err := c.call("activeAccount", requestParams)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Client) StartCreateSession(params StartCreateSessionParams) (*StartCreateSessionResponse, error) {
|
||||
requestParams := []interface{}{params}
|
||||
response, err := c.call("startCreateSession", requestParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add debug info: print raw response
|
||||
if responseBytes, err := json.Marshal(response.Result); err == nil {
|
||||
log.Printf("StartCreateSession raw response: %s", string(responseBytes))
|
||||
}
|
||||
|
||||
var result StartCreateSessionResponse
|
||||
if err := c.parseResponse(response.Result, &result); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse StartCreateSession response: %v", err)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (c *Client) CompleteCreateSession(params CompleteCreateSessionParams) (*Session, error) {
|
||||
requestParams := []interface{}{params}
|
||||
response, err := c.call("completeCreateSession", requestParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result Session
|
||||
if err := c.parseResponse(response.Result, &result); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse CompleteCreateSession response: %v", err)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (c *Client) GetAccount() (*Account, error) {
|
||||
// getAccount needs no parameters, pass empty array (ref: client.ts line 46-47: undefined -> [])
|
||||
response, err := c.call("getAccount", []interface{}{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result Account
|
||||
if err := c.parseResponse(response.Result, &result); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse GetAccount response: %v", err)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (c *Client) UpdateVault(vault Vault) (*Vault, error) {
|
||||
requestParams := []interface{}{vault}
|
||||
response, err := c.call("updateVault", requestParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result Vault
|
||||
if err := c.parseResponse(response.Result, &result); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse UpdateVault response: %v", err)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// New data structures
|
||||
// CreateAccountParams is the payload of the "createAccount" RPC: the account
// record and its SRP auth material, plus the BFL/session credentials and JWS
// collected during the signup flow.
type CreateAccountParams struct {
	Account   Account `json:"account"`   // the account record to create
	Auth      Auth    `json:"auth"`      // SRP verifier and key-derivation params
	AuthToken string  `json:"authToken"` // token from the auth-request flow
	BFLToken  string  `json:"bflToken"`  // BFL access token
	SessionID string  `json:"sessionId"` // first-factor session id
	BFLUser   string  `json:"bflUser"`   // local BFL user name
	JWS       string  `json:"jws"`       // signed proof of DID ownership
}
|
||||
|
||||
// CreateAccountResponse is the server reply to "createAccount". MFA carries
// the MFA token that is later stored in the main vault as a TOTP item.
type CreateAccountResponse struct {
	MFA string `json:"mfa"`
}
|
||||
|
||||
// ActiveAccountParams is the payload of the "activeAccount" RPC, identifying
// the account to activate plus the BFL credentials authorizing the call.
type ActiveAccountParams struct {
	ID       string `json:"id"`       // account id to activate
	BFLToken string `json:"bflToken"` // BFL access token
	BFLUser  string `json:"bflUser"`  // local BFL user name
	JWS      string `json:"jws"`      // signed proof of DID ownership
}
|
||||
|
||||
// StartCreateSessionParams is the payload of the "startCreateSession" RPC,
// which opens an SRP login handshake for the given DID.
type StartCreateSessionParams struct {
	DID       string  `json:"did"`                 // account's decentralized identifier
	AuthToken *string `json:"authToken,omitempty"` // optional auth token; omitted when nil
	AsAdmin   *bool   `json:"asAdmin,omitempty"`   // optional admin flag; omitted when nil
}
|
||||
|
||||
// StartCreateSessionResponse is the server's half of the SRP handshake:
// the key-derivation parameters the client must replay plus the SRP
// exchange identifiers.
type StartCreateSessionResponse struct {
	AccountID string       `json:"accountId"` // id of the account being logged into
	KeyParams PBKDF2Params `json:"keyParams"` // PBKDF2 params to re-derive the auth key
	SRPId     string       `json:"srpId"`     // handle correlating start/complete calls
	B         Base64Bytes  `json:"B"`         // SRP server public value (base64-encoded bytes)
	Kind      string       `json:"kind,omitempty"`
	Version   string       `json:"version,omitempty"`
}
|
||||
|
||||
// CompleteCreateSessionParams is the client's half of the SRP handshake,
// sent to "completeCreateSession" to prove knowledge of the password.
type CompleteCreateSessionParams struct {
	SRPId            string      `json:"srpId"`            // handle from StartCreateSessionResponse
	AccountID        string      `json:"accountId"`        // id of the account being logged into
	A                Base64Bytes `json:"A"`                // SRP client public value; Base64Bytes handles the @AsBytes() decorator
	M                Base64Bytes `json:"M"`                // SRP proof value; Base64Bytes handles the @AsBytes() decorator
	AddTrustedDevice bool        `json:"addTrustedDevice"` // whether to register this device as trusted
	Kind             string      `json:"kind"`             // serializer kind tag
	Version          string      `json:"version"`          // serialization version
}
|
||||
|
||||
// PBKDF2Params describes how a key is derived from a password: the salt and
// iteration count are mandatory; the remaining fields mirror the TypeScript
// serializer and are omitted when empty.
type PBKDF2Params struct {
	Algorithm  string      `json:"algorithm,omitempty"` // e.g. "PBKDF2"
	Hash       string      `json:"hash,omitempty"`      // e.g. "SHA-256"
	Salt       Base64Bytes `json:"salt"`                // random salt (base64-encoded bytes)
	Iterations int         `json:"iterations"`          // PBKDF2 iteration count
	KeySize    int         `json:"keySize,omitempty"`   // derived key size in bits
	Kind       string      `json:"kind,omitempty"`      // serializer kind tag
	Version    string      `json:"version,omitempty"`   // serialization version
}
|
||||
|
||||
// Auth holds the server-side authentication material for an account: the
// SRP verifier and the PBKDF2 parameters used to derive the auth key.
type Auth struct {
	ID        string       `json:"id"`        // unique id of this auth record
	DID       string       `json:"did"`       // account's decentralized identifier
	Verifier  []byte       `json:"verifier"`  // SRP verifier derived from the auth key
	KeyParams PBKDF2Params `json:"keyParams"` // parameters for deriving the auth key
}
|
||||
|
||||
// Auth methods
|
||||
func NewAuth(did string) *Auth {
|
||||
return &Auth{
|
||||
ID: generateUUID(),
|
||||
DID: did,
|
||||
KeyParams: PBKDF2Params{
|
||||
Salt: generateRandomBytes(16),
|
||||
Iterations: 100000,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetAuthKey generates authentication key (ref: auth.ts line 278-284)
|
||||
func (a *Auth) GetAuthKey(password string) ([]byte, error) {
|
||||
// If no salt is set, generate a random value (ref: auth.ts line 281-282)
|
||||
if len(a.KeyParams.Salt) == 0 {
|
||||
a.KeyParams.Salt = Base64Bytes(generateRandomBytes(16))
|
||||
}
|
||||
|
||||
// Use PBKDF2 to derive key (ref: auth.ts line 284 and crypto.ts line 78-101)
|
||||
return deriveKeyPBKDF2(
|
||||
[]byte(password),
|
||||
a.KeyParams.Salt.Bytes(),
|
||||
a.KeyParams.Iterations,
|
||||
32, // 256 bits = 32 bytes
|
||||
)
|
||||
}
|
||||
|
||||
// deriveKeyPBKDF2 implements real PBKDF2 key derivation (ref: deriveKey in crypto.ts).
// password and salt are the raw inputs; iterations and keyLen (in bytes)
// follow the stored key parameters. The error return exists for signature
// symmetry with callers — pbkdf2.Key itself cannot fail.
func deriveKeyPBKDF2(password, salt []byte, iterations, keyLen int) ([]byte, error) {
	// Use real PBKDF2 implementation, ref: crypto.ts line 78-101.
	// SHA-256 is the hash function (corresponds to params.hash in TypeScript).
	key := pbkdf2.Key(password, salt, iterations, keyLen, sha256.New)
	return key, nil
}
|
||||
|
||||
// generateRandomBytes returns length cryptographically secure random bytes
// read from crypto/rand. It panics if the entropy source fails, since no
// caller can meaningfully proceed without randomness.
func generateRandomBytes(length int) []byte {
	buf := make([]byte, length)
	if _, err := rand.Read(buf); err != nil {
		// Should handle this error in production implementation
		panic(fmt.Sprintf("Failed to generate random bytes: %v", err))
	}
	return buf
}
|
||||
|
||||
// getCurrentTimeISO returns the current UTC time formatted as an
// ISO 8601 / RFC 3339 string (e.g. "2006-01-02T15:04:05Z").
func getCurrentTimeISO() string {
	now := time.Now().UTC()
	return now.Format(time.RFC3339)
}
|
||||
|
||||
// initializeMainVaultWithTOTP initializes main vault and creates TOTP item (ref: app.ts line 1003-1038)
|
||||
func (a *App) initializeMainVaultWithTOTP(mfaToken string) error {
|
||||
account := a.API.State.GetAccount()
|
||||
if account == nil {
|
||||
return fmt.Errorf("account is null")
|
||||
}
|
||||
|
||||
// 1. Initialize main vault (ref: server.ts line 1573-1579)
|
||||
vault := &Vault{
|
||||
Kind: "vault", // Serializable.kind getter (ref: vault.ts line 18-20)
|
||||
ID: generateUUID(),
|
||||
Name: "My Vault",
|
||||
Owner: account.ID,
|
||||
Created: getCurrentTimeISO(),
|
||||
Updated: getCurrentTimeISO(),
|
||||
Items: []VaultItem{}, // Initialize empty items array
|
||||
Version: "4.0.0", // Serialization version (ref: encoding.ts toRaw)
|
||||
}
|
||||
|
||||
// 2. Initialize parent class fields (SharedContainer extends BaseContainer)
|
||||
// BaseContainer has: encryptionParams: AESEncryptionParams = new AESEncryptionParams()
|
||||
vault.EncryptionParams = EncryptionParams{
|
||||
Algorithm: "AES-GCM",
|
||||
TagSize: 128,
|
||||
KeySize: 256,
|
||||
IV: "", // Empty, will be set when data is encrypted
|
||||
AdditionalData: "", // Empty, will be set when data is encrypted
|
||||
Version: "4.0.0",
|
||||
}
|
||||
|
||||
// SharedContainer has: keyParams: RSAEncryptionParams = new RSAEncryptionParams()
|
||||
vault.KeyParams = map[string]any{
|
||||
"algorithm": "RSA-OAEP",
|
||||
"hash": "SHA-256",
|
||||
"kind": "c",
|
||||
"version": "4.0.0",
|
||||
}
|
||||
|
||||
// SharedContainer has: accessors: Accessor[] = []
|
||||
vault.Accessors = []map[string]any{} // Empty array, will be populated via updateAccessors()
|
||||
|
||||
log.Printf("Main vault initialized: ID=%s, Name=%s, Owner=%s", vault.ID, vault.Name, vault.Owner)
|
||||
|
||||
// 2. Get authenticator template (ref: app.ts line 1008-1014)
|
||||
template := GetAuthenticatorTemplate()
|
||||
if template == nil {
|
||||
return fmt.Errorf("authenticator template is null")
|
||||
}
|
||||
|
||||
// 3. Set MFA token value (ref: app.ts line 1015)
|
||||
template.Fields[0].Value = mfaToken
|
||||
log.Printf("TOTP template prepared with MFA token: %s...", mfaToken[:min(8, len(mfaToken))])
|
||||
|
||||
// 4. Create vault item (ref: app.ts line 1024-1033)
|
||||
item, err := a.createVaultItem(CreateVaultItemParams{
|
||||
Name: account.Name,
|
||||
Vault: vault,
|
||||
Fields: template.Fields,
|
||||
Tags: []string{},
|
||||
Icon: template.Icon,
|
||||
Type: VaultTypeTerminusTotp,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create vault item: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("TOTP vault item created: ID=%s, Name=%s", item.ID, item.Name)
|
||||
log.Printf("TOTP field value: %s", item.Fields[0].Value)
|
||||
|
||||
// 5. Add item to vault
|
||||
vault.Items = append(vault.Items, *item)
|
||||
|
||||
// 6. Update vault on server (ref: app.ts line 2138: await this.addItems([item], vault))
|
||||
// Note: The vault is created empty without encryption. Items will be encrypted when
|
||||
// the user unlocks the vault for the first time via vault.unlock() -> vault.updateAccessors()
|
||||
err = a.updateVault(vault)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update vault on server: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("Vault updated on server successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateVaultItemParams bundles the inputs for creating a vault item:
// its display data, field contents, and the vault it is intended for.
type CreateVaultItemParams struct {
	Name   string    // display name of the item
	Vault  *Vault    // destination vault (mirrors the TS signature)
	Fields []Field   // item field values (e.g. the TOTP secret)
	Tags   []string  // user tags attached to the item
	Icon   string    // icon identifier
	Type   VaultType // item type, e.g. VaultTypeTerminusTotp
}
|
||||
|
||||
// createVaultItem creates a new vault item (ref: app.ts line 2096-2141)
|
||||
func (a *App) createVaultItem(params CreateVaultItemParams) (*VaultItem, error) {
|
||||
account := a.API.State.GetAccount()
|
||||
if account == nil {
|
||||
return nil, fmt.Errorf("account is null")
|
||||
}
|
||||
|
||||
// Create vault item (ref: item.ts line 451-475)
|
||||
item := &VaultItem{
|
||||
ID: generateUUID(),
|
||||
Name: params.Name,
|
||||
Type: params.Type,
|
||||
Icon: params.Icon,
|
||||
Fields: params.Fields,
|
||||
Tags: params.Tags,
|
||||
Updated: getCurrentTimeISO(),
|
||||
UpdatedBy: account.ID,
|
||||
}
|
||||
|
||||
log.Printf("Vault item created: ID=%s, Name=%s, Type=%d", item.ID, item.Name, item.Type)
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// updateVault updates vault on server (ref: app.ts line 1855-2037)
|
||||
func (a *App) updateVault(vault *Vault) error {
|
||||
// Update vault revision
|
||||
vault.Revision = generateUUID()
|
||||
vault.Updated = getCurrentTimeISO()
|
||||
|
||||
// Call server API to update vault
|
||||
updatedVault, err := a.API.UpdateVault(*vault)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update vault on server: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("Vault updated on server: ID=%s, Revision=%s", updatedVault.ID, updatedVault.Revision)
|
||||
return nil
|
||||
}
|
||||
|
||||
// min returns the minimum of two integers.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
|
||||
|
||||
// initializeAccount initializes account with RSA keys and encryption parameters (ref: account.ts line 182-190).
// It generates a 2048-bit RSA key pair, derives an AES key from the master
// password via PBKDF2, and stores the private/signing keys encrypted with
// AES-GCM on the account record. Only encrypted data and public material
// are placed on the account; the master password itself is never stored.
func (a *App) initializeAccount(account *Account, masterPassword string) error {
	// 1. Generate RSA key pair (ref: account.ts line 183-186)
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return fmt.Errorf("failed to generate RSA key pair: %v", err)
	}

	// 2. Extract public key and encode it (ref: account.ts line 186).
	// The public key is DER-encoded in PKIX (SubjectPublicKeyInfo) form.
	publicKeyDER, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)
	if err != nil {
		return fmt.Errorf("failed to marshal public key: %v", err)
	}
	account.PublicKey = base64.StdEncoding.EncodeToString(publicKeyDER)

	// 3. Set up key derivation parameters (ref: container.ts line 125-133).
	// These are stored alongside the account so the same key can be
	// re-derived at unlock time.
	salt := generateRandomBytes(16)
	account.KeyParams = KeyParams{
		Algorithm:  "PBKDF2",
		Hash:       "SHA-256",
		KeySize:    256,
		Iterations: 100000,
		Salt:       base64.StdEncoding.EncodeToString(salt),
		Version:    "3.0.14",
	}

	// 4. Derive encryption key from master password (256-bit AES key).
	encryptionKey := pbkdf2.Key([]byte(masterPassword), salt, account.KeyParams.Iterations, 32, sha256.New)

	// 5. Set up encryption parameters (ref: container.ts line 48-56).
	// IV and additional authenticated data are random and recorded so the
	// ciphertext below can be decrypted later.
	iv := generateRandomBytes(16)
	additionalData := generateRandomBytes(16)
	account.EncryptionParams = EncryptionParams{
		Algorithm:      "AES-GCM",
		TagSize:        128,
		KeySize:        256,
		IV:             base64.StdEncoding.EncodeToString(iv),
		AdditionalData: base64.StdEncoding.EncodeToString(additionalData),
		Version:        "3.0.14",
	}

	// 6. Create account secrets (private key + signing key).
	// Note the private key is PKCS#1 DER while the public key above is PKIX;
	// both sides must keep using these exact encodings to stay compatible.
	privateKeyDER := x509.MarshalPKCS1PrivateKey(privateKey)
	signingKey := generateRandomBytes(32) // HMAC key

	// Combine private key and signing key into one JSON blob so they are
	// encrypted together as the account's secret payload.
	accountSecrets := struct {
		SigningKey []byte `json:"signingKey"`
		PrivateKey []byte `json:"privateKey"`
	}{
		SigningKey: signingKey,
		PrivateKey: privateKeyDER,
	}

	accountSecretsBytes, err := json.Marshal(accountSecrets)
	if err != nil {
		return fmt.Errorf("failed to marshal account secrets: %v", err)
	}

	// 7. Encrypt account secrets (ref: container.ts line 59-63)
	encryptedData, err := a.encryptAESGCM(encryptionKey, accountSecretsBytes, iv, additionalData)
	if err != nil {
		return fmt.Errorf("failed to encrypt account secrets: %v", err)
	}
	account.EncryptedData = base64.StdEncoding.EncodeToString(encryptedData)

	log.Printf("Account initialized with RSA key pair and encryption parameters")
	log.Printf("Public key length: %d bytes", len(publicKeyDER))
	log.Printf("Encrypted data length: %d bytes", len(encryptedData))

	return nil
}
|
||||
|
||||
// encryptAESGCM encrypts data using AES-GCM
|
||||
func (a *App) encryptAESGCM(key, plaintext, iv, additionalData []byte) ([]byte, error) {
|
||||
// Import crypto/aes and crypto/cipher packages are needed at the top of the file
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create cipher: %v", err)
|
||||
}
|
||||
|
||||
gcm, err := cipher.NewGCMWithNonceSize(block, 16)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create GCM: %v", err)
|
||||
}
|
||||
|
||||
// Encrypt the plaintext using AES-GCM
|
||||
ciphertext := gcm.Seal(nil, iv, plaintext, additionalData)
|
||||
|
||||
return ciphertext, nil
|
||||
}
|
||||
287
cli/pkg/wizard/auth.go
Normal file
287
cli/pkg/wizard/auth.go
Normal file
@@ -0,0 +1,287 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Token struct, corresponds to TypeScript Token interface.
// It carries the credentials returned by the first-factor endpoint.
type Token struct {
	AccessToken  string `json:"access_token"`
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int    `json:"expires_in"` // presumably lifetime in seconds — confirm with API docs
	ExpiresAt    int    `json:"expires_at"`
	SessionID    string `json:"session_id"` // first-factor session id, reused by the signup flow
	FA2          bool   `json:"fa2"`        // server's two-factor flag
}
|
||||
|
||||
// FirstFactorRequest represents the first-factor request body posted to
// /api/firstfactor. Password carries the salted-MD5 digest, never the raw
// password (see passwordAddSort).
type FirstFactorRequest struct {
	Username       string `json:"username"`
	Password       string `json:"password"` // salted MD5 digest of the OS password
	KeepMeLoggedIn bool   `json:"keepMeLoggedIn"`
	RequestMethod  string `json:"requestMethod"`
	TargetURL      string `json:"targetURL"`
	AcceptCookie   bool   `json:"acceptCookie"`
}
|
||||
|
||||
// FirstFactorResponse represents the first-factor response envelope:
// a status string ("OK" on success) wrapping the issued token.
type FirstFactorResponse struct {
	Status string `json:"status"`
	Data   Token  `json:"data"`
}
|
||||
|
||||
// OnFirstFactor implements first factor authentication (ref: BindTerminusBusiness.ts)
|
||||
func OnFirstFactor(baseURL, terminusName, osUser, osPwd string, acceptCookie, needTwoFactor bool) (*Token, error) {
|
||||
log.Printf("Starting onFirstFactor for user: %s", osUser)
|
||||
|
||||
// Process password (salted MD5)
|
||||
processedPassword := passwordAddSort(osPwd)
|
||||
|
||||
// Build request
|
||||
reqData := FirstFactorRequest{
|
||||
Username: osUser,
|
||||
Password: processedPassword,
|
||||
KeepMeLoggedIn: false,
|
||||
RequestMethod: "POST",
|
||||
TargetURL: baseURL,
|
||||
AcceptCookie: acceptCookie,
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(reqData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal request: %v", err)
|
||||
}
|
||||
|
||||
// Send HTTP request
|
||||
client := &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
reqURL := fmt.Sprintf("%s/api/firstfactor?hideCookie=true", baseURL)
|
||||
req, err := http.NewRequest("POST", reqURL, strings.NewReader(string(jsonData)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
log.Printf("Sending request to: %s", reqURL)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var response FirstFactorResponse
|
||||
if err := json.Unmarshal(body, &response); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal response: %v", err)
|
||||
}
|
||||
|
||||
if response.Status != "OK" {
|
||||
return nil, fmt.Errorf("authentication failed: %s", response.Status)
|
||||
}
|
||||
|
||||
log.Printf("First factor authentication successful")
|
||||
return &response.Data, nil
|
||||
}
|
||||
|
||||
// passwordAddSort implements salted MD5 (ref: TypeScript version): the
// password is suffixed with a fixed salt and the lowercase hex MD5 digest
// of the result is returned.
func passwordAddSort(password string) string {
	digest := md5.Sum([]byte(password + "@Olares2025"))
	return fmt.Sprintf("%x", digest)
}
|
||||
|
||||
// Main authentication function - corresponds to original TypeScript _authenticate function
|
||||
func Authenticate(req AuthenticateRequest) (*AuthenticateResponse, error) {
|
||||
if platform == nil {
|
||||
return nil, NewAuthError(ErrorCodeServerError, "Platform not initialized", nil)
|
||||
}
|
||||
|
||||
step := 1
|
||||
var authReq *StartAuthRequestResponse = req.PendingRequest
|
||||
|
||||
// Step 1: If no pending request, start new authentication request
|
||||
if authReq == nil {
|
||||
log.Printf("[%s] Step %d: req is empty, starting auth request...", req.Caller, step)
|
||||
|
||||
opts := StartAuthRequestOptions{
|
||||
Type: &req.Type,
|
||||
Purpose: req.Purpose,
|
||||
DID: &req.DID,
|
||||
AuthenticatorIndex: &req.AuthenticatorIndex,
|
||||
}
|
||||
|
||||
var err error
|
||||
authReq, err = platform.StartAuthRequest(opts)
|
||||
if err != nil {
|
||||
log.Printf("[%s] Step %d: Error occurred while starting auth request: %v", req.Caller, step, err)
|
||||
return nil, NewAuthError(
|
||||
ErrorCodeAuthenticationFailed,
|
||||
fmt.Sprintf("[%s] Step %d: An error occurred: %s", req.Caller, step, err.Error()),
|
||||
map[string]any{"error": err},
|
||||
)
|
||||
}
|
||||
|
||||
reqJSON, _ := json.Marshal(authReq)
|
||||
log.Printf("[%s] Step %d: Auth request started successfully. Request details: %s", req.Caller, step, string(reqJSON))
|
||||
} else {
|
||||
log.Printf("[%s] Step %d: req already exists. Skipping auth request.", req.Caller, step)
|
||||
}
|
||||
|
||||
// Step 2: Complete authentication request
|
||||
step = 2
|
||||
reqJSON, _ := json.Marshal(authReq)
|
||||
log.Printf("[%s] Step %d: Completing auth request with req: %s", req.Caller, step, string(reqJSON))
|
||||
|
||||
res, err := platform.CompleteAuthRequest(authReq)
|
||||
if err != nil {
|
||||
log.Printf("[%s] Step %d: Error occurred while completing auth request: %v", req.Caller, step, err)
|
||||
return nil, NewAuthError(
|
||||
ErrorCodeAuthenticationFailed,
|
||||
fmt.Sprintf("[%s] Step %d: An error occurred: %s", req.Caller, step, err.Error()),
|
||||
map[string]any{"error": err},
|
||||
)
|
||||
}
|
||||
|
||||
resJSON, _ := json.Marshal(res)
|
||||
log.Printf("[%s] Step %d: Auth request completed successfully. Response details: %s", req.Caller, step, string(resJSON))
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// UserBindTerminus main user binding function (ref: TypeScript version)
|
||||
func UserBindTerminus(mnemonic, bflUrl, vaultUrl, osPwd, terminusName, localName string) (string, error) {
|
||||
log.Printf("Starting userBindTerminus for user: %s", terminusName)
|
||||
|
||||
// 1. Initialize global storage
|
||||
if globalUserStore == nil {
|
||||
log.Printf("Initializing global stores...")
|
||||
err := InitializeGlobalStores(mnemonic, terminusName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to initialize global stores: %w", err)
|
||||
}
|
||||
log.Printf("Global stores initialized successfully")
|
||||
}
|
||||
|
||||
// 2. Initialize platform and App (if not already initialized)
|
||||
var app *App
|
||||
if platform == nil {
|
||||
log.Printf("Initializing platform...")
|
||||
|
||||
// Create App using vaultUrl as base URL
|
||||
app = NewAppWithBaseURL(vaultUrl)
|
||||
|
||||
// Create and set WebPlatform (no need to pass mnemonic, uses global storage)
|
||||
webPlatform := NewWebPlatform(app.API)
|
||||
SetPlatform(webPlatform)
|
||||
|
||||
log.Printf("Platform initialized successfully with base URL: %s", vaultUrl)
|
||||
} else {
|
||||
// If platform already initialized, create new App instance for signup
|
||||
app = NewAppWithBaseURL(vaultUrl)
|
||||
}
|
||||
|
||||
log.Printf("Using bflUrl: %s", bflUrl)
|
||||
|
||||
// 3. Call onFirstFactor to get token (ref: TypeScript implementation)
|
||||
token, err := OnFirstFactor(bflUrl, terminusName, localName, osPwd, false, false)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("onFirstFactor failed: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("First factor authentication successful, session_id: %s", token.SessionID)
|
||||
|
||||
// 4. Execute authentication - call _authenticate function from pkg/activate
|
||||
authRes, err := Authenticate(AuthenticateRequest{
|
||||
DID: localName,
|
||||
Type: AuthTypeSSI,
|
||||
Purpose: AuthPurposeSignup,
|
||||
AuthenticatorIndex: 0,
|
||||
Caller: "E001",
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("authentication failed: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("Authentication successful for DID: %s", authRes.DID)
|
||||
|
||||
// 5. Generate JWS - ref: BindTerminusBusiness.ts
|
||||
log.Printf("Creating JWS for signup...")
|
||||
|
||||
// Extract domain (ref: TypeScript implementation)
|
||||
domain := vaultUrl
|
||||
if strings.HasPrefix(domain, "http://") {
|
||||
domain = domain[7:]
|
||||
} else if strings.HasPrefix(domain, "https://") {
|
||||
domain = domain[8:]
|
||||
}
|
||||
|
||||
// Use globalUserStore to sign JWS (ref: userStore.signJWS in TypeScript)
|
||||
jws, err := globalUserStore.SignJWS(map[string]any{
|
||||
"name": terminusName,
|
||||
"did": globalUserStore.GetDid(),
|
||||
"domain": domain,
|
||||
"time": fmt.Sprintf("%d", time.Now().UnixMilli()),
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("JWS signing failed: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("JWS created successfully: %s...", jws[:50])
|
||||
|
||||
// 6. Execute signup (call real implementation in app.go)
|
||||
log.Printf("Executing signup...")
|
||||
|
||||
// Build SignupParams (ref: app.signup in BindTerminusBusiness.ts)
|
||||
signupParams := SignupParams{
|
||||
DID: authRes.DID,
|
||||
MasterPassword: mnemonic,
|
||||
Name: terminusName,
|
||||
AuthToken: authRes.Token,
|
||||
SessionID: token.SessionID,
|
||||
BFLToken: token.AccessToken,
|
||||
BFLUser: localName,
|
||||
JWS: jws,
|
||||
}
|
||||
|
||||
// Call real app.Signup function
|
||||
signupResponse, err := app.Signup(signupParams)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("signup failed: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("Signup successful! MFA: %s", signupResponse.MFA)
|
||||
|
||||
// Save MFA token to UserStore for next stage use
|
||||
err = globalUserStore.SetMFA(signupResponse.MFA)
|
||||
if err != nil {
|
||||
log.Printf("Warning: Failed to save MFA token: %v", err)
|
||||
// Don't return error as main process has succeeded
|
||||
} else {
|
||||
log.Printf("MFA token saved to UserStore for future use")
|
||||
}
|
||||
|
||||
log.Printf("User bind to Terminus completed successfully!")
|
||||
|
||||
return token.AccessToken, nil
|
||||
}
|
||||
180
cli/pkg/wizard/client.go
Normal file
180
cli/pkg/wizard/client.go
Normal file
@@ -0,0 +1,180 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Client implementation - based on original TypeScript Client class.
// It pairs persistent client state (session, device, account) with a
// transport used to deliver RPC requests to the server.
type Client struct {
	State  ClientState // session/device/account storage backing each call
	Sender Sender      // transport that actually delivers requests
}
|
||||
|
||||
func NewClient(state ClientState, sender Sender) *Client {
|
||||
return &Client{
|
||||
State: state,
|
||||
Sender: sender,
|
||||
}
|
||||
}
|
||||
|
||||
// Implement AppAPI interface
|
||||
func (c *Client) StartAuthRequest(params StartAuthRequestParams) (*StartAuthRequestResponse, error) {
|
||||
// Build request parameters
|
||||
requestParams := []interface{}{params}
|
||||
|
||||
// Send request
|
||||
response, err := c.call("startAuthRequest", requestParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var result StartAuthRequestResponse
|
||||
if err := c.parseResponse(response.Result, &result); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse StartAuthRequest response: %v", err)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (c *Client) CompleteAuthRequest(params CompleteAuthRequestParams) (*CompleteAuthRequestResponse, error) {
|
||||
// Build request parameters
|
||||
requestParams := []interface{}{params}
|
||||
|
||||
// Send request
|
||||
response, err := c.call("completeAuthRequest", requestParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var result CompleteAuthRequestResponse
|
||||
if err := c.parseResponse(response.Result, &result); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse CompleteAuthRequest response: %v", err)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// Generic RPC call method
|
||||
func (c *Client) call(method string, params []interface{}) (*Response, error) {
|
||||
session := c.State.GetSession()
|
||||
|
||||
// Build request
|
||||
req := &Request{
|
||||
Method: method,
|
||||
Params: params,
|
||||
Device: c.State.GetDevice(),
|
||||
}
|
||||
|
||||
// If session exists, add authentication info
|
||||
if session != nil {
|
||||
auth, err := c.authenticateRequest(req, session)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authenticate request: %v", err)
|
||||
}
|
||||
req.Auth = auth
|
||||
|
||||
// Temporary debug: print full request JSON
|
||||
if reqJSON, err := json.Marshal(req); err == nil {
|
||||
log.Printf("Full request JSON: %s", string(reqJSON))
|
||||
}
|
||||
}
|
||||
|
||||
// Send request
|
||||
response, err := c.Sender.Send(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to send request: %v", err)
|
||||
}
|
||||
|
||||
// Check response error
|
||||
if response.Error != nil {
|
||||
return nil, NewAuthError(
|
||||
ErrorCode(response.Error.Code),
|
||||
response.Error.Message,
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
// If session exists, verify response
|
||||
if session != nil {
|
||||
if err := c.verifyResponse(response, session); err != nil {
|
||||
return nil, fmt.Errorf("failed to verify response: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// authenticateRequest implements real HMAC session signing (ref: session.ts line 176-189)
|
||||
func (c *Client) authenticateRequest(req *Request, session *Session) (*RequestAuth, error) {
|
||||
if session.Key == nil {
|
||||
return nil, fmt.Errorf("session key is nil")
|
||||
}
|
||||
|
||||
// 1. Build timestamp
|
||||
now := time.Now()
|
||||
|
||||
// 2. Serialize request data (ref: session.ts line 158: data = req.params)
|
||||
data := req.Params // Use entire params array directly, not just first element
|
||||
|
||||
dataJSON, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal request data: %v", err)
|
||||
}
|
||||
|
||||
// 3. Build signature message (ref: session.ts line 179)
|
||||
// Format: ${session}_${time.toISOString()}_${marshal(data)}
|
||||
// Use same format as ISOTime.MarshalJSON to ensure consistency
|
||||
utcTime := now.UTC()
|
||||
timeStr := fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02d.%03dZ",
|
||||
utcTime.Year(), utcTime.Month(), utcTime.Day(),
|
||||
utcTime.Hour(), utcTime.Minute(), utcTime.Second(),
|
||||
utcTime.Nanosecond()/1000000)
|
||||
message := fmt.Sprintf("%s_%s_%s", session.ID, timeStr, string(dataJSON))
|
||||
|
||||
// 4. Use HMAC-SHA256 signing (ref: HMACParams in crypto.ts)
|
||||
mac := hmac.New(sha256.New, session.Key)
|
||||
mac.Write([]byte(message))
|
||||
signature := mac.Sum(nil)
|
||||
|
||||
log.Printf("Session signing: sessionId=%s, message_len=%d", session.ID, len(message))
|
||||
log.Printf("Signing message: %s", message)
|
||||
log.Printf("Data JSON: %s", string(dataJSON))
|
||||
log.Printf("Session key for signing: %x", session.Key)
|
||||
log.Printf("Signature (hex): %x", signature)
|
||||
|
||||
return &RequestAuth{
|
||||
Session: session.ID,
|
||||
Time: ISOTime(now), // Convert to ISOTime type
|
||||
Signature: Base64Bytes(signature), // Convert to Base64Bytes type
|
||||
}, nil
|
||||
}
|
||||
|
||||
// verifyResponse (simplified version).
// Placeholder: the server's response signature is NOT actually checked yet —
// a real implementation must verify the HMAC against the session key. As
// written it only logs and always succeeds.
func (c *Client) verifyResponse(response *Response, session *Session) error {
	// In actual implementation, response signature verification is needed here
	// Real implementation for response parsing
	log.Printf("Verifying response with session: %s", session.ID)
	return nil
}
|
||||
|
||||
// parseResponse helper method
|
||||
func (c *Client) parseResponse(result interface{}, target interface{}) error {
|
||||
// Convert result to JSON, then parse to target structure
|
||||
jsonData, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal result: %v", err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(jsonData, target); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal to target: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
203
cli/pkg/wizard/did_key_utils.go
Normal file
203
cli/pkg/wizard/did_key_utils.go
Normal file
@@ -0,0 +1,203 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"crypto/hmac"
|
||||
"crypto/sha512"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/web5/jwk"
|
||||
|
||||
"github.com/mr-tron/base58"
|
||||
"github.com/multiformats/go-varint"
|
||||
"github.com/tyler-smith/go-bip39"
|
||||
)
|
||||
|
||||
// ED25519_CODEC_ID is the multicodec code identifying an Ed25519 public key.
const ED25519_CODEC_ID = 0xed
|
||||
|
||||
// DIDKeyResult represents the result of DID key generation
|
||||
type DIDKeyResult struct {
|
||||
DID string `json:"did"`
|
||||
PublicJWK jwk.JWK `json:"publicJwk"`
|
||||
PrivateJWK jwk.JWK `json:"privateJwk"`
|
||||
}
|
||||
|
||||
// HDWalletGo is a pure-Go hierarchical-deterministic wallet modeled on the
// Trust Wallet Core implementation.
type HDWalletGo struct {
	seed       []byte // 64-byte BIP39 seed derived from the mnemonic
	mnemonic   string // original mnemonic phrase
	passphrase string // optional BIP39 passphrase
}
|
||||
|
||||
// HDNode is a BIP32-style hierarchical deterministic key node.
type HDNode struct {
	privateKey  []byte // 32-byte key seed
	publicKey   []byte // derived Ed25519 public key
	chainCode   []byte // 32-byte chain code for child derivation
	depth       uint8  // derivation depth (0 for the master node)
	childNum    uint32 // child index at this depth
	fingerprint uint32 // parent key fingerprint
}
|
||||
|
||||
// NewHDWalletFromMnemonic creates HD wallet from mnemonic (simulates Trust Wallet Core implementation)
|
||||
func NewHDWalletFromMnemonic(mnemonic, passphrase string) (*HDWalletGo, error) {
|
||||
// Validate mnemonic
|
||||
if !bip39.IsMnemonicValid(mnemonic) {
|
||||
return nil, fmt.Errorf("invalid mnemonic")
|
||||
}
|
||||
|
||||
// Generate seed (64 bytes) - using standard BIP39 implementation here
|
||||
seed := bip39.NewSeed(mnemonic, passphrase)
|
||||
|
||||
return &HDWalletGo{
|
||||
seed: seed,
|
||||
mnemonic: mnemonic,
|
||||
passphrase: passphrase,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getMasterNode generates master node from seed (simulates Trust Wallet Core's hdnode_from_seed)
|
||||
func (w *HDWalletGo) getMasterNode() (*HDNode, error) {
|
||||
// BIP32 master key generation
|
||||
// Use "ed25519 seed" as HMAC-SHA512 key
|
||||
h := hmac.New(sha512.New, []byte("ed25519 seed"))
|
||||
h.Write(w.seed)
|
||||
hash := h.Sum(nil)
|
||||
|
||||
// First 32 bytes as private key, last 32 bytes as chain code
|
||||
privateKey := hash[:32]
|
||||
chainCode := hash[32:]
|
||||
|
||||
// Generate public key
|
||||
publicKey := ed25519.NewKeyFromSeed(privateKey).Public().(ed25519.PublicKey)
|
||||
|
||||
return &HDNode{
|
||||
privateKey: privateKey,
|
||||
publicKey: publicKey,
|
||||
chainCode: chainCode,
|
||||
depth: 0,
|
||||
childNum: 0,
|
||||
fingerprint: 0,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetMasterKeyEd25519 gets Ed25519 master key (simulates Trust Wallet Core's getMasterKey)
|
||||
func (w *HDWalletGo) GetMasterKeyEd25519() (ed25519.PrivateKey, ed25519.PublicKey, error) {
|
||||
node, err := w.getMasterNode()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Generate complete Ed25519 private key from private key seed
|
||||
privateKey := ed25519.NewKeyFromSeed(node.privateKey)
|
||||
publicKey := privateKey.Public().(ed25519.PublicKey)
|
||||
|
||||
return privateKey, publicKey, nil
|
||||
}
|
||||
|
||||
// GetPrivateJWKTrustWalletCore generates private JWK using Trust Wallet Core compatible method
|
||||
func (w *HDWalletGo) GetPrivateJWKTrustWalletCore() (*DIDKeyResult, error) {
|
||||
// Get Ed25519 key pair
|
||||
privateKey, publicKey, err := w.GetMasterKeyEd25519()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get master key: %w", err)
|
||||
}
|
||||
|
||||
// Create DID
|
||||
did, err := createDIDFromPublicKey(publicKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create DID: %w", err)
|
||||
}
|
||||
|
||||
// Create key ID
|
||||
keyId := fmt.Sprintf("%s#z%s", did, base58.Encode(append(varint.ToUvarint(ED25519_CODEC_ID), publicKey...)))
|
||||
|
||||
// Create public JWK
|
||||
publicJWK := jwk.JWK{
|
||||
ALG: "EdDSA",
|
||||
CRV: "Ed25519",
|
||||
KID: keyId,
|
||||
KTY: "OKP",
|
||||
USE: "sig",
|
||||
X: base64.RawURLEncoding.EncodeToString(publicKey),
|
||||
}
|
||||
|
||||
// Create private JWK
|
||||
privateJWK := jwk.JWK{
|
||||
ALG: "EdDSA",
|
||||
CRV: "Ed25519",
|
||||
KID: keyId,
|
||||
KTY: "OKP",
|
||||
USE: "sig",
|
||||
X: base64.RawURLEncoding.EncodeToString(publicKey),
|
||||
D: base64.RawURLEncoding.EncodeToString(privateKey), // Use complete 64-byte private key
|
||||
}
|
||||
|
||||
return &DIDKeyResult{
|
||||
DID: did,
|
||||
PublicJWK: publicJWK,
|
||||
PrivateJWK: privateJWK,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// createDIDFromPublicKey creates a DID:key from Ed25519 public key
|
||||
func createDIDFromPublicKey(publicKey ed25519.PublicKey) (string, error) {
|
||||
// Create multicodec identifier for Ed25519
|
||||
codecBytes := varint.ToUvarint(ED25519_CODEC_ID)
|
||||
|
||||
// Combine codec + public key
|
||||
idBytes := make([]byte, len(codecBytes)+len(publicKey))
|
||||
copy(idBytes, codecBytes)
|
||||
copy(idBytes[len(codecBytes):], publicKey)
|
||||
|
||||
// Encode with base58btc
|
||||
id := base58.Encode(idBytes)
|
||||
|
||||
return fmt.Sprintf("did:key:z%s", id), nil
|
||||
}
|
||||
|
||||
// GetPrivateJWK convenience function: generate private JWK from mnemonic
|
||||
func GetPrivateJWK(mnemonic string) (*DIDKeyResult, error) {
|
||||
wallet, err := NewHDWalletFromMnemonic(mnemonic, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create HD wallet: %w", err)
|
||||
}
|
||||
|
||||
return wallet.GetPrivateJWKTrustWalletCore()
|
||||
}
|
||||
|
||||
// GetDID convenience function: generate DID from mnemonic
|
||||
func GetDID(mnemonic string) (string, error) {
|
||||
result, err := GetPrivateJWK(mnemonic)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result.DID, nil
|
||||
}
|
||||
|
||||
// GetPublicJWK convenience function: generate public JWK from mnemonic
|
||||
func GetPublicJWK(mnemonic string) (*jwk.JWK, error) {
|
||||
result, err := GetPrivateJWK(mnemonic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result.PublicJWK, nil
|
||||
}
|
||||
|
||||
// GenerateMnemonic generates new BIP39 mnemonic
|
||||
func GenerateMnemonic() string {
|
||||
// Generate 128 bits of entropy for 12-word mnemonic
|
||||
entropy, err := bip39.NewEntropy(128)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to generate entropy for mnemonic: %v", err))
|
||||
}
|
||||
|
||||
mnemonic, err := bip39.NewMnemonic(entropy)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to generate mnemonic from entropy: %v", err))
|
||||
}
|
||||
|
||||
return mnemonic
|
||||
}
|
||||
22
cli/pkg/wizard/errors.go
Normal file
22
cli/pkg/wizard/errors.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package wizard
|
||||
|
||||
import "fmt"
|
||||
|
||||
// AuthError represents authentication errors
|
||||
type AuthError struct {
|
||||
Code ErrorCode `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data any `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (e *AuthError) Error() string {
|
||||
return fmt.Sprintf("[%s] %s", e.Code, e.Message)
|
||||
}
|
||||
|
||||
func NewAuthError(code ErrorCode, message string, data any) *AuthError {
|
||||
return &AuthError{
|
||||
Code: code,
|
||||
Message: message,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
76
cli/pkg/wizard/http_sender.go
Normal file
76
cli/pkg/wizard/http_sender.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HTTPSender is a Sender implementation that delivers requests over HTTP.
type HTTPSender struct {
	BaseURL string       // service root, e.g. "https://host"
	Client  *http.Client // shared client carrying the request timeout
}
|
||||
|
||||
// NewHTTPSender creates new HTTP Sender
|
||||
func NewHTTPSender(baseURL string) *HTTPSender {
|
||||
return &HTTPSender{
|
||||
BaseURL: baseURL,
|
||||
Client: &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Send implements Sender interface, sends HTTP request
|
||||
func (h *HTTPSender) Send(req *Request) (*Response, error) {
|
||||
// Build request URL
|
||||
url := fmt.Sprintf("%s/api/rpc", h.BaseURL)
|
||||
|
||||
// Serialize request
|
||||
reqBody, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal request: %w", err)
|
||||
}
|
||||
|
||||
// Create HTTP request
|
||||
httpReq, err := http.NewRequest("POST", url, bytes.NewReader(reqBody))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create HTTP request: %w", err)
|
||||
}
|
||||
|
||||
// Set request headers
|
||||
httpReq.Header.Set("Content-Type", "application/json")
|
||||
httpReq.Header.Set("Accept", "application/json")
|
||||
|
||||
// Authentication info already included in JSON request body's auth field, no extra HTTP headers needed
|
||||
|
||||
// Send request
|
||||
httpResp, err := h.Client.Do(httpReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("HTTP request failed: %w", err)
|
||||
}
|
||||
defer httpResp.Body.Close()
|
||||
|
||||
// Read response
|
||||
respBody, err := io.ReadAll(httpResp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response body: %w", err)
|
||||
}
|
||||
|
||||
// Check HTTP status code
|
||||
if httpResp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("HTTP error %d: %s", httpResp.StatusCode, string(respBody))
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var response Response
|
||||
if err := json.Unmarshal(respBody, &response); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal response: %w", err)
|
||||
}
|
||||
|
||||
return &response, nil
|
||||
}
|
||||
267
cli/pkg/wizard/login_terminus.go
Normal file
267
cli/pkg/wizard/login_terminus.go
Normal file
@@ -0,0 +1,267 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"encoding/base32"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LoginTerminus implements Terminus login functionality (ref: BindTerminusBusiness.ts loginTerminus)
|
||||
func LoginTerminus(bflUrl, terminusName, localName, password string, needTwoFactor bool) (*Token, error) {
|
||||
log.Printf("Starting loginTerminus for user: %s", terminusName)
|
||||
|
||||
// 1. Call onFirstFactor to get initial token (ref: loginTerminus line 364-372)
|
||||
token, err := OnFirstFactor(bflUrl, terminusName, localName, password, true, needTwoFactor)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("first factor authentication failed: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("First factor completed, session_id: %s, FA2 required: %t", token.SessionID, token.FA2 || needTwoFactor)
|
||||
|
||||
// 2. If second factor authentication is required (ref: loginTerminus line 379-446)
|
||||
if token.FA2 || needTwoFactor {
|
||||
log.Printf("Second factor authentication required")
|
||||
|
||||
// Get TOTP value
|
||||
totpValue, err := getTOTPFromMFA()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get TOTP: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("Generated TOTP: %s", totpValue)
|
||||
|
||||
// Perform second factor authentication
|
||||
secondToken, err := performSecondFactor(bflUrl, terminusName, totpValue)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("second factor authentication failed: %v", err)
|
||||
}
|
||||
|
||||
// Update token information
|
||||
token.AccessToken = secondToken.AccessToken
|
||||
token.RefreshToken = secondToken.RefreshToken
|
||||
token.SessionID = secondToken.SessionID
|
||||
|
||||
log.Printf("Second factor completed, updated session_id: %s", token.SessionID)
|
||||
}
|
||||
|
||||
log.Printf("LoginTerminus completed successfully")
|
||||
return token, nil
|
||||
}
|
||||
|
||||
// getTOTPFromMFA generates TOTP from stored MFA (ref: loginTerminus line 380-403)
|
||||
func getTOTPFromMFA() (string, error) {
|
||||
// Get MFA token from global storage
|
||||
mfa, err := globalUserStore.GetMFA()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("MFA token not found: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("Using MFA token for TOTP generation: %s", mfa)
|
||||
|
||||
// Generate TOTP (ref: TypeScript hotp function)
|
||||
currentTime := time.Now().Unix()
|
||||
interval := int64(30) // 30 second interval
|
||||
counter := currentTime / interval
|
||||
|
||||
totp, err := generateHOTP(mfa, counter)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate TOTP: %v", err)
|
||||
}
|
||||
|
||||
return totp, nil
|
||||
}
|
||||
|
||||
// generateHOTP computes an RFC 4226 HOTP code (6 digits) from a base32
// secret and a counter value (ref: TypeScript hotp function).
func generateHOTP(secret string, counter int64) (string, error) {
	// Normalize the secret: strip spaces, uppercase, and pad with '=' to a
	// multiple of 8 characters as required by the standard base32 decoder.
	s := strings.ToUpper(strings.ReplaceAll(secret, " ", ""))
	if rem := len(s) % 8; rem != 0 {
		s += strings.Repeat("=", 8-rem)
	}
	key, err := base32.StdEncoding.DecodeString(s)
	if err != nil {
		return "", fmt.Errorf("failed to decode base32 secret: %v", err)
	}

	// Counter as an 8-byte big-endian message.
	var msg [8]byte
	for i, c := 7, counter; i >= 0; i, c = i-1, c>>8 {
		msg[i] = byte(c)
	}

	// HMAC-SHA1 per RFC 4226 (consistent with the TypeScript version).
	mac := hmac.New(sha1.New, key)
	mac.Write(msg[:])
	digest := mac.Sum(nil)

	// Dynamic truncation: 31-bit value at the offset given by the low
	// nibble of the final digest byte (consistent with TS getToken).
	off := digest[len(digest)-1] & 0x0f
	val := (int(digest[off])&0x7f)<<24 |
		int(digest[off+1])<<16 |
		int(digest[off+2])<<8 |
		int(digest[off+3])

	// Reduce to 6 decimal digits, zero-padded.
	return fmt.Sprintf("%06d", val%int(math.Pow10(6))), nil
}
|
||||
|
||||
// performSecondFactor performs second factor authentication (ref: loginTerminus line 419-446)
|
||||
func performSecondFactor(baseURL, terminusName, totpValue string) (*Token, error) {
|
||||
log.Printf("Performing second factor authentication")
|
||||
|
||||
// Build target URL
|
||||
targetURL := fmt.Sprintf("https://desktop.%s/", strings.ReplaceAll(terminusName, "@", "."))
|
||||
|
||||
// Build request data
|
||||
reqData := map[string]interface{}{
|
||||
"targetUrl": targetURL,
|
||||
"token": totpValue,
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(reqData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal request: %v", err)
|
||||
}
|
||||
|
||||
// Send HTTP request
|
||||
client := &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/api/secondfactor/totp", baseURL)
|
||||
req, err := http.NewRequest("POST", url, strings.NewReader(string(jsonData)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Access-Control-Allow-Origin", "*")
|
||||
req.Header.Set("X-Unauth-Error", "Non-Redirect")
|
||||
|
||||
log.Printf("Sending second factor request to: %s", url)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var response struct {
|
||||
Status string `json:"status"`
|
||||
Data Token `json:"data"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &response); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal response: %v", err)
|
||||
}
|
||||
|
||||
if response.Status != "OK" {
|
||||
return nil, fmt.Errorf("second factor authentication failed: %s", response.Status)
|
||||
}
|
||||
|
||||
log.Printf("Second factor authentication successful")
|
||||
return &response.Data, nil
|
||||
}
|
||||
|
||||
// ResetPassword implements password reset functionality (ref: account.ts reset_password)
|
||||
func ResetPassword(baseURL, localName, currentPassword, newPassword, accessToken string) error {
|
||||
log.Printf("Starting reset password for user: %s", localName)
|
||||
|
||||
// Process passwords (salted MD5)
|
||||
processedCurrentPassword := passwordAddSort(currentPassword)
|
||||
processedNewPassword := passwordAddSort(newPassword)
|
||||
|
||||
// Build request data (ref: account.ts line 138-141)
|
||||
reqData := map[string]interface{}{
|
||||
"current_password": processedCurrentPassword,
|
||||
"password": processedNewPassword,
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(reqData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal request: %v", err)
|
||||
}
|
||||
|
||||
// Create HTTP client (ref: account.ts line 128-135)
|
||||
client := &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
// Build request URL (ref: account.ts line 136-137)
|
||||
url := fmt.Sprintf("%s/bfl/iam/v1alpha1/users/%s/password", baseURL, localName)
|
||||
req, err := http.NewRequest("PUT", url, strings.NewReader(string(jsonData)))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
// Set request headers (ref: account.ts line 131-134)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("X-Authorization", accessToken)
|
||||
|
||||
log.Printf("Sending reset password request to: %s", url)
|
||||
|
||||
// Send request
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Read response
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read response: %v", err)
|
||||
}
|
||||
|
||||
// Check HTTP status code (ref: account.ts line 144-146)
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var response struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &response); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal response: %v", err)
|
||||
}
|
||||
|
||||
// Check response status (ref: account.ts line 148-155)
|
||||
if response.Code != 0 {
|
||||
if response.Message != "" {
|
||||
return fmt.Errorf("password reset failed: %s", response.Message)
|
||||
}
|
||||
return fmt.Errorf("password reset failed: network error")
|
||||
}
|
||||
|
||||
log.Printf("Password reset completed successfully")
|
||||
return nil
|
||||
}
|
||||
138
cli/pkg/wizard/platform.go
Normal file
138
cli/pkg/wizard/platform.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// WebPlatform implementation - based on original TypeScript WebPlatform
|
||||
type WebPlatform struct {
|
||||
SupportedAuthTypes []AuthType
|
||||
App AppAPI // App interface, currently only interface definition
|
||||
Mnemonic string // Mnemonic for real JWS signing
|
||||
DID string // DID for user identification
|
||||
}
|
||||
|
||||
func NewWebPlatform(app AppAPI) *WebPlatform {
|
||||
return &WebPlatform{
|
||||
SupportedAuthTypes: []AuthType{AuthTypeSSI},
|
||||
App: app,
|
||||
}
|
||||
}
|
||||
|
||||
// NewWebPlatformWithMnemonic creates WebPlatform with mnemonic
|
||||
func NewWebPlatformWithMnemonic(app AppAPI, mnemonic, did string) *WebPlatform {
|
||||
return &WebPlatform{
|
||||
SupportedAuthTypes: []AuthType{AuthTypeSSI},
|
||||
App: app,
|
||||
Mnemonic: mnemonic,
|
||||
DID: did,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *WebPlatform) getAuthClient(authType AuthType) (AuthClient, error) {
|
||||
// Only support SSI authentication type
|
||||
if authType != AuthTypeSSI {
|
||||
return nil, fmt.Errorf("authentication type not supported: %s", authType)
|
||||
}
|
||||
|
||||
// Check if global storage is initialized
|
||||
if globalUserStore == nil {
|
||||
return nil, fmt.Errorf("global stores not initialized, call InitializeGlobalStores first")
|
||||
}
|
||||
|
||||
// Use global storage
|
||||
return &SSIAuthClient{
|
||||
UserStore: globalUserStore,
|
||||
// JWSSigner removed as UserStore.SignJWS() is actually used
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *WebPlatform) prepareCompleteAuthRequest(req *StartAuthRequestResponse) (map[string]any, error) {
|
||||
client, err := p.getAuthClient(req.Type)
|
||||
if err != nil {
|
||||
return nil, NewAuthError(ErrorCodeAuthenticationFailed, "Authentication type not supported!", err)
|
||||
}
|
||||
|
||||
return client.PrepareAuthentication(req.Data)
|
||||
}
|
||||
|
||||
func (p *WebPlatform) StartAuthRequest(opts StartAuthRequestOptions) (*StartAuthRequestResponse, error) {
|
||||
params := StartAuthRequestParams{
|
||||
DID: *opts.DID,
|
||||
Type: opts.Type,
|
||||
SupportedTypes: p.SupportedAuthTypes,
|
||||
Purpose: opts.Purpose,
|
||||
AuthenticatorID: opts.AuthenticatorID,
|
||||
AuthenticatorIndex: opts.AuthenticatorIndex,
|
||||
}
|
||||
|
||||
return p.App.StartAuthRequest(params)
|
||||
}
|
||||
|
||||
func (p *WebPlatform) CompleteAuthRequest(req *StartAuthRequestResponse) (*AuthenticateResponse, error) {
|
||||
// If request already verified, return directly
|
||||
if req.RequestStatus == AuthRequestStatusVerified {
|
||||
return &AuthenticateResponse{
|
||||
DID: req.DID,
|
||||
Token: req.Token,
|
||||
DeviceTrusted: req.DeviceTrusted,
|
||||
AccountStatus: *req.AccountStatus,
|
||||
Provisioning: *req.Provisioning,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Prepare authentication data
|
||||
data, err := p.prepareCompleteAuthRequest(req)
|
||||
if err != nil {
|
||||
return nil, NewAuthError(ErrorCodeAuthenticationFailed, "The request was canceled.", err)
|
||||
}
|
||||
|
||||
if data == nil {
|
||||
return nil, NewAuthError(ErrorCodeAuthenticationFailed, "The request was canceled.", nil)
|
||||
}
|
||||
|
||||
// Only support SSI authentication type, no need to handle other types
|
||||
|
||||
// Call App API to complete authentication
|
||||
params := CompleteAuthRequestParams{
|
||||
ID: req.ID,
|
||||
Data: data,
|
||||
DID: req.DID,
|
||||
}
|
||||
|
||||
response, err := p.App.CompleteAuthRequest(params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &AuthenticateResponse{
|
||||
DID: req.DID,
|
||||
Token: req.Token,
|
||||
DeviceTrusted: response.DeviceTrusted,
|
||||
AccountStatus: response.AccountStatus,
|
||||
Provisioning: response.Provisioning,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Global variables
|
||||
var platform Platform
|
||||
var globalUserStore *UserStore
|
||||
// globalJWSSigner removed as UserStore.SignJWS() is actually used
|
||||
|
||||
func SetPlatform(p Platform) {
|
||||
platform = p
|
||||
}
|
||||
|
||||
// InitializeGlobalStores initializes global storage
|
||||
func InitializeGlobalStores(mnemonic, terminusName string) error {
|
||||
// Create UserStore (contains all necessary JWS signing functionality)
|
||||
userStore, err := NewUserStore(mnemonic, terminusName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create UserStore: %w", err)
|
||||
}
|
||||
|
||||
// Set global variables
|
||||
globalUserStore = userStore
|
||||
|
||||
return nil
|
||||
}
|
||||
481
cli/pkg/wizard/srp.go
Normal file
481
cli/pkg/wizard/srp.go
Normal file
@@ -0,0 +1,481 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SRPGroupLength identifies the bit size of the SRP prime-field group.
type SRPGroupLength int

// Supported group sizes.
const (
	SRPGroup3072 SRPGroupLength = 3072
	SRPGroup4096 SRPGroupLength = 4096
	SRPGroup6144 SRPGroupLength = 6144
	SRPGroup8192 SRPGroupLength = 8192
)
|
||||
|
||||
// SRPParams represents SRP parameters
|
||||
type SRPParams struct {
|
||||
Length SRPGroupLength
|
||||
Hash string // "SHA-256"
|
||||
G *big.Int
|
||||
N *big.Int
|
||||
}
|
||||
|
||||
// SRPSession carries the evolving state of one SRP handshake; it is shared
// by the client and server roles, each populating its own fields.
type SRPSession struct {
	ID             string    `json:"id"`
	Created        time.Time `json:"created"`
	FailedAttempts int       `json:"failedAttempts"`
	AsAdmin        bool      `json:"asAdmin"`
	X              *big.Int  `json:"x,omitempty"`  // client secret derived from credentials
	V              *big.Int  `json:"v,omitempty"`  // verifier
	A              *big.Int  `json:"a,omitempty"`  // client ephemeral secret
	BigA           *big.Int  `json:"A,omitempty"`  // client public value
	B              *big.Int  `json:"b,omitempty"`  // server ephemeral secret
	BigB           *big.Int  `json:"B,omitempty"`  // server public value
	K              *big.Int  `json:"K,omitempty"`  // shared session key
	M1             *big.Int  `json:"M1,omitempty"` // client proof
	M2             *big.Int  `json:"M2,omitempty"` // server proof
}
|
||||
|
||||
func NewSRPSession() *SRPSession {
|
||||
return &SRPSession{
|
||||
ID: generateUUID(),
|
||||
Created: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
// SRPClient represents SRP client
|
||||
type SRPClient struct {
|
||||
session *SRPSession
|
||||
core *SRPCore
|
||||
}
|
||||
|
||||
func NewSRPClient(length SRPGroupLength) *SRPClient {
|
||||
return &SRPClient{
|
||||
session: NewSRPSession(),
|
||||
core: NewSRPCore(length),
|
||||
}
|
||||
}
|
||||
|
||||
// Getter methods
|
||||
func (c *SRPClient) GetV() []byte {
|
||||
if c.session.V == nil {
|
||||
return nil
|
||||
}
|
||||
return c.session.V.Bytes()
|
||||
}
|
||||
|
||||
func (c *SRPClient) GetA() []byte {
|
||||
if c.session.BigA == nil {
|
||||
return nil
|
||||
}
|
||||
return c.session.BigA.Bytes()
|
||||
}
|
||||
|
||||
func (c *SRPClient) GetK() []byte {
|
||||
if c.session.K == nil {
|
||||
return nil
|
||||
}
|
||||
return c.session.K.Bytes()
|
||||
}
|
||||
|
||||
func (c *SRPClient) GetM1() []byte {
|
||||
if c.session.M1 == nil {
|
||||
return nil
|
||||
}
|
||||
return c.session.M1.Bytes()
|
||||
}
|
||||
|
||||
func (c *SRPClient) GetM2() []byte {
|
||||
if c.session.M2 == nil {
|
||||
return nil
|
||||
}
|
||||
return c.session.M2.Bytes()
|
||||
}
|
||||
|
||||
// Initialize initializes SRP client
|
||||
func (c *SRPClient) Initialize(x []byte) error {
|
||||
c.session.X = new(big.Int).SetBytes(x)
|
||||
c.session.V = c.core.V(c.session.X)
|
||||
|
||||
// Generate random a
|
||||
a, err := generateRandomBigInt(32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate random a: %v", err)
|
||||
}
|
||||
c.session.A = a
|
||||
c.session.BigA = c.core.A(a)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetB sets server's B value
|
||||
func (c *SRPClient) SetB(B []byte) error {
|
||||
if c.session.X == nil || c.session.A == nil || c.session.BigA == nil {
|
||||
return fmt.Errorf("client not initialized")
|
||||
}
|
||||
|
||||
c.session.BigB = new(big.Int).SetBytes(B)
|
||||
|
||||
// Calculate K, M1, M2
|
||||
var err error
|
||||
c.session.K, err = c.getKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate key: %v", err)
|
||||
}
|
||||
|
||||
c.session.M1, err = c.core.M1(c.session.BigA, c.session.BigB, c.session.K)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate M1: %v", err)
|
||||
}
|
||||
|
||||
c.session.M2, err = c.core.M2(c.session.BigA, c.session.M1, c.session.K)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate M2: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *SRPClient) getKey() (*big.Int, error) {
|
||||
if c.session.X == nil || c.session.A == nil || c.session.BigA == nil || c.session.BigB == nil {
|
||||
return nil, fmt.Errorf("client not initialized")
|
||||
}
|
||||
|
||||
if c.core.IsZeroWhenModN(c.session.BigB) {
|
||||
return nil, fmt.Errorf("invalid B value")
|
||||
}
|
||||
|
||||
u, err := c.core.U(c.session.BigA, c.session.BigB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to calculate u: %v", err)
|
||||
}
|
||||
|
||||
S, err := c.core.ClientS(c.session.BigB, c.session.X, c.session.A, u)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to calculate S: %v", err)
|
||||
}
|
||||
|
||||
return c.core.K(S)
|
||||
}
|
||||
|
||||
// SRPServer represents SRP server
|
||||
type SRPServer struct {
|
||||
session *SRPSession
|
||||
core *SRPCore
|
||||
}
|
||||
|
||||
func NewSRPServer(length SRPGroupLength) *SRPServer {
|
||||
return &SRPServer{
|
||||
session: NewSRPSession(),
|
||||
core: NewSRPCore(length),
|
||||
}
|
||||
}
|
||||
|
||||
// Getter methods
|
||||
func (s *SRPServer) GetB() []byte {
|
||||
if s.session.BigB == nil {
|
||||
return nil
|
||||
}
|
||||
return s.session.BigB.Bytes()
|
||||
}
|
||||
|
||||
func (s *SRPServer) GetK() []byte {
|
||||
if s.session.K == nil {
|
||||
return nil
|
||||
}
|
||||
return s.session.K.Bytes()
|
||||
}
|
||||
|
||||
func (s *SRPServer) GetM1() []byte {
|
||||
if s.session.M1 == nil {
|
||||
return nil
|
||||
}
|
||||
return s.session.M1.Bytes()
|
||||
}
|
||||
|
||||
func (s *SRPServer) GetM2() []byte {
|
||||
if s.session.M2 == nil {
|
||||
return nil
|
||||
}
|
||||
return s.session.M2.Bytes()
|
||||
}
|
||||
|
||||
// Initialize initializes SRP server
|
||||
func (s *SRPServer) Initialize(v []byte) error {
|
||||
s.session.V = new(big.Int).SetBytes(v)
|
||||
|
||||
// Generate random b
|
||||
b, err := generateRandomBigInt(32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate random b: %v", err)
|
||||
}
|
||||
s.session.B = b
|
||||
|
||||
s.session.BigB, err = s.core.B(s.session.V, b)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate B: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetA sets client's A value
|
||||
func (s *SRPServer) SetA(A []byte) error {
|
||||
if s.session.V == nil || s.session.B == nil || s.session.BigB == nil {
|
||||
return fmt.Errorf("server not initialized")
|
||||
}
|
||||
|
||||
s.session.BigA = new(big.Int).SetBytes(A)
|
||||
|
||||
// Calculate K, M1, M2
|
||||
var err error
|
||||
s.session.K, err = s.getKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate key: %v", err)
|
||||
}
|
||||
|
||||
s.session.M1, err = s.core.M1(s.session.BigA, s.session.BigB, s.session.K)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate M1: %v", err)
|
||||
}
|
||||
|
||||
s.session.M2, err = s.core.M2(s.session.BigA, s.session.M1, s.session.K)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate M2: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SRPServer) getKey() (*big.Int, error) {
|
||||
if s.session.V == nil || s.session.B == nil || s.session.BigB == nil || s.session.BigA == nil {
|
||||
return nil, fmt.Errorf("server not initialized")
|
||||
}
|
||||
|
||||
if s.core.IsZeroWhenModN(s.session.BigA) {
|
||||
return nil, fmt.Errorf("invalid A value")
|
||||
}
|
||||
|
||||
u, err := s.core.U(s.session.BigA, s.session.BigB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to calculate u: %v", err)
|
||||
}
|
||||
|
||||
S := s.core.ServerS(s.session.BigA, s.session.V, u, s.session.B)
|
||||
return s.core.K(S)
|
||||
}
|
||||
|
||||
// SRPCore implements core SRP algorithms
//
// It bundles the group parameters (N, g, hash name) used by every primitive
// (A/B/S/K/M1/M2 calculations) and holds no per-session state, so one core
// can be shared by many sessions of the same group length.
type SRPCore struct {
	params *SRPParams
}
||||
|
||||
func NewSRPCore(length SRPGroupLength) *SRPCore {
|
||||
params, err := getSRPParams(length)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Invalid group length: %v", err))
|
||||
}
|
||||
|
||||
return &SRPCore{
|
||||
params: params,
|
||||
}
|
||||
}
|
||||
|
||||
// H hash function (...inp) - ref: TypeScript srp.ts line 384-386
|
||||
func (c *SRPCore) H(inputs ...*big.Int) (*big.Int, error) {
|
||||
hasher := sha256.New()
|
||||
|
||||
for _, input := range inputs {
|
||||
// Ensure byte array length is correct (ref: TypeScript i2b function line 133-138)
|
||||
bytes := c.bigIntToBytes(input)
|
||||
hasher.Write(bytes)
|
||||
}
|
||||
|
||||
hash := hasher.Sum(nil)
|
||||
return new(big.Int).SetBytes(hash), nil
|
||||
}
|
||||
|
||||
// bigIntToBytes converts big integer to byte array, ensuring even length (ref: TypeScript i2b function)
|
||||
func (c *SRPCore) bigIntToBytes(i *big.Int) []byte {
|
||||
hex := i.Text(16) // Convert to hexadecimal string
|
||||
|
||||
// Ensure hexadecimal string has even length (ref: TypeScript srp.ts line 135-136)
|
||||
if len(hex)%2 == 1 {
|
||||
hex = "0" + hex
|
||||
}
|
||||
|
||||
// Convert hexadecimal string to byte array
|
||||
bytes := make([]byte, len(hex)/2)
|
||||
for i := 0; i < len(hex); i += 2 {
|
||||
var b byte
|
||||
fmt.Sscanf(hex[i:i+2], "%02x", &b)
|
||||
bytes[i/2] = b
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
// V calculates verifier v = g^x % N
|
||||
func (c *SRPCore) V(x *big.Int) *big.Int {
|
||||
return new(big.Int).Exp(c.params.G, x, c.params.N)
|
||||
}
|
||||
|
||||
// A calculates A = g^a % N
|
||||
func (c *SRPCore) A(a *big.Int) *big.Int {
|
||||
return new(big.Int).Exp(c.params.G, a, c.params.N)
|
||||
}
|
||||
|
||||
// B calculates B = (k*v + g^b % N) % N
|
||||
func (c *SRPCore) B(v, b *big.Int) (*big.Int, error) {
|
||||
k, err := c.K_multiplier()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// k * v
|
||||
kv := new(big.Int).Mul(k, v)
|
||||
|
||||
// g^b % N
|
||||
gb := new(big.Int).Exp(c.params.G, b, c.params.N)
|
||||
|
||||
// (k*v + g^b) % N
|
||||
return new(big.Int).Add(kv, gb).Mod(new(big.Int).Add(kv, gb), c.params.N), nil
|
||||
}
|
||||
|
||||
// U calculates u = H(A | B)
|
||||
func (c *SRPCore) U(A, B *big.Int) (*big.Int, error) {
|
||||
return c.H(A, B)
|
||||
}
|
||||
|
||||
// ClientS calculates S = (B - k*(g^x % N))^(a + u*x) % N
|
||||
func (c *SRPCore) ClientS(B, x, a, u *big.Int) (*big.Int, error) {
|
||||
k, err := c.K_multiplier()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// g^x % N
|
||||
gx := new(big.Int).Exp(c.params.G, x, c.params.N)
|
||||
|
||||
// k * (g^x % N)
|
||||
kgx := new(big.Int).Mul(k, gx)
|
||||
|
||||
// B - k*(g^x % N)
|
||||
base := new(big.Int).Sub(B, kgx)
|
||||
|
||||
// a + u*x
|
||||
exp := new(big.Int).Add(a, new(big.Int).Mul(u, x))
|
||||
|
||||
// (B - k*(g^x % N))^(a + u*x) % N
|
||||
return new(big.Int).Exp(base, exp, c.params.N), nil
|
||||
}
|
||||
|
||||
// ServerS calculates S = (A * v^u % N)^b % N
|
||||
func (c *SRPCore) ServerS(A, v, u, b *big.Int) *big.Int {
|
||||
// v^u % N
|
||||
vu := new(big.Int).Exp(v, u, c.params.N)
|
||||
|
||||
// A * v^u % N
|
||||
base := new(big.Int).Mul(A, vu).Mod(new(big.Int).Mul(A, vu), c.params.N)
|
||||
|
||||
// (A * v^u % N)^b % N
|
||||
return new(big.Int).Exp(base, b, c.params.N)
|
||||
}
|
||||
|
||||
// K_multiplier calculates multiplier k = H(N | g)
|
||||
func (c *SRPCore) K_multiplier() (*big.Int, error) {
|
||||
return c.H(c.params.N, c.params.G)
|
||||
}
|
||||
|
||||
// K calculates shared key K = H(S)
|
||||
func (c *SRPCore) K(S *big.Int) (*big.Int, error) {
|
||||
return c.H(S)
|
||||
}
|
||||
|
||||
// M1 calculates first verification value M1 = H(A | B | K)
|
||||
func (c *SRPCore) M1(A, B, K *big.Int) (*big.Int, error) {
|
||||
return c.H(A, B, K)
|
||||
}
|
||||
|
||||
// M2 calculates second verification value M2 = H(A | M1 | K)
|
||||
func (c *SRPCore) M2(A, M1, K *big.Int) (*big.Int, error) {
|
||||
return c.H(A, M1, K)
|
||||
}
|
||||
|
||||
// IsZeroWhenModN checks if value is zero mod N
|
||||
func (c *SRPCore) IsZeroWhenModN(n *big.Int) bool {
|
||||
return new(big.Int).Mod(n, c.params.N).Cmp(big.NewInt(0)) == 0
|
||||
}
|
||||
|
||||
// getSRPParams gets SRP parameters
//
// Returns the parameter set (modulus N, generator g=5, SHA-256) for the
// requested group length, or an error for unsupported lengths. The N values
// are the 3072- and 4096-bit MODP group primes, written with spaces that
// hexToBigInt strips before parsing.
// NOTE(review): g=5 for both groups matches the TypeScript reference this
// file mirrors; confirm against the peer implementation if interop breaks.
func getSRPParams(length SRPGroupLength) (*SRPParams, error) {
	switch length {
	case SRPGroup3072:
		return &SRPParams{
			Length: 3072,
			Hash:   "SHA-256",
			G:      big.NewInt(5),
			N:      hexToBigInt("FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF"),
		}, nil

	case SRPGroup4096:
		return &SRPParams{
			Length: 4096,
			Hash:   "SHA-256",
			G:      big.NewInt(5),
			N:      hexToBigInt("FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199 FFFFFFFF FFFFFFFF"),
		}, nil

	default:
		return nil, fmt.Errorf("unsupported group length: %d", length)
	}
}
||||
|
||||
// hexToBigInt helper function: converts hexadecimal string to big integer
|
||||
func hexToBigInt(hex string) *big.Int {
|
||||
// Remove spaces
|
||||
cleanHex := ""
|
||||
for _, char := range hex {
|
||||
if char != ' ' {
|
||||
cleanHex += string(char)
|
||||
}
|
||||
}
|
||||
|
||||
result, success := new(big.Int).SetString(cleanHex, 16)
|
||||
if !success {
|
||||
panic(fmt.Sprintf("Failed to parse hex string: %s", hex))
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// generateRandomBigInt generates random big integer
//
// It draws the requested number of bytes from crypto/rand and interprets
// them as an unsigned big-endian integer in [0, 2^(8*bytes)).
func generateRandomBigInt(bytes int) (*big.Int, error) {
	buf := make([]byte, bytes)
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}

	return new(big.Int).SetBytes(buf), nil
}
||||
|
||||
// generateUUID generates UUID (simplified version)
//
// Fixes over the previous version: the error from rand.Read is no longer
// ignored (a failing CSPRNG would have silently produced the all-zero UUID),
// and the RFC 4122 version-4 and variant bits are set so the output is a
// well-formed random UUID. The textual 8-4-4-4-12 format is unchanged.
func generateUUID() string {
	randomBytes := make([]byte, 16)
	if _, err := rand.Read(randomBytes); err != nil {
		// crypto/rand failing means the platform CSPRNG is broken; there is
		// no sane fallback for identifier generation.
		panic(fmt.Sprintf("failed to read random bytes for UUID: %v", err))
	}

	// RFC 4122: version 4 in the high nibble of byte 6, variant 10xx in byte 8.
	randomBytes[6] = (randomBytes[6] & 0x0f) | 0x40
	randomBytes[8] = (randomBytes[8] & 0x3f) | 0x80

	return fmt.Sprintf("%x-%x-%x-%x-%x",
		randomBytes[0:4],
		randomBytes[4:6],
		randomBytes[6:8],
		randomBytes[8:10],
		randomBytes[10:16])
}
|
||||
481
cli/pkg/wizard/srp_test.go
Normal file
481
cli/pkg/wizard/srp_test.go
Normal file
@@ -0,0 +1,481 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test SRP core algorithms
|
||||
func TestSRPCore(t *testing.T) {
|
||||
core := NewSRPCore(SRPGroup4096)
|
||||
|
||||
if core.params.Length != 4096 {
|
||||
t.Errorf("Expected length 4096, got %d", core.params.Length)
|
||||
}
|
||||
|
||||
if core.params.Hash != "SHA-256" {
|
||||
t.Errorf("Expected hash SHA-256, got %s", core.params.Hash)
|
||||
}
|
||||
|
||||
if core.params.G.Cmp(big.NewInt(5)) != 0 {
|
||||
t.Errorf("Expected G=5, got %v", core.params.G)
|
||||
}
|
||||
|
||||
t.Logf("SRP Core initialized successfully with %d-bit group", core.params.Length)
|
||||
}
|
||||
|
||||
// Test complete SRP authentication flow
//
// Walks the protocol end to end: registration (derive x and verifier v),
// server setup (B), client login (A, M1, K), server verification and finally
// mutual M2 confirmation. All client/server values must agree.
func TestSRPAuthentication(t *testing.T) {
	// Simulate user credentials
	password := "test-password-123"

	// Step 1: Registration phase - client generates verifier
	t.Log("=== Registration Phase ===")

	// Generate salt
	salt := make([]byte, 16)
	copy(salt, []byte("test-salt-123456")) // Use fixed salt for testing

	// Calculate x = H(salt | password)
	x := calculateX(salt, password)
	t.Logf("Generated x: %x", x.Bytes())

	// Client initialization
	client := NewSRPClient(SRPGroup4096)
	err := client.Initialize(x.Bytes())
	if err != nil {
		t.Fatalf("Client initialization failed: %v", err)
	}

	v := client.GetV()
	if v == nil {
		t.Fatal("Failed to get verifier v")
	}
	t.Logf("Generated verifier v: %x", v)

	t.Log("=== Login Phase - Server Side ===")

	// Server initialization (the server only ever sees v, never x).
	server := NewSRPServer(SRPGroup4096)
	err = server.Initialize(v)
	if err != nil {
		t.Fatalf("Server initialization failed: %v", err)
	}

	B := server.GetB()
	if B == nil {
		t.Fatal("Failed to get server B value")
	}
	t.Logf("Server B: %x", B)

	t.Log("=== Login Phase - Client Side ===")

	// A fresh client plays the login side (same x, new random a).
	loginClient := NewSRPClient(SRPGroup4096)
	err = loginClient.Initialize(x.Bytes())
	if err != nil {
		t.Fatalf("Login client initialization failed: %v", err)
	}

	err = loginClient.SetB(B)
	if err != nil {
		t.Fatalf("Client SetB failed: %v", err)
	}

	A := loginClient.GetA()
	M1_client := loginClient.GetM1()
	K_client := loginClient.GetK()

	if A == nil || M1_client == nil || K_client == nil {
		t.Fatal("Failed to get client A, M1, or K values")
	}

	t.Logf("Client A: %x", A)
	t.Logf("Client M1: %x", M1_client)
	t.Logf("Client K: %x", K_client)

	t.Log("=== Server Verification ===")

	err = server.SetA(A)
	if err != nil {
		t.Fatalf("Server SetA failed: %v", err)
	}

	M1_server := server.GetM1()
	K_server := server.GetK()

	if M1_server == nil || K_server == nil {
		t.Fatal("Failed to get server M1 or K values")
	}

	t.Logf("Server M1: %x", M1_server)
	t.Logf("Server K: %x", K_server)

	// Both sides must derive identical proof and key material.
	if !bytes.Equal(M1_client, M1_server) {
		t.Errorf("M1 values don't match!")
		t.Errorf("Client M1: %x", M1_client)
		t.Errorf("Server M1: %x", M1_server)
	} else {
		t.Log("✅ M1 verification successful!")
	}

	if !bytes.Equal(K_client, K_server) {
		t.Errorf("K values don't match!")
		t.Errorf("Client K: %x", K_client)
		t.Errorf("Server K: %x", K_server)
	} else {
		t.Log("✅ Shared key K verification successful!")
	}

	t.Log("=== M2 Verification ===")

	M2_server := server.GetM2()
	M2_client := loginClient.GetM2()

	if M2_server == nil || M2_client == nil {
		t.Fatal("Failed to get M2 values")
	}

	if !bytes.Equal(M2_client, M2_server) {
		t.Errorf("M2 values don't match!")
		t.Errorf("Client M2: %x", M2_client)
		t.Errorf("Server M2: %x", M2_server)
	} else {
		t.Log("✅ M2 verification successful!")
	}

	t.Log("🎉 Complete SRP authentication flow successful!")
}
|
||||
|
||||
// Test different group lengths
|
||||
func TestSRPDifferentGroupLengths(t *testing.T) {
|
||||
groupLengths := []SRPGroupLength{SRPGroup3072, SRPGroup4096}
|
||||
|
||||
for _, length := range groupLengths {
|
||||
t.Run(fmt.Sprintf("Group%d", length), func(t *testing.T) {
|
||||
client := NewSRPClient(length)
|
||||
server := NewSRPServer(length)
|
||||
|
||||
x := calculateX([]byte("test-salt"), "test-password")
|
||||
|
||||
// Client initialization
|
||||
err := client.Initialize(x.Bytes())
|
||||
if err != nil {
|
||||
t.Fatalf("Client initialization failed for group %d: %v", length, err)
|
||||
}
|
||||
|
||||
// Server initialization
|
||||
v := client.GetV()
|
||||
err = server.Initialize(v)
|
||||
if err != nil {
|
||||
t.Fatalf("Server initialization failed for group %d: %v", length, err)
|
||||
}
|
||||
|
||||
t.Logf("✅ Group %d initialization successful", length)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSRPErrorCases(t *testing.T) {
|
||||
client := NewSRPClient(SRPGroup4096)
|
||||
|
||||
err := client.SetB([]byte("invalid"))
|
||||
if err == nil {
|
||||
t.Error("Expected error when calling SetB on uninitialized client")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Error("Expected panic for invalid group length")
|
||||
}
|
||||
}()
|
||||
NewSRPCore(SRPGroupLength(1234))
|
||||
}
|
||||
|
||||
func TestSRPCoreComponents(t *testing.T) {
|
||||
core := NewSRPCore(SRPGroup4096)
|
||||
|
||||
a := big.NewInt(123)
|
||||
b := big.NewInt(456)
|
||||
|
||||
h1, err := core.H(a, b)
|
||||
if err != nil {
|
||||
t.Fatalf("Hash function failed: %v", err)
|
||||
}
|
||||
|
||||
h2, err := core.H(a, b)
|
||||
if err != nil {
|
||||
t.Fatalf("Hash function failed: %v", err)
|
||||
}
|
||||
|
||||
if h1.Cmp(h2) != 0 {
|
||||
t.Error("Hash function should be deterministic")
|
||||
}
|
||||
|
||||
x := big.NewInt(789)
|
||||
v := core.V(x)
|
||||
if v == nil {
|
||||
t.Error("Verifier calculation failed")
|
||||
}
|
||||
|
||||
a_val := big.NewInt(101112)
|
||||
A := core.A(a_val)
|
||||
if A == nil {
|
||||
t.Error("A value calculation failed")
|
||||
}
|
||||
|
||||
t.Log("✅ SRP core components test successful")
|
||||
}
|
||||
|
||||
// BenchmarkSRPAuthentication measures one full authentication round trip
// (registration, server setup, client login, server M1 check) per iteration.
func BenchmarkSRPAuthentication(b *testing.B) {
	salt := []byte("benchmark-salt-123456")
	password := "benchmark-password"
	x := calculateX(salt, password)

	// Key derivation above is setup cost; exclude it from the measurement.
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		client := NewSRPClient(SRPGroup4096)
		client.Initialize(x.Bytes())
		v := client.GetV()

		server := NewSRPServer(SRPGroup4096)
		server.Initialize(v)
		B := server.GetB()

		loginClient := NewSRPClient(SRPGroup4096)
		loginClient.Initialize(x.Bytes())
		loginClient.SetB(B)

		A := loginClient.GetA()
		server.SetA(A)

		M1_client := loginClient.GetM1()
		M1_server := server.GetM1()

		// Sanity check so a broken build doesn't benchmark garbage.
		if !bytes.Equal(M1_client, M1_server) {
			b.Fatalf("M1 verification failed in benchmark")
		}
	}
}
||||
|
||||
func calculateX(salt []byte, password string) *big.Int {
|
||||
hasher := sha256.New()
|
||||
hasher.Write(salt)
|
||||
hasher.Write([]byte(password))
|
||||
hash := hasher.Sum(nil)
|
||||
|
||||
return new(big.Int).SetBytes(hash)
|
||||
}
|
||||
|
||||
// TestSRPKnownVectors performs sanity checks on derived values for a fixed
// salt/password: v and A must be non-empty and strictly less than N.
func TestSRPKnownVectors(t *testing.T) {
	salt := []byte("test-salt")
	password := "test-password"
	x := calculateX(salt, password)

	client := NewSRPClient(SRPGroup4096)
	err := client.Initialize(x.Bytes())
	if err != nil {
		t.Fatalf("Client initialization failed: %v", err)
	}

	v := client.GetV()
	A := client.GetA()

	if len(v) == 0 {
		t.Error("Verifier v should not be empty")
	}

	if len(A) == 0 {
		t.Error("A value should not be empty")
	}

	// Both values are group elements, so they must be reduced below N.
	core := NewSRPCore(SRPGroup4096)
	vBig := new(big.Int).SetBytes(v)
	ABig := new(big.Int).SetBytes(A)

	if vBig.Cmp(core.params.N) >= 0 {
		t.Error("Verifier v should be less than N")
	}

	if ABig.Cmp(core.params.N) >= 0 {
		t.Error("A value should be less than N")
	}

	t.Logf("Known vectors test successful")
	t.Logf("Salt: %x", salt)
	t.Logf("X: %x", x.Bytes())
	t.Logf("V: %x", v)
	t.Logf("A: %x", A)
}
||||
|
||||
func TestSRPConsistency(t *testing.T) {
|
||||
salt := []byte("consistency-test-salt")
|
||||
password := "consistency-password"
|
||||
x := calculateX(salt, password)
|
||||
|
||||
client1 := NewSRPClient(SRPGroup4096)
|
||||
client1.Initialize(x.Bytes())
|
||||
v1 := client1.GetV()
|
||||
|
||||
client2 := NewSRPClient(SRPGroup4096)
|
||||
client2.Initialize(x.Bytes())
|
||||
v2 := client2.GetV()
|
||||
|
||||
if !bytes.Equal(v1, v2) {
|
||||
t.Error("Verifiers should be identical for same x value")
|
||||
}
|
||||
|
||||
A1 := client1.GetA()
|
||||
A2 := client2.GetA()
|
||||
|
||||
if bytes.Equal(A1, A2) {
|
||||
t.Error("A values should be different due to random a")
|
||||
}
|
||||
|
||||
t.Log("✅ SRP consistency test successful")
|
||||
}
|
||||
|
||||
func TestSRPInvalidInputs(t *testing.T) {
|
||||
client := NewSRPClient(SRPGroup4096)
|
||||
server := NewSRPServer(SRPGroup4096)
|
||||
|
||||
err := client.Initialize([]byte{})
|
||||
if err != nil {
|
||||
t.Logf("Expected behavior: empty x rejected: %v", err)
|
||||
}
|
||||
|
||||
err = server.Initialize([]byte{})
|
||||
if err != nil {
|
||||
t.Logf("Expected behavior: empty v rejected: %v", err)
|
||||
}
|
||||
|
||||
x := calculateX([]byte("test-salt"), "test-password")
|
||||
err = client.Initialize(x.Bytes())
|
||||
if err != nil {
|
||||
t.Fatalf("Normal initialization should succeed: %v", err)
|
||||
}
|
||||
|
||||
zeroB := make([]byte, 512) // 全零的 B 值
|
||||
err = client.SetB(zeroB)
|
||||
if err == nil {
|
||||
t.Error("Should reject zero B value")
|
||||
} else {
|
||||
t.Logf("✅ Correctly rejected zero B value: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkSRPGroupLengths compares a full authentication round trip across
// the supported group sizes (3072 vs 4096 bit) as sub-benchmarks.
func BenchmarkSRPGroupLengths(b *testing.B) {
	groupLengths := []SRPGroupLength{SRPGroup3072, SRPGroup4096}

	for _, length := range groupLengths {
		b.Run(fmt.Sprintf("Group%d", length), func(b *testing.B) {
			salt := []byte("benchmark-salt")
			password := "benchmark-password"
			x := calculateX(salt, password)

			// Exclude key derivation setup from the timed region.
			b.ResetTimer()

			for i := 0; i < b.N; i++ {
				client := NewSRPClient(length)
				client.Initialize(x.Bytes())

				server := NewSRPServer(length)
				server.Initialize(client.GetV())

				loginClient := NewSRPClient(length)
				loginClient.Initialize(x.Bytes())
				loginClient.SetB(server.GetB())

				server.SetA(loginClient.GetA())

				// Sanity check so a broken build doesn't benchmark garbage.
				if !bytes.Equal(loginClient.GetM1(), server.GetM1()) {
					b.Fatal("M1 verification failed")
				}
			}
		})
	}
}
||||
|
||||
// TestSRPFlowExample is a narrated walkthrough of the SRP flow printed to
// stdout; it doubles as executable documentation of the protocol phases.
func TestSRPFlowExample(t *testing.T) {
	fmt.Println("=== SRP Authentication Flow Example ===")

	username := "user@example.com"
	password := "secure-password-123"
	salt := []byte("random-salt-16bytes")

	fmt.Printf("Username: %s\n", username)
	fmt.Printf("Password: %s\n", password)
	fmt.Printf("Salt: %x\n", salt)

	fmt.Println("\n1. Registration Phase:")
	x := calculateX(salt, password)
	client := NewSRPClient(SRPGroup4096)
	client.Initialize(x.Bytes())
	v := client.GetV()

	fmt.Printf("   Generated verifier v: %x...\n", v[:8])

	fmt.Println("\n2. Login Phase - Server:")
	server := NewSRPServer(SRPGroup4096)
	server.Initialize(v)
	B := server.GetB()

	fmt.Printf("   Server B: %x...\n", B[:8])

	fmt.Println("\n3. Login Phase - Client:")
	loginClient := NewSRPClient(SRPGroup4096)
	loginClient.Initialize(x.Bytes())
	loginClient.SetB(B)

	A := loginClient.GetA()
	M1_client := loginClient.GetM1()

	fmt.Printf("   Client A: %x...\n", A[:8])
	fmt.Printf("   Client M1: %x...\n", M1_client[:8])

	fmt.Println("\n4. Server Verification:")
	server.SetA(A)
	M1_server := server.GetM1()

	fmt.Printf("   Server M1: %x...\n", M1_server[:8])

	// Matching M1 values prove both sides derived the same session key.
	if bytes.Equal(M1_client, M1_server) {
		fmt.Println("   ✅ Authentication successful!")

		K_client := loginClient.GetK()
		K_server := server.GetK()

		fmt.Printf("   Shared key K: %x...\n", K_client[:8])

		if bytes.Equal(K_client, K_server) {
			fmt.Println("   ✅ Shared key established!")
		}
	} else {
		fmt.Println("   ❌ Authentication failed!")
	}

	// Output:
	// === SRP Authentication Flow Example ===
	// Username: user@example.com
	// Password: secure-password-123
	// Salt: 72616e646f6d2d73616c742d313662797465730000000000000000
	//
	// 1. Registration Phase:
	//    Generated verifier v: a1b2c3d4...
	//
	// 2. Login Phase - Server:
	//    Server B: e5f6a7b8...
	//
	// 3. Login Phase - Client:
	//    Client A: 12345678...
	//    Client M1: 9abcdef0...
	//
	// 4. Server Verification:
	//    Server M1: 9abcdef0...
	//    ✅ Authentication successful!
	//    Shared key K: fedcba98...
	//    ✅ Shared key established!
}
||||
46
cli/pkg/wizard/ssi.go
Normal file
46
cli/pkg/wizard/ssi.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SSI authentication client implementation
//
// SSIAuthClient answers server-issued SSI challenges by signing them as a
// JWS with the local user's keys (via UserStore.SignJWS).
type SSIAuthClient struct {
	UserStore *UserStore // Direct use of UserStore struct
	// JWSSigner removed as UserStore.SignJWS() is actually used
}
||||
|
||||
// PrepareAuthentication implements authentication functionality for SSI client
|
||||
func (p *SSIAuthClient) PrepareAuthentication(params map[string]any) (map[string]any, error) {
|
||||
|
||||
// Extract challenge
|
||||
challenge, ok := params["challenge"].(map[string]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid challenge format")
|
||||
}
|
||||
|
||||
challengeValue, ok := challenge["value"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("challenge value not found")
|
||||
}
|
||||
|
||||
// Build JWS payload
|
||||
payload := map[string]any{
|
||||
"name": p.UserStore.GetTerminusName(),
|
||||
"did": p.UserStore.GetDid(),
|
||||
"domain": "http://example.domain",
|
||||
"time": fmt.Sprintf("%d", time.Now().UnixMilli()),
|
||||
"challenge": challengeValue,
|
||||
}
|
||||
|
||||
// Sign JWS
|
||||
jws, err := p.UserStore.SignJWS(payload)
|
||||
if err != nil || jws == "" {
|
||||
return nil, fmt.Errorf("jws signing failed: %v", err)
|
||||
}
|
||||
|
||||
return map[string]any{
|
||||
"jws": jws,
|
||||
}, nil
|
||||
}
|
||||
436
cli/pkg/wizard/types.go
Normal file
436
cli/pkg/wizard/types.go
Normal file
@@ -0,0 +1,436 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ============================================================================
// Interface Definitions
// ============================================================================

// Platform interface for authentication operations
// (start a pending auth request, then complete it once the user responds).
type Platform interface {
	StartAuthRequest(opts StartAuthRequestOptions) (*StartAuthRequestResponse, error)
	CompleteAuthRequest(req *StartAuthRequestResponse) (*AuthenticateResponse, error)
}

// AppAPI interface for app-level operations
// (the lower-level request/response API a Platform implementation talks to).
type AppAPI interface {
	StartAuthRequest(params StartAuthRequestParams) (*StartAuthRequestResponse, error)
	CompleteAuthRequest(params CompleteAuthRequestParams) (*CompleteAuthRequestResponse, error)
}

// ClientState interface for managing client session state
// (session, account and device accessors backed by client-side storage).
type ClientState interface {
	GetSession() *Session
	SetSession(session *Session)
	GetAccount() *Account
	SetAccount(account *Account)
	GetDevice() *DeviceInfo
}

// Sender interface for network transport
// (delivers one RPC Request and returns its Response).
type Sender interface {
	Send(req *Request) (*Response, error)
}

// AuthClient interface for authentication clients
// Implementations (e.g. SSIAuthClient) turn server-supplied parameters into
// the data needed to complete an auth request.
type AuthClient interface {
	PrepareAuthentication(params map[string]any) (map[string]any, error)
}
|
||||
// ============================================================================
// Type Definitions and Enums
// ============================================================================

// AuthType enumerates supported authentication mechanisms.
type AuthType string

const (
	AuthTypeSSI AuthType = "ssi"
)

// AuthPurpose states why an authentication request was started.
type AuthPurpose string

const (
	AuthPurposeSignup            AuthPurpose = "signup"
	AuthPurposeLogin             AuthPurpose = "login"
	AuthPurposeRecover           AuthPurpose = "recover"
	AuthPurposeAccessKeyStore    AuthPurpose = "access_key_store"
	AuthPurposeTestAuthenticator AuthPurpose = "test_authenticator"
	AuthPurposeAdminLogin        AuthPurpose = "admin_login"
)

// AccountStatus is the lifecycle state of an account.
type AccountStatus string

const (
	AccountStatusUnregistered AccountStatus = "unregistered"
	AccountStatusActive       AccountStatus = "active"
	AccountStatusBlocked      AccountStatus = "blocked"
	AccountStatusDeleted      AccountStatus = "deleted"
)

// AuthRequestStatus tracks the progress of a pending auth request.
type AuthRequestStatus string

const (
	AuthRequestStatusStarted  AuthRequestStatus = "started"
	AuthRequestStatusVerified AuthRequestStatus = "verified"
	AuthRequestStatusExpired  AuthRequestStatus = "expired"
)

// ErrorCode identifies well-known server error conditions.
type ErrorCode string

const (
	// NOTE(review): the wire value "email_verification_failed" does not match
	// the constant name — presumably kept for server compatibility; confirm
	// before renaming either side.
	ErrorCodeAuthenticationFailed ErrorCode = "email_verification_failed"
	ErrorCodeNotFound             ErrorCode = "not_found"
	ErrorCodeServerError          ErrorCode = "server_error"
)
||||
|
||||
// AccountProvisioning represents account provisioning information
// (server-driven onboarding state: status, user-facing labels, optional
// follow-up action and quota/feature maps).
type AccountProvisioning struct {
	ID            string         `json:"id"`
	DID           string         `json:"did"`
	Name          *string        `json:"name,omitempty"`
	AccountID     *string        `json:"accountId,omitempty"`
	Status        string         `json:"status"`
	StatusLabel   string         `json:"statusLabel"`
	StatusMessage string         `json:"statusMessage"`
	ActionURL     *string        `json:"actionUrl,omitempty"`
	ActionLabel   *string        `json:"actionLabel,omitempty"`
	MetaData      map[string]any `json:"metaData,omitempty"`
	SkipTos       bool           `json:"skipTos"`
	BillingPage   any            `json:"billingPage,omitempty"`
	Quota         map[string]any `json:"quota"`
	Features      map[string]any `json:"features"`
	Orgs          []string       `json:"orgs"`
}

// StartAuthRequestResponse is the server's reply to starting an auth request:
// the request id/token plus type, purpose and current request/account status.
type StartAuthRequestResponse struct {
	ID              string               `json:"id"`
	DID             string               `json:"did"`
	Token           string               `json:"token"`
	Data            map[string]any       `json:"data"`
	Type            AuthType             `json:"type"`
	Purpose         AuthPurpose          `json:"purpose"`
	AuthenticatorID string               `json:"authenticatorId"`
	RequestStatus   AuthRequestStatus    `json:"requestStatus"`
	AccountStatus   *AccountStatus       `json:"accountStatus,omitempty"`
	Provisioning    *AccountProvisioning `json:"provisioning,omitempty"`
	DeviceTrusted   bool                 `json:"deviceTrusted"`
}

// AuthenticateRequest describes an authentication attempt, optionally
// continuing a previously started (pending) request.
type AuthenticateRequest struct {
	DID                string                    `json:"did"`
	Type               AuthType                  `json:"type"`
	Purpose            AuthPurpose               `json:"purpose"`
	AuthenticatorIndex int                       `json:"authenticatorIndex"`
	PendingRequest     *StartAuthRequestResponse `json:"pendingRequest,omitempty"`
	Caller             string                    `json:"caller"`
}

// AuthenticateResponse is the final result of an authentication attempt.
type AuthenticateResponse struct {
	DID           string              `json:"did"`
	Token         string              `json:"token"`
	AccountStatus AccountStatus       `json:"accountStatus"`
	Provisioning  AccountProvisioning `json:"provisioning"`
	DeviceTrusted bool                `json:"deviceTrusted"`
}

// StartAuthRequestOptions are the caller-facing options for Platform.StartAuthRequest.
type StartAuthRequestOptions struct {
	Purpose            AuthPurpose `json:"purpose"`
	Type               *AuthType   `json:"type,omitempty"`
	DID                *string     `json:"did,omitempty"`
	AuthenticatorID    *string     `json:"authenticatorId,omitempty"`
	AuthenticatorIndex *int        `json:"authenticatorIndex,omitempty"`
}

// StartAuthRequestParams is the wire-level form sent to AppAPI.StartAuthRequest.
type StartAuthRequestParams struct {
	DID                string     `json:"did"`
	Type               *AuthType  `json:"type,omitempty"`
	SupportedTypes     []AuthType `json:"supportedTypes"`
	Purpose            AuthPurpose `json:"purpose"`
	AuthenticatorID    *string    `json:"authenticatorId,omitempty"`
	AuthenticatorIndex *int       `json:"authenticatorIndex,omitempty"`
}

// CompleteAuthRequestParams carries the request id plus the authenticator's
// prepared data (e.g. the signed JWS) back to the server.
type CompleteAuthRequestParams struct {
	ID   string         `json:"id"`
	Data map[string]any `json:"data"`
	DID  string         `json:"did"`
}

// CompleteAuthRequestResponse reports the account state after completion.
type CompleteAuthRequestResponse struct {
	AccountStatus AccountStatus       `json:"accountStatus"`
	DeviceTrusted bool                `json:"deviceTrusted"`
	Provisioning  AccountProvisioning `json:"provisioning"`
}
||||
|
||||
// Session represents a user session
type Session struct {
	ID  string `json:"id"`
	Key []byte `json:"key,omitempty"` // session key material
	// Other session-related fields...
}

// OrgInfo represents organization information
type OrgInfo struct {
	ID       string `json:"id"`
	Name     string `json:"name,omitempty"`
	Revision string `json:"revision,omitempty"`
}

// MainVault represents main vault information
type MainVault struct {
	ID       string `json:"id"`
	Name     string `json:"name,omitempty"`
	Revision string `json:"revision,omitempty"`
}

// AccountSettings represents account settings
type AccountSettings struct {
	// Simplified version, can be extended as needed
}

// EncryptionParams represents AES encryption parameters
// (the expected values are noted per field by the original author).
type EncryptionParams struct {
	Algorithm      string `json:"algorithm"`      // "AES-GCM"
	TagSize        int    `json:"tagSize"`        // 128
	KeySize        int    `json:"keySize"`        // 256
	IV             string `json:"iv"`             // Base64 encoded initialization vector
	AdditionalData string `json:"additionalData"` // Base64 encoded additional data
	Version        string `json:"version"`        // "3.0.14"
}

// KeyParams represents PBKDF2 key derivation parameters
type KeyParams struct {
	Algorithm  string `json:"algorithm"`  // "PBKDF2"
	Hash       string `json:"hash"`       // "SHA-256"
	KeySize    int    `json:"keySize"`    // 256
	Iterations int    `json:"iterations"` // 100000
	Salt       string `json:"salt"`       // Base64 encoded salt
	Version    string `json:"version"`    // "3.0.14"
}
||||
|
||||
// Account is the wire representation of a vault-server account: its
// identity (ID/DID), key material, encrypted payload with the parameters
// needed to decrypt it, and membership information.
type Account struct {
	ID               string           `json:"id"`
	DID              string           `json:"did"`
	Name             string           `json:"name"`
	Local            bool             `json:"local,omitempty"`
	Created          string           `json:"created,omitempty"`          // ISO 8601 format
	Updated          string           `json:"updated,omitempty"`          // ISO 8601 format
	PublicKey        string           `json:"publicKey,omitempty"`        // Base64 encoded RSA public key
	EncryptedData    string           `json:"encryptedData,omitempty"`    // Base64 encoded encrypted data
	EncryptionParams EncryptionParams `json:"encryptionParams,omitempty"` // AES encryption parameters
	KeyParams        KeyParams        `json:"keyParams,omitempty"`        // PBKDF2 key derivation parameters
	MainVault        MainVault        `json:"mainVault"`                  // Main vault information
	Orgs             []OrgInfo        `json:"orgs"`                       // Organization list (important: prevent undefined)
	Revision         string           `json:"revision,omitempty"`         // Version control
	Kid              string           `json:"kid,omitempty"`              // Key ID
	Settings         AccountSettings  `json:"settings,omitempty"`         // Account settings
	Version          string           `json:"version,omitempty"`          // Version
}
|
||||
|
||||
// DeviceInfo identifies the client device making a request.
type DeviceInfo struct {
	ID       string `json:"id"`
	Platform string `json:"platform"`
	// Other device-related fields...
}

// Request represents an RPC request
type Request struct {
	Method string        `json:"method"`
	Params []interface{} `json:"params,omitempty"`
	Device *DeviceInfo   `json:"device,omitempty"` // optional device context
	Auth   *RequestAuth  `json:"auth,omitempty"`   // optional session auth
}

// Response is the RPC reply envelope: exactly one of Result or Error is
// expected to be set by the server.
type Response struct {
	Result interface{} `json:"result,omitempty"`
	Error  *ErrorInfo  `json:"error,omitempty"`
}
|
||||
|
||||
// ISOTime is a custom time type that ensures JSON serialization matches JavaScript toISOString() format
|
||||
type ISOTime time.Time
|
||||
|
||||
// MarshalJSON implements JSON serialization using JavaScript toISOString() format
|
||||
func (t ISOTime) MarshalJSON() ([]byte, error) {
|
||||
// JavaScript toISOString() format: 2006-01-02T15:04:05.000Z
|
||||
// Ensure milliseconds are always 3 digits
|
||||
utcTime := time.Time(t).UTC()
|
||||
timeStr := fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02d.%03dZ",
|
||||
utcTime.Year(), utcTime.Month(), utcTime.Day(),
|
||||
utcTime.Hour(), utcTime.Minute(), utcTime.Second(),
|
||||
utcTime.Nanosecond()/1000000)
|
||||
return json.Marshal(timeStr)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements JSON deserialization
|
||||
func (t *ISOTime) UnmarshalJSON(data []byte) error {
|
||||
var str string
|
||||
if err := json.Unmarshal(data, &str); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parsed, err := time.Parse("2006-01-02T15:04:05.000Z", str)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*t = ISOTime(parsed)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unix returns Unix timestamp for compatibility
|
||||
func (t ISOTime) Unix() int64 {
|
||||
return time.Time(t).Unix()
|
||||
}
|
||||
|
||||
// RequestAuth carries the per-request session authentication: the
// session identifier, a timestamp, and a signature over the request.
type RequestAuth struct {
	Session   string      `json:"session"`
	Time      ISOTime     `json:"time"`      // Use custom ISOTime type
	Signature Base64Bytes `json:"signature"` // Use Base64Bytes to automatically handle base64 encoding
}

// ErrorInfo is the error payload of an RPC Response.
type ErrorInfo struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
|
||||
|
||||
// Base64Bytes automatically handles base64 encoding/decoding for byte
// arrays when marshaled to / unmarshaled from JSON.
type Base64Bytes []byte

// UnmarshalJSON implements JSON deserialization, automatically decoding
// from a base64 string.
//
// The server uses URL-safe base64 encoding by default
// (ref: encoding.ts line 366: urlSafe = true), so the URL-safe alphabets
// are tried first; the standard alphabets (padded and raw) are accepted
// as fallbacks for interoperability. The raw (unpadded) standard variant
// was added so every common base64 flavor decodes.
func (b *Base64Bytes) UnmarshalJSON(data []byte) error {
	var str string
	if err := json.Unmarshal(data, &str); err != nil {
		return err
	}

	// Try each encoding in priority order; remember the first failure
	// for the error message if none succeeds.
	var firstErr error
	for _, enc := range []*base64.Encoding{
		base64.URLEncoding,
		base64.RawURLEncoding,
		base64.StdEncoding,
		base64.RawStdEncoding,
	} {
		decoded, err := enc.DecodeString(str)
		if err == nil {
			*b = Base64Bytes(decoded)
			return nil
		}
		if firstErr == nil {
			firstErr = err
		}
	}
	return fmt.Errorf("failed to decode base64url/base64: %v", firstErr)
}

// MarshalJSON implements JSON serialization, automatically encoding to a
// base64 string.
//
// NOTE(review): encoding uses the padded standard alphabet while the
// comment above says the server prefers URL-safe — asymmetric but
// preserved as-is; confirm the server accepts standard base64 before
// changing.
func (b Base64Bytes) MarshalJSON() ([]byte, error) {
	encoded := base64.StdEncoding.EncodeToString([]byte(b))
	return json.Marshal(encoded)
}

// Bytes returns the underlying byte array
func (b Base64Bytes) Bytes() []byte {
	return []byte(b)
}
|
||||
|
||||
// ============================================================================
|
||||
// Vault and VaultItem Structures
|
||||
// ============================================================================
|
||||
|
||||
// VaultType represents the type of vault item
type VaultType int

// Known vault item types. Values are wire-stable integers — do not
// reorder.
const (
	VaultTypeDefault           VaultType = 0
	VaultTypeLogin             VaultType = 1
	VaultTypeCard              VaultType = 2
	VaultTypeTerminusTotp      VaultType = 3
	VaultTypeOlaresSSHPassword VaultType = 4
)
|
||||
|
||||
// FieldType represents the type of field in a vault item
type FieldType string

// Known field types. Values are wire-stable strings matching the vault
// server's field schema.
const (
	FieldTypeUsername  FieldType = "username"
	FieldTypePassword  FieldType = "password"
	FieldTypeApiSecret FieldType = "apiSecret"
	FieldTypeMnemonic  FieldType = "mnemonic"
	FieldTypeUrl       FieldType = "url"
	FieldTypeEmail     FieldType = "email"
	FieldTypeDate      FieldType = "date"
	FieldTypeMonth     FieldType = "month"
	FieldTypeCredit    FieldType = "credit"
	FieldTypePhone     FieldType = "phone"
	FieldTypePin       FieldType = "pin"
	FieldTypeTotp      FieldType = "totp"
	FieldTypeNote      FieldType = "note"
	FieldTypeText      FieldType = "text"
)
|
||||
|
||||
// Field represents a field in a vault item
type Field struct {
	Name  string    `json:"name"`
	Type  FieldType `json:"type"`
	Value string    `json:"value"`
}

// VaultItem represents an item in a vault
type VaultItem struct {
	ID        string    `json:"id"`
	Name      string    `json:"name"`
	Type      VaultType `json:"type"`
	Icon      string    `json:"icon,omitempty"`
	Fields    []Field   `json:"fields"`
	Tags      []string  `json:"tags"`
	Updated   string    `json:"updated"` // ISO 8601 format
	UpdatedBy string    `json:"updatedBy"`
}

// Vault represents a vault containing items
type Vault struct {
	Kind     string      `json:"kind"` // Always "vault" for Vault objects
	ID       string      `json:"id"`
	Name     string      `json:"name"`
	Owner    string      `json:"owner"`
	Created  string      `json:"created"` // ISO 8601 format
	Updated  string      `json:"updated"` // ISO 8601 format
	Revision string      `json:"revision,omitempty"`
	Items    []VaultItem `json:"items,omitempty"`
	// The following are kept loosely typed (interface{}) because their
	// exact schema is defined by the server; they are passed through
	// unmodified.
	KeyParams        interface{} `json:"keyParams,omitempty"`
	EncryptionParams interface{} `json:"encryptionParams,omitempty"`
	Accessors        interface{} `json:"accessors,omitempty"`
	EncryptedData    interface{} `json:"encryptedData,omitempty"`
	Version          string      `json:"version,omitempty"` // Serialization version
}
|
||||
|
||||
// ItemTemplate represents a template for creating vault items
type ItemTemplate struct {
	ID     string  `json:"id"`
	Name   string  `json:"name"`
	Icon   string  `json:"icon"`
	Fields []Field `json:"fields"` // default fields copied into new items
}
|
||||
|
||||
// GetAuthenticatorTemplate returns the authenticator template for TOTP items
|
||||
func GetAuthenticatorTemplate() *ItemTemplate {
|
||||
return &ItemTemplate{
|
||||
ID: "authenticator",
|
||||
Name: "Authenticator",
|
||||
Icon: "authenticator",
|
||||
Fields: []Field{
|
||||
{
|
||||
Name: "One-Time Password",
|
||||
Type: FieldTypeTotp,
|
||||
Value: "", // Will be set with MFA token
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// JWS-related data structures removed, using Web5 library's jwt.Sign() method directly
|
||||
// UserItem and JWSSignatureInput removed as they were not actually used
|
||||
230
cli/pkg/wizard/user_store.go
Normal file
230
cli/pkg/wizard/user_store.go
Normal file
@@ -0,0 +1,230 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/web5/crypto"
|
||||
"github.com/beclab/Olares/cli/pkg/web5/dids/did"
|
||||
"github.com/beclab/Olares/cli/pkg/web5/dids/didcore"
|
||||
"github.com/beclab/Olares/cli/pkg/web5/jwk"
|
||||
"github.com/beclab/Olares/cli/pkg/web5/jwt"
|
||||
)
|
||||
|
||||
// Note: DID key-related implementation is now in did_key_utils.go
|
||||
|
||||
// UserStore implementation using actual DID keys
//
// UserStore holds a user's identity material derived from a mnemonic:
// the terminus name, the DID, the private JWK, and (once obtained) the
// MFA token.
type UserStore struct {
	terminusName string   // user name, typically "local@domain"
	mnemonic     string   // seed phrase all keys are derived from
	did          string   // DID derived from the mnemonic
	privateJWK   *jwk.JWK // Direct use of Web5 JWK structure
	mfa          string   // Store MFA token
}
|
||||
|
||||
// GetTerminusName returns the stored terminus name (e.g. "user@domain").
func (u *UserStore) GetTerminusName() string {
	return u.terminusName
}

// GetDid returns the DID derived from the mnemonic.
func (u *UserStore) GetDid() string {
	return u.did
}
|
||||
|
||||
// SetMFA saves MFA token
|
||||
func (u *UserStore) SetMFA(mfa string) error {
|
||||
u.mfa = mfa
|
||||
log.Printf("MFA token saved to UserStore: %s", mfa)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMFA retrieves MFA token. It returns an error if SetMFA has not
// been called with a non-empty token.
func (u *UserStore) GetMFA() (string, error) {
	if u.mfa == "" {
		return "", fmt.Errorf("MFA token not found")
	}
	return u.mfa, nil
}

// GetPrivateJWK returns the user's private JWK (may be shared; callers
// must not mutate it).
func (u *UserStore) GetPrivateJWK() *jwk.JWK {
	return u.privateJWK
}
|
||||
|
||||
// NewUserStore creates user store, generating all keys from mnemonic (using methods from did_key_utils.go)
|
||||
func NewUserStore(mnemonic, terminusName string) (*UserStore, error) {
|
||||
log.Printf("Creating RealUserStore from mnemonic")
|
||||
|
||||
// 1. Generate complete DID key result using methods from did_key_utils.go
|
||||
result, err := GetPrivateJWK(mnemonic)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate DID key: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Generated DID from mnemonic: %s", result.DID)
|
||||
|
||||
// 2. Direct use of Web5's jwk.JWK, no conversion needed
|
||||
privateJWK := &result.PrivateJWK
|
||||
|
||||
return &UserStore{
|
||||
terminusName: terminusName,
|
||||
mnemonic: mnemonic,
|
||||
did: result.DID,
|
||||
privateJWK: privateJWK,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UserStore method implementations

// GetCurrentID returns the DID as the current user identifier.
func (u *UserStore) GetCurrentID() string {
	return u.did
}

// GetCurrentUser method removed as it was not actually used

// GetCurrentUserPrivateKey returns the stored private JWK. The error is
// always nil; the signature is kept for interface compatibility.
func (u *UserStore) GetCurrentUserPrivateKey() (*jwk.JWK, error) {
	return u.privateJWK, nil
}
|
||||
|
||||
// createBearerDIDFromPrivateKey creates BearerDID from private key
|
||||
func (u *UserStore) createBearerDIDFromPrivateKey() (did.BearerDID, error) {
|
||||
// 1. Create LocalKeyManager
|
||||
keyManager := crypto.NewLocalKeyManager()
|
||||
|
||||
// 2. Direct use of our stored Web5 JWK, no conversion needed
|
||||
privateJWK := *u.privateJWK
|
||||
|
||||
// 3. Import private key to KeyManager
|
||||
keyID, err := keyManager.ImportKey(privateJWK)
|
||||
if err != nil {
|
||||
return did.BearerDID{}, fmt.Errorf("failed to import private key: %w", err)
|
||||
}
|
||||
|
||||
// 4. Get public key
|
||||
publicJWK, err := keyManager.GetPublicKey(keyID)
|
||||
if err != nil {
|
||||
return did.BearerDID{}, fmt.Errorf("failed to get public key: %w", err)
|
||||
}
|
||||
|
||||
// 5. Set public key's KID
|
||||
publicJWK.KID = u.did
|
||||
publicJWK.USE = "sig"
|
||||
publicJWK.ALG = "EdDSA"
|
||||
|
||||
// 6. Parse DID
|
||||
parsedDID, err := did.Parse(u.did)
|
||||
if err != nil {
|
||||
return did.BearerDID{}, fmt.Errorf("failed to parse DID: %w", err)
|
||||
}
|
||||
|
||||
// 7. Create DID Document
|
||||
document := didcore.Document{
|
||||
Context: []string{
|
||||
"https://www.w3.org/ns/did/v1",
|
||||
"https://w3id.org/security/suites/ed25519-2020/v1",
|
||||
},
|
||||
ID: u.did,
|
||||
VerificationMethod: []didcore.VerificationMethod{
|
||||
{
|
||||
ID: u.did,
|
||||
Type: "JsonWebKey2020",
|
||||
Controller: u.did,
|
||||
PublicKeyJwk: &publicJWK,
|
||||
},
|
||||
},
|
||||
Authentication: []string{"#" + u.did},
|
||||
AssertionMethod: []string{"#" + u.did},
|
||||
CapabilityDelegation: []string{"#" + u.did},
|
||||
CapabilityInvocation: []string{"#" + u.did},
|
||||
}
|
||||
|
||||
fmt.Printf("publicJWK: %v", document)
|
||||
// 8. Create BearerDID
|
||||
bearerDID := did.BearerDID{
|
||||
DID: parsedDID,
|
||||
KeyManager: keyManager,
|
||||
Document: document,
|
||||
}
|
||||
|
||||
return bearerDID, nil
|
||||
}
|
||||
|
||||
// SignJWS performs real DID key JWS signing (using BearerDID created from private key)
|
||||
func (u *UserStore) SignJWS(payload map[string]any) (string, error) {
|
||||
log.Printf("Creating real JWS signature for DID: %s", u.did)
|
||||
|
||||
// Create BearerDID from private key
|
||||
bearerDID, err := u.createBearerDIDFromPrivateKey()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create BearerDID from private key: %w", err)
|
||||
}
|
||||
|
||||
// Build JWT Claims (ref: example/main.go)
|
||||
claims := jwt.Claims{
|
||||
Issuer: bearerDID.URI,
|
||||
Misc: payload, // Direct use of passed payload
|
||||
}
|
||||
|
||||
// Ensure payload has necessary fields
|
||||
if claims.Misc == nil {
|
||||
claims.Misc = make(map[string]interface{})
|
||||
}
|
||||
|
||||
// Add timestamp if not present
|
||||
if _, exists := claims.Misc["time"]; !exists {
|
||||
claims.Misc["time"] = fmt.Sprintf("%d", time.Now().UnixMilli())
|
||||
}
|
||||
|
||||
// Use Web5 JWT signing (ref: example/main.go)
|
||||
signedJWT, err := jwt.Sign(claims, bearerDID, jwt.Type("JWT"))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to sign JWT: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Real JWS created successfully with Web5")
|
||||
log.Printf("Bearer DID: %s", bearerDID.URI)
|
||||
log.Printf("JWS: %s", signedJWT[:100]+"...")
|
||||
|
||||
return signedJWT, nil
|
||||
}
|
||||
|
||||
const TerminusDefaultDomain = "olares.cn"
|
||||
|
||||
func (u *UserStore) GetAuthURL() string {
|
||||
array := strings.Split(u.terminusName, "@")
|
||||
localURL := u.getLocalURL()
|
||||
|
||||
if len(array) == 2 {
|
||||
return fmt.Sprintf("https://auth.%s%s.%s", localURL, array[0], array[1])
|
||||
} else {
|
||||
return fmt.Sprintf("https://auth.%s%s.%s", localURL, array[0], TerminusDefaultDomain)
|
||||
}
|
||||
}
|
||||
|
||||
func (u *UserStore) GetVaultURL() string {
|
||||
array := strings.Split(u.terminusName, "@")
|
||||
localURL := u.getLocalURL()
|
||||
|
||||
if len(array) == 2 {
|
||||
return fmt.Sprintf("https://vault.%s%s.%s/server", localURL, array[0], array[1])
|
||||
} else {
|
||||
return fmt.Sprintf("https://vault.%s%s.%s/server", localURL, array[0], TerminusDefaultDomain)
|
||||
}
|
||||
}
|
||||
|
||||
// getLocalURL returns the host prefix inserted into generated URLs.
// Currently always empty — presumably a hook for "local." style
// prefixes; TODO confirm intended use before extending.
func (u *UserStore) getLocalURL() string {
	return ""
}

// GetLocalName returns the part of the terminus name before the "@"
// (or the whole name when there is no "@").
func (u *UserStore) GetLocalName() string {
	array := strings.Split(u.terminusName, "@")
	return array[0]
}

// GetDomainName returns the part of the terminus name after the "@",
// falling back to TerminusDefaultDomain when the name has no domain
// part (or more than one "@").
func (u *UserStore) GetDomainName() string {
	array := strings.Split(u.terminusName, "@")
	if len(array) == 2 {
		return array[1]
	} else {
		return TerminusDefaultDomain
	}
}
|
||||
379
cli/pkg/wizard/wizard.go
Normal file
379
cli/pkg/wizard/wizard.go
Normal file
@@ -0,0 +1,379 @@
|
||||
package wizard
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// WizardConfig contains activation wizard configuration
type WizardConfig struct {
	System   SystemConfig   `json:"system"`
	Password PasswordConfig `json:"password"`
}

// SystemConfig system configuration
type SystemConfig struct {
	Location string     `json:"location"`      // Timezone location, e.g. "Asia/Shanghai"
	Language string     `json:"language"`      // Language, e.g. "zh-CN" or "en-US"
	Theme    string     `json:"theme"`         // Theme, e.g. "dark" or "light"
	FRP      *FRPConfig `json:"frp,omitempty"` // Optional FRP configuration
}

// FRPConfig holds reverse-tunnel (FRP) settings: the tunnel host and a
// JWS token authorizing its use.
type FRPConfig struct {
	Host string `json:"host"`
	Jws  string `json:"jws"`
}

// PasswordConfig password configuration
type PasswordConfig struct {
	CurrentPassword string `json:"current_password"` // Current password (from wizard settings)
	NewPassword     string `json:"new_password"`     // New password (for reset)
}

// TerminusInfo Terminus information response
type TerminusInfo struct {
	WizardStatus string `json:"wizardStatus"` // activation state machine status string
	OlaresId     string `json:"olaresId"`
	// Other fields...
}

// ActivationWizard activation wizard: drives the activation state
// machine against a BFL endpoint.
type ActivationWizard struct {
	BaseURL      string        // BFL base URL
	Config       WizardConfig  // system + password settings to apply
	AccessToken  string        // sent as X-Authorization when non-empty
	MaxRetries   int           // reserved retry budget
	PollInterval time.Duration // delay between status polls
}
|
||||
|
||||
// NewActivationWizard creates a new activation wizard
|
||||
func NewActivationWizard(baseURL, accessToken string, config WizardConfig) *ActivationWizard {
|
||||
return &ActivationWizard{
|
||||
BaseURL: baseURL,
|
||||
Config: config,
|
||||
AccessToken: accessToken,
|
||||
MaxRetries: 10,
|
||||
PollInterval: 2 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// RunWizard runs the complete activation wizard process (ref:
// ActivateWizard.vue updateInfo). It polls the wizard status every
// PollInterval and reacts to each state: configuring the system,
// resetting the password, terminating on completion or on any
// *_failed status. Returns nil on "completed", an error on failure
// states or after 10 consecutive failed status polls.
func (w *ActivationWizard) RunWizard() error {
	log.Println("=== Starting Terminus Activation Wizard ===")

	// Initialize state tracking variables (ref: ActivateWizard.vue)
	// getHostTerminusCount counts consecutive status-poll failures.
	var getHostTerminusCount int = 0
	// NOTE(review): this flag mirrors the Vue code's concurrency guard,
	// but in this single-goroutine loop it is always false at the check
	// below, so it is effectively a no-op.
	var updateTerminusInfoInProgress bool = false

	// 1. Get initial status
	status, err := w.updateTerminusInfo()
	if err != nil {
		return fmt.Errorf("failed to get initial status: %v", err)
	}

	log.Printf("Initial wizard status: %s", status)

	// 2. State machine loop processing (ref: ActivateWizard.vue updateInfo function)
	for {
		// Check failure status (ref: updateInfo line 230-236)
		if status == "vault_activate_failed" || status == "system_activate_failed" || status == "network_activate_failed" {
			return fmt.Errorf("activation failed with status: %s", status)
		}

		// Check in-progress status (ref: updateInfo line 238-244)
		if status == "vault_activating" || status == "system_activating" || status == "network_activating" || status == "wait_activate_network" {
			log.Printf("⏳ System is %s, waiting...", status)
		} else {
			// Handle specific status (ref: updateInfo line 246-284)
			switch status {
			case "completed":
				log.Println("✅ Activation completed successfully!")
				return nil

			case "wait_activate_system":
				log.Println("📋 Configuring system...")
				if err := w.configSystem(); err != nil {
					return fmt.Errorf("system configuration failed: %v", err)
				}

			// case "wait_activate_network":
			// 	log.Println("🌐 Configuring network...")
			// 	return nil

			case "wait_reset_password":
				log.Println("🔐 Resetting password...")
				// NOTE(review): this `status` deliberately shadows the
				// outer loop variable — it re-checks via the auth URL
				// without advancing the outer state.
				status, err := w.authRequestTerminusInfo()
				if err != nil {
					log.Printf("failed to get terminus info by authurl: %v retry ...\n", err)
				} else {
					if status == "wait_reset_password" {
						// Directly perform password reset, no need for complex DNS waiting logic
						if err := w.performPasswordReset(); err != nil {
							return fmt.Errorf("password reset failed: %v", err)
						}
						log.Println("✅ Password reset completed")
					}
				}

			default:
				log.Printf("⏳ Unknown status: %s, waiting...", status)
			}
		}

		// Wait and update status (ref: ActivateWizard.vue setInterval 2 seconds)
		time.Sleep(w.PollInterval)

		// Update status, prevent concurrency (ref: updateInfo line 225-228)
		if !updateTerminusInfoInProgress {
			updateTerminusInfoInProgress = true
			newStatus, err := w.updateTerminusInfo()
			updateTerminusInfoInProgress = false

			if err != nil {
				log.Printf("Warning: Failed to update status: %v", err)
				getHostTerminusCount++
				// NOTE(review): hard-coded 10 here duplicates
				// w.MaxRetries — confirm they should stay in sync.
				if getHostTerminusCount >= 10 {
					return fmt.Errorf("too many failed attempts to get terminus info")
				}
				continue
			}

			if newStatus != status {
				log.Printf("Status changed: %s → %s", status, newStatus)
				status = newStatus

				// Reset error count
				getHostTerminusCount = 0
			}
		}
	}
}
|
||||
|
||||
// updateTerminusInfo updates Terminus information: it GETs
// BaseURL/bfl/info/v1/olares-info (cache-busted with a timestamp query
// parameter) and returns the reported wizardStatus. If the request to
// BaseURL fails at the transport level, it falls back to the auth-URL
// variant (authRequestTerminusInfo).
func (w *ActivationWizard) updateTerminusInfo() (string, error) {
	url := fmt.Sprintf("%s/bfl/info/v1/olares-info?t=%d", w.BaseURL, time.Now().UnixMilli())

	client := &http.Client{
		Timeout: 5 * time.Second,
	}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return "", fmt.Errorf("failed to create request: %v", err)
	}

	req.Header.Set("Content-Type", "application/json")
	if w.AccessToken != "" {
		req.Header.Set("X-Authorization", w.AccessToken)
	}

	resp, err := client.Do(req)
	if err != nil {
		// If main URL fails, try backup URL
		return w.authRequestTerminusInfo()
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response: %v", err)
	}

	if resp.StatusCode != 200 {
		return "", fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
	}

	// Response envelope: {"data": {...TerminusInfo...}}
	var response struct {
		Data TerminusInfo `json:"data"`
	}

	if err := json.Unmarshal(body, &response); err != nil {
		return "", fmt.Errorf("failed to parse response: %v", err)
	}

	return response.Data.WizardStatus, nil
}
|
||||
|
||||
// authRequestTerminusInfo backup Terminus information request: same
// olares-info query as updateTerminusInfo, but issued against the auth
// URL derived from the global user store instead of BaseURL. Unlike the
// primary path, no X-Authorization header is sent here.
func (w *ActivationWizard) authRequestTerminusInfo() (string, error) {
	// Use globalUserStore to generate correct terminus_url

	var terminusURL = globalUserStore.GetAuthURL()

	// Build backup URL (usually terminus_url + '/api/olares-info')
	url := fmt.Sprintf("%s/bfl/info/v1/olares-info?t=%d", terminusURL, time.Now().UnixMilli())

	client := &http.Client{
		Timeout: 5 * time.Second,
	}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return "", fmt.Errorf("failed to create request: %v", err)
	}

	req.Header.Set("Content-Type", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		return "", fmt.Errorf("request failed: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response: %v", err)
	}

	if resp.StatusCode != 200 {
		return "", fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
	}

	// Response envelope: {"data": {...TerminusInfo...}}
	var response struct {
		Data TerminusInfo `json:"data"`
	}

	if err := json.Unmarshal(body, &response); err != nil {
		return "", fmt.Errorf("failed to parse response: %v", err)
	}

	return response.Data.WizardStatus, nil
}
|
||||
|
||||
// performPasswordReset performs password reset - simplified version.
// It logs in with the current password to obtain an access token, then
// calls ResetPassword with the new password. Credentials and the target
// auth URL come from the global user store and the wizard config; both
// passwords must be present in the config (the getters panic otherwise).
func (w *ActivationWizard) performPasswordReset() error {
	log.Printf("🔐 Performing password reset...")

	// In CLI environment, we need to get necessary information from global storage
	if globalUserStore == nil {
		return fmt.Errorf("global user store not initialized")
	}

	terminusName := globalUserStore.GetTerminusName()
	localName := globalUserStore.GetLocalName()
	authURL := globalUserStore.GetAuthURL()

	// If local environment (127.0.0.1), use bflURL instead of stored authURL
	if strings.Contains(w.BaseURL, "127.0.0.1") {
		authURL = w.BaseURL
		log.Printf("Detected local environment, using bflURL: %s", authURL)
	}

	// Get passwords from wizard configuration
	currentPassword := w.getCurrentPassword()
	newPassword := w.generateNewPassword()

	log.Printf("Resetting password for user: %s", localName)

	// 1. First login to get access token
	token, err := LoginTerminus(authURL, terminusName, localName, currentPassword, false)
	if err != nil {
		return fmt.Errorf("failed to login before password reset: %v", err)
	}

	log.Printf("Login successful, proceeding with password reset...")

	// 2. Perform password reset
	err = ResetPassword(authURL, localName, currentPassword, newPassword, token.AccessToken)
	if err != nil {
		return fmt.Errorf("password reset failed: %v", err)
	}

	log.Printf("🎉 Password reset completed successfully!")

	return nil
}
|
||||
|
||||
// getCurrentPassword gets current password (from configuration)
|
||||
func (w *ActivationWizard) getCurrentPassword() string {
|
||||
if w.Config.Password.CurrentPassword != "" {
|
||||
return w.Config.Password.CurrentPassword
|
||||
} else {
|
||||
panic("Current password not set in wizard config")
|
||||
}
|
||||
}
|
||||
|
||||
// generateNewPassword generates new password (from configuration or generate)
|
||||
func (w *ActivationWizard) generateNewPassword() string {
|
||||
if w.Config.Password.NewPassword != "" {
|
||||
return w.Config.Password.NewPassword
|
||||
} else {
|
||||
panic("New password not set in wizard config")
|
||||
}
|
||||
}
|
||||
|
||||
// configSystem configures system
|
||||
func (w *ActivationWizard) configSystem() error {
|
||||
log.Printf("Configuring system with location: %s, language: %s",
|
||||
w.Config.System.Location, w.Config.System.Language)
|
||||
|
||||
url := fmt.Sprintf("%s/bfl/settings/v1alpha1/activate", w.BaseURL)
|
||||
|
||||
jsonData, err := json.Marshal(w.Config.System)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal system config: %v", err)
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Timeout: 30 * time.Second, // System configuration may take longer
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", url, strings.NewReader(string(jsonData)))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if w.AccessToken != "" {
|
||||
req.Header.Set("X-Authorization", w.AccessToken)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 && resp.StatusCode != 201 {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
log.Printf("✅ System configuration completed")
|
||||
return nil
|
||||
}
|
||||
|
||||
// CustomWizardConfig creates custom wizard configuration
|
||||
func CustomWizardConfig(location, language string, enableTunnel bool, host, jws, currentPassword, newPassword string) WizardConfig {
|
||||
config := WizardConfig{
|
||||
System: SystemConfig{
|
||||
Location: location,
|
||||
Language: language,
|
||||
},
|
||||
Password: PasswordConfig{
|
||||
CurrentPassword: currentPassword, // Need to set at runtime
|
||||
NewPassword: newPassword, // Need to set at runtime
|
||||
},
|
||||
}
|
||||
|
||||
// If tunnel is enabled, initialize FRP configuration
|
||||
if enableTunnel {
|
||||
config.System.FRP = &FRPConfig{
|
||||
Host: host,
|
||||
Jws: jws,
|
||||
}
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
// RunActivationWizard convenient function to run activation wizard
|
||||
func RunActivationWizard(baseURL, accessToken string, config WizardConfig) error {
|
||||
wizard := NewActivationWizard(baseURL, accessToken, config)
|
||||
return wizard.RunWizard()
|
||||
}
|
||||
@@ -26,4 +26,4 @@ build-linux-in-docker:
|
||||
-w /olaresd \
|
||||
-e DEBIAN_FRONTEND=noninteractive \
|
||||
golang:1.24 \
|
||||
sh -c "apt-get -y update; apt-get -y install libudev-dev; make build-linux"
|
||||
sh -c "apt-get -y update; apt-get -y install libudev-dev libpcap-dev; make build-linux"
|
||||
@@ -5,7 +5,7 @@ FROM golang:1.23 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
COPY go.mod go.sum ./
|
||||
RUN apt update && apt install -y libudev-dev
|
||||
RUN apt update && apt install -y libudev-dev libpcap-dev
|
||||
RUN \
|
||||
echo ">> Downloading go modules..." && \
|
||||
go mod download
|
||||
|
||||
@@ -5,14 +5,16 @@ go 1.24.2
|
||||
toolchain go1.24.4
|
||||
|
||||
replace (
|
||||
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.23
|
||||
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.37
|
||||
bytetrade.io/web3os/backups-sdk => github.com/Above-Os/backups-sdk v0.1.17
|
||||
bytetrade.io/web3os/bfl => github.com/beclab/bfl v0.3.36
|
||||
github.com/labstack/echo/v4 => github.com/eball/echo/v4 v4.13.4-patch
|
||||
k8s.io/api => k8s.io/api v0.34.0
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.34.0
|
||||
k8s.io/client-go => k8s.io/client-go v0.34.0
|
||||
kubesphere.io/api => ../../kubesphere-ext/staging/src/kubesphere.io/api/
|
||||
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.19.6
|
||||
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -24,17 +26,19 @@ require (
|
||||
github.com/containerd/containerd v1.7.28
|
||||
github.com/distribution/distribution/v3 v3.0.0
|
||||
github.com/dustin/go-humanize v1.0.1
|
||||
github.com/eball/zeroconf v0.2.1
|
||||
github.com/eball/zeroconf v0.2.2
|
||||
github.com/godbus/dbus/v5 v5.1.0
|
||||
github.com/gofiber/fiber/v2 v2.52.9
|
||||
github.com/google/gopacket v1.1.19
|
||||
github.com/hirochachacha/go-smb2 v1.1.0
|
||||
github.com/jaypipes/ghw v0.13.0
|
||||
github.com/jochenvg/go-udev v0.0.0-20171110120927-d6b62d56d37b
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/klauspost/cpuid/v2 v2.2.8
|
||||
github.com/labstack/echo/v4 v4.13.4
|
||||
github.com/labstack/echo/v4 v4.0.0-00010101000000-000000000000
|
||||
github.com/libp2p/go-netroute v0.2.2
|
||||
github.com/mackerelio/go-osstat v0.2.5
|
||||
github.com/mdlayher/raw v0.1.0
|
||||
github.com/muka/network_manager v0.0.0-20200903202308-ae5ede816e07
|
||||
github.com/nxadm/tail v1.4.11
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
|
||||
@@ -46,6 +50,7 @@ require (
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/pflag v1.0.7
|
||||
github.com/txn2/txeh v1.5.5
|
||||
github.com/vishvananda/netlink v1.3.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
golang.org/x/crypto v0.41.0
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
|
||||
@@ -108,7 +113,6 @@ require (
|
||||
github.com/golang/snappy v0.0.3 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
@@ -123,6 +127,8 @@ require (
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mdlayher/packet v0.0.0-20220221164757-67998ac0ff93 // indirect
|
||||
github.com/mdlayher/socket v0.2.1 // indirect
|
||||
github.com/miekg/dns v1.1.55 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
@@ -160,6 +166,7 @@ require (
|
||||
github.com/valyala/fasthttp v1.51.0 // indirect
|
||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
||||
github.com/valyala/tcplisten v1.0.0 // indirect
|
||||
github.com/vishvananda/netns v0.0.4 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
|
||||
@@ -26,8 +26,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/beclab/Olares/cli v0.0.0-20251016092744-6241cceceb89 h1:5s9hXV8K3faToQtE9DbiM7O6jt5kIiEsLAaKn6F0UfA=
|
||||
github.com/beclab/Olares/cli v0.0.0-20251016092744-6241cceceb89/go.mod h1:iEvZxM6PnFxFRppneTzV3hgr2tIxDnsI3dhp4pi7pFg=
|
||||
github.com/beclab/app-service v0.4.23 h1:6kjpq7rie62FafQRBGXtM9MQD3CEMGmrOC7aGPbvLJY=
|
||||
github.com/beclab/app-service v0.4.23/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
|
||||
github.com/beclab/app-service v0.4.37 h1:gt60wQxgPWMc3oN94TNSdiQAvzqTyCv/OUP93jNSQTY=
|
||||
github.com/beclab/app-service v0.4.37/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
|
||||
github.com/beclab/bfl v0.3.36 h1:PgeSPGc+XoONiwFsKq9xX8rqcL4kVM1G/ut0lYYj/js=
|
||||
github.com/beclab/bfl v0.3.36/go.mod h1:A82u38MxYk1C3Lqnm4iUUK4hBeY9HHIs+xU4V93OnJk=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
@@ -83,8 +83,10 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/eball/zeroconf v0.2.1 h1:PZ89f6J2k2Z7q3oSzcZGFXJf97S7NPmj7H04ACw9v8c=
|
||||
github.com/eball/zeroconf v0.2.1/go.mod h1:eIbIjGYo9sSMaKWLcveHEPRWdyblz7q9ih2R1HnNw5M=
|
||||
github.com/eball/echo/v4 v4.13.4-patch h1:5w83KQrEqrxhc1BO0BpRBHssC37vFrWualUM27Rt2sg=
|
||||
github.com/eball/echo/v4 v4.13.4-patch/go.mod h1:ORgy8LWTq8knpwgaz538rAJMri7WgpoAD6H3zYccn84=
|
||||
github.com/eball/zeroconf v0.2.2 h1:y23X67tLFlU+b35LyM9THXGsdC88IUz803G+mzfeSeE=
|
||||
github.com/eball/zeroconf v0.2.2/go.mod h1:eIbIjGYo9sSMaKWLcveHEPRWdyblz7q9ih2R1HnNw5M=
|
||||
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
||||
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
|
||||
@@ -210,8 +212,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/labstack/echo/v4 v4.13.4 h1:oTZZW+T3s9gAu5L8vmzihV7/lkXGZuITzTQkTEhcXEA=
|
||||
github.com/labstack/echo/v4 v4.13.4/go.mod h1:g63b33BZ5vZzcIUF8AtRH40DrTlXnx4UMC8rBdndmjQ=
|
||||
github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0=
|
||||
github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
|
||||
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
|
||||
@@ -228,6 +228,12 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mdlayher/packet v0.0.0-20220221164757-67998ac0ff93 h1:elUwhY+HQaIV9kMgmsU9zOF413pDKoo2uFNypgP5SxM=
|
||||
github.com/mdlayher/packet v0.0.0-20220221164757-67998ac0ff93/go.mod h1:K9sWKMgN6wa78BbuJL+dT1ZZdiAfhkc2fb6XXLjHulk=
|
||||
github.com/mdlayher/raw v0.1.0 h1:K4PFMVy+AFsp0Zdlrts7yNhxc/uXoPVHi9RzRvtZF2Y=
|
||||
github.com/mdlayher/raw v0.1.0/go.mod h1:yXnxvs6c0XoF/aK52/H5PjsVHmWBCFfZUfoh/Y5s9Sg=
|
||||
github.com/mdlayher/socket v0.2.1 h1:F2aaOwb53VsBE+ebRS9bLd7yPOfYUMC8lOODdCBDY6w=
|
||||
github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
|
||||
github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
|
||||
@@ -358,6 +364,10 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ
|
||||
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
|
||||
github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
|
||||
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
@@ -458,8 +468,10 @@ golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/web5/jws"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"k8s.io/klog/v2"
|
||||
@@ -18,7 +20,8 @@ func (h *Handlers) ResolveOlaresName(c *fiber.Ctx) error {
|
||||
klog.Errorf("Failed to resolve DID for %s: %v", olaresName, err)
|
||||
return h.ErrJSON(c, fiber.StatusInternalServerError, "Failed to resolve DID")
|
||||
}
|
||||
return h.OkJSON(c, "success", result)
|
||||
// return DID protocol resolution result
|
||||
return c.Status(http.StatusOK).JSON(result)
|
||||
}
|
||||
|
||||
func (h *Handlers) CheckJWS(c *fiber.Ctx) error {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/beclab/Olares/daemon/internel/client"
|
||||
@@ -59,7 +60,18 @@ func (h *Handlers) RequireOwner(next func(ctx *fiber.Ctx) error) func(ctx *fiber
|
||||
// get owner from release file
|
||||
envOlaresID, err := utils.GetOlaresNameFromReleaseFile()
|
||||
if err != nil {
|
||||
return h.ErrJSON(ctx, http.StatusInternalServerError, "failed to get Olares ID from release file")
|
||||
return h.ErrJSON(ctx, http.StatusInternalServerError, fmt.Sprintf("failed to get Olares ID from release file: %v", err))
|
||||
}
|
||||
|
||||
if envOlaresID == "" {
|
||||
if isInstalled, err := state.IsTerminusInstalled(); err != nil {
|
||||
return h.ErrJSON(ctx, http.StatusInternalServerError, fmt.Sprintf("failed to check if Olares is installed: %v", err))
|
||||
} else {
|
||||
// not installed, skip owner check
|
||||
if !isInstalled {
|
||||
return next(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if c.OlaresID() != envOlaresID {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"errors"
|
||||
"net"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/daemon/pkg/nets"
|
||||
"github.com/eball/zeroconf"
|
||||
@@ -17,6 +18,7 @@ type DNSConfig struct {
|
||||
type instanceServer struct {
|
||||
queryServer *zeroconf.Server
|
||||
host *DNSConfig
|
||||
aliases []string
|
||||
}
|
||||
|
||||
type mDNSServer struct {
|
||||
@@ -66,6 +68,14 @@ func (s *mDNSServer) StartAll() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// add host alias
|
||||
domainTokens := strings.Split(domain, ".")
|
||||
alias := []string{strings.Join(domainTokens, "-") + ".local."}
|
||||
|
||||
// TODO: add more alias if needed
|
||||
klog.Info("add host alias, ", alias[0])
|
||||
server.AddHostAlias(alias[0])
|
||||
|
||||
s.servers[domain] = &instanceServer{
|
||||
queryServer: server,
|
||||
host: &DNSConfig{Domain: domain},
|
||||
|
||||
141
daemon/internel/intranet/dsr.go
Normal file
141
daemon/internel/intranet/dsr.go
Normal file
@@ -0,0 +1,141 @@
|
||||
package intranet
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// ipv4Checksum computes the RFC 1071 Internet checksum over hdr: the
// one's-complement of the one's-complement sum of all 16-bit big-endian
// words, with carries folded back in.
//
// An IPv4 header is always a multiple of 4 bytes (IHL * 4), but for
// safety an odd-length input is handled per RFC 1071 by treating the
// trailing byte as the high byte of a zero-padded 16-bit word, instead
// of slicing past the end of hdr as the previous version would have.
func ipv4Checksum(hdr []byte) uint16 {
	var sum uint32

	// Sum the largest even-length prefix as 16-bit words.
	n := len(hdr) &^ 1
	for i := 0; i < n; i += 2 {
		sum += uint32(binary.BigEndian.Uint16(hdr[i : i+2]))
	}
	// Odd trailing byte: pad with a zero low byte (RFC 1071).
	if len(hdr)&1 == 1 {
		sum += uint32(hdr[len(hdr)-1]) << 8
	}

	// Fold carries until the sum fits in 16 bits.
	for (sum >> 16) != 0 {
		sum = (sum & 0xffff) + (sum >> 16)
	}
	return ^uint16(sum)
}
|
||||
|
||||
// fragmentIPv4 attempts to split an Ethernet frame carrying an IPv4 packet
|
||||
// into multiple Ethernet frames where each IP fragment fits within the
|
||||
// given interface MTU. mtu is the interface MTU (i.e., maximum IP packet
|
||||
// size including IP header). Returns a slice of full ethernet frames ready
|
||||
// to send. If the frame is not IPv4 or can't be fragmented (DF bit set)
|
||||
// an error is returned.
|
||||
func fragmentIPv4(frame []byte, mtu int) ([][]byte, error) {
|
||||
// Need at least Ethernet + minimum IP header
|
||||
if len(frame) < 14+20 {
|
||||
return nil, fmtError("frame too short for IPv4")
|
||||
}
|
||||
ethType := binary.BigEndian.Uint16(frame[12:14])
|
||||
const etherTypeIPv4 = 0x0800
|
||||
if ethType != etherTypeIPv4 {
|
||||
return nil, fmtError("not an IPv4 ethernet frame")
|
||||
}
|
||||
|
||||
ipStart := 14
|
||||
verIhl := frame[ipStart]
|
||||
if verIhl>>4 != 4 {
|
||||
return nil, fmtError("not IPv4")
|
||||
}
|
||||
ihl := int(verIhl & 0x0f)
|
||||
ipHeaderLen := ihl * 4
|
||||
if ipHeaderLen < 20 || len(frame) < ipStart+ipHeaderLen {
|
||||
return nil, fmtError("invalid ip header length")
|
||||
}
|
||||
|
||||
// Read total length from IP header
|
||||
totalLen := int(binary.BigEndian.Uint16(frame[ipStart+2 : ipStart+4]))
|
||||
if totalLen < ipHeaderLen {
|
||||
return nil, fmtError("invalid total length")
|
||||
}
|
||||
payloadLen := totalLen - ipHeaderLen
|
||||
if len(frame) < ipStart+ipHeaderLen+payloadLen {
|
||||
// allow pcap frames with extra trailing bytes (FCS); but ensure payload present
|
||||
if len(frame) < ipStart+ipHeaderLen {
|
||||
return nil, fmtError("frame shorter than ip header")
|
||||
}
|
||||
// adjust payloadLen to available bytes
|
||||
available := len(frame) - (ipStart + ipHeaderLen)
|
||||
if available <= 0 {
|
||||
return nil, fmtError("no ip payload available")
|
||||
}
|
||||
payloadLen = available
|
||||
totalLen = ipHeaderLen + payloadLen
|
||||
}
|
||||
|
||||
// Check DF (Don't Fragment)
|
||||
flagsFrag := binary.BigEndian.Uint16(frame[ipStart+6 : ipStart+8])
|
||||
const dfMask = 0x4000
|
||||
if flagsFrag&dfMask != 0 {
|
||||
return nil, fmtError("DF set; cannot fragment")
|
||||
}
|
||||
|
||||
// Compute per-fragment payload size: mtu - ipHeaderLen. Must be multiple of 8.
|
||||
if mtu <= ipHeaderLen {
|
||||
return nil, fmtError("mtu too small for ip header")
|
||||
}
|
||||
maxPayload := mtu - ipHeaderLen
|
||||
// Round down to multiple of 8
|
||||
maxPayload = maxPayload &^ 7
|
||||
if maxPayload <= 0 {
|
||||
return nil, fmtError("mtu too small for fragmentation unit")
|
||||
}
|
||||
|
||||
ipHeader := make([]byte, ipHeaderLen)
|
||||
copy(ipHeader, frame[ipStart:ipStart+ipHeaderLen])
|
||||
payload := make([]byte, payloadLen)
|
||||
copy(payload, frame[ipStart+ipHeaderLen:ipStart+ipHeaderLen+payloadLen])
|
||||
|
||||
// Iterate and build fragments
|
||||
var frags [][]byte
|
||||
offset := 0
|
||||
for offset < payloadLen {
|
||||
chunk := maxPayload
|
||||
if remaining := payloadLen - offset; remaining <= maxPayload {
|
||||
chunk = remaining
|
||||
}
|
||||
|
||||
// Create new IP header for fragment
|
||||
newIP := make([]byte, ipHeaderLen)
|
||||
copy(newIP, ipHeader)
|
||||
|
||||
// Set total length
|
||||
binary.BigEndian.PutUint16(newIP[2:4], uint16(ipHeaderLen+chunk))
|
||||
|
||||
// Set flags+offset: preserve DF, set MF for non-last
|
||||
origFlags := binary.BigEndian.Uint16(ipHeader[6:8])
|
||||
df := origFlags & dfMask
|
||||
var mf uint16
|
||||
if offset+chunk < payloadLen {
|
||||
mf = 0x2000
|
||||
}
|
||||
fragOffset := uint16(offset / 8)
|
||||
combined := df | mf | (fragOffset & 0x1fff)
|
||||
binary.BigEndian.PutUint16(newIP[6:8], combined)
|
||||
|
||||
// Zero checksum and compute
|
||||
newIP[10] = 0
|
||||
newIP[11] = 0
|
||||
csum := ipv4Checksum(newIP)
|
||||
binary.BigEndian.PutUint16(newIP[10:12], csum)
|
||||
|
||||
// Build ethernet frame: copy original ethernet header, but use the modified IP header + fragment payload
|
||||
eth := make([]byte, 14)
|
||||
copy(eth, frame[:14])
|
||||
fragFrame := make([]byte, 14+ipHeaderLen+chunk)
|
||||
copy(fragFrame[:14], eth)
|
||||
copy(fragFrame[14:14+ipHeaderLen], newIP)
|
||||
copy(fragFrame[14+ipHeaderLen:], payload[offset:offset+chunk])
|
||||
|
||||
frags = append(frags, fragFrame)
|
||||
offset += chunk
|
||||
}
|
||||
|
||||
return frags, nil
|
||||
}
|
||||
|
||||
// simpleErr is a minimal error implementation, kept local so this file
// can build error values without importing fmt.
type simpleErr struct{ s string }

// Error returns the stored message.
func (e *simpleErr) Error() string { return e.s }

// fmtError wraps a plain message in an error value.
func fmtError(s string) error { return &simpleErr{s} }
|
||||
47
daemon/internel/intranet/dsr_common.go
Normal file
47
daemon/internel/intranet/dsr_common.go
Normal file
@@ -0,0 +1,47 @@
|
||||
//go:build !(linux && amd64)
|
||||
// +build !linux !amd64
|
||||
|
||||
package intranet
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
)
|
||||
|
||||
// DSRProxy is the fallback implementation of the DSR proxy for platforms
// excluded by this file's build tags (everything except linux/amd64).
// Every method is a no-op that reports success, so callers can run the
// same code path on all platforms; only regonfigure reports an error.
type DSRProxy struct {
}

// NewDSRProxy returns a new no-op DSRProxy.
func NewDSRProxy() *DSRProxy {
	return &DSRProxy{}
}

// WithVIP is a no-op on unsupported platforms and always returns nil.
func (d *DSRProxy) WithVIP(vip string, intf string) error {
	return nil
}

// WithBackend is a no-op on unsupported platforms and always returns nil.
func (d *DSRProxy) WithBackend(backendIP string, backendMAC string) error {
	return nil
}

// WithCalicoInterface is a no-op on unsupported platforms and always
// returns nil.
func (d *DSRProxy) WithCalicoInterface(intf string) error {
	return nil
}

// Close is a no-op on unsupported platforms.
func (d *DSRProxy) Close() {}

// Stop is a no-op on unsupported platforms and always returns nil.
func (d *DSRProxy) Stop() error {
	return nil
}

// start is the unexported no-op counterpart of Start.
func (d *DSRProxy) start() error { return nil }

// Start is a no-op on unsupported platforms and always returns nil.
func (d *DSRProxy) Start() error {
	return nil
}

// handleResponse processes response packets from backend, rewriting source IP back to VIP
// (no-op in this fallback build; the parameters are ignored).
func (d *DSRProxy) handleResponse(data []byte, conn net.PacketConn) {}

// regonfigure always fails here: unlike the other stubs it surfaces an
// error, presumably so callers notice that reconfiguration is genuinely
// unsupported on this platform — verify against the linux/amd64 variant.
// NOTE(review): "regonfigure" looks like a typo of "reconfigure"; confirm
// call sites before renaming, since a rename changes the method set.
func (d *DSRProxy) regonfigure() error {
	return errors.New("unsupported operation")
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user