Compare commits
425 Commits
daemon/fix
...
feat/sort-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
833d480466 | ||
|
|
4ed649bff7 | ||
|
|
e383c22fe5 | ||
|
|
ce15e2ce00 | ||
|
|
957dff10a6 | ||
|
|
da35df9280 | ||
|
|
14edf88acb | ||
|
|
939a9b5ba3 | ||
|
|
3bd0705742 | ||
|
|
6662923b87 | ||
|
|
f39fec6c68 | ||
|
|
e1362a43f7 | ||
|
|
a7c611571f | ||
|
|
f0f2d4798c | ||
|
|
9d6fd7a276 | ||
|
|
02e45a7fb3 | ||
|
|
57a003efb9 | ||
|
|
aca446a05a | ||
|
|
b1cb265654 | ||
|
|
60f3976da9 | ||
|
|
b5f175dcb8 | ||
|
|
3b0cc74984 | ||
|
|
d3b2dc3029 | ||
|
|
019e1948ce | ||
|
|
2f87901cf8 | ||
|
|
0b2c5d3835 | ||
|
|
0eeeb99620 | ||
|
|
e73480b353 | ||
|
|
2ad44d6617 | ||
|
|
93385b655d | ||
|
|
60d37998af | ||
|
|
4cf740b4f8 | ||
|
|
ba8c7faa7d | ||
|
|
6ec7f214cb | ||
|
|
8e1e71fad3 | ||
|
|
3007c78926 | ||
|
|
b0787c19a1 | ||
|
|
1a485ca959 | ||
|
|
ce8c82f9b5 | ||
|
|
3ae6852c81 | ||
|
|
380cb98b66 | ||
|
|
77d35d8890 | ||
|
|
849c098696 | ||
|
|
42f5f3108b | ||
|
|
1f7be15e51 | ||
|
|
bc0da70a85 | ||
|
|
6898ebb3a2 | ||
|
|
63f302cd82 | ||
|
|
08b7cb872e | ||
|
|
543328fa6e | ||
|
|
3334bc69e4 | ||
|
|
4d061544a6 | ||
|
|
5e58695c75 | ||
|
|
6ebb19db03 | ||
|
|
a08fd3b28c | ||
|
|
abbecf8e12 | ||
|
|
e150b9418b | ||
|
|
1e5176f17b | ||
|
|
605b862937 | ||
|
|
0110413528 | ||
|
|
0726d70b58 | ||
|
|
8abf6d8b65 | ||
|
|
b0f495c37a | ||
|
|
4e9b8d840d | ||
|
|
57579813de | ||
|
|
97dd238c44 | ||
|
|
3095530d0d | ||
|
|
3e8120baf6 | ||
|
|
0685c4326b | ||
|
|
af9e1993d1 | ||
|
|
ba8868d771 | ||
|
|
7ee1d7cae1 | ||
|
|
cb17633f57 | ||
|
|
18e94af22b | ||
|
|
b81665afe1 | ||
|
|
acb0fae406 | ||
|
|
e5fef95f4e | ||
|
|
55fe22ed4c | ||
|
|
fee742d756 | ||
|
|
36b4e792f6 | ||
|
|
8810a7657e | ||
|
|
59d87c860b | ||
|
|
97c12b0b21 | ||
|
|
9746ffdc33 | ||
|
|
8cda14a78c | ||
|
|
a4c0161cb1 | ||
|
|
505a438fa3 | ||
|
|
1a794c9fc4 | ||
|
|
03e8dd0ac7 | ||
|
|
eea2dfb67a | ||
|
|
316ffe4f35 | ||
|
|
08a380df61 | ||
|
|
faa7638353 | ||
|
|
58e869604a | ||
|
|
fc57d0b9f1 | ||
|
|
a61dff75b9 | ||
|
|
0b9c1a09b9 | ||
|
|
3178e06349 | ||
|
|
69c341060b | ||
|
|
d56daad3f0 | ||
|
|
2b239284b3 | ||
|
|
e2e8b84eef | ||
|
|
7afb59cd3a | ||
|
|
6474487e75 | ||
|
|
3fd15d418b | ||
|
|
243ad15e66 | ||
|
|
56367c964e | ||
|
|
8911b33d3e | ||
|
|
f7c7939493 | ||
|
|
8eee97f779 | ||
|
|
d3c1a37378 | ||
|
|
4a8303d050 | ||
|
|
61df0056ba | ||
|
|
75c48ef5ee | ||
|
|
4fed6bd618 | ||
|
|
581e252f30 | ||
|
|
f1d479cf1d | ||
|
|
d070e53480 | ||
|
|
89719a8d48 | ||
|
|
085bef64b5 | ||
|
|
963ca8ab48 | ||
|
|
59922bc5cf | ||
|
|
1f4b3f94ca | ||
|
|
aa9e89c0c9 | ||
|
|
760aef5521 | ||
|
|
ca1d7ebd09 | ||
|
|
a282878cfe | ||
|
|
95ad815142 | ||
|
|
984582c520 | ||
|
|
d10e6f0e20 | ||
|
|
0db6227f98 | ||
|
|
46aa153989 | ||
|
|
3cfd619d9d | ||
|
|
82e3d7d2d4 | ||
|
|
9188718cb6 | ||
|
|
7f27a03e84 | ||
|
|
202a17dd6f | ||
|
|
fe6817ff78 | ||
|
|
3991bc2e08 | ||
|
|
c84e4deded | ||
|
|
3a19d380f3 | ||
|
|
21cf7466ee | ||
|
|
9a0db453d3 | ||
|
|
3021a88e70 | ||
|
|
232c277412 | ||
|
|
d5e0523c6a | ||
|
|
03641fb388 | ||
|
|
023208603c | ||
|
|
21d10c37b3 | ||
|
|
5be2c61091 | ||
|
|
da12178933 | ||
|
|
b6484e1a19 | ||
|
|
206c946408 | ||
|
|
c57c67db24 | ||
|
|
1ed26c8264 | ||
|
|
18ece294ce | ||
|
|
2f44ae273f | ||
|
|
a6457f0a2a | ||
|
|
3f6bc2bf36 | ||
|
|
f7248a1c74 | ||
|
|
54fc939ea3 | ||
|
|
420bb1d805 | ||
|
|
39c0d2c777 | ||
|
|
d8e3a64b61 | ||
|
|
78dbda300b | ||
|
|
16440bc3c5 | ||
|
|
f5b8d226c9 | ||
|
|
a80142cdd7 | ||
|
|
e69364d329 | ||
|
|
6facfd93ee | ||
|
|
7e9b0bcdc5 | ||
|
|
bb461e8573 | ||
|
|
926058cbd0 | ||
|
|
44d56f64e1 | ||
|
|
8074e7dee9 | ||
|
|
67af7ee3fa | ||
|
|
e6b3624bae | ||
|
|
c27c8a61f1 | ||
|
|
79e6d4b6e6 | ||
|
|
ea15f6d04b | ||
|
|
dffcafbfd2 | ||
|
|
e30afb517b | ||
|
|
97a701c7e4 | ||
|
|
24c68ada0b | ||
|
|
ec5358f9b0 | ||
|
|
03bb1ab2b8 | ||
|
|
d5754b8977 | ||
|
|
8017975124 | ||
|
|
66b77ed5a1 | ||
|
|
b990d50b01 | ||
|
|
f1890e304b | ||
|
|
587ea07a61 | ||
|
|
e185931214 | ||
|
|
78fe2b29d2 | ||
|
|
9fc92b4f32 | ||
|
|
d33a8b7d31 | ||
|
|
825a05b02f | ||
|
|
6aa9b08b63 | ||
|
|
dcb2505c8e | ||
|
|
4917a2d2ab | ||
|
|
aba1d3336d | ||
|
|
7c2c68e03b | ||
|
|
ff30a31748 | ||
|
|
3d8d351996 | ||
|
|
eea8f607fa | ||
|
|
d3f357eb13 | ||
|
|
e19ef85071 | ||
|
|
1e7cc5b6ad | ||
|
|
6e4c27136a | ||
|
|
afb1e5b9f7 | ||
|
|
ed90b16fd3 | ||
|
|
2901fcfd24 | ||
|
|
c918459a8e | ||
|
|
9d3c560648 | ||
|
|
c901c54716 | ||
|
|
d925999a70 | ||
|
|
aa5aa78677 | ||
|
|
fd37490fcd | ||
|
|
d55fb76a71 | ||
|
|
ba3954dc0f | ||
|
|
faf20cdf0b | ||
|
|
6321909582 | ||
|
|
355f7c4e69 | ||
|
|
2c3c949bc9 | ||
|
|
babf756bd5 | ||
|
|
c341e22f76 | ||
|
|
0a0e52dd3d | ||
|
|
081b4064a1 | ||
|
|
9a224ea780 | ||
|
|
ab3a6ba34e | ||
|
|
2ec8300663 | ||
|
|
8762f26c04 | ||
|
|
65e50afd27 | ||
|
|
aff0b38c0b | ||
|
|
fefd635f6c | ||
|
|
a8b410a0da | ||
|
|
841b5229e6 | ||
|
|
89421058bc | ||
|
|
4d5f69e9dc | ||
|
|
8cb7ee6aad | ||
|
|
ab62c06d07 | ||
|
|
d85c81ff57 | ||
|
|
94d07adf9c | ||
|
|
3eeefb18c2 | ||
|
|
34b58757ec | ||
|
|
0df243184c | ||
|
|
99420a8a48 | ||
|
|
b013bf6ea9 | ||
|
|
1bedb4d182 | ||
|
|
f844d1221e | ||
|
|
7950d1be7d | ||
|
|
ffdeb91dcd | ||
|
|
a356b13d5a | ||
|
|
db61f05fb6 | ||
|
|
26937ab505 | ||
|
|
3dc2132e72 | ||
|
|
b50f2bbf6c | ||
|
|
16a0a5556d | ||
|
|
32166687ec | ||
|
|
db3498e0a0 | ||
|
|
2dc70ede78 | ||
|
|
694f385d2b | ||
|
|
407c126419 | ||
|
|
18746c917e | ||
|
|
01324970b4 | ||
|
|
b068669c3c | ||
|
|
bc134283d9 | ||
|
|
9f3a0f3c32 | ||
|
|
ca1ab3fef9 | ||
|
|
b6394cc39c | ||
|
|
36915f5f03 | ||
|
|
1ad305f874 | ||
|
|
58cdd7de69 | ||
|
|
4cee006a1e | ||
|
|
7bbc53bef9 | ||
|
|
1432168ec0 | ||
|
|
534ae8dd3a | ||
|
|
0a25611cf5 | ||
|
|
17990b3558 | ||
|
|
cb80d04265 | ||
|
|
0194a493ab | ||
|
|
06e49cb638 | ||
|
|
93dea60906 | ||
|
|
177f955a6b | ||
|
|
324a0b4071 | ||
|
|
132d6432cc | ||
|
|
4c51efb0b7 | ||
|
|
8f0f2e5844 | ||
|
|
0ae1524682 | ||
|
|
b24ba06794 | ||
|
|
ec6ce88e08 | ||
|
|
7839bed160 | ||
|
|
39d3689d01 | ||
|
|
ef347ff8ef | ||
|
|
908629dd9a | ||
|
|
4cea6ab238 | ||
|
|
a0e8a69848 | ||
|
|
df2b5b4274 | ||
|
|
f18d3af3b4 | ||
|
|
b4a447b596 | ||
|
|
d329630509 | ||
|
|
1af84b046d | ||
|
|
84e8543309 | ||
|
|
09f7ecd295 | ||
|
|
1a8dbf0f2c | ||
|
|
3f1e695581 | ||
|
|
8881503ca6 | ||
|
|
317da8a13e | ||
|
|
316d719d64 | ||
|
|
01e1b79674 | ||
|
|
9b7ff997b9 | ||
|
|
6d5c2a5e2b | ||
|
|
d0185a484f | ||
|
|
aadacbf729 | ||
|
|
86290d1ce9 | ||
|
|
d5ddd59997 | ||
|
|
64883f1752 | ||
|
|
ef0b8d3180 | ||
|
|
101379e6ba | ||
|
|
80947af962 | ||
|
|
9ebb80a111 | ||
|
|
37e99b977c | ||
|
|
dcbc505e7a | ||
|
|
9f518d6c4b | ||
|
|
6f88df0570 | ||
|
|
f97c9521f3 | ||
|
|
61aa638be9 | ||
|
|
6285359f31 | ||
|
|
f72987d55f | ||
|
|
33292988bb | ||
|
|
261cd45535 | ||
|
|
f9994e7e88 | ||
|
|
b0ecfefa09 | ||
|
|
e1e4528db6 | ||
|
|
6eecd514e4 | ||
|
|
5b4464533b | ||
|
|
62233642ad | ||
|
|
26910b80b9 | ||
|
|
306c7a2480 | ||
|
|
d26f4f1ac2 | ||
|
|
1509ab6435 | ||
|
|
df0fcb1801 | ||
|
|
359a269e88 | ||
|
|
f621aeef54 | ||
|
|
10ce9b44fc | ||
|
|
6d5e66b73b | ||
|
|
2f701510e0 | ||
|
|
ec38cbd285 | ||
|
|
640d8c1bf4 | ||
|
|
c570cf8fc2 | ||
|
|
9e18f11822 | ||
|
|
121482528b | ||
|
|
ac482bceae | ||
|
|
3692f5ed7d | ||
|
|
ce32e32433 | ||
|
|
fdeea2f4a1 | ||
|
|
837aa2037f | ||
|
|
45065b03e3 | ||
|
|
195f8c6ec7 | ||
|
|
20202d1cdb | ||
|
|
e4d31241da | ||
|
|
83dc24df94 | ||
|
|
890eb8ea46 | ||
|
|
d57f01f88b | ||
|
|
3297f3088e | ||
|
|
f34ab4d5ce | ||
|
|
2f775e098e | ||
|
|
56600420f1 | ||
|
|
4e579bc934 | ||
|
|
8571da9761 | ||
|
|
0a591f7a3c | ||
|
|
84dec294da | ||
|
|
e3cb3e5a54 | ||
|
|
9fb31d52b7 | ||
|
|
5a7c8f539a | ||
|
|
9305b09717 | ||
|
|
25b2ff91af | ||
|
|
7f6091afb1 | ||
|
|
fe3acf669e | ||
|
|
18950cc43b | ||
|
|
d25bde12c3 | ||
|
|
f0542c3ea5 | ||
|
|
70185da4a7 | ||
|
|
1dc859f225 | ||
|
|
7a84a51940 | ||
|
|
d5122fac17 | ||
|
|
36167790df | ||
|
|
ad5e1328c5 | ||
|
|
e2b8cf1cf2 | ||
|
|
6f8d9f15b2 | ||
|
|
64215b478f | ||
|
|
f8faecdc36 | ||
|
|
656894e46a | ||
|
|
3caaa6b63b | ||
|
|
ad5acdbf1d | ||
|
|
24ef743d24 | ||
|
|
0e3e61afe3 | ||
|
|
de254bee66 | ||
|
|
96f2aa5b30 | ||
|
|
f86c4e5e52 | ||
|
|
05c2fe8c35 | ||
|
|
dcd8413dcf | ||
|
|
b4b13b0aa9 | ||
|
|
d8d4b6d9f9 | ||
|
|
2ebc4dc700 | ||
|
|
910334101c | ||
|
|
b53dc23d80 | ||
|
|
0325f41617 | ||
|
|
99176209ea | ||
|
|
694f349e10 | ||
|
|
ea872ca156 | ||
|
|
e5bdfa2840 | ||
|
|
0a474797a6 | ||
|
|
6215da6cc0 | ||
|
|
4c3cf83106 | ||
|
|
6d7c963898 | ||
|
|
1cf8dcda3b | ||
|
|
38c6f29023 | ||
|
|
fd08ef8816 | ||
|
|
a176a5dc7a | ||
|
|
e02fd1b2de | ||
|
|
1305ffe910 | ||
|
|
5a434b5b50 | ||
|
|
d8db9c458c | ||
|
|
861c5812b3 |
26
.github/workflows/check.yaml
vendored
26
.github/workflows/check.yaml
vendored
@@ -3,12 +3,28 @@ name: Lint and Test Charts
|
||||
on:
|
||||
push:
|
||||
branches: [ "main", "release-*" ]
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
paths:
|
||||
- '!docs/**'
|
||||
- 'apps/.olares/**'
|
||||
- 'build/**'
|
||||
- 'cli/**'
|
||||
- 'daemon/**'
|
||||
- 'framework/**/.olares/**'
|
||||
- 'infrastructure/**/.olares/**'
|
||||
- 'platform/**/.olares/**'
|
||||
- 'vendor/**'
|
||||
pull_request_target:
|
||||
branches: [ "main", "release-*" ]
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
paths:
|
||||
- '!docs/**'
|
||||
- 'apps/.olares/**'
|
||||
- 'build/**'
|
||||
- 'cli/**'
|
||||
- 'daemon/**'
|
||||
- 'framework/**/.olares/**'
|
||||
- 'infrastructure/**/.olares/**'
|
||||
- 'platform/**/.olares/**'
|
||||
- 'vendor/**'
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
@@ -59,7 +75,7 @@ jobs:
|
||||
steps:
|
||||
- id: generate
|
||||
run: |
|
||||
v=1.12.2-$(echo $RANDOM$RANDOM)
|
||||
v=1.12.3-$(echo $RANDOM$RANDOM)
|
||||
echo "version=$v" >> "$GITHUB_OUTPUT"
|
||||
|
||||
upload-cli:
|
||||
|
||||
32
.github/workflows/module_appservice_build_main.yaml
vendored
Normal file
32
.github/workflows/module_appservice_build_main.yaml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: App-Service Build test
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "module-appservice"
|
||||
paths:
|
||||
- 'framework/app-service/**'
|
||||
- '!framework/app-service/.olares/**'
|
||||
- '!framework/app-service/README.md'
|
||||
- '!framework/app-service/PROJECT'
|
||||
pull_request:
|
||||
branches:
|
||||
- "module-appservice"
|
||||
paths:
|
||||
- 'framework/app-service/**'
|
||||
- '!framework/app-service/.olares/**'
|
||||
- '!framework/app-service/README.md'
|
||||
- '!framework/app-service/PROJECT'
|
||||
jobs:
|
||||
build0-main:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y btrfs-progs libbtrfs-dev
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.24.6'
|
||||
- run: make build
|
||||
working-directory: framework/app-service
|
||||
62
.github/workflows/module_appservice_publish_docker.yaml
vendored
Normal file
62
.github/workflows/module_appservice_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
name: Publish app-service to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push amd64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/app-service:${{ github.event.inputs.tags }}-amd64
|
||||
context: framework/app-service
|
||||
file: framework/app-service/Dockerfile
|
||||
platforms: linux/amd64
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push arm64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/app-service:${{ github.event.inputs.tags }}-arm64
|
||||
context: framework/app-service
|
||||
file: framework/app-service/Dockerfile
|
||||
platforms: linux/arm64
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create beclab/app-service:${{ github.event.inputs.tags }} --amend beclab/app-service:${{ github.event.inputs.tags }}-amd64 --amend beclab/app-service:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push beclab/app-service:${{ github.event.inputs.tags }}
|
||||
63
.github/workflows/module_appservice_publish_imageservice.yaml
vendored
Normal file
63
.github/workflows/module_appservice_publish_imageservice.yaml
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
name: Publish image-service to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push amd64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/image-service:${{ github.event.inputs.tags }}-amd64
|
||||
context: framework/app-service
|
||||
file: framework/app-service/Dockerfile.image
|
||||
platforms: linux/amd64
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push arm64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/image-service:${{ github.event.inputs.tags }}-arm64
|
||||
context: framework/app-service
|
||||
file: framework/app-service/Dockerfile.image
|
||||
platforms: linux/arm64
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create beclab/image-service:${{ github.event.inputs.tags }} --amend beclab/image-service:${{ github.event.inputs.tags }}-amd64 --amend beclab/image-service:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push beclab/image-service:${{ github.event.inputs.tags }}
|
||||
|
||||
31
.github/workflows/module_backup_build_main.yaml
vendored
Normal file
31
.github/workflows/module_backup_build_main.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: Backup Server Build test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "module-backup"
|
||||
paths:
|
||||
- 'framework/backup-server/**'
|
||||
- '!framework/backup-server/.olares/**'
|
||||
- '!framework/backup-server/README.md'
|
||||
pull_request:
|
||||
branches:
|
||||
- "module-backup"
|
||||
paths:
|
||||
- 'framework/backup-server/**'
|
||||
- '!framework/backup-server/.olares/**'
|
||||
- '!framework/backup-server/README.md'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.21.10'
|
||||
- name: Run Build
|
||||
run: |
|
||||
make all
|
||||
working-directory: framework/backup-server
|
||||
36
.github/workflows/module_backup_publish_docker.yaml
vendored
Normal file
36
.github/workflows/module_backup_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
name: Publish Backup Server to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/backup-server:v${{ github.event.inputs.tags }}
|
||||
file: framework/backup-server/Dockerfile
|
||||
context: framework/backup-server
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
||||
36
.github/workflows/module_backup_publish_sidecar.yaml
vendored
Normal file
36
.github/workflows/module_backup_publish_sidecar.yaml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
name: Publish Sidecar Backup Sync to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/sidecar-backup-sync:v${{ github.event.inputs.tags }}
|
||||
file: framework/backup-server/Dockerfile.sidecar
|
||||
context: framework/backup-server
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
||||
43
.github/workflows/module_bfl_build_main.yaml
vendored
Normal file
43
.github/workflows/module_bfl_build_main.yaml
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
name: BFL Build test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "module-bfl" ]
|
||||
paths:
|
||||
- 'framework/bfl/**'
|
||||
- '!framework/bfl/.olares/**'
|
||||
- '!framework/bfl/README.md'
|
||||
pull_request:
|
||||
branches: [ "module-bfl" ]
|
||||
paths:
|
||||
- 'framework/bfl/**'
|
||||
- '!framework/bfl/.olares/**'
|
||||
- '!framework/bfl/README.md'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.22.1'
|
||||
- name: Run Build
|
||||
working-directory: framework/bfl
|
||||
run: |
|
||||
ksDir="../../kubesphere-ext"
|
||||
version="v3.3.0-ext"
|
||||
|
||||
if [ -d "$ksDir" ]; then
|
||||
pushd "${ksDir}/"
|
||||
branch=$(git rev-parse --abbrev-ref HEAD|awk -F / '{print $2}')
|
||||
if [ x"$branch" != x"$version" ]; then
|
||||
git checkout $version
|
||||
fi
|
||||
popd &>/dev/null
|
||||
else
|
||||
git clone https://github.com/beclab/kubesphere-ext.git "${ksDir}"
|
||||
fi
|
||||
|
||||
make all
|
||||
36
.github/workflows/module_bfl_publish_docker.yaml
vendored
Normal file
36
.github/workflows/module_bfl_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
name: Publish BFL-API to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/bfl:${{ github.event.inputs.tags }}
|
||||
file: framework/bfl/Dockerfile.api
|
||||
context: framework/bfl
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
||||
35
.github/workflows/module_bfl_publish_frpc.yaml
vendored
Normal file
35
.github/workflows/module_bfl_publish_frpc.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Publish BFL-frpc to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build bfl-frpc and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/frpc:${{ github.event.inputs.tags }}
|
||||
file: framework/bfl/Dockerfile.frpc
|
||||
context: framework/bfl
|
||||
platforms: linux/amd64,linux/arm64
|
||||
35
.github/workflows/module_bfl_publish_ingress.yaml
vendored
Normal file
35
.github/workflows/module_bfl_publish_ingress.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Publish BFL-ingress to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build bfl-ingress and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/bfl-ingress:${{ github.event.inputs.tags }}
|
||||
file: framework/bfl/Dockerfile.ingress
|
||||
context: framework/bfl
|
||||
platforms: linux/amd64,linux/arm64
|
||||
58
.github/workflows/module_integration_publish_docker.yaml
vendored
Normal file
58
.github/workflows/module_integration_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
name: Publish Integration Server to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: "Release Tags"
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: PR Conventional Commit Validation
|
||||
uses: ytanikin/PRConventionalCommits@1.1.0
|
||||
if: github.event_name == 'pull_request' || github.event_name == 'pull_request_target'
|
||||
with:
|
||||
task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert","style"]'
|
||||
add_label: "true"
|
||||
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
image: tonistiigi/binfmt:qemu-v8.1.5
|
||||
cache-image: false
|
||||
platforms: arm64
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.23.3
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: get latest tag
|
||||
uses: "WyriHaximus/github-action-get-previous-tag@v1"
|
||||
id: get-latest-tag
|
||||
with:
|
||||
fallback: latest
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
file: framework/integration/Dockerfile
|
||||
push: true
|
||||
tags: beclab/integration-server:${{ github.event.inputs.tags }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
context: framework/integration
|
||||
36
.github/workflows/module_kubemetrics_publish_docker.yaml
vendored
Normal file
36
.github/workflows/module_kubemetrics_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
name: Publish Kube State Metrics to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
update_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and Push image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/kube-state-metrics:${{ github.event.inputs.tags }}
|
||||
file: framework/kube-state-metrics/Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
context: framework/kube-state-metrics
|
||||
29
.github/workflows/module_kubesphere_build_main.yaml
vendored
Normal file
29
.github/workflows/module_kubesphere_build_main.yaml
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
name: Kubesphere Build Test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "module-kubesphere"
|
||||
paths:
|
||||
- 'infrastructure/kubesphere/**'
|
||||
- '!infrastructure/kubesphere/.olares/**'
|
||||
- '!infrastructure/kubesphere/README.md'
|
||||
pull_request:
|
||||
branches:
|
||||
- "module-kubesphere"
|
||||
paths:
|
||||
- 'infrastructure/kubesphere/**'
|
||||
- '!infrastructure/kubesphere/.olares/**'
|
||||
- '!infrastructure/kubesphere/README.md'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.24'
|
||||
- run: make binary
|
||||
working-directory: infrastructure/kubesphere
|
||||
36
.github/workflows/module_kubesphere_publish_docker.yaml
vendored
Normal file
36
.github/workflows/module_kubesphere_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
name: Publish Kubesphere to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/ks-apiserver:${{ github.event.inputs.tags }}
|
||||
file: infrastructure/kubesphere/build/ks-apiserver/Dockerfile
|
||||
context: infrastructure/kubesphere
|
||||
platforms: linux/amd64,linux/arm64
|
||||
47
.github/workflows/module_l4_build_main.yaml
vendored
Normal file
47
.github/workflows/module_l4_build_main.yaml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
name: L4-BFL-Proxy Build test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "module-l4" ]
|
||||
paths:
|
||||
- 'framework/l4-bfl-proxy/**'
|
||||
- '!framework/l4-bfl-proxy/.olares/**'
|
||||
- '!framework/l4-bfl-proxy/README.md'
|
||||
pull_request:
|
||||
branches: [ "module-l4" ]
|
||||
paths:
|
||||
- 'framework/l4-bfl-proxy/**'
|
||||
- '!framework/l4-bfl-proxy/.olares/**'
|
||||
- '!framework/l4-bfl-proxy/README.md'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
# runs-on: self-hosted
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18.2'
|
||||
- name: Run Build
|
||||
working-directory: framework/l4-bfl-proxy
|
||||
run: |
|
||||
ksDir="../../kubesphere"
|
||||
tag="v3.3.0"
|
||||
|
||||
if [ -d "$ksDir" ]; then
|
||||
pushd "${ksDir}/"
|
||||
branch=$(git rev-parse --abbrev-ref HEAD|awk -F / '{print $2}')
|
||||
if [ x"$branch" != x"$tag" ]; then
|
||||
git checkout -b $tag
|
||||
fi
|
||||
popd &>/dev/null
|
||||
else
|
||||
git clone https://github.com/kubesphere/kubesphere.git "${ksDir}"
|
||||
pushd "${ksDir}/"
|
||||
git checkout -b $tag
|
||||
popd &>/dev/null
|
||||
fi
|
||||
|
||||
make all
|
||||
67
.github/workflows/module_l4_publish_base.yaml
vendored
Normal file
67
.github/workflows/module_l4_publish_base.yaml
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
name: Publish L4 openresty-base to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build openresty and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: bytetrade/openresty:base-${{ github.event.inputs.tags }}-amd64
|
||||
file: framework/l4-bfl-proxy/Dockerfile.openresty
|
||||
platforms: linux/amd64
|
||||
context: framework/l4-bfl-proxy
|
||||
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build nginx-lua and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: bytetrade/openresty:base-${{ github.event.inputs.tags }}-arm64
|
||||
file: framework/l4-bfl-proxy/Dockerfile.openresty
|
||||
platforms: linux/arm64
|
||||
context: framework/l4-bfl-proxy
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create bytetrade/openresty:base-${{ github.event.inputs.tags }} --amend bytetrade/openresty:base-${{ github.event.inputs.tags }}-amd64 --amend bytetrade/openresty:base-${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push bytetrade/openresty:base-${{ github.event.inputs.tags }}
|
||||
35
.github/workflows/module_l4_publish_docker.yaml
vendored
Normal file
35
.github/workflows/module_l4_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Publish L4-BFL-Proxy to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build l4-bfl-proxy and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/l4-bfl-proxy:${{ github.event.inputs.tags }}
|
||||
file: framework/l4-bfl-proxy/Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
context: framework/l4-bfl-proxy
|
||||
67
.github/workflows/module_l4_publish_nginx.yaml
vendored
Normal file
67
.github/workflows/module_l4_publish_nginx.yaml
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
name: Publish L4 nginx-lua to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build nginx-lua and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: bytetrade/openresty:${{ github.event.inputs.tags }}-amd64
|
||||
file: framework/l4-bfl-proxy/Dockerfile.nginx
|
||||
platforms: linux/amd64
|
||||
context: framework/l4-bfl-proxy
|
||||
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build nginx-lua and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: bytetrade/openresty:${{ github.event.inputs.tags }}-arm64
|
||||
file: framework/l4-bfl-proxy/Dockerfile.nginx
|
||||
platforms: linux/arm64
|
||||
context: framework/l4-bfl-proxy
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create bytetrade/openresty:${{ github.event.inputs.tags }} --amend bytetrade/openresty:${{ github.event.inputs.tags }}-amd64 --amend bytetrade/openresty:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push bytetrade/openresty:${{ github.event.inputs.tags }}
|
||||
29
.github/workflows/module_nodeinit_build_main.yaml
vendored
Normal file
29
.github/workflows/module_nodeinit_build_main.yaml
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
name: OSNode-Init Build test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "module-nodeinit" ]
|
||||
paths:
|
||||
- 'framework/osnode-init/**'
|
||||
- '!framework/osnode-init/.olares/**'
|
||||
- '!framework/osnode-init/README.md'
|
||||
pull_request:
|
||||
branches: [ "module-nodeinit" ]
|
||||
paths:
|
||||
- 'framework/osnode-init/**'
|
||||
- '!framework/osnode-init/.olares/**'
|
||||
- '!framework/osnode-init/README.md'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.24.6'
|
||||
- name: Run Build
|
||||
working-directory: framework/osnode-init
|
||||
run: |
|
||||
make all
|
||||
42
.github/workflows/module_nodeinit_publish_docker.yaml
vendored
Normal file
42
.github/workflows/module_nodeinit_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
name: Publish OSNode-Init to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24.6'
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/osnode-init:v${{ github.event.inputs.tags }}
|
||||
file: framework/osnode-init/Dockerfile
|
||||
context: framework/osnode-init
|
||||
platforms: linux/amd64, linux/arm64
|
||||
|
||||
31
.github/workflows/module_systemserver_build_main.yaml
vendored
Normal file
31
.github/workflows/module_systemserver_build_main.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: SystemServer Build test
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "module-systemserver"
|
||||
paths:
|
||||
- 'framework/systemserver/**'
|
||||
- '!framework/systemserver/.olares/**'
|
||||
- '!framework/systemserver/README.md'
|
||||
pull_request:
|
||||
branches:
|
||||
- "module-systemserver"
|
||||
paths:
|
||||
- 'framework/systemserver/**'
|
||||
- '!framework/systemserver/.olares/**'
|
||||
- '!framework/systemserver/README.md'
|
||||
jobs:
|
||||
build0-main:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.22.6'
|
||||
- run: |
|
||||
git clone https://github.com/kubernetes/code-generator.git ../code-generator
|
||||
cd ../code-generator
|
||||
git checkout -b release-1.27
|
||||
cd -
|
||||
make system-server
|
||||
working-directory: framework/system-server
|
||||
37
.github/workflows/module_systemserver_publish_docker.yaml
vendored
Normal file
37
.github/workflows/module_systemserver_publish_docker.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Publish SystemServer to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
update_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/system-server:${{ github.event.inputs.tags }}
|
||||
context: framework/system-server
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
||||
37
.github/workflows/module_systemserver_publish_proxy.yaml
vendored
Normal file
37
.github/workflows/module_systemserver_publish_proxy.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Publish SystemServer Provider Proxy to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
update_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/provider-proxy:${{ github.event.inputs.tags }}
|
||||
file: framework/system-server/Dockerfile.provider
|
||||
context: framework/system-server
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
||||
28
.github/workflows/module_tapr_build_main.yaml
vendored
Normal file
28
.github/workflows/module_tapr_build_main.yaml
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
name: TAPR Build test
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "module-tapr"
|
||||
paths:
|
||||
- 'platform/tapr/**'
|
||||
- '!platform/tapr/.olares/**'
|
||||
- '!platform/tapr/README.md'
|
||||
pull_request:
|
||||
branches:
|
||||
- "module-tapr"
|
||||
paths:
|
||||
- 'platform/tapr/**'
|
||||
- '!platform/tapr/.olares/**'
|
||||
- '!platform/tapr/README.md'
|
||||
jobs:
|
||||
build0-main:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.23.3'
|
||||
- working-directory: platform/tapr
|
||||
run: |
|
||||
make build-uploader build-vault build-middleware
|
||||
|
||||
37
.github/workflows/module_tapr_publish_citus.yaml
vendored
Normal file
37
.github/workflows/module_tapr_publish_citus.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Publish TAPR citus to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
update_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/citus:${{ github.event.inputs.tags }}
|
||||
file: platform/tapr/docker/citus/Dockerfile
|
||||
platforms: linux/amd64, linux/arm64
|
||||
context: platform/tapr
|
||||
|
||||
37
.github/workflows/module_tapr_publish_image.yaml
vendored
Normal file
37
.github/workflows/module_tapr_publish_image.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Publish TAPR image-uploader to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
update_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/images-uploader:${{ github.event.inputs.tags }}
|
||||
file: platform/tapr/docker/uploader/Dockerfile
|
||||
context: platform/tapr
|
||||
platforms: linux/amd64, linux/arm64
|
||||
|
||||
62
.github/workflows/module_tapr_publish_middleware.yaml
vendored
Normal file
62
.github/workflows/module_tapr_publish_middleware.yaml
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
name: Publish TAPR middleware-operator to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push amd64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/middleware-operator:${{ github.event.inputs.tags }}-amd64
|
||||
file: platform/tapr/docker/middleware/Dockerfile
|
||||
context: platform/tapr
|
||||
platforms: linux/amd64
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push arm64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/middleware-operator:${{ github.event.inputs.tags }}-arm64
|
||||
file: platform/tapr/docker/middleware/Dockerfile
|
||||
context: platform/tapr
|
||||
platforms: linux/arm64
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create beclab/middleware-operator:${{ github.event.inputs.tags }} --amend beclab/middleware-operator:${{ github.event.inputs.tags }}-amd64 --amend beclab/middleware-operator:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push beclab/middleware-operator:${{ github.event.inputs.tags }}
|
||||
37
.github/workflows/module_tapr_publish_s3rver.yaml
vendored
Normal file
37
.github/workflows/module_tapr_publish_s3rver.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Publish TAPR s3rver to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
update_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/s3rver:${{ github.event.inputs.tags }}
|
||||
file: platform/tapr/docker/middleware/Dockerfile.s3rver
|
||||
context: platform/tapr
|
||||
platforms: linux/amd64, linux/arm64
|
||||
|
||||
62
.github/workflows/module_tapr_publish_sysevent.yaml
vendored
Normal file
62
.github/workflows/module_tapr_publish_sysevent.yaml
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
name: Publish TAPR sys-event to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push amd64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/sys-event:${{ github.event.inputs.tags }}-amd64
|
||||
file: platform/tapr/docker/sys-event/Dockerfile
|
||||
context: platform/tapr
|
||||
platforms: linux/amd64
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push arm64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/sys-event:${{ github.event.inputs.tags }}-arm64
|
||||
file: platform/tapr/docker/sys-event/Dockerfile
|
||||
context: platform/tapr
|
||||
platforms: linux/arm64
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create beclab/sys-event:${{ github.event.inputs.tags }} --amend beclab/sys-event:${{ github.event.inputs.tags }}-amd64 --amend beclab/sys-event:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push beclab/sys-event:${{ github.event.inputs.tags }}
|
||||
37
.github/workflows/module_tapr_publish_vault.yaml
vendored
Normal file
37
.github/workflows/module_tapr_publish_vault.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Publish TAPR secret-vault to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
update_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/secret-vault:${{ github.event.inputs.tags }}
|
||||
file: platform/tapr/docker/vault/Dockerfile
|
||||
context: platform/tapr
|
||||
platforms: linux/amd64, linux/arm64
|
||||
|
||||
37
.github/workflows/module_tapr_publish_wsgateway.yaml
vendored
Normal file
37
.github/workflows/module_tapr_publish_wsgateway.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Publish TAPR ws-gateway to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
jobs:
|
||||
update_dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/ws-gateway:${{ github.event.inputs.tags }}
|
||||
file: platform/tapr/docker/ws-gateway/Dockerfile
|
||||
context: platform/tapr
|
||||
platforms: linux/amd64, linux/arm64
|
||||
|
||||
4
.github/workflows/release-daemon.yaml
vendored
4
.github/workflows/release-daemon.yaml
vendored
@@ -44,9 +44,9 @@ jobs:
|
||||
with:
|
||||
go-version: 1.22.1
|
||||
|
||||
- name: install udev-devel
|
||||
- name: install udev-devel and pcap-devel
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y libudev-dev
|
||||
sudo apt update && sudo apt install -y libudev-dev libpcap-dev
|
||||
|
||||
- name: Install x86_64 cross-compiler
|
||||
run: sudo apt-get update && sudo apt-get install -y build-essential
|
||||
|
||||
2
.github/workflows/release-daily.yaml
vendored
2
.github/workflows/release-daily.yaml
vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
steps:
|
||||
- id: generate
|
||||
run: |
|
||||
v=1.12.2-$(date +"%Y%m%d")
|
||||
v=1.12.3-$(date +"%Y%m%d")
|
||||
echo "version=$v" >> "$GITHUB_OUTPUT"
|
||||
|
||||
release-id:
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -37,4 +37,6 @@ docs/.vitepress/dist/
|
||||
docs/.vitepress/cache/
|
||||
node_modules
|
||||
.idea/
|
||||
cli/olares-cli*
|
||||
cli/olares-cli*
|
||||
|
||||
framework/app-service/bin
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
[](https://discord.gg/olares)
|
||||
[](https://github.com/beclab/olares/blob/main/LICENSE)
|
||||
|
||||
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
<p>
|
||||
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
|
||||
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
|
||||
@@ -21,7 +23,7 @@
|
||||
<p align="center">
|
||||
<a href="https://olares.com">Website</a> ·
|
||||
<a href="https://docs.olares.com">Documentation</a> ·
|
||||
<a href="https://larepass.olares.com">Download LarePass</a> ·
|
||||
<a href="https://www.olares.com/larepass">Download LarePass</a> ·
|
||||
<a href="https://github.com/beclab/apps">Olares Apps</a> ·
|
||||
<a href="https://space.olares.com">Olares Space</a>
|
||||
</p>
|
||||
@@ -33,7 +35,7 @@
|
||||

|
||||
We believe you have a fundamental right to control your digital life. The most effective way to uphold this right is by hosting your data locally, on your own hardware.
|
||||
|
||||
Olares is an **open-source personal cloud operating system** designed to empower you to own and manage your digital assets locally. Instead of relying on public cloud services, you can deploy powerful open-source alternatives locally on Olares, such as Ollama for hosting LLMs, SD WebUI for image generation, and Mastodon for building censor free social space. Imagine the power of the cloud, but with you in complete command.
|
||||
Olares is an **open-source personal cloud operating system** designed to empower you to own and manage your digital assets locally. Instead of relying on public cloud services, you can deploy powerful open-source alternatives locally on Olares, such as Ollama for hosting LLMs, ComfyUI for image generation, and Perplexica for private, AI-driven search and reasoning. Imagine the power of the cloud, but with you in complete command.
|
||||
|
||||
> 🌟 *Star us to receive instant notifications about new releases and updates.*
|
||||
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
[](https://discord.gg/olares)
|
||||
[](https://github.com/beclab/olares/blob/main/LICENSE)
|
||||
|
||||
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
<p>
|
||||
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
|
||||
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
|
||||
@@ -21,7 +23,7 @@
|
||||
<p align="center">
|
||||
<a href="https://olares.com">网站</a> ·
|
||||
<a href="https://docs.olares.com">文档</a> ·
|
||||
<a href="https://larepass.olares.com">下载 LarePass</a> ·
|
||||
<a href="https://www.olares.cn/larepass">下载 LarePass</a> ·
|
||||
<a href="https://github.com/beclab/apps">Olares 应用</a> ·
|
||||
<a href="https://space.olares.com">Olares Space</a>
|
||||
</p>
|
||||
@@ -34,7 +36,7 @@
|
||||
|
||||
我们坚信,**您拥有掌控自己数字生活的基本权利**。维护这一权利最有效的方式,就是将您的数据托管在本地,在您自己的硬件上。
|
||||
|
||||
Olares 是一款开源个人云操作系统,旨在让您能够轻松在本地拥有并管理自己的数字资产。您无需再依赖公有云服务,而可以在 Olares 上本地部署强大的开源平替服务或应用,例如可以使用 Ollama 托管大语言模型,使用 SD WebUI 用于图像生成,以及使用 Mastodon 构建不受审查的社交空间。Olares 让你坐拥云计算的强大威力,又能完全将其置于自己掌控之下。
|
||||
Olares 是一款开源个人云操作系统,旨在让您能够轻松在本地拥有并管理自己的数字资产。您无需再依赖公有云服务,而可以在 Olares 上本地部署强大的开源平替服务或应用,例如可以使用 Ollama 托管大语言模型,使用 ComfyUI 生成图像,以及使用 Perplexica 打造本地化、注重隐私的 AI 搜索与问答体验。Olares 让您坐拥云计算的强大威力,又能完全将其置于自己掌控之下。
|
||||
|
||||
> 为 Olares 点亮 🌟 以及时获取新版本和更新的通知。
|
||||
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
[](https://discord.gg/olares)
|
||||
[](https://github.com/beclab/olares/blob/main/LICENSE)
|
||||
|
||||
<a href="https://trendshift.io/repositories/15376" target="_blank"><img src="https://trendshift.io/api/badge/repositories/15376" alt="beclab%2FOlares | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
<p>
|
||||
<a href="./README.md"><img alt="Readme in English" src="https://img.shields.io/badge/English-FFFFFF"></a>
|
||||
<a href="./README_CN.md"><img alt="Readme in Chinese" src="https://img.shields.io/badge/简体中文-FFFFFF"></a>
|
||||
@@ -21,7 +23,7 @@
|
||||
<p align="center">
|
||||
<a href="https://olares.com">ウェブサイト</a> ·
|
||||
<a href="https://docs.olares.com">ドキュメント</a> ·
|
||||
<a href="https://larepass.olares.com">LarePassをダウンロード</a> ·
|
||||
<a href="https://www.olares.com/larepass">LarePassをダウンロード</a> ·
|
||||
<a href="https://github.com/beclab/apps">Olaresアプリ</a> ·
|
||||
<a href="https://space.olares.com">Olares Space</a>
|
||||
</p>
|
||||
@@ -34,8 +36,7 @@
|
||||
|
||||
私たちは、あなたが自身のデジタルライフをコントロールする基本的な権利を有すると確信しています。この権利を守る最も効果的な方法は、あなたのデータをローカルの、あなた自身のハードウェア上でホストすることです。
|
||||
|
||||
Olaresは、あなたが自身のデジタル資産をローカルで容易に所有し管理できるよう設計された、オープンソースのパーソナルクラウドOSです。もはやパブリッククラウドサービスに依存する必要はありません。Olares上で、例えばOllamaを利用した大規模言語モデルのホスティング、SD WebUIによる画像生成、Mastodonを用いた検閲のないソーシャルスペースの構築など、強力なオープンソースの代替サービスやアプリケーションをローカルにデプロイできます。Olaresは、クラウドコンピューティングの絶大な力を活用しつつ、それを完全に自身のコントロール下に置くことを可能にします。
|
||||
|
||||
Olaresは、あなたが自身のデジタル資産をローカルで所有し管理できるように設計された、オープンソースのパーソナルクラウドOSです。パブリッククラウドサービスに依存する代わりに、Olares上で強力なオープンソースの代替をローカルにデプロイできます。例えば、LLMのホスティングにはOllama、画像生成にはComfyUI、そしてプライバシーを重視したAI駆動の検索と推論にはPerplexicaを利用できます。クラウドの力をそのままに、主導権は常にあなたの手に。
|
||||
> 🌟 *新しいリリースや更新についての通知を受け取るために、スターを付けてください。*
|
||||
|
||||
## アーキテクチャ
|
||||
@@ -44,7 +45,7 @@ Olaresは、あなたが自身のデジタル資産をローカルで容易に
|
||||
|
||||

|
||||
|
||||
各コンポーネントの詳細については、[Olares アーキテクチャ](https://docs.olares.com/manual/concepts/system-architecture.html)(英語版)をご参照ください。
|
||||
各コンポーネントの詳細については、[Olares アーキテクチャ](https://docs.olares.com/developer/concepts/system-architecture.html)(英語版)をご参照ください。
|
||||
|
||||
> 🔍**OlaresとNASの違いは何ですか?**
|
||||
>
|
||||
|
||||
@@ -51,6 +51,8 @@ rules:
|
||||
- "/provider/get_dataset_folder_status"
|
||||
- "/provider/update_dataset_folder_paths"
|
||||
- "/seahub/api/*"
|
||||
- "/system/configuration/encoding"
|
||||
- "/api/search/get_directory/"
|
||||
verbs: ["*"]
|
||||
|
||||
---
|
||||
|
||||
@@ -209,6 +209,21 @@ spec:
|
||||
port: 80
|
||||
targetPort: 91
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: share-fe-service
|
||||
namespace: user-space-{{ .Values.bfl.username }}
|
||||
spec:
|
||||
selector:
|
||||
app: olares-app
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: share
|
||||
port: 80
|
||||
targetPort: 92
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
@@ -220,12 +235,12 @@ metadata:
|
||||
applications.app.bytetrade.io/owner: '{{ .Values.bfl.username }}'
|
||||
applications.app.bytetrade.io/author: bytetrade.io
|
||||
annotations:
|
||||
applications.app.bytetrade.io/default-thirdlevel-domains: '[{"appName": "olares-app","entranceName":"dashboard","thirdLevelDomain":"dashboard"},{"appName":"olares-app","entranceName":"control-hub","thirdLevelDomain":"control-hub"},{"appName":"olares-app","entranceName":"files","thirdLevelDomain":"files"},{"appName": "olares-app","entranceName":"vault","thirdLevelDomain":"vault"},{"appName":"olares-app","entranceName":"headscale","thirdLevelDomain":"headscale"},{"appName":"olares-app","entranceName":"settings","thirdLevelDomain":"settings"},{"appName": "olares-app","entranceName":"market","thirdLevelDomain":"market"},{"appName":"olares-app","entranceName":"profile","thirdLevelDomain":"profile"}]'
|
||||
applications.app.bytetrade.io/default-thirdlevel-domains: '[{"appName": "olares-app","entranceName":"dashboard","thirdLevelDomain":"dashboard"},{"appName":"olares-app","entranceName":"control-hub","thirdLevelDomain":"control-hub"},{"appName":"olares-app","entranceName":"files","thirdLevelDomain":"files"},{"appName":"olares-app","entranceName":"share","thirdLevelDomain":"share"},{"appName": "olares-app","entranceName":"vault","thirdLevelDomain":"vault"},{"appName":"olares-app","entranceName":"headscale","thirdLevelDomain":"headscale"},{"appName":"olares-app","entranceName":"settings","thirdLevelDomain":"settings"},{"appName": "olares-app","entranceName":"market","thirdLevelDomain":"market"},{"appName":"olares-app","entranceName":"profile","thirdLevelDomain":"profile"}]'
|
||||
applications.app.bytetrade.io/icon: https://app.cdn.olares.com/appstore/olaresapps/icon.png
|
||||
applications.app.bytetrade.io/title: 'Olares Apps'
|
||||
applications.app.bytetrade.io/version: '0.0.1'
|
||||
applications.app.bytetrade.io/policies: '{"policies":[{"entranceName":"dashboard","uriRegex":"/js/script.js", "level":"public"},{"entranceName":"dashboard","uriRegex":"/js/api/send", "level":"public"}]}'
|
||||
applications.app.bytetrade.io/entrances: '[{"name":"files", "host":"files-fe-service", "port":80,"title":"Files","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png"},{"name":"vault", "host":"vault-service", "port":80,"title":"Vault","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/vault/icon.png"},{"name":"market", "host":"appstore-fe-service", "port":80,"title":"Market","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/appstore/icon.png"},{"name":"settings", "host":"settings-service", "port":80,"title":"Settings","icon":"https://app.cdn.olares.com/appstore/settings/icon.png"},{"name":"profile", "host":"profile-service", "port":80,"title":"Profile","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/profile/icon.png"},{"name":"dashboard","host":"dashboard-service","port":80,"title":"Dashboard","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/dashboard/icon.png"},{"name":"control-hub","host":"control-hub-service","port":80,"title":"Control Hub","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/control-hub/icon.png"},{"name":"headscale", "host":"headscale-svc", "port":80,"title":"Headscale","invisible": true,"icon":"https://app.cdn.olares.com/appstore/headscale/icon.png"}]'
|
||||
applications.app.bytetrade.io/entrances: '[{"name":"files", "host":"files-fe-service", "port":80,"title":"Files","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png"},{"name":"share","authLevel":"public", "host":"share-fe-service", "port":80,"title":"Share","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/files/icon.png","invisible":true},{"name":"vault", "host":"vault-service", "port":80,"title":"Vault","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/vault/icon.png"},{"name":"market", "host":"appstore-fe-service", "port":80,"title":"Market","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/appstore/icon.png"},{"name":"settings", "host":"settings-service", "port":80,"title":"Settings","icon":"https://app.cdn.olares.com/appstore/settings/icon.png"},{"name":"profile", "host":"profile-service", "port":80,"title":"Profile","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/profile/icon.png"},{"name":"dashboard","host":"dashboard-service","port":80,"title":"Dashboard","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/dashboard/icon.png"},{"name":"control-hub","host":"control-hub-service","port":80,"title":"Control Hub","windowPushState":true,"icon":"https://app.cdn.olares.com/appstore/control-hub/icon.png"},{"name":"headscale", "host":"headscale-svc", "port":80,"title":"Headscale","invisible": true,"icon":"https://app.cdn.olares.com/appstore/headscale/icon.png"}]'
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
@@ -253,7 +268,7 @@ spec:
|
||||
image: owncloudci/wait-for:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: check-auth
|
||||
- name: terminus-sidecar-init
|
||||
- name: olares-sidecar-init
|
||||
image: openservicemesh/init:v1.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
@@ -303,7 +318,7 @@ spec:
|
||||
chown -R 1000:1000 /uploadstemp && \
|
||||
chown -R 1000:1000 /appdata
|
||||
- name: olares-app-init
|
||||
image: beclab/system-frontend:v1.5.9
|
||||
image: beclab/system-frontend:v1.6.23
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
@@ -315,7 +330,7 @@ spec:
|
||||
name: www-dir
|
||||
|
||||
containers:
|
||||
- name: terminus-envoy-sidecar
|
||||
- name: olares-envoy-sidecar
|
||||
image: bytetrade/envoy:v1.25.11
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
@@ -329,7 +344,7 @@ spec:
|
||||
- name: tapr
|
||||
containerPort: 15080
|
||||
volumeMounts:
|
||||
- name: terminus-sidecar-config
|
||||
- name: olares-sidecar-config
|
||||
readOnly: true
|
||||
mountPath: /etc/envoy/envoy.yaml
|
||||
subPath: envoy.yaml
|
||||
@@ -352,6 +367,7 @@ spec:
|
||||
- containerPort: 89
|
||||
- containerPort: 90
|
||||
- containerPort: 91
|
||||
- containerPort: 92
|
||||
- containerPort: 8090
|
||||
command:
|
||||
- /bin/sh
|
||||
@@ -361,7 +377,7 @@ spec:
|
||||
cp -r /www/nginxs/* /etc/nginx/conf.d/
|
||||
nginx -g 'daemon off;'
|
||||
volumeMounts:
|
||||
- name: terminus-sidecar-config
|
||||
- name: olares-sidecar-config
|
||||
readOnly: true
|
||||
mountPath: /etc/envoy/envoy.yaml
|
||||
subPath: envoy.yaml
|
||||
@@ -424,7 +440,7 @@ spec:
|
||||
- name: NATS_SUBJECT_VAULT
|
||||
value: os.vault.{{ .Values.bfl.username}}
|
||||
- name: user-service
|
||||
image: beclab/user-service:v0.0.61
|
||||
image: beclab/user-service:v0.0.78
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
@@ -500,7 +516,7 @@ spec:
|
||||
hostPath:
|
||||
type: Directory
|
||||
path: '{{ .Values.userspace.userData }}'
|
||||
- name: terminus-sidecar-config
|
||||
- name: olares-sidecar-config
|
||||
configMap:
|
||||
name: user-service-sidecar-ws-configs
|
||||
items:
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ .Values.bfl.username }}:prometheus-k8s
|
||||
annotations:
|
||||
provider-registry-ref: {{ .Values.bfl.username }}/4ae9f19e
|
||||
provider-service-ref: http://prometheus-k8s.kubesphere-monitoring-system:9090
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "*"
|
||||
verbs: ["*"]
|
||||
@@ -9,4 +9,7 @@ metadata:
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "/document/search*"
|
||||
- "/task/*"
|
||||
- "/search/*"
|
||||
- "/monitorsetting/*"
|
||||
verbs: ["*"]
|
||||
@@ -15,6 +15,7 @@ rules:
|
||||
- "/api/account/all"
|
||||
- "/api/cookie/retrieve"
|
||||
- "/api/cookie"
|
||||
- "/api/abilities"
|
||||
verbs: ["*"]
|
||||
|
||||
---
|
||||
@@ -56,4 +57,16 @@ metadata:
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "/server/intent/send"
|
||||
verbs: ["*"]
|
||||
verbs: ["*"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ .Values.bfl.username }}:dashboard
|
||||
annotations:
|
||||
provider-registry-ref: {{ .Values.bfl.username }}/dashboard
|
||||
provider-service-ref: prometheus-k8s.kubesphere-monitoring-system:9090
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "*"
|
||||
verbs: ["*"]
|
||||
|
||||
@@ -29,7 +29,7 @@ spec:
|
||||
|
||||
containers:
|
||||
- name: wizard
|
||||
image: beclab/wizard:v1.5.7
|
||||
image: beclab/wizard:v1.6.5
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 80
|
||||
|
||||
@@ -7,10 +7,18 @@ function command_exists() {
|
||||
command -v "$@" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
if [[ x"$REPO_PATH" == x"" ]]; then
|
||||
export REPO_PATH="#__REPO_PATH__"
|
||||
fi
|
||||
|
||||
if [[ "x${REPO_PATH:3}" == "xREPO_PATH__" ]]; then
|
||||
export REPO_PATH="/"
|
||||
fi
|
||||
|
||||
if [[ x"$VERSION" == x"" ]]; then
|
||||
if [[ "$LOCAL_RELEASE" == "1" ]]; then
|
||||
ts=$(date +%Y%m%d%H%M%S)
|
||||
export VERSION="1.12.2-$ts"
|
||||
export VERSION="1.12.3-$ts"
|
||||
echo "will build and use a local release of Olares with version: $VERSION"
|
||||
echo ""
|
||||
else
|
||||
@@ -20,7 +28,7 @@ fi
|
||||
|
||||
if [[ "x${VERSION}" == "x" || "x${VERSION:3}" == "xVERSION__" ]]; then
|
||||
echo "error: Olares version is unspecified, please set the VERSION env var and rerun this script."
|
||||
echo "for example: VERSION=1.12.2-20241124 bash $0"
|
||||
echo "for example: VERSION=1.12.3-20241124 bash $0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -92,13 +100,17 @@ if [[ "$LOCAL_RELEASE" == "1" ]]; then
|
||||
fi
|
||||
INSTALL_OLARES_CLI=$(which olares-cli)
|
||||
else
|
||||
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]]; then
|
||||
expected_vendor="main"
|
||||
if [[ "$(basename "$REPO_PATH")" == "olares-one" ]]; then
|
||||
expected_vendor="OlaresOne"
|
||||
fi
|
||||
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]] && [[ "$(olares-cli --vendor)" == "$expected_vendor" ]]; then
|
||||
INSTALL_OLARES_CLI=$(which olares-cli)
|
||||
echo "olares-cli already installed and is the expected version"
|
||||
echo ""
|
||||
else
|
||||
if [[ ! -f ${CLI_FILE} ]]; then
|
||||
CLI_URL="${cdn_url}/${CLI_FILE}"
|
||||
CLI_URL="${cdn_url}${REPO_PATH}${CLI_FILE}"
|
||||
|
||||
echo "downloading Olares installer from ${CLI_URL} ..."
|
||||
echo ""
|
||||
|
||||
@@ -7,6 +7,15 @@ function command_exists() {
|
||||
command -v "$@" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
if [[ x"$REPO_PATH" == x"" ]]; then
|
||||
export REPO_PATH="#__REPO_PATH__"
|
||||
fi
|
||||
|
||||
|
||||
if [[ "x${REPO_PATH:3}" == "xREPO_PATH__" ]]; then
|
||||
export REPO_PATH="/"
|
||||
fi
|
||||
|
||||
function read_tty() {
|
||||
echo -n $1
|
||||
read $2 < /dev/tty
|
||||
@@ -149,7 +158,7 @@ export VERSION="#__VERSION__"
|
||||
|
||||
if [[ "x${VERSION}" == "x" || "x${VERSION:3}" == "xVERSION__" ]]; then
|
||||
echo "error: Olares version is unspecified, please set the VERSION env var and rerun this script."
|
||||
echo "for example: VERSION=1.12.2-20241124 bash $0"
|
||||
echo "for example: VERSION=1.12.3-20241124 bash $0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -172,15 +181,17 @@ else
|
||||
RELEASE_ID_SUFFIX=".$RELEASE_ID"
|
||||
fi
|
||||
CLI_FILE="olares-cli-v${VERSION}_linux_${ARCH}${RELEASE_ID_SUFFIX}.tar.gz"
|
||||
|
||||
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]]; then
|
||||
expected_vendor="main"
|
||||
if [[ "$(basename "$REPO_PATH")" == "olares-one" ]]; then
|
||||
expected_vendor="OlaresOne"
|
||||
fi
|
||||
if command_exists olares-cli && [[ "$(olares-cli -v | awk '{print $3}')" == "$VERSION" ]] && [[ "$(olares-cli --vendor)" == "$expected_vendor" ]]; then
|
||||
INSTALL_OLARES_CLI=$(which olares-cli)
|
||||
echo "olares-cli already installed and is the expected version"
|
||||
echo ""
|
||||
else
|
||||
if [[ ! -f ${CLI_FILE} ]]; then
|
||||
CLI_URL="${cdn_url}/${CLI_FILE}"
|
||||
|
||||
CLI_URL="${cdn_url}${REPO_PATH}${CLI_FILE}"
|
||||
echo "downloading Olares installer from ${CLI_URL} ..."
|
||||
echo ""
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ metadata:
|
||||
kubesphere.io/creator: '{{ .Values.user.name }}'
|
||||
labels:
|
||||
kubesphere.io/workspace: system-workspace
|
||||
openpolicyagent.org/webhook: ignore
|
||||
name: os-platform
|
||||
|
||||
---
|
||||
@@ -27,6 +28,7 @@ metadata:
|
||||
kubesphere.io/creator: '{{ .Values.user.name }}'
|
||||
labels:
|
||||
kubesphere.io/workspace: system-workspace
|
||||
openpolicyagent.org/webhook: ignore
|
||||
name: os-framework
|
||||
|
||||
---
|
||||
@@ -37,6 +39,7 @@ metadata:
|
||||
kubesphere.io/creator: '{{ .Values.user.name }}'
|
||||
labels:
|
||||
kubesphere.io/workspace: system-workspace
|
||||
openpolicyagent.org/webhook: ignore
|
||||
name: os-protected
|
||||
|
||||
|
||||
|
||||
@@ -66,6 +66,12 @@ if [ ! -z $RELEASE_ID ]; then
|
||||
sh -c "$SED 's/#__RELEASE_ID__/${RELEASE_ID}/' joincluster.sh"
|
||||
fi
|
||||
|
||||
# replace repo path placeholder in scripts if provided
|
||||
if [ ! -z "$REPO_PATH" ]; then
|
||||
sh -c "$SED 's|#__REPO_PATH__|${REPO_PATH}|g' install.sh"
|
||||
sh -c "$SED 's|#__REPO_PATH__|${REPO_PATH}|g' joincluster.sh"
|
||||
fi
|
||||
|
||||
$TAR --exclude=wizard/tools --exclude=.git -zcvf ${BASE_DIR}/../install-wizard-${VERSION}.tar.gz .
|
||||
|
||||
popd
|
||||
|
||||
@@ -17,7 +17,7 @@ for mod in "${PACKAGE_MODULE[@]}";do
|
||||
chart_path="${mod}/${app}"
|
||||
|
||||
if [ -d $chart_path ]; then
|
||||
find $chart_path -type f -name *.yaml | while read p; do
|
||||
find $chart_path -type f -path '*/.olares/*.yaml' | while read p; do
|
||||
bash ${BASE_DIR}/yaml2prop.sh -f $p | while read l;do
|
||||
if [[ "$l" == *".image = "* || "$l" == "output.containers."*".name"* ]]; then
|
||||
echo "$l"
|
||||
@@ -32,8 +32,7 @@ for mod in "${PACKAGE_MODULE[@]}";do
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
awk '{print $3}' ${TMP_MANIFEST} | sort | uniq | grep -v nitro | grep -v orion >> ${IMAGE_MANIFEST}
|
||||
awk '{print $3}' ${TMP_MANIFEST} | sort | uniq | grep -v nitro | grep -v orion | grep -v '^nonexisting$' >> ${IMAGE_MANIFEST}
|
||||
|
||||
# patch
|
||||
# fix backup server version
|
||||
|
||||
@@ -21,6 +21,11 @@ systemEnvs:
|
||||
type: url
|
||||
editable: true
|
||||
required: true
|
||||
# docker hub mirror endpoint for docker.io registry
|
||||
- envName: OLARES_SYSTEM_DOCKERHUB_SERVICE
|
||||
type: url
|
||||
editable: true
|
||||
required: false
|
||||
# the legacy OLARES_ROOT_DIR
|
||||
- envName: OLARES_SYSTEM_ROOT_PATH
|
||||
default: /olares
|
||||
|
||||
@@ -12,8 +12,11 @@ userEnvs:
|
||||
- envName: OLARES_USER_TIMEZONE
|
||||
type: string
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_ENABLED
|
||||
type: bool
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_SERVER
|
||||
type: url
|
||||
type: domain
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_PORT
|
||||
type: number
|
||||
@@ -30,13 +33,14 @@ userEnvs:
|
||||
- envName: OLARES_USER_SMTP_SECURE
|
||||
type: bool
|
||||
editable: true
|
||||
default: "true"
|
||||
- envName: OLARES_USER_SMTP_USE_TLS
|
||||
type: bool
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_USE_SSL
|
||||
type: bool
|
||||
editable: true
|
||||
- envName: OLARES_USER_SMTP_SMTP_SECURITY_PROTOCOLS
|
||||
- envName: OLARES_USER_SMTP_SECURITY_PROTOCOLS
|
||||
type: string
|
||||
editable: true
|
||||
- envName: OLARES_USER_OPENAI_APIKEY
|
||||
@@ -51,15 +55,18 @@ userEnvs:
|
||||
- envName: OLARES_USER_HUGGINGFACE_SERVICE
|
||||
type: url
|
||||
editable: true
|
||||
default: "https://huggingface.co/"
|
||||
- envName: OLARES_USER_HUGGINGFACE_TOKEN
|
||||
type: password
|
||||
editable: true
|
||||
- envName: OLARES_USER_PYPI_SERVICE
|
||||
type: url
|
||||
editable: true
|
||||
default: "https://pypi.org/simple/"
|
||||
- envName: OLARES_USER_GITHUB_SERVICE
|
||||
type: url
|
||||
editable: true
|
||||
default: "https://github.com/"
|
||||
- envName: OLARES_USER_GITHUB_TOKEN
|
||||
type: password
|
||||
editable: true
|
||||
|
||||
445
cli/cmd/ctl/disk/extend.go
Normal file
445
cli/cmd/ctl/disk/extend.go
Normal file
@@ -0,0 +1,445 @@
|
||||
package disk
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
"github.com/beclab/Olares/cli/pkg/utils/lvm"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const defaultOlaresVGName = "olares-vg"
|
||||
|
||||
// NewExtendDiskCommand builds the "extend" subcommand: an interactive
// workflow that grows an LVM logical volume using a currently unmounted
// disk. The flow is: find unmounted devices -> pick a volume group ->
// pick a logical volume -> pick an unmounted device -> (after explicit
// confirmation) optionally wipe the device's existing partitions ->
// run the LVM extension -> print the resulting LV layout.
// Any failure along the way terminates the process via log.Fatalf.
func NewExtendDiskCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "extend",
		Short: "extend disk operations",
		Run: func(cmd *cobra.Command, args []string) {
			// early return if no unmounted disks found
			unmountedDevices, err := lvm.FindUnmountedDevices()
			if err != nil {
				log.Fatalf("Error finding unmounted devices: %v\n", err)
			}

			if len(unmountedDevices) == 0 {
				log.Println("No unmounted disks found to extend.")
				return
			}

			// select volume group to extend
			currentVgs, err := lvm.FindCurrentLVM()
			if err != nil {
				log.Fatalf("Error finding current LVM: %v\n", err)
			}

			if len(currentVgs) == 0 {
				log.Println("No valid volume groups found to extend.")
				return
			}

			selectedVg, err := selectExtendingVG(currentVgs)
			if err != nil {
				log.Fatalf("Error selecting volume group: %v\n", err)
			}
			log.Printf("Selected volume group to extend: %s\n", selectedVg)

			// select logical volume to extend
			lvInVg, err := lvm.FindLvByVgName(selectedVg)
			if err != nil {
				log.Fatalf("Error finding logical volumes in volume group %s: %v\n", selectedVg, err)
			}

			if len(lvInVg) == 0 {
				log.Printf("No logical volumes found in volume group %s to extend.\n", selectedVg)
				return
			}

			selectedLv, err := selectExtendingLV(selectedVg, lvInVg)
			if err != nil {
				log.Fatalf("Error selecting logical volume: %v\n", err)
			}
			log.Printf("Selected logical volume to extend: %s\n", selectedLv)

			// select unmounted devices to create physical volume
			selectedDevice, err := selectExtendingDevices(unmountedDevices)
			if err != nil {
				log.Fatalf("Error selecting unmounted device: %v\n", err)
			}
			log.Printf("Selected unmounted device to use: %s\n", selectedDevice)

			options := &LvmExtendOptions{
				VgName:     selectedVg,
				DevicePath: selectedDevice,
				LvName:     selectedLv,
				DeviceBlk:  unmountedDevices[selectedDevice],
			}

			log.Printf("Extending logical volume %s in volume group %s using device %s\n", options.LvName, options.VgName, options.DevicePath)
			// cleanupNeeded is true when the device already carries
			// partitions, which must be destroyed (with user consent)
			// before the device can be added to the volume group.
			cleanupNeeded, err := options.cleanupDiskParts()
			if err != nil {
				log.Fatalf("Error during disk partition cleanup check: %v\n", err)
			}

			if cleanupNeeded {
				// Destructive path: require the user to type YES
				// before wiping the existing partitions.
				do, err := options.destroyWarning()
				if err != nil {
					log.Fatalf("Error during partition cleanup confirmation: %v\n", err)
				}
				if !do {
					log.Println("Operation aborted by user.")
					return
				}

				err = options.deleteDevicePartitions()
				if err != nil {
					log.Fatalf("Error deleting device partitions: %v\n", err)
				}

			} else {
				// Non-destructive path: still ask for an explicit
				// confirmation before extending.
				do, err := options.makeDecision()
				if err != nil {
					log.Fatalf("Error during extension confirmation: %v\n", err)
				}
				if !do {
					log.Println("Operation aborted by user.")
					return
				}
			}

			err = options.extendLVM()
			if err != nil {
				log.Fatalf("Error extending LVM: %v\n", err)
			}

			log.Println("Disk extension completed successfully.")

			// end of command run, and show result
			// show the result of the extension
			lvInVg, err = lvm.FindLvByVgName(selectedVg)
			if err != nil {
				log.Fatalf("Error finding logical volumes in volume group %s: %v\n", selectedVg, err)
			}

			w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)

			fmt.Fprint(w, "id\tLV\tVG\tLSize\tMountpoints\n")
			for idx, lv := range lvInVg {
				fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\n", idx+1, lv.LvName, lv.VgName, lv.LvSize, strings.Join(lv.Mountpoints, ","))
			}
			w.Flush()

		},
	}

	return cmd
}
|
||||
|
||||
// LvmExtendOptions carries the user's selections for a single
// disk-extension operation.
type LvmExtendOptions struct {
	VgName     string       // name of the volume group to extend
	DevicePath string       // path of the unmounted device to add (e.g. /dev/sdb)
	LvName     string       // name of the logical volume to grow
	DeviceBlk  *lvm.BlkPart // block-device info for DevicePath, including child partitions
}
|
||||
|
||||
func selectExtendingVG(vgs []*lvm.VgItem) (string, error) {
|
||||
// if only one vg, return it directly
|
||||
if len(vgs) == 1 {
|
||||
return vgs[0].VgName, nil
|
||||
}
|
||||
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Println("Multiple volume groups found. Please select one to extend:")
|
||||
fmt.Println("")
|
||||
// print header
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "id\tVG\tVSize\tVFree\n")
|
||||
for idx, vg := range vgs {
|
||||
fmt.Fprintf(w, "%d\t%s\t%s\t%s\n", idx+1, vg.VgName, vg.VgSize, vg.VgFree)
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
LOOP:
|
||||
fmt.Printf("\nEnter the volume group id to extend: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return "", errors.Wrap(errors.WithStack(err), "read volume group id failed")
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
fmt.Printf("\ninvalid volume group id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
selectedIdx, err := strconv.Atoi(input)
|
||||
if err != nil || selectedIdx < 1 || selectedIdx > len(vgs) {
|
||||
fmt.Printf("\ninvalid volume group id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
return vgs[selectedIdx-1].VgName, nil
|
||||
}
|
||||
|
||||
func selectExtendingLV(vgName string, lvs []*lvm.LvItem) (string, error) {
|
||||
if len(lvs) == 1 {
|
||||
return lvs[0].LvName, nil
|
||||
}
|
||||
|
||||
if vgName == defaultOlaresVGName {
|
||||
selectedLv := ""
|
||||
for _, lv := range lvs {
|
||||
if lv.LvName == "root" {
|
||||
selectedLv = lv.LvName
|
||||
continue
|
||||
}
|
||||
|
||||
if lv.LvName == "data" {
|
||||
selectedLv = lv.LvName
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if selectedLv != "" {
|
||||
return selectedLv, nil
|
||||
}
|
||||
}
|
||||
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Println("Multiple logical volumes found. Please select one to extend:")
|
||||
fmt.Println("")
|
||||
// print header
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "id\tLV\tVG\tLSize\tMountpoints\n")
|
||||
for idx, lv := range lvs {
|
||||
fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\n", idx+1, lv.LvName, lv.VgName, lv.LvSize, strings.Join(lv.Mountpoints, ","))
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
LOOP:
|
||||
fmt.Printf("\nEnter the logical volume id to extend: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return "", errors.Wrap(errors.WithStack(err), "read logical volume id failed")
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
fmt.Printf("\ninvalid logical volume id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
selectedIdx, err := strconv.Atoi(input)
|
||||
if err != nil || selectedIdx < 1 || selectedIdx > len(lvs) {
|
||||
fmt.Printf("\ninvalid logical volume id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
return lvs[selectedIdx-1].LvName, nil
|
||||
}
|
||||
|
||||
func selectExtendingDevices(unmountedDevices map[string]*lvm.BlkPart) (string, error) {
|
||||
if len(unmountedDevices) == 0 {
|
||||
return "", errors.New("no unmounted devices available for selection")
|
||||
}
|
||||
|
||||
if len(unmountedDevices) == 1 {
|
||||
for path := range unmountedDevices {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Println("Multiple unmounted devices found. Please select one to use:")
|
||||
fmt.Println("")
|
||||
// print header
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "id\tDevice\tSize\n")
|
||||
idx := 1
|
||||
devicePaths := make([]string, 0, len(unmountedDevices))
|
||||
for path, device := range unmountedDevices {
|
||||
fmt.Fprintf(w, "%d\t%s\t%s\n", idx, path, device.Size)
|
||||
devicePaths = append(devicePaths, path)
|
||||
idx++
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
LOOP:
|
||||
fmt.Printf("\nEnter the device id to use: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return "", errors.Wrap(errors.WithStack(err), "read device id failed")
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
fmt.Printf("\ninvalid device id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
selectedIdx, err := strconv.Atoi(input)
|
||||
if err != nil || selectedIdx < 1 || selectedIdx > len(devicePaths) {
|
||||
fmt.Printf("\ninvalid device id, please try again")
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
return devicePaths[selectedIdx-1], nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) destroyWarning() (bool, error) {
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Printf("WARNING: This will DESTROY all data on %s\n", o.DevicePath)
|
||||
LOOP:
|
||||
fmt.Printf("Type 'YES' to continue, CTRL+C to abort: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return false, errors.Wrap(errors.WithStack(err), "read confirmation input failed")
|
||||
}
|
||||
input = strings.ToUpper(strings.TrimSpace(input))
|
||||
if input != "YES" {
|
||||
goto LOOP
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) makeDecision() (bool, error) {
|
||||
reader, err := utils.GetBufIOReaderOfTerminalInput()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "failed to get terminal input reader")
|
||||
}
|
||||
|
||||
fmt.Printf("NOTICE: Extending LVM will begin on device %s\n", o.DevicePath)
|
||||
LOOP:
|
||||
fmt.Printf("Type 'YES' to continue, CTRL+C to abort: ")
|
||||
var input string
|
||||
input, err = reader.ReadString('\n')
|
||||
if err != nil && err.Error() != "EOF" {
|
||||
return false, errors.Wrap(errors.WithStack(err), "read confirmation input failed")
|
||||
}
|
||||
input = strings.ToUpper(strings.TrimSpace(input))
|
||||
if input != "YES" {
|
||||
goto LOOP
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) cleanupDiskParts() (bool, error) {
|
||||
if o.DeviceBlk == nil {
|
||||
return false, errors.New("device block is nil")
|
||||
}
|
||||
|
||||
if len(o.DeviceBlk.Children) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) deleteDevicePartitions() error {
|
||||
log.Printf("Selected device %s has existing partitions. Cleaning up...\n", o.DevicePath)
|
||||
if o.DeviceBlk == nil {
|
||||
return errors.New("device block is nil")
|
||||
}
|
||||
|
||||
if len(o.DeviceBlk.Children) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf("Deleting existing partitions on device %s...\n", o.DevicePath)
|
||||
var partitions []string
|
||||
for _, part := range o.DeviceBlk.Children {
|
||||
partitions = append(partitions, "/dev/"+part.Name)
|
||||
}
|
||||
|
||||
vgs, err := lvm.FindVgsOnDevice(partitions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to find volume groups on device partitions")
|
||||
}
|
||||
|
||||
if len(vgs) > 0 {
|
||||
log.Println("existing volume group on device, delete it first")
|
||||
for _, vg := range vgs {
|
||||
lvs, err := lvm.FindLvByVgName(vg.VgName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to find logical volumes in volume group %s", vg.VgName)
|
||||
}
|
||||
|
||||
err = lvm.DeactivateLv(vg.VgName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to deactivate volume group %s", vg.VgName)
|
||||
}
|
||||
|
||||
for _, lv := range lvs {
|
||||
err = lvm.RemoveLv(lv.LvPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to remove logical volume %s", lv.LvPath)
|
||||
}
|
||||
}
|
||||
|
||||
err = lvm.RemoveVg(vg.VgName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to remove volume group %s", vg)
|
||||
}
|
||||
|
||||
err = lvm.RemovePv(vg.PvName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to remove physical volume %s", vg.PvName)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
log.Printf("Deleting partitions on device %s...\n", o.DevicePath)
|
||||
err = lvm.DeleteDevicePartitions(o.DevicePath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to delete partitions on device %s", o.DevicePath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o LvmExtendOptions) extendLVM() error {
|
||||
log.Printf("Creating partition on device %s...\n", o.DevicePath)
|
||||
err := lvm.MakePartOnDevice(o.DevicePath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create partition on device %s", o.DevicePath)
|
||||
}
|
||||
|
||||
log.Printf("Creating physical volume on device %s...\n", o.DevicePath)
|
||||
err = lvm.AddNewPV(o.DevicePath, o.VgName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create physical volume on device %s", o.DevicePath)
|
||||
}
|
||||
|
||||
log.Printf("Extending volume group %s with logic volume %s on device %s...\n", o.VgName, o.LvName, o.DevicePath)
|
||||
err = lvm.ExtendLv(o.VgName, o.LvName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to extend logical volume %s in volume group %s", o.LvName, o.VgName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
34
cli/cmd/ctl/disk/listumounted.go
Normal file
34
cli/cmd/ctl/disk/listumounted.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package disk
|
||||
|
||||
import (
	"fmt"
	"log"
	"os"
	"sort"
	"text/tabwriter"

	"github.com/beclab/Olares/cli/pkg/utils/lvm"
	"github.com/spf13/cobra"
)
|
||||
|
||||
func NewListUnmountedDisksCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "list-unmounted",
|
||||
Short: "List unmounted disks",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
unmountedDevices, err := lvm.FindUnmountedDevices()
|
||||
if err != nil {
|
||||
log.Fatalf("Error finding unmounted devices: %v\n", err)
|
||||
}
|
||||
|
||||
// print header
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||
|
||||
fmt.Fprint(w, "Device\tSize\n")
|
||||
for path, device := range unmountedDevices {
|
||||
fmt.Fprintf(w, "%s\t%s\n", path, device.Size)
|
||||
}
|
||||
w.Flush()
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
15
cli/cmd/ctl/disk/root.go
Normal file
15
cli/cmd/ctl/disk/root.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package disk
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
func NewDiskCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "disk",
|
||||
Short: "disk management operations",
|
||||
}
|
||||
|
||||
cmd.AddCommand(NewListUnmountedDisksCommand())
|
||||
cmd.AddCommand(NewExtendDiskCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
23
cli/cmd/ctl/gpu/disable_nouveau.go
Normal file
23
cli/cmd/ctl/gpu/disable_nouveau.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/pipelines"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewCmdDisableNouveau() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "disable-nouveau",
|
||||
Short: "Blacklist and disable the nouveau kernel module",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := pipelines.DisableNouveau(); err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ func NewCmdGpu() *cobra.Command {
|
||||
rootGpuCmd.AddCommand(NewCmdUninstallpu())
|
||||
rootGpuCmd.AddCommand(NewCmdEnableGpu())
|
||||
rootGpuCmd.AddCommand(NewCmdDisableGpu())
|
||||
rootGpuCmd.AddCommand(NewCmdUpgradeGpu())
|
||||
rootGpuCmd.AddCommand(NewCmdGpuStatus())
|
||||
rootGpuCmd.AddCommand(NewCmdDisableNouveau())
|
||||
return rootGpuCmd
|
||||
}
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/options"
|
||||
"github.com/beclab/Olares/cli/pkg/pipelines"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdUpgradeGpu builds the "upgrade" subcommand, which upgrades the
// GPU drivers for Olares using the shared GPU install options.
// NOTE(review): this definition appears in a file-removal hunk of the
// diff, i.e. it is being deleted on the target branch.
func NewCmdUpgradeGpu() *cobra.Command {
	o := options.NewInstallGpuOptions()
	cmd := &cobra.Command{
		Use:   "upgrade",
		Short: "upgrade GPU drivers for Olares",
		Run: func(cmd *cobra.Command, args []string) {
			if err := pipelines.UpgradeGpuDrivers(o); err != nil {
				log.Fatalf("error: %v", err)
			}
		},
	}
	// expose the GPU install option flags on this command
	o.AddFlags(cmd)
	return cmd
}
|
||||
@@ -49,7 +49,7 @@ func NewCmdRelease() *cobra.Command {
|
||||
}
|
||||
|
||||
if version == "" {
|
||||
version = fmt.Sprintf("1.12.2-%s", time.Now().Format("20060102150405"))
|
||||
version = fmt.Sprintf("1.12.3-%s", time.Now().Format("20060102150405"))
|
||||
fmt.Printf("--version unspecified, using: %s\n", version)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package ctl
|
||||
|
||||
import (
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/disk"
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/gpu"
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/node"
|
||||
"github.com/beclab/Olares/cli/cmd/ctl/os"
|
||||
@@ -33,6 +34,7 @@ func NewDefaultCommand() *cobra.Command {
|
||||
cmds.AddCommand(node.NewNodeCommand())
|
||||
cmds.AddCommand(gpu.NewCmdGpu())
|
||||
cmds.AddCommand(user.NewUserCommand())
|
||||
cmds.AddCommand(disk.NewDiskCommand())
|
||||
|
||||
return cmds
|
||||
}
|
||||
|
||||
122
cli/cmd/ctl/user/activate.go
Normal file
122
cli/cmd/ctl/user/activate.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package user
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/wizard"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// activateUserOptions holds all inputs for the "activate" user command:
// vault/BFL endpoints, credentials, and post-activation wizard settings.
type activateUserOptions struct {
	Mnemonic      string // 12-word mnemonic phrase, required for activation
	BflUrl        string // BFL service base URL
	VaultUrl      string // vault service base URL
	Password      string // OS password used for authentication
	OlaresId      string // full Olares ID (e.g. user@example.com), taken from the positional arg
	ResetPassword string // new password applied during the reset step

	Location     string // timezone location (e.g. Asia/Shanghai)
	Language     string // system language tag (e.g. en-US)
	EnableTunnel bool   // whether to enable tunnel (FRP) mode
	Host         string // FRP host, used only when EnableTunnel is true
	Jws          string // FRP JWS token, used only when EnableTunnel is true
}
|
||||
|
||||
func NewCmdActivateUser() *cobra.Command {
|
||||
o := &activateUserOptions{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "activate {Olares ID (e.g., user@example.com)}",
|
||||
Short: "activate a new user",
|
||||
Args: cobra.ExactArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o.OlaresId = args[0]
|
||||
if err := o.Validate(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := o.Run(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (o *activateUserOptions) AddFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().StringVar(&o.Mnemonic, "mnemonic", "", "12-word mnemonic phrase, required for activation")
|
||||
cmd.Flags().StringVar(&o.BflUrl, "bfl", "http://127.0.0.1:30180", "Bfl URL (e.g., https://example.com, default: http://127.0.0.1:30180)")
|
||||
cmd.Flags().StringVar(&o.VaultUrl, "vault", "http://127.0.0.1:30180", "Vault URL (e.g., https://example.com, default: http://127.0.0.1:30181)")
|
||||
cmd.Flags().StringVarP(&o.Password, "password", "p", "", "OS password for authentication, required for activation")
|
||||
cmd.Flags().StringVar(&o.Location, "location", "Asia/Shanghai", "Timezone location (default: Asia/Shanghai)")
|
||||
cmd.Flags().StringVar(&o.Language, "language", "en-US", "System language (default: en-US)")
|
||||
cmd.Flags().BoolVar(&o.EnableTunnel, "enable-tunnel", false, "Enable tunnel mode (default: false)")
|
||||
cmd.Flags().StringVar(&o.Host, "host", "", "FRP host (only used when tunnel is enabled)")
|
||||
cmd.Flags().StringVar(&o.Jws, "jws", "", "FRP JWS token (only used when tunnel is enabled)")
|
||||
cmd.Flags().StringVar(&o.ResetPassword, "reset-password", "", "New password for resetting (required for password reset)")
|
||||
}
|
||||
|
||||
func (o *activateUserOptions) Validate() error {
|
||||
if o.OlaresId == "" {
|
||||
return fmt.Errorf("Olares ID is required")
|
||||
}
|
||||
if o.Password == "" {
|
||||
return fmt.Errorf("Password is required")
|
||||
}
|
||||
if o.Mnemonic == "" {
|
||||
return fmt.Errorf("Mnemonic is required")
|
||||
}
|
||||
if o.ResetPassword == "" {
|
||||
return fmt.Errorf("Reset password is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run performs the full user activation flow:
//  1. derives the local user name from the Olares ID,
//  2. initializes the wizard package's global stores from the mnemonic,
//  3. binds the user via BFL/vault and obtains an access token,
//  4. builds the wizard configuration and runs the activation wizard.
//
// It returns the first error encountered; on success all steps completed.
func (c *activateUserOptions) Run() error {
	log.Println("=== TermiPass CLI - User Bind Terminus ===")

	// The local name is the part of the Olares ID before the "@"
	// (e.g. "user" for "user@example.com"); IDs without "@" are used as-is.
	localName := c.OlaresId
	if strings.Contains(c.OlaresId, "@") {
		localName = strings.Split(c.OlaresId, "@")[0]
	}

	log.Printf("Parameters:")
	log.Printf(" BflUrl: %s", c.BflUrl)
	log.Printf(" VaultUrl: %s", c.VaultUrl)
	log.Printf(" Terminus Name: %s", c.OlaresId)
	log.Printf(" Local Name: %s", localName)

	// Seed the wizard package's global state from the mnemonic so the
	// subsequent bind/activation calls can operate on behalf of this user.
	log.Printf("Initializing global stores with mnemonic...")
	err := wizard.InitializeGlobalStores(c.Mnemonic, c.OlaresId)
	if err != nil {
		return fmt.Errorf("failed to initialize global stores: %v", err)
	}

	// Bind the user; on success this yields the access token consumed by the
	// activation wizard below.
	accessToken, err := wizard.UserBindTerminus(c.Mnemonic, c.BflUrl, c.VaultUrl, c.Password, c.OlaresId, localName)
	if err != nil {
		return fmt.Errorf("user bind failed: %v", err)
	}

	log.Printf("✅ Vault activation completed successfully!")
	log.Printf("🚀 Starting system activation wizard...")

	// Assemble the wizard configuration from the command-line options.
	wizardConfig := wizard.CustomWizardConfig(c.Location, c.Language, c.EnableTunnel, c.Host, c.Jws, c.Password, c.ResetPassword)

	log.Printf("Wizard configuration:")
	log.Printf(" Location: %s", wizardConfig.System.Location)
	log.Printf(" Language: %s", wizardConfig.System.Language)
	log.Printf(" Enable Tunnel: %t", c.EnableTunnel)
	// FRP details are only logged when tunnel mode is enabled and the config
	// actually produced an FRP section.
	if c.EnableTunnel && wizardConfig.System.FRP != nil {
		log.Printf(" FRP Host: %s", wizardConfig.System.FRP.Host)
		log.Printf(" FRP JWS: %s", wizardConfig.System.FRP.Jws)
	}

	err = wizard.RunActivationWizard(c.BflUrl, accessToken, wizardConfig)
	if err != nil {
		return fmt.Errorf("activation wizard failed: %v", err)
	}

	log.Printf("🎉 Complete Terminus activation finished successfully!")
	return nil
}
|
||||
170
cli/cmd/ctl/user/reset_password.go
Normal file
170
cli/cmd/ctl/user/reset_password.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package user
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
authv1 "k8s.io/api/authentication/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Constants for the forced password-reset flow, which talks to the
// auth-provider service inside the cluster.
const (
	// Namespace and service account used to mint the in-cluster token.
	resetNamespace      = "os-framework"
	resetServiceAccount = "olares-cli-sa"
	// Target service exposing the reset endpoint, and its preferred port name.
	resetServiceName     = "auth-provider-svc"
	resetServicePortName = "server"
	// Fallback port when the service declares no ports at all.
	defaultServicePort = 28080
	// Suffix appended to the plaintext password before MD5 hashing
	// (see saltedMD5).
	passwordSaltSuffix = "@Olares2025"
	authHeaderBearer   = "Bearer "
	// CLI-specific auth header also consumed by the auth-provider.
	cliAuthHeader = "Olares-CLI-Authorization"
	// Expands to: http://<clusterIP>:<port>/cli/api/reset/<username>/password
	resetRequestPathTmpl = "http://%s:%d/cli/api/reset/%s/password"
)
|
||||
|
||||
// resetPasswordOptions holds the inputs for the reset-password command.
type resetPasswordOptions struct {
	// username is taken from the positional command-line argument.
	username string
	// password is the new plaintext password (salted-MD5-hashed before sending).
	password string
	// kubeConfig optionally overrides the kubeconfig path
	// (falls back to $KUBECONFIG, then the default home location).
	kubeConfig string
}
|
||||
|
||||
func NewCmdResetPassword() *cobra.Command {
|
||||
o := &resetPasswordOptions{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "reset-password {username}",
|
||||
Short: "forcefully reset a user's password via auth-provider",
|
||||
Args: cobra.ExactArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o.username = args[0]
|
||||
if err := o.Validate(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := o.Run(cmd.Context()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (o *resetPasswordOptions) AddFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().StringVarP(&o.password, "password", "p", "", "new password to set")
|
||||
cmd.Flags().StringVar(&o.kubeConfig, "kubeconfig", "", "path to kubeconfig file (optional)")
|
||||
}
|
||||
|
||||
func (o *resetPasswordOptions) Validate() error {
|
||||
if o.username == "" {
|
||||
return fmt.Errorf("username is required")
|
||||
}
|
||||
if o.password == "" {
|
||||
return fmt.Errorf("password is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run executes the forced password reset:
//  1. loads a kubeconfig (flag > $KUBECONFIG > default home path) and builds
//     a Kubernetes client,
//  2. mints a short-lived token for the CLI service account,
//  3. resolves the auth-provider service's ClusterIP and port,
//  4. POSTs the salted-MD5 hash of the new password to the reset endpoint.
func (o *resetPasswordOptions) Run(ctx context.Context) error {
	// Resolve the kubeconfig path: explicit flag first, then $KUBECONFIG,
	// then the conventional home-directory location.
	cfgPath := o.kubeConfig
	if cfgPath == "" {
		cfgPath = os.Getenv("KUBECONFIG")
		if cfgPath == "" {
			cfgPath = clientcmd.RecommendedHomeFile
		}
	}
	restCfg, err := clientcmd.BuildConfigFromFlags("", cfgPath)
	if err != nil {
		return fmt.Errorf("failed to load kubeconfig: %w", err)
	}
	k8s, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		return fmt.Errorf("failed to create k8s client: %w", err)
	}

	// Request a one-hour token for the CLI service account; it authenticates
	// the reset call against the auth-provider below.
	expires := int64(3600)
	tr := &authv1.TokenRequest{
		Spec: authv1.TokenRequestSpec{
			ExpirationSeconds: &expires,
		},
	}
	tokenResp, err := k8s.CoreV1().ServiceAccounts(resetNamespace).CreateToken(ctx, resetServiceAccount, tr, metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("failed to create service account token: %w", err)
	}
	saToken := tokenResp.Status.Token
	if saToken == "" {
		return fmt.Errorf("received empty token for service account %s/%s", resetNamespace, resetServiceAccount)
	}

	// Locate the auth-provider service; prefer the port named "server",
	// fall back to the first declared port, and finally the hard-coded
	// default when no ports are declared at all.
	svc, err := k8s.CoreV1().Services(resetNamespace).Get(ctx, resetServiceName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get service %s/%s: %w", resetNamespace, resetServiceName, err)
	}
	clusterIP := svc.Spec.ClusterIP
	port := int32(defaultServicePort)
	if len(svc.Spec.Ports) > 0 {
		chosen := svc.Spec.Ports[0].Port
		for _, p := range svc.Spec.Ports {
			if p.Name == resetServicePortName {
				chosen = p.Port
				break
			}
		}
		port = chosen
	}
	if clusterIP == "" {
		return fmt.Errorf("service %s/%s has empty ClusterIP", resetNamespace, resetServiceName)
	}

	// Request body carries the salted MD5 of the new password — the hashing
	// scheme the auth-provider expects (see saltedMD5).
	url := fmt.Sprintf(resetRequestPathTmpl, clusterIP, port, o.username)
	bodyMap := map[string]string{
		"password": saltedMD5(o.password),
	}
	payload, err := json.Marshal(bodyMap)
	if err != nil {
		return fmt.Errorf("failed to marshal request body: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("failed to create http request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	// The same token is sent both as a standard bearer token and in the
	// CLI-specific header consumed by the auth-provider.
	req.Header.Set("Authorization", authHeaderBearer+saToken)
	req.Header.Set(cliAuthHeader, authHeaderBearer+saToken)
	req.Header.Set("X-Forwarded-Host", fmt.Sprintf("%s.%s:%d", resetServiceName, resetNamespace, port))

	httpClient := &http.Client{Timeout: 10 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("reset password request failed: %w", err)
	}
	defer resp.Body.Close()

	// Any non-200 status is an error; include the response body when present
	// to aid debugging.
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		codeText := http.StatusText(resp.StatusCode)
		if len(body) > 0 {
			return fmt.Errorf("reset password failed: %d(%s), %s", resp.StatusCode, codeText, string(body))
		}
		return fmt.Errorf("reset password failed: %d(%s)", resp.StatusCode, codeText)
	}

	fmt.Printf("Password for user '%s' reset successfully\n", o.username)
	return nil
}
|
||||
|
||||
func saltedMD5(s string) string {
|
||||
sum := md5.Sum([]byte(s + passwordSaltSuffix))
|
||||
return hex.EncodeToString(sum[:])
|
||||
}
|
||||
@@ -11,6 +11,8 @@ func NewUserCommand() *cobra.Command {
|
||||
cmd.AddCommand(NewCmdDeleteUser())
|
||||
cmd.AddCommand(NewCmdListUsers())
|
||||
cmd.AddCommand(NewCmdGetUser())
|
||||
cmd.AddCommand(NewCmdActivateUser())
|
||||
cmd.AddCommand(NewCmdResetPassword())
|
||||
// cmd.AddCommand(NewCmdUpdateUserLimits())
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ go 1.24.2
|
||||
toolchain go1.24.6
|
||||
|
||||
replace (
|
||||
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.10
|
||||
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.4.41
|
||||
bytetrade.io/web3os/backups-sdk => github.com/Above-Os/backups-sdk v0.1.17
|
||||
github.com/containers/image/v5 => github.com/containers/image/v5 v5.21.1
|
||||
github.com/containers/storage => github.com/containers/storage v1.40.0
|
||||
@@ -46,6 +46,7 @@ require (
|
||||
github.com/spf13/pflag v1.0.7
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/tyler-smith/go-bip39 v1.1.0
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/crypto v0.41.0
|
||||
golang.org/x/sys v0.35.0
|
||||
@@ -55,6 +56,7 @@ require (
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
helm.sh/helm/v3 v3.18.6
|
||||
k8s.io/api v0.34.0
|
||||
k8s.io/apiextensions-apiserver v0.34.0
|
||||
k8s.io/apimachinery v0.34.0
|
||||
k8s.io/client-go v0.34.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
@@ -214,7 +216,6 @@ require (
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.34.0 // indirect
|
||||
k8s.io/apiserver v0.34.0 // indirect
|
||||
k8s.io/cli-runtime v0.34.0 // indirect
|
||||
k8s.io/component-base v0.34.0 // indirect
|
||||
|
||||
@@ -45,8 +45,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/beclab/api v0.0.2 h1:aD5RcMie2uqa/FZI7aQBa1F4yVEib8/x3IIZSLiHkBM=
|
||||
github.com/beclab/api v0.0.2/go.mod h1:ESZLe8cf4934QFkU6cqbskKfiTyNk67i1qbv/ctS6js=
|
||||
github.com/beclab/app-service v0.4.10 h1:0CT8sl5K+qwQsrKO6FYxbUFNXcRJVkkErw3sB7V7OQw=
|
||||
github.com/beclab/app-service v0.4.10/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
|
||||
github.com/beclab/app-service v0.4.41 h1:WSIXEqHSAepHweBooPkc+pedVaGGn335RugNwixkciY=
|
||||
github.com/beclab/app-service v0.4.41/go.mod h1:0vEg3rv/DbR7dYznvTlXNXyYNn+TXNMaxz03GQYRWUQ=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
@@ -468,6 +468,8 @@ github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8O
|
||||
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
|
||||
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
|
||||
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
|
||||
|
||||
@@ -447,6 +447,7 @@ var (
|
||||
"/etc/kubekey",
|
||||
"/etc/kke/version",
|
||||
"/etc/systemd/system/olares-swap.service",
|
||||
"/tmp/vgpulock",
|
||||
}
|
||||
|
||||
networkResetCmds = []string{
|
||||
|
||||
@@ -78,6 +78,8 @@ func (m *RunPrechecksModule) Init() {
|
||||
new(SystemdCheck),
|
||||
new(RequiredPortsCheck),
|
||||
new(ConflictingContainerdCheck),
|
||||
new(NvidiaCardArchChecker),
|
||||
new(NouveauChecker),
|
||||
new(CudaChecker),
|
||||
}
|
||||
runPreChecks := &task.LocalTask{
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -114,7 +115,7 @@ func (t *RequiredPortsCheck) Check(runtime connector.Runtime) error {
|
||||
defer l.Close()
|
||||
}
|
||||
if len(unbindablePorts) > 0 {
|
||||
return fmt.Errorf("port %v required by Olares cannot be bound", unbindablePorts)
|
||||
return fmt.Errorf("port %v required by Olares cannot be bound, you can check which process using the command `sudo netstat -tlnp`", unbindablePorts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -133,13 +134,15 @@ func (t *ConflictingContainerdCheck) Check(runtime connector.Runtime) error {
|
||||
if kubeRuntime.Arg.IsCloudInstance {
|
||||
return nil
|
||||
}
|
||||
fixMSG := "\nIf it is installed as a component of Docker, it should be uninstalled per the official doc https://docs.docker.com/engine/install/ubuntu/#uninstall-old-versions"
|
||||
fixMSG += "\nIf it is left over from a previous installation of Olares, clean it up using the command `sudo olares-cli uninstall --all`"
|
||||
containerdBin, err := util.GetCommand("containerd")
|
||||
if err == nil && containerdBin != "" {
|
||||
return fmt.Errorf("found existing containerd binary: %s, a containerd managed by Olares is required to ensure normal function", containerdBin)
|
||||
return fmt.Errorf("found existing containerd binary: %s, a containerd managed by Olares is required to ensure normal function%s", containerdBin, fixMSG)
|
||||
}
|
||||
containerdSocket := "/run/containerd/containerd.sock"
|
||||
if util.IsExist(containerdSocket) {
|
||||
return fmt.Errorf("found existing containerd socket: %s, a containerd managed by Olares is required to ensure normal function", containerdSocket)
|
||||
return fmt.Errorf("found existing containerd socket: %s, a containerd managed by Olares is required to ensure normal function%s", containerdSocket, fixMSG)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -269,20 +272,104 @@ func (t *ValidResolvConfCheck) Check(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type CudaChecker struct {
|
||||
CudaCheckTask
|
||||
type NvidiaCardArchChecker struct{}
|
||||
|
||||
func (t *NvidiaCardArchChecker) Name() string {
|
||||
return "NvidiaCardArch"
|
||||
}
|
||||
|
||||
func (c *CudaChecker) Check(runtime connector.Runtime) error {
|
||||
err := c.CudaCheckTask.Execute(runtime)
|
||||
func (t *NvidiaCardArchChecker) Check(runtime connector.Runtime) error {
|
||||
supportedArchs := []string{"Blackwell", "Hopper", "Ada Lovelace", "Ampere", "Turing"}
|
||||
model, arch, err := utils.DetectNvidiaModelAndArch(runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.TrimSpace(model) == "" {
|
||||
return nil
|
||||
}
|
||||
if !slices.Contains(supportedArchs, arch) {
|
||||
return fmt.Errorf("unsupported NVIDIA card %s of architecture: %s, Olares only supports the following architectures: %s", model, arch, strings.Join(supportedArchs, ", "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// the command `precheck` will check the cuda version,
|
||||
// only if the cuda is installed and the current version is not supported, it will return an error
|
||||
if err == ErrCudaInstalled {
|
||||
// NouveauChecker checks whether nouveau is loaded and has modeset=1 or -1.
|
||||
// This check only runs when an NVIDIA GPU is present.
|
||||
type NouveauChecker struct{}
|
||||
|
||||
func (n *NouveauChecker) Name() string {
|
||||
return "NouveauKernelModule"
|
||||
}
|
||||
|
||||
func (n *NouveauChecker) Check(runtime connector.Runtime) error {
|
||||
if !runtime.GetSystemInfo().IsLinux() {
|
||||
return nil
|
||||
}
|
||||
model, _, err := utils.DetectNvidiaModelAndArch(runtime)
|
||||
if err != nil {
|
||||
fmt.Println("Error detecting NVIDIA card:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if strings.TrimSpace(model) == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
if !util.IsExist("/sys/module/nouveau") {
|
||||
return nil
|
||||
}
|
||||
|
||||
const modesetPath = "/sys/module/nouveau/parameters/modeset"
|
||||
data, err := os.ReadFile(modesetPath)
|
||||
if err != nil {
|
||||
fmt.Printf("Error reading modeset parameter of nouveau kernel module by reading file %s: %v", modesetPath, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
val := strings.TrimSpace(string(data))
|
||||
if val == "1" || val == "-1" {
|
||||
return fmt.Errorf("detected nouveau kernel module loaded with modeset=%s; this conflicts with the NVIDIA driver that Olares will install, please disable it by running `sudo olares-cli gpu disable-nouveau`, REBOOT your machine, and try again", val)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CudaChecker validates the state of the locally installed NVIDIA
// driver / CUDA stack before installation proceeds.
type CudaChecker struct{}

// Name returns the check's display name.
func (c *CudaChecker) Name() string {
	return "CUDA"
}

// Check inspects the NVIDIA driver status and fails when the driver is in a
// state Olares cannot work with: a stale kernel driver still running after
// uninstall, a driver/library version mismatch, a non-runfile installation
// (outside WSL), or a CUDA version older than the oldest supported one.
// A CUDA version newer than the newest verified one only produces a warning.
func (c *CudaChecker) Check(runtime connector.Runtime) error {
	// Only meaningful on Linux hosts.
	if !runtime.GetSystemInfo().IsLinux() {
		return nil
	}

	st, err := utils.GetNvidiaStatus(runtime)
	if err != nil {
		return err
	}
	// Not installed: fine, unless the kernel driver is still running from a
	// previous installation, which requires a reboot to clear.
	if st == nil || !st.Installed {
		if st != nil && st.Running {
			return ErrKernelDriverUninstalledButRunning
		}
		logger.Info("NVIDIA driver is not installed")
		return nil
	}
	if st.Mismatch {
		return ErrDriverLibraryVersionMismatch
	}
	// Outside WSL, only runfile-based driver installations are supported.
	if st.InstallMethod != utils.GPUDriverInstallMethodRunfile && !runtime.GetSystemInfo().IsWsl() {
		return ErrNotInstalledByRunfile
	}
	logger.Infof("NVIDIA driver is installed, version: %s, cuda version: %s", st.DriverVersion, st.CudaVersion)
	// supportedCudaVersions is ordered oldest-to-newest: reject versions
	// below the oldest; merely warn for versions above the newest verified.
	oldestVer := semver.MustParse(supportedCudaVersions[0])
	newestVer := semver.MustParse(supportedCudaVersions[len(supportedCudaVersions)-1])
	currentVer := semver.MustParse(st.CudaVersion)
	if oldestVer.GreaterThan(currentVer) {
		return ErrUnsupportedCudaVersion
	}
	if newestVer.LessThan(currentVer) {
		logger.Info("CUDA version is too new, there might be compatibility issues with some applications, use at your own risk")
	}
	return nil
}
|
||||
|
||||
//////////////////////////////////////////////
|
||||
@@ -474,44 +561,8 @@ func (t *RemoveWSLChattr) Execute(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var ErrUnsupportedCudaVersion = errors.New("unsupported cuda version, please uninstall it, REBOOT your machine, and try again")
|
||||
var ErrCudaInstalled = errors.New("cuda is installed")
|
||||
var supportedCudaVersions = []string{"12.8", common.CurrentVerifiedCudaVersion}
|
||||
|
||||
// CudaCheckTask checks the cuda version, if the current version is not supported, it will return an error
|
||||
// before executing the command `olares-cli gpu install`, we need to check the cuda version
|
||||
// if the cuda if not installed, it will return nil and the command can be executed.
|
||||
// if the cuda is installed and the version is unsupported, the command can not be executed,
|
||||
// or the cuda version is supported, executing the command is unnecessary.
|
||||
type CudaCheckTask struct{}
|
||||
|
||||
func (t *CudaCheckTask) Name() string {
|
||||
return "Cuda"
|
||||
}
|
||||
|
||||
func (t *CudaCheckTask) Execute(runtime connector.Runtime) error {
|
||||
if !runtime.GetSystemInfo().IsLinux() {
|
||||
return nil
|
||||
}
|
||||
|
||||
info, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case !installed:
|
||||
logger.Info("NVIDIA driver is not installed")
|
||||
return nil
|
||||
default:
|
||||
logger.Infof("NVIDIA driver is installed, version: %s, cuda version: %s", info.DriverVersion, info.CudaVersion)
|
||||
oldestVer := semver.MustParse(supportedCudaVersions[0])
|
||||
newestVer := semver.MustParse(supportedCudaVersions[len(supportedCudaVersions)-1])
|
||||
currentVer := semver.MustParse(info.CudaVersion)
|
||||
if oldestVer.GreaterThan(currentVer) {
|
||||
return ErrUnsupportedCudaVersion
|
||||
}
|
||||
if newestVer.LessThan(currentVer) {
|
||||
logger.Info("CUDA version is too new, there might be compatibility issues with some applications, use at your own risk")
|
||||
}
|
||||
return ErrCudaInstalled
|
||||
}
|
||||
}
|
||||
var ErrUnsupportedCudaVersion = errors.New("unsupported cuda version, please uninstall it using the command `sudo olares-cli gpu uninstall`, REBOOT your machine, and try again")
|
||||
var ErrKernelDriverUninstalledButRunning = errors.New("NVIDIA driver is uninstalled, but the kernel driver is still running, please REBOOT your machine, and try again")
|
||||
var ErrNotInstalledByRunfile = errors.New("NVIDIA driver is installed, but not installed by runfile, please uninstall it using the command `sudo olares-cli gpu uninstall`, REBOOT your machine, and try again")
|
||||
var ErrDriverLibraryVersionMismatch = errors.New("NVIDIA driver is installed, but the library version with the running version is mismatched, please REBOOT your machine, and try again")
|
||||
var supportedCudaVersions = []string{common.CurrentVerifiedCudaVersion}
|
||||
|
||||
@@ -25,8 +25,7 @@ const (
|
||||
DefaultK3sVersion = "v1.33.3-k3s"
|
||||
DefaultKubernetesVersion = ""
|
||||
DefaultKubeSphereVersion = "v3.3.0"
|
||||
DefaultTokenMaxAge = 31536000
|
||||
CurrentVerifiedCudaVersion = "12.9"
|
||||
CurrentVerifiedCudaVersion = "13.0"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -236,7 +235,6 @@ const (
|
||||
CacheNodeNum = "node_num"
|
||||
CacheRedisPassword = "redis_password"
|
||||
CacheSecretsNum = "secrets_num"
|
||||
CacheJwtSecret = "jwt_secret"
|
||||
CacheCrdsNUm = "users_iam_num"
|
||||
|
||||
CacheMinioPath = "minio_binary_path"
|
||||
@@ -279,30 +277,28 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
ENV_OLARES_BASE_DIR = "OLARES_BASE_DIR"
|
||||
ENV_OLARES_VERSION = "OLARES_VERSION"
|
||||
ENV_TERMINUS_IS_CLOUD_VERSION = "TERMINUS_IS_CLOUD_VERSION"
|
||||
ENV_KUBE_TYPE = "KUBE_TYPE"
|
||||
ENV_REGISTRY_MIRRORS = "REGISTRY_MIRRORS"
|
||||
ENV_NVIDIA_CONTAINER_REPO_MIRROR = "NVIDIA_CONTAINER_REPO_MIRROR"
|
||||
ENV_OLARES_CDN_SERVICE = "OLARES_SYSTEM_CDN_SERVICE"
|
||||
ENV_STORAGE = "STORAGE"
|
||||
ENV_S3_BUCKET = "S3_BUCKET"
|
||||
ENV_LOCAL_GPU_ENABLE = "LOCAL_GPU_ENABLE"
|
||||
ENV_AWS_ACCESS_KEY_ID_SETUP = "AWS_ACCESS_KEY_ID_SETUP"
|
||||
ENV_AWS_SECRET_ACCESS_KEY_SETUP = "AWS_SECRET_ACCESS_KEY_SETUP"
|
||||
ENV_AWS_SESSION_TOKEN_SETUP = "AWS_SESSION_TOKEN_SETUP"
|
||||
ENV_BACKUP_KEY_PREFIX = "BACKUP_KEY_PREFIX"
|
||||
ENV_BACKUP_SECRET = "BACKUP_SECRET"
|
||||
ENV_CLUSTER_ID = "CLUSTER_ID"
|
||||
ENV_BACKUP_CLUSTER_BUCKET = "BACKUP_CLUSTER_BUCKET"
|
||||
ENV_TOKEN_MAX_AGE = "TOKEN_MAX_AGE"
|
||||
ENV_HOST_IP = "HOST_IP"
|
||||
ENV_PREINSTALL = "PREINSTALL"
|
||||
ENV_DISABLE_HOST_IP_PROMPT = "DISABLE_HOST_IP_PROMPT"
|
||||
ENV_AUTO_ADD_FIREWALL_RULES = "AUTO_ADD_FIREWALL_RULES"
|
||||
ENV_TERMINUS_OS_DOMAINNAME = "TERMINUS_OS_DOMAINNAME"
|
||||
ENV_DEFAULT_WSL_DISTRO_LOCATION = "DEFAULT_WSL_DISTRO_LOCATION" // If set to 1, the default WSL distro storage will be used.
|
||||
ENV_OLARES_BASE_DIR = "OLARES_BASE_DIR"
|
||||
ENV_OLARES_VERSION = "OLARES_VERSION"
|
||||
ENV_TERMINUS_IS_CLOUD_VERSION = "TERMINUS_IS_CLOUD_VERSION"
|
||||
ENV_KUBE_TYPE = "KUBE_TYPE"
|
||||
ENV_REGISTRY_MIRRORS = "REGISTRY_MIRRORS"
|
||||
ENV_OLARES_CDN_SERVICE = "OLARES_SYSTEM_CDN_SERVICE"
|
||||
ENV_STORAGE = "STORAGE"
|
||||
ENV_S3_BUCKET = "S3_BUCKET"
|
||||
ENV_LOCAL_GPU_ENABLE = "LOCAL_GPU_ENABLE"
|
||||
ENV_AWS_ACCESS_KEY_ID_SETUP = "AWS_ACCESS_KEY_ID_SETUP"
|
||||
ENV_AWS_SECRET_ACCESS_KEY_SETUP = "AWS_SECRET_ACCESS_KEY_SETUP"
|
||||
ENV_AWS_SESSION_TOKEN_SETUP = "AWS_SESSION_TOKEN_SETUP"
|
||||
ENV_BACKUP_KEY_PREFIX = "BACKUP_KEY_PREFIX"
|
||||
ENV_BACKUP_SECRET = "BACKUP_SECRET"
|
||||
ENV_CLUSTER_ID = "CLUSTER_ID"
|
||||
ENV_BACKUP_CLUSTER_BUCKET = "BACKUP_CLUSTER_BUCKET"
|
||||
ENV_HOST_IP = "HOST_IP"
|
||||
ENV_PREINSTALL = "PREINSTALL"
|
||||
ENV_DISABLE_HOST_IP_PROMPT = "DISABLE_HOST_IP_PROMPT"
|
||||
ENV_AUTO_ADD_FIREWALL_RULES = "AUTO_ADD_FIREWALL_RULES"
|
||||
ENV_TERMINUS_OS_DOMAINNAME = "TERMINUS_OS_DOMAINNAME"
|
||||
ENV_DEFAULT_WSL_DISTRO_LOCATION = "DEFAULT_WSL_DISTRO_LOCATION" // If set to 1, the default WSL distro storage will be used.
|
||||
|
||||
ENV_CONTAINER = "container"
|
||||
ENV_CONTAINER_MODE = "CONTAINER_MODE" // running in docker container
|
||||
|
||||
@@ -101,7 +101,6 @@ type Argument struct {
|
||||
Storage *Storage `json:"storage"`
|
||||
NetworkSettings *NetworkSettings `json:"network_settings"`
|
||||
GPU *GPU `json:"gpu"`
|
||||
TokenMaxAge int64 `json:"token_max_age"` // nanosecond
|
||||
|
||||
Request any `json:"-"`
|
||||
|
||||
@@ -353,15 +352,6 @@ func (a *Argument) SetOlaresCDNService(url string) {
|
||||
a.OlaresCDNService = u
|
||||
}
|
||||
|
||||
func (a *Argument) SetTokenMaxAge() {
|
||||
s := os.Getenv(ENV_TOKEN_MAX_AGE)
|
||||
age, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil || age == 0 {
|
||||
age = DefaultTokenMaxAge
|
||||
}
|
||||
a.TokenMaxAge = age
|
||||
}
|
||||
|
||||
func (a *Argument) SetGPU(enable bool) {
|
||||
if a.GPU == nil {
|
||||
a.GPU = new(GPU)
|
||||
|
||||
@@ -58,7 +58,7 @@ func NewLocalRuntime(debug, ingoreErr bool) (LocalRuntime, error) {
|
||||
host.Address = ""
|
||||
host.InternalAddress = ""
|
||||
host.Port = 22
|
||||
host.User = u.Name
|
||||
host.User = u.Username
|
||||
host.Password = ""
|
||||
host.PrivateKeyPath = fmt.Sprintf("%s/.ssh/id_rsa", u.HomeDir)
|
||||
host.Arch = ""
|
||||
|
||||
@@ -6,9 +6,9 @@ const (
|
||||
NamespaceKubePublic = "kube-public"
|
||||
NamespaceKubeSystem = "kube-system"
|
||||
NamespaceKubekeySystem = "kubekey-system"
|
||||
NamespaceKubesphereControlsSystem = "kubesphere-controls-system"
|
||||
NamespaceKubesphereMonitoringSystem = "kubesphere-monitoring-system"
|
||||
NamespaceKubesphereSystem = "kubesphere-system"
|
||||
NamespaceKubesphereControlsSystem = "kubesphere-controls-system"
|
||||
NamespaceOsFramework = "os-framework"
|
||||
NamespaceOsPlatform = "os-platform"
|
||||
|
||||
|
||||
@@ -1,109 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/prepare"
|
||||
"github.com/beclab/Olares/cli/pkg/core/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type Skip struct {
|
||||
KubePrepare
|
||||
Not bool
|
||||
}
|
||||
|
||||
func (p *Skip) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
return !p.Not, nil
|
||||
}
|
||||
|
||||
type Stop struct {
|
||||
prepare.BasePrepare
|
||||
}
|
||||
|
||||
func (p *Stop) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
return true, nil
|
||||
// return false, fmt.Errorf("STOP !!!!!!")
|
||||
}
|
||||
|
||||
type GetCommandKubectl struct {
|
||||
prepare.BasePrepare
|
||||
}
|
||||
|
||||
func (p *GetCommandKubectl) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
|
||||
cmd, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("command -v %s", CommandKubectl), false, false)
|
||||
if err != nil {
|
||||
return true, nil
|
||||
}
|
||||
if cmd != "" {
|
||||
p.PipelineCache.Set(CacheKubectlKey, cmd)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type GetMasterNum struct {
|
||||
prepare.BasePrepare
|
||||
}
|
||||
|
||||
func (p *GetMasterNum) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
var kubectlpath, err = util.GetCommand(CommandKubectl)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("kubectl not found")
|
||||
}
|
||||
|
||||
var cmd = fmt.Sprintf("%s get node | awk '{if(NR>1){print $3}}' | grep master | wc -l", kubectlpath)
|
||||
stdout, err := runtime.GetRunner().SudoCmd(cmd, false, false)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(errors.WithStack(err), "get master num failed")
|
||||
}
|
||||
|
||||
masterNum, _ := strconv.ParseInt(stdout, 10, 64)
|
||||
|
||||
p.PipelineCache.Set(CacheMasterNum, masterNum)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type GetNodeNum struct {
|
||||
prepare.BasePrepare
|
||||
}
|
||||
|
||||
func (p *GetNodeNum) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
var kubectlpath, err = util.GetCommand(CommandKubectl)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("kubectl not found")
|
||||
}
|
||||
|
||||
var cmd = fmt.Sprintf("%s get node | wc -l", kubectlpath)
|
||||
stdout, err := runtime.GetRunner().SudoCmd(cmd, false, false)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(errors.WithStack(err), "get node num failed")
|
||||
}
|
||||
|
||||
nodeNum, _ := strconv.ParseInt(stdout, 10, 64)
|
||||
|
||||
p.PipelineCache.Set(CacheNodeNum, nodeNum)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type ClusterType struct {
|
||||
KubePrepare
|
||||
ClusterType string
|
||||
Not bool
|
||||
}
|
||||
|
||||
func (p *ClusterType) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
if p.KubeConf == nil || p.KubeConf.Cluster == nil {
|
||||
return false, nil
|
||||
}
|
||||
var isK3s = p.KubeConf.Cluster.Kubernetes.Type == p.ClusterType
|
||||
if p.Not {
|
||||
return !isK3s, nil
|
||||
}
|
||||
|
||||
return isK3s, nil
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// header is the fixed JOSE header used for every generated token:
// HMAC-SHA256, type JWT.
var header = JWTHeader{
	Alg: "HS256",
	Typ: "JWT",
}

// payload is the fixed set of claims: a static token for the built-in
// KubeSphere admin user.
var payload = JWTPayload{
	Email:     "admin@kubesphere.io",
	Username:  "admin",
	TokenType: "static_token",
}

// JWTHeader models the JOSE header of the generated token.
type JWTHeader struct {
	Alg string `json:"alg"`
	Typ string `json:"typ"`
}

// JWTPayload models the claims carried by the generated token.
type JWTPayload struct {
	Email     string `json:"email"`
	Username  string `json:"username"`
	TokenType string `json:"token_type"`
}
|
||||
|
||||
func EncryptToken(secret string) (string, error) {
|
||||
headerJson, _ := json.Marshal(header)
|
||||
headerBase64 := Base64URLEncode(headerJson)
|
||||
|
||||
payloadJson, _ := json.Marshal(payload)
|
||||
payloadBase64 := Base64URLEncode(payloadJson)
|
||||
|
||||
headerPayload := fmt.Sprintf("%s.%s", headerBase64, payloadBase64)
|
||||
|
||||
var secretBytes = []byte(secret)
|
||||
|
||||
signature := HMACSHA256([]byte(headerPayload), secretBytes)
|
||||
|
||||
// Encode the signature to base64 URL encoding.
|
||||
signatureBase64 := Base64URLEncode(signature)
|
||||
|
||||
return fmt.Sprintf("%s.%s", headerPayload, signatureBase64), nil
|
||||
}
|
||||
|
||||
// Base64URLEncode encodes data as unpadded base64url — the encoding used
// for JWT segments. base64.RawURLEncoding is exactly equivalent to encoding
// with URLEncoding and stripping the trailing '=' padding, but avoids the
// intermediate string and the TrimRight call.
func Base64URLEncode(data []byte) string {
	return base64.RawURLEncoding.EncodeToString(data)
}
|
||||
|
||||
// HMACSHA256 computes the HMAC-SHA256 of message keyed with secret and
// returns the raw 32-byte digest.
func HMACSHA256(message, secret []byte) []byte {
	mac := hmac.New(sha256.New, secret)
	mac.Write(message)
	return mac.Sum(nil)
}
|
||||
@@ -1,16 +0,0 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestToken(t *testing.T) {
|
||||
var a = "n7X2dggXApH91fnVUzgPr1Fr1vAO0Upo"
|
||||
// var b = `{"email": "admin@kubesphere.io","username": "admin","token_type": "static_token"}`
|
||||
|
||||
// eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6ImFkbWluQGt1YmVzcGhlcmUuaW8iLCJ1c2VybmFtZSI6ImFkbWluIiwidG9rZW5fdHlwZSI6InN0YXRpY190b2tlbiJ9.iwsRH37tcqE8HyI_S98AEM6KUH7bVdxDasR3V8QasXI
|
||||
var data, _ = EncryptToken(a)
|
||||
|
||||
fmt.Println("---data---", data)
|
||||
}
|
||||
@@ -74,7 +74,6 @@ func (g *GenerateTerminusdServiceEnv) Execute(runtime connector.Runtime) error {
|
||||
"RegistryMirrors": g.KubeConf.Arg.RegistryMirrors,
|
||||
"BaseDir": baseDir,
|
||||
"GpuEnable": utils.FormatBoolToInt(g.KubeConf.Arg.GPU.Enable),
|
||||
"TokenMaxAge": g.KubeConf.Arg.TokenMaxAge,
|
||||
},
|
||||
PrintContent: true,
|
||||
}
|
||||
|
||||
@@ -14,5 +14,4 @@ KUBE_TYPE={{ .KubeType }}
|
||||
REGISTRY_MIRRORS={{ .RegistryMirrors }}
|
||||
BASE_DIR={{ .BaseDir }}
|
||||
LOCAL_GPU_ENABLE={{ .GpuEnable }}
|
||||
TOKEN_MAX_AGE={{ .TokenMaxAge }}
|
||||
`)))
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"github.com/beclab/Olares/cli/pkg/container"
|
||||
"time"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/container"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/prepare"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
@@ -20,11 +21,6 @@ type InstallDriversModule struct {
|
||||
// 1. no card is found (which skips the driver installation)
|
||||
// 2. no driver is found (which skips the container toolkit installation)
|
||||
FailOnNoInstallation bool
|
||||
|
||||
// currently, this is only used to skip the nvidia-smi check after driver upgrade
|
||||
// because the nvidia-smi will not work after upgrade (Failed to initialize NVML: Driver/library version mismatch)
|
||||
// otherwise, always check the driver is running properly after installation to fail early and avoid other issues
|
||||
SkipNVMLCheckAfterInstall bool
|
||||
}
|
||||
|
||||
func (m *InstallDriversModule) IsSkip() bool {
|
||||
@@ -34,14 +30,14 @@ func (m *InstallDriversModule) IsSkip() bool {
|
||||
func (m *InstallDriversModule) Init() {
|
||||
m.Name = "InstallGPUDriver"
|
||||
|
||||
installCudaDeps := &task.RemoteTask{
|
||||
Name: "InstallCudaKeyRing",
|
||||
installCudaDriver := &task.RemoteTask{ // not for WSL
|
||||
Name: "InstallNvidiaDriver",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(CudaNotInstalled),
|
||||
&NvidiaGraphicsCard{ExitOnNotFound: m.FailOnNoInstallation},
|
||||
},
|
||||
Action: &InstallCudaDeps{
|
||||
Action: &InstallCudaDriver{
|
||||
ManifestAction: manifest.ManifestAction{
|
||||
Manifest: m.Manifest,
|
||||
BaseDir: m.BaseDir,
|
||||
@@ -51,20 +47,7 @@ func (m *InstallDriversModule) Init() {
|
||||
Retry: 1,
|
||||
}
|
||||
|
||||
installCudaDriver := &task.RemoteTask{ // not for WSL
|
||||
Name: "InstallNvidiaDriver",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(CudaNotInstalled),
|
||||
&NvidiaGraphicsCard{ExitOnNotFound: m.FailOnNoInstallation},
|
||||
},
|
||||
Action: &InstallCudaDriver{SkipNVMLCheckAfterInstall: m.SkipNVMLCheckAfterInstall},
|
||||
Parallel: false,
|
||||
Retry: 1,
|
||||
}
|
||||
|
||||
m.Tasks = []task.Interface{
|
||||
installCudaDeps,
|
||||
installCudaDriver,
|
||||
}
|
||||
}
|
||||
@@ -364,13 +347,20 @@ func (l *UninstallCudaModule) Init() {
|
||||
|
||||
}
|
||||
|
||||
type ExitIfNoDriverUpgradeNeededModule struct {
|
||||
type DisableNouveauModule struct {
|
||||
common.KubeModule
|
||||
}
|
||||
|
||||
func (l *ExitIfNoDriverUpgradeNeededModule) Init() {
|
||||
l.Tasks = append(l.Tasks, &task.LocalTask{
|
||||
Action: new(ExitIfNoDriverUpgradeNeeded),
|
||||
})
|
||||
func (m *DisableNouveauModule) Init() {
|
||||
m.Name = "DisableNouveau"
|
||||
|
||||
writeBlacklist := &task.LocalTask{
|
||||
Name: "WriteNouveauBlacklist",
|
||||
Action: new(WriteNouveauBlacklist),
|
||||
Retry: 1,
|
||||
}
|
||||
|
||||
m.Tasks = []task.Interface{
|
||||
writeBlacklist,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,12 +3,14 @@ package gpu
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
|
||||
"github.com/beclab/Olares/cli/pkg/clientset"
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/logger"
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -32,35 +34,33 @@ func (p *GPUEnablePrepare) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
|
||||
type CudaInstalled struct {
|
||||
common.KubePrepare
|
||||
precheck.CudaCheckTask
|
||||
FailOnNoInstallation bool
|
||||
}
|
||||
|
||||
func (p *CudaInstalled) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
err := p.CudaCheckTask.Execute(runtime)
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
if err == precheck.ErrCudaInstalled {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
if st == nil || !st.Installed {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type CudaNotInstalled struct {
|
||||
common.KubePrepare
|
||||
precheck.CudaCheckTask
|
||||
}
|
||||
|
||||
func (p *CudaNotInstalled) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
err := p.CudaCheckTask.Execute(runtime)
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
if err == precheck.ErrCudaInstalled {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
if st == nil || !st.Installed {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
type K8sNodeInstalled struct {
|
||||
@@ -97,9 +97,6 @@ type NvidiaGraphicsCard struct {
|
||||
}
|
||||
|
||||
func (p *NvidiaGraphicsCard) PreCheck(runtime connector.Runtime) (found bool, err error) {
|
||||
if runtime.RemoteHost().GetOs() == common.Darwin {
|
||||
return false, nil
|
||||
}
|
||||
defer func() {
|
||||
if !p.ExitOnNotFound {
|
||||
return
|
||||
@@ -109,20 +106,15 @@ func (p *NvidiaGraphicsCard) PreCheck(runtime connector.Runtime) (found bool, er
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
output, err := runtime.GetRunner().SudoCmd(
|
||||
"lspci | grep -i -e vga -e 3d | grep -i nvidia", false, false)
|
||||
// an empty grep also results in the exit code to be 1
|
||||
// and thus a non-nil err
|
||||
model, _, err := utils.DetectNvidiaModelAndArch(runtime)
|
||||
if err != nil {
|
||||
logger.Debug("try to find nvidia graphics card error ", err)
|
||||
logger.Debug("ignore card driver installation")
|
||||
logger.Debugf("detect NVIDIA GPU error: %v", err)
|
||||
}
|
||||
if strings.TrimSpace(model) == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
logger.Info("found nvidia graphics card: ", output)
|
||||
}
|
||||
return output != "", nil
|
||||
logger.Infof("found NVIDIA GPU: %s", model)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type ContainerdInstalled struct {
|
||||
|
||||
@@ -10,11 +10,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/beclab/Olares/cli/apis/kubekey/v1alpha2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
|
||||
"github.com/beclab/Olares/cli/pkg/clientset"
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
cc "github.com/beclab/Olares/cli/pkg/core/common"
|
||||
@@ -39,10 +36,7 @@ type CheckWslGPU struct {
|
||||
|
||||
func (t *CheckWslGPU) CheckNvidiaSmiFileExists() bool {
|
||||
var nvidiaSmiFile = "/usr/lib/wsl/lib/nvidia-smi"
|
||||
if !util.IsExist(nvidiaSmiFile) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return util.IsExist(nvidiaSmiFile)
|
||||
}
|
||||
|
||||
func (t *CheckWslGPU) Execute(runtime *common.KubeRuntime) {
|
||||
@@ -66,88 +60,41 @@ func (t *CheckWslGPU) Execute(runtime *common.KubeRuntime) {
|
||||
runtime.Arg.SetGPU(true)
|
||||
}
|
||||
|
||||
type InstallCudaDeps struct {
|
||||
type InstallCudaDriver struct {
|
||||
common.KubeAction
|
||||
manifest.ManifestAction
|
||||
}
|
||||
|
||||
func (t *InstallCudaDeps) Execute(runtime connector.Runtime) error {
|
||||
var systemInfo = runtime.GetSystemInfo()
|
||||
var cudaKeyringVersion string
|
||||
var osVersion string
|
||||
switch {
|
||||
case systemInfo.IsUbuntu():
|
||||
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_0
|
||||
if systemInfo.IsUbuntuVersionEqual(connector.Ubuntu24) || systemInfo.IsUbuntuVersionEqual(connector.Ubuntu25) {
|
||||
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_1
|
||||
osVersion = "24.04"
|
||||
} else if systemInfo.IsUbuntuVersionEqual(connector.Ubuntu22) {
|
||||
osVersion = "22.04"
|
||||
} else {
|
||||
osVersion = "20.04"
|
||||
}
|
||||
case systemInfo.IsDebian():
|
||||
cudaKeyringVersion = v1alpha2.CudaKeyringVersion1_1
|
||||
if systemInfo.IsDebianVersionEqual(connector.Debian12) {
|
||||
osVersion = connector.Debian12.String()
|
||||
} else {
|
||||
osVersion = connector.Debian11.String()
|
||||
}
|
||||
func (t *InstallCudaDriver) Execute(runtime connector.Runtime) error {
|
||||
_, _ = runtime.GetRunner().SudoCmd("apt-get update", false, true)
|
||||
// install build deps for dkms
|
||||
if _, err := runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends dkms build-essential linux-headers-$(uname -r)", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to install kernel build dependencies for nvidia runfile")
|
||||
}
|
||||
var fileId = fmt.Sprintf("%s-%s_cuda-keyring_%s-1",
|
||||
strings.ToLower(systemInfo.GetOsPlatformFamily()), osVersion, cudaKeyringVersion)
|
||||
|
||||
cudakeyring, err := t.Manifest.Get(fileId)
|
||||
// fetch runfile from manifest
|
||||
item, err := t.Manifest.Get("cuda-driver")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := cudakeyring.FilePath(t.BaseDir)
|
||||
var exists = util.IsExist(path)
|
||||
if !exists {
|
||||
return fmt.Errorf("Failed to find %s binary in %s", cudakeyring.Filename, path)
|
||||
runfile := item.FilePath(t.BaseDir)
|
||||
if !util.IsExist(runfile) {
|
||||
return fmt.Errorf("failed to find %s binary in %s", item.Filename, runfile)
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("dpkg -i --force all %s", path), false, true); err != nil {
|
||||
return err
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("chmod +x %s", runfile), false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to chmod +x runfile")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type InstallCudaDriver struct {
|
||||
common.KubeAction
|
||||
|
||||
SkipNVMLCheckAfterInstall bool
|
||||
}
|
||||
|
||||
func (t *InstallCudaDriver) Execute(runtime connector.Runtime) error {
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get update", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get update")
|
||||
}
|
||||
|
||||
if runtime.GetSystemInfo().IsDebian() {
|
||||
_, err := runtime.GetRunner().SudoCmd("apt-get -y install nvidia-open", false, true)
|
||||
return errors.Wrap(err, "failed to apt-get install nvidia-open")
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get -y install nvidia-kernel-open-575", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get install nvidia-kernel-open-575")
|
||||
}
|
||||
|
||||
if t.SkipNVMLCheckAfterInstall {
|
||||
return nil
|
||||
// execute runfile with required flags
|
||||
cmd := fmt.Sprintf("sh %s -z --no-x-check --allow-installation-with-running-driver --no-check-for-alternate-installs --dkms --rebuild-initramfs -s", runfile)
|
||||
if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to install nvidia driver via runfile")
|
||||
}
|
||||
|
||||
// now that the nvidia driver is installed,
|
||||
// the nvidia-smi should work correctly,
|
||||
// if not, a manual reboot is needed by the user
|
||||
_, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check nvidia driver status by executing nvidia-smi: %v", err)
|
||||
}
|
||||
|
||||
if !installed {
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil || st == nil || !st.Installed || st.Mismatch {
|
||||
logger.Error("ERROR: nvidia driver has been installed, but is not running properly, please reboot the machine and try again")
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -170,7 +117,7 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
|
||||
keyPath := gpgkey.FilePath(t.BaseDir)
|
||||
|
||||
if !util.IsExist(keyPath) {
|
||||
return fmt.Errorf("Failed to find %s binary in %s", gpgkey.Filename, keyPath)
|
||||
return fmt.Errorf("failed to find %s binary in %s", gpgkey.Filename, keyPath)
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("install -d -m 0755 /usr/share/keyrings", false, true); err != nil {
|
||||
@@ -190,7 +137,7 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
|
||||
libPath := libnvidia.FilePath(t.BaseDir)
|
||||
|
||||
if !util.IsExist(libPath) {
|
||||
return fmt.Errorf("Failed to find %s binary in %s", libnvidia.Filename, libPath)
|
||||
return fmt.Errorf("failed to find %s binary in %s", libnvidia.Filename, libPath)
|
||||
}
|
||||
|
||||
// remove any conflicting libnvidia-container.list
|
||||
@@ -209,19 +156,30 @@ func (t *UpdateNvidiaContainerToolkitSource) Execute(runtime connector.Runtime)
|
||||
return err
|
||||
}
|
||||
|
||||
mirrorRepo := os.Getenv(common.ENV_NVIDIA_CONTAINER_REPO_MIRROR)
|
||||
if mirrorRepo == "" {
|
||||
// decide mirror based on OLARES_SYSTEM_CDN_SERVICE
|
||||
var mirrorHost string
|
||||
cdnService := os.Getenv(common.ENV_OLARES_CDN_SERVICE)
|
||||
if cdnService != "" {
|
||||
cdnRaw := cdnService
|
||||
if !strings.HasPrefix(cdnRaw, "http") {
|
||||
cdnRaw = "https://" + cdnRaw
|
||||
}
|
||||
if cdnURL, err := url.Parse(cdnRaw); err == nil {
|
||||
host := cdnURL.Host
|
||||
if host == "" {
|
||||
host = cdnService
|
||||
}
|
||||
if strings.HasSuffix(host, "olares.cn") {
|
||||
mirrorHost = "mirrors.ustc.edu.cn"
|
||||
}
|
||||
} else if strings.HasSuffix(cdnService, "olares.cn") {
|
||||
mirrorHost = "mirrors.ustc.edu.cn"
|
||||
}
|
||||
}
|
||||
if mirrorHost == "" {
|
||||
return nil
|
||||
}
|
||||
mirrorRepoRawURL := mirrorRepo
|
||||
if !strings.HasPrefix(mirrorRepoRawURL, "http") {
|
||||
mirrorRepoRawURL = "https://" + mirrorRepoRawURL
|
||||
}
|
||||
mirrorRepoURL, err := url.Parse(mirrorRepoRawURL)
|
||||
if err != nil || mirrorRepoURL.Host == "" {
|
||||
return fmt.Errorf("invalid mirror for nvidia container: %s", mirrorRepo)
|
||||
}
|
||||
cmd = fmt.Sprintf("sed -i 's#nvidia.github.io#%s#g' %s", mirrorRepoURL.Host, dstPath)
|
||||
cmd = fmt.Sprintf("sed -i 's#nvidia.github.io#%s#g' %s", mirrorHost, dstPath)
|
||||
if _, err := runtime.GetRunner().SudoCmd(cmd, false, false); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to switch nvidia container repo to mirror site")
|
||||
}
|
||||
@@ -233,9 +191,21 @@ type InstallNvidiaContainerToolkit struct {
|
||||
}
|
||||
|
||||
func (t *InstallNvidiaContainerToolkit) Execute(runtime connector.Runtime) error {
|
||||
containerdDropInDir := "/etc/containerd/config.d"
|
||||
containerdConfigFile := "/etc/containerd/config.toml"
|
||||
if util.IsExist(containerdDropInDir) {
|
||||
if err := os.RemoveAll(containerdDropInDir); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to remove containerd drop-in directory")
|
||||
}
|
||||
}
|
||||
if util.IsExist(containerdConfigFile) {
|
||||
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("sed -i '/^import/d' %s", containerdConfigFile), false, false); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to remove import section from containerd config file")
|
||||
}
|
||||
}
|
||||
logger.Debugf("install nvidia-container-toolkit")
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get update && sudo apt-get install -y nvidia-container-toolkit jq", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get install nvidia-container-toolkit")
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get update && sudo apt-get install -y --allow-downgrades nvidia-container-toolkit=1.17.9-1 nvidia-container-toolkit-base=1.17.9-1 jq", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to apt-get install nvidia-container-toolkit")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -382,7 +352,7 @@ func (g *GetCudaVersion) Execute(runtime connector.Runtime) error {
|
||||
|
||||
lines := strings.Split(res, "\n")
|
||||
|
||||
if lines == nil || len(lines) == 0 {
|
||||
if len(lines) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, line := range lines {
|
||||
@@ -403,7 +373,6 @@ func (g *GetCudaVersion) Execute(runtime connector.Runtime) error {
|
||||
|
||||
type UpdateNodeLabels struct {
|
||||
common.KubeAction
|
||||
precheck.CudaCheckTask
|
||||
}
|
||||
|
||||
func (u *UpdateNodeLabels) Execute(runtime connector.Runtime) error {
|
||||
@@ -412,32 +381,26 @@ func (u *UpdateNodeLabels) Execute(runtime connector.Runtime) error {
|
||||
return errors.Wrap(errors.WithStack(err), "kubeclient create error")
|
||||
}
|
||||
|
||||
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !installed {
|
||||
logger.Info("nvidia-smi not exists")
|
||||
if st == nil || !st.Installed {
|
||||
logger.Info("NVIDIA driver is not installed")
|
||||
return nil
|
||||
}
|
||||
|
||||
supported := "false"
|
||||
|
||||
err = u.CudaCheckTask.Execute(runtime)
|
||||
switch {
|
||||
case err == precheck.ErrCudaInstalled:
|
||||
if st.Installed {
|
||||
supported = "true"
|
||||
case err == precheck.ErrUnsupportedCudaVersion:
|
||||
// bypass
|
||||
case err != nil:
|
||||
return err
|
||||
case err == nil:
|
||||
// impossible
|
||||
logger.Warn("check impossible")
|
||||
}
|
||||
|
||||
return UpdateNodeGpuLabel(context.Background(), client.Kubernetes(), &gpuInfo.DriverVersion, &gpuInfo.CudaVersion, &supported)
|
||||
driverVersion := st.DriverVersion
|
||||
if st.Mismatch && st.LibraryVersion != "" {
|
||||
driverVersion = st.LibraryVersion
|
||||
}
|
||||
|
||||
return UpdateNodeGpuLabel(context.Background(), client.Kubernetes(), &driverVersion, &st.CudaVersion, &supported)
|
||||
}
|
||||
|
||||
type RemoveNodeLabels struct {
|
||||
@@ -586,16 +549,44 @@ type UninstallNvidiaDrivers struct {
|
||||
}
|
||||
|
||||
func (t *UninstallNvidiaDrivers) Execute(runtime connector.Runtime) error {
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get -y remove nvidia*", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get remove nvidia*")
|
||||
_, _ = runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get -y autoremove --purge", false, true)
|
||||
_, _ = runtime.GetRunner().SudoCmd("dpkg --configure -a || true", false, true)
|
||||
listCmd := "dpkg -l | awk '/^(ii|i[UuFHWt]|rc|..R)/ {print $2}' | grep nvidia | grep -v container"
|
||||
pkgs, _ := runtime.GetRunner().SudoCmd(listCmd, false, false)
|
||||
pkgs = strings.ReplaceAll(pkgs, "\n", " ")
|
||||
pkgs = strings.TrimSpace(pkgs)
|
||||
if pkgs != "" {
|
||||
removeCmd := fmt.Sprintf("DEBIAN_FRONTEND=noninteractive apt-get -y --auto-remove --purge remove %s", pkgs)
|
||||
if _, err := runtime.GetRunner().SudoCmd(removeCmd, false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to remove nvidia packages via apt-get")
|
||||
}
|
||||
_, _ = runtime.GetRunner().SudoCmd("DEBIAN_FRONTEND=noninteractive apt-get -y autoremove --purge", false, true)
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("apt-get -y remove libnvidia*", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to apt-get remove libnvidia*")
|
||||
// also try to uninstall runfile-installed drivers if present
|
||||
if out, _ := runtime.GetRunner().SudoCmd("test -x /usr/bin/nvidia-uninstall && echo yes || true", false, false); strings.TrimSpace(out) == "yes" {
|
||||
if _, err := runtime.GetRunner().SudoCmd("/usr/bin/nvidia-uninstall -s", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to uninstall NVIDIA driver via nvidia-uninstall")
|
||||
}
|
||||
} else if out2, _ := runtime.GetRunner().SudoCmd("test -x /usr/bin/nvidia-installer && echo yes || true", false, false); strings.TrimSpace(out2) == "yes" {
|
||||
if _, err := runtime.GetRunner().SudoCmd("/usr/bin/nvidia-installer --uninstall -s", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to uninstall NVIDIA driver via nvidia-installer --uninstall")
|
||||
}
|
||||
}
|
||||
|
||||
// clean up any leftover dkms-installed kernel modules for nvidia if present
|
||||
// only remove .ko files under updates/dkms to avoid removing other modules
|
||||
checkLeftoverCmd := "sh -c 'test -d /lib/modules/$(uname -r)/updates/dkms && find /lib/modules/$(uname -r)/updates/dkms -maxdepth 1 -type f -name \"nvidia*.ko\" -print -quit | grep -q . && echo yes || true'"
|
||||
if out, _ := runtime.GetRunner().SudoCmd(checkLeftoverCmd, false, false); strings.TrimSpace(out) == "yes" {
|
||||
if _, err := runtime.GetRunner().SudoCmd("find /lib/modules/$(uname -r)/updates/dkms -maxdepth 1 -type f -name 'nvidia*.ko' -print -delete", false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "Failed to remove leftover nvidia dkms kernel modules")
|
||||
}
|
||||
// refresh module dependency maps
|
||||
if _, err := runtime.GetRunner().SudoCmd("depmod -a $(uname -r)", false, true); err != nil {
|
||||
logger.Error("Failed to refresh module dependency maps: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
logger.Infof("uninstall nvidia drivers success, please reboot the system to take effect if you reinstall the new nvidia drivers")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -604,19 +595,43 @@ type PrintGpuStatus struct {
|
||||
}
|
||||
|
||||
func (t *PrintGpuStatus) Execute(runtime connector.Runtime) error {
|
||||
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
st, err := utils.GetNvidiaStatus(runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !installed {
|
||||
logger.Info("cuda not exists")
|
||||
if st == nil {
|
||||
logger.Info("no NVIDIA GPU status available")
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Infof("GPU Driver Version: %s", gpuInfo.DriverVersion)
|
||||
logger.Infof("CUDA Version: %s", gpuInfo.CudaVersion)
|
||||
|
||||
// basic status
|
||||
logger.Infof("Installed: %t", st.Installed)
|
||||
if st.Installed {
|
||||
logger.Infof("Install method: %s", st.InstallMethod)
|
||||
}
|
||||
logger.Infof("Running: %t", st.Running)
|
||||
// running (kernel) driver version
|
||||
if st.Running && strings.TrimSpace(st.DriverVersion) != "" {
|
||||
logger.Infof("Running driver version (kernel): %s", st.DriverVersion)
|
||||
}
|
||||
// userland info from nvidia-smi (when available)
|
||||
if st.Installed {
|
||||
if st.Info != nil && strings.TrimSpace(st.Info.DriverVersion) != "" {
|
||||
logger.Infof("Installed driver version (nvidia-smi): %s", st.Info.DriverVersion)
|
||||
}
|
||||
if strings.TrimSpace(st.CudaVersion) != "" {
|
||||
logger.Infof("CUDA version (nvidia-smi): %s", st.CudaVersion)
|
||||
}
|
||||
if st.Mismatch {
|
||||
if strings.TrimSpace(st.LibraryVersion) != "" {
|
||||
logger.Warnf("Driver/library version mismatch, NVML library version: %s", st.LibraryVersion)
|
||||
} else {
|
||||
logger.Warn("Driver/library version mismatch detected")
|
||||
}
|
||||
}
|
||||
}
|
||||
if !st.Installed && !st.Running {
|
||||
logger.Info("no NVIDIA driver detected (neither installed nor running)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -688,31 +703,39 @@ func (t *RestartPlugin) Execute(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExitIfNoDriverUpgradeNeeded struct {
|
||||
type WriteNouveauBlacklist struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *ExitIfNoDriverUpgradeNeeded) Execute(runtime connector.Runtime) error {
|
||||
gpuInfo, installed, err := utils.ExecNvidiaSmi(runtime)
|
||||
if err != nil {
|
||||
logger.Warn("error checking whether the GPU need upgrade:")
|
||||
logger.Warn(err.Error())
|
||||
logger.Warn("assuming an upgrade is needed and continue upgrading")
|
||||
func (t *WriteNouveauBlacklist) Execute(runtime connector.Runtime) error {
|
||||
if !runtime.GetSystemInfo().IsLinux() {
|
||||
return nil
|
||||
}
|
||||
if !installed {
|
||||
logger.Info("GPU driver not installed, will just install it")
|
||||
return nil
|
||||
const dir = "/usr/lib/modprobe.d"
|
||||
const dst = "/usr/lib/modprobe.d/olares-disable-nouveau.conf"
|
||||
const content = "blacklist nouveau\nblacklist lbm-nouveau\nalias nouveau off\nalias lbm-nouveau off\n"
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("install -d -m 0755 "+dir, false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to ensure /usr/lib/modprobe.d exists")
|
||||
}
|
||||
installedVersion, err := semver.NewVersion(gpuInfo.CudaVersion)
|
||||
if err != nil {
|
||||
logger.Warn("error parsing the current CUDA version of GPU driver \"%s\": %v", gpuInfo.CudaVersion, err)
|
||||
logger.Warn("assuming an upgrade is needed and continue installing")
|
||||
return nil
|
||||
|
||||
tmpPath := path.Join(runtime.GetBaseDir(), cc.PackageCacheDir, "gpu", "olares-disable-nouveau.conf")
|
||||
if err := os.MkdirAll(path.Dir(tmpPath), 0755); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to create temp dir for nouveau blacklist")
|
||||
}
|
||||
targetVersion, _ := semver.NewVersion(common.CurrentVerifiedCudaVersion)
|
||||
if !targetVersion.GreaterThan(installedVersion) {
|
||||
logger.Info("current GPU driver version is up to date, no need to upgrade")
|
||||
if err := util.WriteFile(tmpPath, []byte(content), 0644); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to write temp nouveau blacklist file")
|
||||
}
|
||||
if err := runtime.GetRunner().SudoScp(tmpPath, dst); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to install nouveau blacklist file")
|
||||
}
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd("update-initramfs -u", false, false); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "failed to update initramfs")
|
||||
}
|
||||
|
||||
if out, _ := runtime.GetRunner().SudoCmd("test -d /sys/module/nouveau && echo loaded || true", false, false); strings.TrimSpace(out) == "loaded" {
|
||||
logger.Infof("the disable file for nouveau kernel module has been written, but the nouveau kernel module is currently loaded. Please REBOOT your machine to make the disabling effective.")
|
||||
os.Exit(0)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -202,12 +202,17 @@ func (g *GenerateK3sService) Execute(runtime connector.Runtime) error {
|
||||
"runtime-request-timeout": "5m",
|
||||
"image-gc-high-threshold": "91",
|
||||
"image-gc-low-threshold": "90",
|
||||
"housekeeping_interval": "5s",
|
||||
}
|
||||
defaultKubeProxyArgs := map[string]string{
|
||||
"proxy-mode": "ipvs",
|
||||
}
|
||||
|
||||
kubeApiserverArgs, _ := util.GetArgs(map[string]string{}, g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
|
||||
defaultKubeApiServerArgs := map[string]string{
|
||||
"service-node-port-range": "445-32767",
|
||||
}
|
||||
|
||||
kubeApiserverArgs, _ := util.GetArgs(defaultKubeApiServerArgs, g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
|
||||
kubeControllerManager, _ := util.GetArgs(map[string]string{
|
||||
"terminated-pod-gc-threshold": "1",
|
||||
}, g.KubeConf.Cluster.Kubernetes.ControllerManagerArgs)
|
||||
|
||||
@@ -162,17 +162,19 @@ var (
|
||||
}
|
||||
|
||||
ApiServerArgs = map[string]string{
|
||||
"bind-address": "0.0.0.0",
|
||||
"audit-log-maxage": "30",
|
||||
"audit-log-maxbackup": "10",
|
||||
"audit-log-maxsize": "100",
|
||||
"bind-address": "0.0.0.0",
|
||||
"audit-log-maxage": "30",
|
||||
"audit-log-maxbackup": "10",
|
||||
"audit-log-maxsize": "100",
|
||||
"service-node-port-range": "445-32767",
|
||||
}
|
||||
ApiServerSecurityArgs = map[string]string{
|
||||
"bind-address": "0.0.0.0",
|
||||
"audit-log-maxage": "30",
|
||||
"audit-log-maxbackup": "10",
|
||||
"audit-log-maxsize": "100",
|
||||
"authorization-mode": "Node,RBAC",
|
||||
"bind-address": "0.0.0.0",
|
||||
"audit-log-maxage": "30",
|
||||
"audit-log-maxbackup": "10",
|
||||
"audit-log-maxsize": "100",
|
||||
"service-node-port-range": "445-32767",
|
||||
"authorization-mode": "Node,RBAC",
|
||||
// --enable-admission-plugins=EventRateLimit must have a configuration file
|
||||
"enable-admission-plugins": "AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity",
|
||||
// "audit-log-path": "/var/log/apiserver/audit.log", // need audit policy
|
||||
|
||||
@@ -3,13 +3,14 @@ package kubesphere
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/beclab/Olares/cli/pkg/storage"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/storage"
|
||||
|
||||
"github.com/containerd/containerd/plugin"
|
||||
"github.com/pelletier/go-toml"
|
||||
|
||||
@@ -470,6 +471,7 @@ func (t *InitMinikubeNs) Execute(runtime connector.Runtime) error {
|
||||
common.NamespaceKubekeySystem,
|
||||
common.NamespaceKubesphereSystem,
|
||||
common.NamespaceKubesphereMonitoringSystem,
|
||||
common.NamespaceKubesphereControlsSystem,
|
||||
}
|
||||
|
||||
for _, ns := range allNs {
|
||||
|
||||
@@ -17,12 +17,9 @@
|
||||
package kubesphere
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/core/action"
|
||||
"github.com/beclab/Olares/cli/pkg/core/logger"
|
||||
"github.com/beclab/Olares/cli/pkg/version/kubesphere/templates"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/prepare"
|
||||
@@ -61,81 +58,19 @@ func (d *DeployModule) Init() {
|
||||
d.Name = "DeployKubeSphereModule"
|
||||
d.Desc = "Deploy KubeSphere"
|
||||
|
||||
generateManifests := &task.RemoteTask{
|
||||
Name: "GenerateKsInstallerCRD",
|
||||
Desc: "Generate KubeSphere ks-installer crd manifests",
|
||||
Hosts: d.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: &action.Template{
|
||||
Name: "GenerateKsInstallerCRD",
|
||||
Template: templates.KsInstaller,
|
||||
Dst: filepath.Join(common.KubeAddonsDir, templates.KsInstaller.Name()),
|
||||
},
|
||||
Parallel: false,
|
||||
}
|
||||
|
||||
addConfig := &task.RemoteTask{
|
||||
Name: "AddKsInstallerConfig",
|
||||
Desc: "Add config to ks-installer manifests",
|
||||
Hosts: d.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(AddInstallerConfig),
|
||||
Parallel: false,
|
||||
}
|
||||
|
||||
createNamespace := &task.RemoteTask{
|
||||
Name: "CreateKubeSphereNamespace",
|
||||
Desc: "Create the kubesphere namespace",
|
||||
Hosts: d.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(CreateNamespace),
|
||||
Parallel: false,
|
||||
}
|
||||
|
||||
setup := &task.RemoteTask{
|
||||
Name: "SetupKsInstallerConfig",
|
||||
Desc: "Setup ks-installer config",
|
||||
Hosts: d.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(Setup), // todo
|
||||
Parallel: false,
|
||||
Retry: 1,
|
||||
}
|
||||
|
||||
apply := &task.RemoteTask{
|
||||
Name: "ApplyKsInstaller",
|
||||
Desc: "Apply ks-installer",
|
||||
Hosts: d.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(Apply),
|
||||
Parallel: false,
|
||||
Retry: 10,
|
||||
Delay: 5 * time.Second,
|
||||
}
|
||||
|
||||
d.Tasks = []task.Interface{
|
||||
generateManifests,
|
||||
// apply crd installer.kubesphere.io/v1alpha1
|
||||
// apply,
|
||||
addConfig,
|
||||
createNamespace,
|
||||
setup,
|
||||
apply,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -157,7 +92,6 @@ func (c *CheckResultModule) Init() {
|
||||
Hosts: c.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(Check),
|
||||
Parallel: false,
|
||||
@@ -170,7 +104,6 @@ func (c *CheckResultModule) Init() {
|
||||
Hosts: c.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(GetKubeCommand),
|
||||
Parallel: false,
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -11,5 +11,5 @@ data:
|
||||
notification:
|
||||
endpoint: http://notification-manager-svc.kubesphere-monitoring-system.svc:19093
|
||||
terminal:
|
||||
image: alpine:3.14
|
||||
timeout: 600
|
||||
image: beclab/alpine:3.14
|
||||
timeout: 7200
|
||||
|
||||
@@ -1,243 +0,0 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: system:kubesphere-router-clusterrole
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/ks-router
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- endpoints
|
||||
- nodes
|
||||
- pods
|
||||
- secrets
|
||||
- namespaces
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "networking.k8s.io"
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- "networking.k8s.io"
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: system:kubesphere-router-role
|
||||
namespace: kubesphere-controls-system
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/ks-router
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- pods
|
||||
- secrets
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
resourceNames:
|
||||
# Defaults to "<election-id>-<ingress-class>"
|
||||
# Here: "<ingress-controller-leader>-<nginx>"
|
||||
# This has to be adapted if you change either parameter
|
||||
# when launching the nginx-ingress-controller.
|
||||
- "ingress-controller-leader-nginx"
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kubesphere-router-serviceaccount
|
||||
namespace: kubesphere-controls-system
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/ks-router
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:nginx-ingress-clusterrole-nisa-binding
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/ks-router
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:kubesphere-router-clusterrole
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubesphere-router-serviceaccount
|
||||
namespace: kubesphere-controls-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: nginx-ingress-role-nisa-binding
|
||||
namespace: kubesphere-controls-system
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/ks-router
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: system:kubesphere-router-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubesphere-router-serviceaccount
|
||||
namespace: kubesphere-controls-system
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: default-http-backend
|
||||
namespace: kubesphere-controls-system
|
||||
labels:
|
||||
app: kubesphere
|
||||
component: kubesphere-router
|
||||
version: express-1.0.alpha
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/ks-router
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: kubesphere
|
||||
component: kubesphere-router
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: kubesphere
|
||||
component: kubesphere-router
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- name: default-http-backend
|
||||
# Any image is permissible as long as:
|
||||
# 1. It serves a 404 page at /
|
||||
# 2. It serves 200 on a /healthz endpoint
|
||||
image: {{ .Values.image.defaultbackend_repo }}:{{ .Values.image.defaultbackend_tag | default "latest" }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: default-http-backend
|
||||
namespace: kubesphere-controls-system
|
||||
labels:
|
||||
app: kubesphere
|
||||
component: kubesphere-router
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/ks-router
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app: kubesphere
|
||||
component: kubesphere-router
|
||||
|
||||
---
|
||||
# create a seviceaccount for kubectl pod
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kubesphere-cluster-admin
|
||||
namespace: kubesphere-controls-system
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/kubectl
|
||||
---
|
||||
# bind kubesphere-cluster-admin sa to clusterrole cluster-admin
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:kubesphere-cluster-admin
|
||||
annotations:
|
||||
kubernetes.io/created-by: kubesphere.io/kubectl
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubesphere-cluster-admin
|
||||
namespace: kubesphere-controls-system
|
||||
@@ -28,6 +28,7 @@ spec:
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
priorityClassName: "system-cluster-critical"
|
||||
containers:
|
||||
- command:
|
||||
- ks-apiserver
|
||||
|
||||
@@ -1,466 +0,0 @@
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.6.1
|
||||
creationTimestamp: null
|
||||
name: clusterdashboards.monitoring.kubesphere.io
|
||||
spec:
|
||||
group: monitoring.kubesphere.io
|
||||
names:
|
||||
kind: ClusterDashboard
|
||||
listKind: ClusterDashboardList
|
||||
plural: clusterdashboards
|
||||
singular: clusterdashboard
|
||||
scope: Cluster
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: ClusterDashboard is the Schema for the culsterdashboards API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: DashboardSpec defines the desired state of Dashboard
|
||||
properties:
|
||||
datasource:
|
||||
description: Dashboard datasource
|
||||
type: string
|
||||
description:
|
||||
description: Dashboard description
|
||||
type: string
|
||||
panels:
|
||||
description: Collection of panels. Panel is one of [Row](row.md),
|
||||
[Singlestat](#singlestat.md) or [Graph](graph.md)
|
||||
items:
|
||||
description: Supported panel
|
||||
properties:
|
||||
bars:
|
||||
description: A collection of queries Targets []Target `json:"targets,omitempty"`
|
||||
Display as a bar chart
|
||||
type: boolean
|
||||
colors:
|
||||
description: Set series color
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
decimals:
|
||||
description: Name of the signlestat panel Title string `json:"title,omitempty"`
|
||||
Must be `singlestat` Type string `json:"type"` Panel ID Id
|
||||
int64 `json:"id,omitempty"` A collection of queries Targets
|
||||
[]Target `json:"targets,omitempty"` Limit the decimal numbers
|
||||
format: int64
|
||||
type: integer
|
||||
description:
|
||||
description: Name of the graph panel Title string `json:"title,omitempty"`
|
||||
Must be `graph` Type string `json:"type"` Panel ID Id int64
|
||||
`json:"id,omitempty"` Panel description
|
||||
type: string
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
id:
|
||||
description: Panel ID
|
||||
format: int64
|
||||
type: integer
|
||||
lines:
|
||||
description: Display as a line chart
|
||||
type: boolean
|
||||
stack:
|
||||
description: Display as a stacked chart
|
||||
type: boolean
|
||||
targets:
|
||||
description: A collection of queries Only for panels with `graph`
|
||||
or `singlestat` type
|
||||
items:
|
||||
description: Query editor options
|
||||
properties:
|
||||
expr:
|
||||
description: Input for fetching metrics.
|
||||
type: string
|
||||
legendFormat:
|
||||
description: Legend format for outputs. You can make a
|
||||
dynamic legend with templating variables.
|
||||
type: string
|
||||
refId:
|
||||
description: Reference ID
|
||||
format: int64
|
||||
type: integer
|
||||
step:
|
||||
description: Set series time interval
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
title:
|
||||
description: Name of the panel
|
||||
type: string
|
||||
type:
|
||||
description: Panel Type, one of `row`, `graph`, `singlestat`
|
||||
type: string
|
||||
yaxes:
|
||||
description: Y-axis options
|
||||
items:
|
||||
properties:
|
||||
decimals:
|
||||
description: Limit the decimal numbers
|
||||
format: int64
|
||||
type: integer
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
templating:
|
||||
description: Templating variables
|
||||
items:
|
||||
description: Templating defines a variable, which can be used as
|
||||
a placeholder in query
|
||||
properties:
|
||||
name:
|
||||
description: Variable name
|
||||
type: string
|
||||
query:
|
||||
description: Set variable values to be the return result of
|
||||
the query
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
time:
|
||||
description: Time range for display
|
||||
properties:
|
||||
from:
|
||||
description: Start time in the format of `^now([+-][0-9]+[smhdwMy])?$`,
|
||||
eg. `now-1M`. It denotes the end time is set to the last month
|
||||
since now.
|
||||
type: string
|
||||
to:
|
||||
description: End time in the format of `^now([+-][0-9]+[smhdwMy])?$`,
|
||||
eg. `now-1M`. It denotes the start time is set to the last month
|
||||
since now.
|
||||
type: string
|
||||
type: object
|
||||
title:
|
||||
description: Dashboard title
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: false
|
||||
- name: v1alpha2
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: ClusterDashboard is the Schema for the culsterdashboards API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: DashboardSpec defines the desired state of Dashboard
|
||||
properties:
|
||||
annotations:
|
||||
description: Annotations
|
||||
items:
|
||||
properties:
|
||||
datasource:
|
||||
type: string
|
||||
enable:
|
||||
type: boolean
|
||||
expr:
|
||||
type: string
|
||||
iconColor:
|
||||
type: string
|
||||
iconSize:
|
||||
type: integer
|
||||
lineColor:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
query:
|
||||
type: string
|
||||
showLine:
|
||||
type: boolean
|
||||
step:
|
||||
type: string
|
||||
tagKeys:
|
||||
type: string
|
||||
tags:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
tagsField:
|
||||
type: string
|
||||
textField:
|
||||
type: string
|
||||
textFormat:
|
||||
type: string
|
||||
titleFormat:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
auto_refresh:
|
||||
type: string
|
||||
description:
|
||||
type: string
|
||||
editable:
|
||||
type: boolean
|
||||
id:
|
||||
type: integer
|
||||
panels:
|
||||
items:
|
||||
properties:
|
||||
bars:
|
||||
description: Display as a bar chart
|
||||
type: boolean
|
||||
colors:
|
||||
description: Set series color
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
content:
|
||||
type: string
|
||||
datasource:
|
||||
description: Datasource
|
||||
type: string
|
||||
decimals:
|
||||
format: int64
|
||||
type: integer
|
||||
description:
|
||||
description: Description
|
||||
type: string
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
gauge:
|
||||
description: gauge
|
||||
properties:
|
||||
maxValue:
|
||||
format: int64
|
||||
type: integer
|
||||
minValue:
|
||||
format: int64
|
||||
type: integer
|
||||
show:
|
||||
type: boolean
|
||||
thresholdLabels:
|
||||
type: boolean
|
||||
thresholdMarkers:
|
||||
type: boolean
|
||||
type: object
|
||||
height:
|
||||
description: Height
|
||||
type: string
|
||||
id:
|
||||
description: Panel ID
|
||||
format: int64
|
||||
type: integer
|
||||
legend:
|
||||
description: legend
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
lines:
|
||||
description: Display as a line chart
|
||||
type: boolean
|
||||
mode:
|
||||
type: string
|
||||
options:
|
||||
properties:
|
||||
colorMode:
|
||||
type: string
|
||||
content:
|
||||
type: string
|
||||
displayMode:
|
||||
type: string
|
||||
graphMode:
|
||||
type: string
|
||||
justifyMode:
|
||||
type: string
|
||||
mode:
|
||||
type: string
|
||||
orientation:
|
||||
type: string
|
||||
textMode:
|
||||
type: string
|
||||
type: object
|
||||
scroll:
|
||||
type: boolean
|
||||
sort:
|
||||
properties:
|
||||
col:
|
||||
type: integer
|
||||
desc:
|
||||
type: boolean
|
||||
type: object
|
||||
sparkline:
|
||||
description: 'spark line: full or bottom'
|
||||
type: string
|
||||
stack:
|
||||
description: Display as a stacked chart
|
||||
type: boolean
|
||||
targets:
|
||||
description: A collection of queries
|
||||
items:
|
||||
description: Query editor options Referers to https://pkg.go.dev/github.com/grafana-tools/sdk#Target
|
||||
properties:
|
||||
expr:
|
||||
description: 'only support prometheus,and the corresponding
|
||||
fields are as follows: Input for fetching metrics.'
|
||||
type: string
|
||||
legendFormat:
|
||||
description: Legend format for outputs. You can make a
|
||||
dynamic legend with templating variables.
|
||||
type: string
|
||||
refId:
|
||||
description: Reference ID
|
||||
format: int64
|
||||
type: integer
|
||||
step:
|
||||
description: Set series time interval
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
title:
|
||||
description: Name of the panel
|
||||
type: string
|
||||
type:
|
||||
description: Type of the panel
|
||||
type: string
|
||||
valueName:
|
||||
description: value name
|
||||
type: string
|
||||
xaxis:
|
||||
properties:
|
||||
decimals:
|
||||
description: Limit the decimal numbers
|
||||
format: int64
|
||||
type: integer
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
type: object
|
||||
yaxes:
|
||||
description: Y-axis options
|
||||
items:
|
||||
properties:
|
||||
decimals:
|
||||
description: Limit the decimal numbers
|
||||
format: int64
|
||||
type: integer
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
shared_crosshair:
|
||||
type: boolean
|
||||
tags:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
templatings:
|
||||
description: // Templating variables
|
||||
items:
|
||||
properties:
|
||||
allFormat:
|
||||
type: string
|
||||
allValue:
|
||||
type: string
|
||||
auto:
|
||||
type: boolean
|
||||
auto_count:
|
||||
type: integer
|
||||
datasource:
|
||||
type: string
|
||||
hide:
|
||||
type: integer
|
||||
includeAll:
|
||||
type: boolean
|
||||
label:
|
||||
type: string
|
||||
multi:
|
||||
type: boolean
|
||||
multiFormat:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
options:
|
||||
items:
|
||||
properties:
|
||||
selected:
|
||||
type: boolean
|
||||
text:
|
||||
type: string
|
||||
value:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
query:
|
||||
type: string
|
||||
regex:
|
||||
type: string
|
||||
sort:
|
||||
type: integer
|
||||
type:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
time:
|
||||
description: Time range
|
||||
properties:
|
||||
from:
|
||||
description: Start time in the format of `^now([+-][0-9]+[smhdwMy])?$`,
|
||||
eg. `now-1M`. It denotes the end time is set to the last month
|
||||
since now.
|
||||
type: string
|
||||
to:
|
||||
description: End time in the format of `^now([+-][0-9]+[smhdwMy])?$`,
|
||||
eg. `now-1M`. It denotes the start time is set to the last month
|
||||
since now.
|
||||
type: string
|
||||
type: object
|
||||
timezone:
|
||||
type: string
|
||||
title:
|
||||
type: string
|
||||
uid:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
@@ -1,470 +0,0 @@
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.6.1
|
||||
creationTimestamp: null
|
||||
name: dashboards.monitoring.kubesphere.io
|
||||
spec:
|
||||
group: monitoring.kubesphere.io
|
||||
names:
|
||||
kind: Dashboard
|
||||
listKind: DashboardList
|
||||
plural: dashboards
|
||||
singular: dashboard
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: Dashboard is the Schema for the dashboards API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: DashboardSpec defines the desired state of Dashboard
|
||||
properties:
|
||||
datasource:
|
||||
description: Dashboard datasource
|
||||
type: string
|
||||
description:
|
||||
description: Dashboard description
|
||||
type: string
|
||||
panels:
|
||||
description: Collection of panels. Panel is one of [Row](row.md),
|
||||
[Singlestat](#singlestat.md) or [Graph](graph.md)
|
||||
items:
|
||||
description: Supported panel
|
||||
properties:
|
||||
bars:
|
||||
description: A collection of queries Targets []Target `json:"targets,omitempty"`
|
||||
Display as a bar chart
|
||||
type: boolean
|
||||
colors:
|
||||
description: Set series color
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
decimals:
|
||||
description: Name of the signlestat panel Title string `json:"title,omitempty"`
|
||||
Must be `singlestat` Type string `json:"type"` Panel ID Id
|
||||
int64 `json:"id,omitempty"` A collection of queries Targets
|
||||
[]Target `json:"targets,omitempty"` Limit the decimal numbers
|
||||
format: int64
|
||||
type: integer
|
||||
description:
|
||||
description: Name of the graph panel Title string `json:"title,omitempty"`
|
||||
Must be `graph` Type string `json:"type"` Panel ID Id int64
|
||||
`json:"id,omitempty"` Panel description
|
||||
type: string
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
id:
|
||||
description: Panel ID
|
||||
format: int64
|
||||
type: integer
|
||||
lines:
|
||||
description: Display as a line chart
|
||||
type: boolean
|
||||
stack:
|
||||
description: Display as a stacked chart
|
||||
type: boolean
|
||||
targets:
|
||||
description: A collection of queries Only for panels with `graph`
|
||||
or `singlestat` type
|
||||
items:
|
||||
description: Query editor options
|
||||
properties:
|
||||
expr:
|
||||
description: Input for fetching metrics.
|
||||
type: string
|
||||
legendFormat:
|
||||
description: Legend format for outputs. You can make a
|
||||
dynamic legend with templating variables.
|
||||
type: string
|
||||
refId:
|
||||
description: Reference ID
|
||||
format: int64
|
||||
type: integer
|
||||
step:
|
||||
description: Set series time interval
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
title:
|
||||
description: Name of the panel
|
||||
type: string
|
||||
type:
|
||||
description: Panel Type, one of `row`, `graph`, `singlestat`
|
||||
type: string
|
||||
yaxes:
|
||||
description: Y-axis options
|
||||
items:
|
||||
properties:
|
||||
decimals:
|
||||
description: Limit the decimal numbers
|
||||
format: int64
|
||||
type: integer
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
templating:
|
||||
description: Templating variables
|
||||
items:
|
||||
description: Templating defines a variable, which can be used as
|
||||
a placeholder in query
|
||||
properties:
|
||||
name:
|
||||
description: Variable name
|
||||
type: string
|
||||
query:
|
||||
description: Set variable values to be the return result of
|
||||
the query
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
time:
|
||||
description: Time range for display
|
||||
properties:
|
||||
from:
|
||||
description: Start time in the format of `^now([+-][0-9]+[smhdwMy])?$`,
|
||||
eg. `now-1M`. It denotes the end time is set to the last month
|
||||
since now.
|
||||
type: string
|
||||
to:
|
||||
description: End time in the format of `^now([+-][0-9]+[smhdwMy])?$`,
|
||||
eg. `now-1M`. It denotes the start time is set to the last month
|
||||
since now.
|
||||
type: string
|
||||
type: object
|
||||
title:
|
||||
description: Dashboard title
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: false
|
||||
subresources:
|
||||
status: {}
|
||||
- name: v1alpha2
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: Dashboard is the Schema for the dashboards API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: DashboardSpec defines the desired state of Dashboard
|
||||
properties:
|
||||
annotations:
|
||||
description: Annotations
|
||||
items:
|
||||
properties:
|
||||
datasource:
|
||||
type: string
|
||||
enable:
|
||||
type: boolean
|
||||
expr:
|
||||
type: string
|
||||
iconColor:
|
||||
type: string
|
||||
iconSize:
|
||||
type: integer
|
||||
lineColor:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
query:
|
||||
type: string
|
||||
showLine:
|
||||
type: boolean
|
||||
step:
|
||||
type: string
|
||||
tagKeys:
|
||||
type: string
|
||||
tags:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
tagsField:
|
||||
type: string
|
||||
textField:
|
||||
type: string
|
||||
textFormat:
|
||||
type: string
|
||||
titleFormat:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
auto_refresh:
|
||||
type: string
|
||||
description:
|
||||
type: string
|
||||
editable:
|
||||
type: boolean
|
||||
id:
|
||||
type: integer
|
||||
panels:
|
||||
items:
|
||||
properties:
|
||||
bars:
|
||||
description: Display as a bar chart
|
||||
type: boolean
|
||||
colors:
|
||||
description: Set series color
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
content:
|
||||
type: string
|
||||
datasource:
|
||||
description: Datasource
|
||||
type: string
|
||||
decimals:
|
||||
format: int64
|
||||
type: integer
|
||||
description:
|
||||
description: Description
|
||||
type: string
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
gauge:
|
||||
description: gauge
|
||||
properties:
|
||||
maxValue:
|
||||
format: int64
|
||||
type: integer
|
||||
minValue:
|
||||
format: int64
|
||||
type: integer
|
||||
show:
|
||||
type: boolean
|
||||
thresholdLabels:
|
||||
type: boolean
|
||||
thresholdMarkers:
|
||||
type: boolean
|
||||
type: object
|
||||
height:
|
||||
description: Height
|
||||
type: string
|
||||
id:
|
||||
description: Panel ID
|
||||
format: int64
|
||||
type: integer
|
||||
legend:
|
||||
description: legend
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
lines:
|
||||
description: Display as a line chart
|
||||
type: boolean
|
||||
mode:
|
||||
type: string
|
||||
options:
|
||||
properties:
|
||||
colorMode:
|
||||
type: string
|
||||
content:
|
||||
type: string
|
||||
displayMode:
|
||||
type: string
|
||||
graphMode:
|
||||
type: string
|
||||
justifyMode:
|
||||
type: string
|
||||
mode:
|
||||
type: string
|
||||
orientation:
|
||||
type: string
|
||||
textMode:
|
||||
type: string
|
||||
type: object
|
||||
scroll:
|
||||
type: boolean
|
||||
sort:
|
||||
properties:
|
||||
col:
|
||||
type: integer
|
||||
desc:
|
||||
type: boolean
|
||||
type: object
|
||||
sparkline:
|
||||
description: 'spark line: full or bottom'
|
||||
type: string
|
||||
stack:
|
||||
description: Display as a stacked chart
|
||||
type: boolean
|
||||
targets:
|
||||
description: A collection of queries
|
||||
items:
|
||||
description: Query editor options Referers to https://pkg.go.dev/github.com/grafana-tools/sdk#Target
|
||||
properties:
|
||||
expr:
|
||||
description: 'only support prometheus,and the corresponding
|
||||
fields are as follows: Input for fetching metrics.'
|
||||
type: string
|
||||
legendFormat:
|
||||
description: Legend format for outputs. You can make a
|
||||
dynamic legend with templating variables.
|
||||
type: string
|
||||
refId:
|
||||
description: Reference ID
|
||||
format: int64
|
||||
type: integer
|
||||
step:
|
||||
description: Set series time interval
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
title:
|
||||
description: Name of the panel
|
||||
type: string
|
||||
type:
|
||||
description: Type of the panel
|
||||
type: string
|
||||
valueName:
|
||||
description: value name
|
||||
type: string
|
||||
xaxis:
|
||||
properties:
|
||||
decimals:
|
||||
description: Limit the decimal numbers
|
||||
format: int64
|
||||
type: integer
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
type: object
|
||||
yaxes:
|
||||
description: Y-axis options
|
||||
items:
|
||||
properties:
|
||||
decimals:
|
||||
description: Limit the decimal numbers
|
||||
format: int64
|
||||
type: integer
|
||||
format:
|
||||
description: Display unit
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
shared_crosshair:
|
||||
type: boolean
|
||||
tags:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
templatings:
|
||||
description: // Templating variables
|
||||
items:
|
||||
properties:
|
||||
allFormat:
|
||||
type: string
|
||||
allValue:
|
||||
type: string
|
||||
auto:
|
||||
type: boolean
|
||||
auto_count:
|
||||
type: integer
|
||||
datasource:
|
||||
type: string
|
||||
hide:
|
||||
type: integer
|
||||
includeAll:
|
||||
type: boolean
|
||||
label:
|
||||
type: string
|
||||
multi:
|
||||
type: boolean
|
||||
multiFormat:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
options:
|
||||
items:
|
||||
properties:
|
||||
selected:
|
||||
type: boolean
|
||||
text:
|
||||
type: string
|
||||
value:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
query:
|
||||
type: string
|
||||
regex:
|
||||
type: string
|
||||
sort:
|
||||
type: integer
|
||||
type:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
time:
|
||||
description: Time range
|
||||
properties:
|
||||
from:
|
||||
description: Start time in the format of `^now([+-][0-9]+[smhdwMy])?$`,
|
||||
eg. `now-1M`. It denotes the end time is set to the last month
|
||||
since now.
|
||||
type: string
|
||||
to:
|
||||
description: End time in the format of `^now([+-][0-9]+[smhdwMy])?$`,
|
||||
eg. `now-1M`. It denotes the start time is set to the last month
|
||||
since now.
|
||||
type: string
|
||||
type: object
|
||||
timezone:
|
||||
type: string
|
||||
title:
|
||||
type: string
|
||||
uid:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
@@ -35,6 +35,7 @@ spec:
|
||||
hostPath:
|
||||
path: /etc/localtime
|
||||
type: ""
|
||||
priorityClassName: "system-cluster-critical"
|
||||
containers:
|
||||
- args:
|
||||
- --host=127.0.0.1
|
||||
|
||||
@@ -29,7 +29,7 @@ spec:
|
||||
insecureSkipVerify: true
|
||||
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
honorLabels: true
|
||||
interval: 1m
|
||||
interval: 10s
|
||||
metricRelabelings:
|
||||
- action: keep
|
||||
regex: container_cpu_usage_seconds_total|container_memory_usage_bytes|container_memory_cache|container_network_.+_bytes_total|container_memory_working_set_bytes|container_cpu_cfs_.*periods_total|container_processes.*|container_threads.*
|
||||
|
||||
@@ -31,6 +31,7 @@ spec:
|
||||
- matchExpressions:
|
||||
- key: node-role.kubernetes.io/edge
|
||||
operator: DoesNotExist
|
||||
priorityClassName: "system-cluster-critical"
|
||||
containers:
|
||||
- args:
|
||||
- --web.listen-address=127.0.0.1:9100
|
||||
@@ -42,7 +43,7 @@ spec:
|
||||
- --collector.netdev.address-info
|
||||
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
|
||||
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
|
||||
image: beclab/node-exporter:0.0.4
|
||||
image: beclab/node-exporter:0.0.5
|
||||
name: node-exporter
|
||||
securityContext:
|
||||
privileged: true
|
||||
|
||||
@@ -10,6 +10,7 @@ metadata:
|
||||
name: k8s
|
||||
namespace: kubesphere-monitoring-system
|
||||
spec:
|
||||
priorityClassName: "system-cluster-critical"
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
|
||||
41
cli/pkg/kubesphere/plugins/ks_config.go
Normal file
41
cli/pkg/kubesphere/plugins/ks_config.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package plugins
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
cc "github.com/beclab/Olares/cli/pkg/core/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/logger"
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
|
||||
type ApplyKsConfigManifests struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *ApplyKsConfigManifests) Execute(runtime connector.Runtime) error {
|
||||
config, err := ctrl.GetConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var appKsConfigName = common.ChartNameKsConfig
|
||||
var appPath = path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, appKsConfigName)
|
||||
|
||||
actionConfig, settings, err := utils.InitConfig(config, common.NamespaceKubesphereSystem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var values = make(map[string]interface{})
|
||||
if err := utils.UpgradeCharts(context.Background(), actionConfig, settings, appKsConfigName,
|
||||
appPath, "", common.NamespaceKubesphereSystem, values, false); err != nil {
|
||||
logger.Errorf("failed to install %s chart: %v", appKsConfigName, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -21,24 +21,6 @@ type CreateKsCore struct {
|
||||
}
|
||||
|
||||
func (t *CreateKsCore) Execute(runtime connector.Runtime) error {
|
||||
//var kubectlpath, err = util.GetCommand(common.CommandKubectl)
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("kubectl not found")
|
||||
//}
|
||||
|
||||
//var cmd = fmt.Sprintf("%s get pod -n %s -l 'app=redis,tier=database,version=redis-4.0' -o jsonpath='{.items[0].status.phase}'", kubectlpath,
|
||||
// common.NamespaceKubesphereSystem)
|
||||
//rphase, err := runtime.GetRunner().Host.SudoCmd(cmd, false, false)
|
||||
//if rphase != "Running" {
|
||||
// return fmt.Errorf("Redis State %s", rphase)
|
||||
//}
|
||||
|
||||
masterNumIf, ok := t.PipelineCache.Get(common.CacheMasterNum)
|
||||
if !ok || masterNumIf == nil {
|
||||
return fmt.Errorf("failed to get master num")
|
||||
}
|
||||
masterNum := masterNumIf.(int64)
|
||||
|
||||
config, err := ctrl.GetConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -55,7 +37,7 @@ func (t *CreateKsCore) Execute(runtime connector.Runtime) error {
|
||||
var values = make(map[string]interface{})
|
||||
values["Release"] = map[string]string{
|
||||
"Namespace": common.NamespaceKubesphereSystem,
|
||||
"ReplicaCount": fmt.Sprintf("%d", masterNum),
|
||||
"ReplicaCount": fmt.Sprintf("%d", 1),
|
||||
}
|
||||
if err := utils.UpgradeCharts(context.Background(), actionConfig, settings, appKsCoreName,
|
||||
appPath, "", common.NamespaceKubesphereSystem, values, false); err != nil {
|
||||
@@ -78,7 +60,6 @@ func (m *DeployKsCoreModule) Init() {
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(CreateKsCore),
|
||||
Parallel: false,
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
@@ -21,129 +20,6 @@ import (
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
|
||||
var kscorecrds = []map[string]string{
|
||||
{
|
||||
"ns": "kubesphere-controls-system",
|
||||
"kind": "serviceaccounts",
|
||||
"resource": "kubesphere-cluster-admin",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-controls-system",
|
||||
"kind": "serviceaccounts",
|
||||
"resource": "kubesphere-router-serviceaccount",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-controls-system",
|
||||
"kind": "role",
|
||||
"resource": "system:kubesphere-router-role",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-controls-system",
|
||||
"kind": "rolebinding",
|
||||
"resource": "nginx-ingress-role-nisa-binding",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-controls-system",
|
||||
"kind": "deployment",
|
||||
"resource": "default-http-backend",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-controls-system",
|
||||
"kind": "service",
|
||||
"resource": "default-http-backend",
|
||||
"release": "ks-core",
|
||||
},
|
||||
//{
|
||||
// "ns": "kubesphere-system",
|
||||
// "kind": "secrets",
|
||||
// "resource": "ks-controller-manager-webhook-cert",
|
||||
// "release": "ks-core",
|
||||
//},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "serviceaccounts",
|
||||
"resource": "kubesphere",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "clusterroles",
|
||||
"resource": "system:kubesphere-router-clusterrole",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "clusterrolebindings",
|
||||
"resource": "system:nginx-ingress-clusterrole-nisa-binding",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "clusterrolebindings",
|
||||
"resource": "system:kubesphere-cluster-admin",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "clusterrolebindings",
|
||||
"resource": "kubesphere",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "services",
|
||||
"resource": "ks-apiserver",
|
||||
"release": "ks-core",
|
||||
},
|
||||
//{
|
||||
// "ns": "kubesphere-system",
|
||||
// "kind": "services",
|
||||
// "resource": "ks-controller-manager",
|
||||
// "release": "ks-core",
|
||||
//},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "deployments",
|
||||
"resource": "ks-apiserver",
|
||||
"release": "ks-core",
|
||||
},
|
||||
//{
|
||||
// "ns": "kubesphere-system",
|
||||
// "kind": "deployments",
|
||||
// "resource": "ks-controller-manager",
|
||||
// "release": "ks-core",
|
||||
//},
|
||||
//{
|
||||
// "ns": "kubesphere-system",
|
||||
// "kind": "validatingwebhookconfigurations",
|
||||
// "resource": "users.iam.kubesphere.io",
|
||||
// "release": "ks-core",
|
||||
//},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "validatingwebhookconfigurations",
|
||||
"resource": "resourcesquotas.quota.kubesphere.io",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "validatingwebhookconfigurations",
|
||||
"resource": "network.kubesphere.io",
|
||||
"release": "ks-core",
|
||||
},
|
||||
{
|
||||
"ns": "kubesphere-system",
|
||||
"kind": "users.iam.kubesphere.io",
|
||||
"resource": "admin",
|
||||
"release": "ks-core",
|
||||
},
|
||||
}
|
||||
|
||||
type CreateKsRole struct {
|
||||
common.KubeAction
|
||||
}
|
||||
@@ -167,42 +43,11 @@ func (t *CreateKsRole) Execute(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type PatchKsCoreStatus struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *PatchKsCoreStatus) Execute(runtime connector.Runtime) error {
|
||||
//var kubectlpath, _ = t.PipelineCache.GetMustString(common.CacheCommandKubectlPath)
|
||||
//if kubectlpath == "" {
|
||||
// kubectlpath = path.Join(common.BinDir, common.CommandKubectl)
|
||||
//}
|
||||
//
|
||||
//var jsonPath = fmt.Sprintf(`{\"status\": {\"core\": {\"status\": \"enabled\", \"enabledTime\": \"%s\"}}}`, time.Now().Format("2006-01-02T15:04:05Z"))
|
||||
//var cmd = fmt.Sprintf("%s patch cc ks-installer --type merge -p '%s' -n %s", kubectlpath, jsonPath, common.NamespaceKubesphereSystem)
|
||||
//
|
||||
//_, err := runtime.GetRunner().Host.SudoCmd(cmd, false, true)
|
||||
//if err != nil {
|
||||
// return errors.Wrap(errors.WithStack(err), "patch ks-core status failed")
|
||||
//}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreateKsCoreConfig struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *CreateKsCoreConfig) Execute(runtime connector.Runtime) error {
|
||||
jwtSecretIf, ok := t.PipelineCache.Get(common.CacheJwtSecret)
|
||||
if !ok || jwtSecretIf == nil {
|
||||
return fmt.Errorf("failed to get jwt secret")
|
||||
}
|
||||
|
||||
kubeVersionIf, ok := t.PipelineCache.Get(common.CacheKubeletVersion)
|
||||
if !ok || kubeVersionIf == nil {
|
||||
return fmt.Errorf("failed to get kubelet version")
|
||||
}
|
||||
|
||||
config, err := ctrl.GetConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -230,13 +75,8 @@ func (t *CreateKsCoreConfig) Execute(runtime connector.Runtime) error {
|
||||
// create ks-config
|
||||
var appKsConfigName = common.ChartNameKsConfig
|
||||
appPath = path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, appKsConfigName)
|
||||
values = make(map[string]interface{})
|
||||
values["Release"] = map[string]interface{}{
|
||||
"JwtSecret": jwtSecretIf.(string),
|
||||
"TokenMaxAge": t.KubeConf.Arg.TokenMaxAge * int64(time.Second),
|
||||
}
|
||||
if err := utils.UpgradeCharts(context.Background(), actionConfig, settings, appKsConfigName,
|
||||
appPath, "", common.NamespaceKubesphereSystem, values, false); err != nil {
|
||||
appPath, "", common.NamespaceKubesphereSystem, nil, false); err != nil {
|
||||
logger.Errorf("failed to install %s chart: %v", appKsConfigName, err)
|
||||
return err
|
||||
}
|
||||
@@ -273,82 +113,6 @@ func (t *CreateKsCoreConfigManifests) Execute(runtime connector.Runtime) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type PacthKsCore struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *PacthKsCore) Execute(runtime connector.Runtime) error {
|
||||
var secretsNum int64
|
||||
var crdNum int64
|
||||
var secretsNumIf, ok = t.PipelineCache.Get(common.CacheSecretsNum)
|
||||
if ok && secretsNumIf != nil {
|
||||
secretsNum = secretsNumIf.(int64)
|
||||
}
|
||||
|
||||
crdNumIf, ok := t.PipelineCache.Get(common.CacheCrdsNUm)
|
||||
if ok && crdNumIf != nil {
|
||||
crdNum = crdNumIf.(int64)
|
||||
}
|
||||
|
||||
var kubectlpath, err = util.GetCommand(common.CommandKubectl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("kubectl not found")
|
||||
}
|
||||
|
||||
if secretsNum == 0 && crdNum != 0 {
|
||||
for _, item := range kscorecrds {
|
||||
var cmd = fmt.Sprintf("%s -n %s annotate --overwrite %s %s meta.helm.sh/release-name=%s && %s -n %s annotate --overwrite %s %s meta.helm.sh/release-namespace=%s && %s -n %s label --overwrite %s %s app.kubernetes.io/managed-by=Helm",
|
||||
kubectlpath, item["ns"], item["kind"], item["resource"], item["release"],
|
||||
kubectlpath, item["ns"], item["kind"], item["resource"], common.NamespaceKubesphereSystem,
|
||||
kubectlpath, item["ns"], item["kind"], item["resource"])
|
||||
|
||||
if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
|
||||
return errors.Wrap(errors.WithStack(err), "patch ks-core crd")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type CheckKsCoreExist struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *CheckKsCoreExist) Execute(runtime connector.Runtime) error {
|
||||
var kubectlpath, err = util.GetCommand(common.CommandKubectl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("kubectl not found")
|
||||
}
|
||||
|
||||
var cmd string
|
||||
|
||||
cmd = fmt.Sprintf("%s -n %s get secrets --field-selector=type=helm.sh/release.v1 | grep ks-core |wc -l",
|
||||
kubectlpath,
|
||||
common.NamespaceKubesphereSystem)
|
||||
stdout, _ := runtime.GetRunner().SudoCmd(cmd, false, false)
|
||||
|
||||
secretNum, err := strconv.ParseInt(stdout, 10, 64)
|
||||
if err != nil {
|
||||
secretNum = 0
|
||||
}
|
||||
|
||||
cmd = fmt.Sprintf("%s get crd users.iam.kubesphere.io | grep 'users.iam.kubesphere.io' |wc -l", kubectlpath)
|
||||
stdout, _ = runtime.GetRunner().SudoCmd(cmd, false, false)
|
||||
|
||||
usersCrdNum, err := strconv.ParseInt(stdout, 10, 64)
|
||||
if err != nil {
|
||||
usersCrdNum = 0
|
||||
}
|
||||
|
||||
logger.Debugf("secretNum: %d, usersCrdNum: %d", secretNum, usersCrdNum)
|
||||
|
||||
t.ModuleCache.Set(common.CacheSecretsNum, secretNum)
|
||||
t.ModuleCache.Set(common.CacheCrdsNUm, usersCrdNum)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeployKsCoreConfigModule struct {
|
||||
common.KubeModule
|
||||
}
|
||||
@@ -356,37 +120,11 @@ type DeployKsCoreConfigModule struct {
|
||||
func (m *DeployKsCoreConfigModule) Init() {
|
||||
m.Name = "DeployKsCoreConfig"
|
||||
|
||||
checkKsCoreExist := &task.RemoteTask{
|
||||
Name: "CheckKsCoreExist",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
new(common.GetMasterNum),
|
||||
},
|
||||
Action: new(CheckKsCoreExist),
|
||||
Parallel: false,
|
||||
Retry: 0,
|
||||
}
|
||||
|
||||
pacthKsCore := &task.RemoteTask{
|
||||
Name: "PacthKsCore",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(PacthKsCore),
|
||||
Parallel: false,
|
||||
Retry: 0,
|
||||
}
|
||||
|
||||
createKsCoreConfigManifests := &task.RemoteTask{
|
||||
Name: "CreateKsCoreConfigManifests",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(CreateKsCoreConfigManifests),
|
||||
Parallel: false,
|
||||
@@ -399,31 +137,17 @@ func (m *DeployKsCoreConfigModule) Init() {
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(CreateKsCoreConfig),
|
||||
Parallel: true,
|
||||
Retry: 0,
|
||||
}
|
||||
|
||||
patchKsCoreStatus := &task.RemoteTask{
|
||||
Name: "PatchKsCoreStatus",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(PatchKsCoreStatus),
|
||||
Parallel: true,
|
||||
Retry: 0,
|
||||
}
|
||||
|
||||
createKsRole := &task.RemoteTask{
|
||||
Name: "CreateKsRole",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(CreateKsRole),
|
||||
Parallel: true,
|
||||
@@ -431,11 +155,8 @@ func (m *DeployKsCoreConfigModule) Init() {
|
||||
}
|
||||
|
||||
m.Tasks = []task.Interface{
|
||||
checkKsCoreExist,
|
||||
pacthKsCore,
|
||||
createKsCoreConfigManifests,
|
||||
createKsCoreConfig,
|
||||
patchKsCoreStatus,
|
||||
createKsRole,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
package plugins
|
||||
|
||||
// ! Y ./ks-monitor/files/federated --> notification-manager
|
||||
// ~ N ./ks-monitor/files/gpu-monitoring
|
||||
// ~ Y ./ks-monitor/files/ks-istio-monitoring
|
||||
// ! Y ./ks-monitor/files/monitoring-dashboard
|
||||
// ! Y ./ks-monitor/files/notification-manager
|
||||
// ! Y ./ks-monitor/files/prometheus/alertmanager
|
||||
// ~ N ./ks-monitor/files/prometheus/etcd
|
||||
// ~ N ./ks-monitor/files/prometheus/grafana
|
||||
// ~ N ./ks-monitor/files/prometheus/kube-prometheus
|
||||
// ! Y ./ks-monitor/files/prometheus/kube-state-metrics
|
||||
// ! Y ./ks-monitor/files/prometheus/kubernetes
|
||||
// ! Y ./ks-monitor/files/prometheus/node-exporter
|
||||
// ! Y ./ks-monitor/files/prometheus/prometheus
|
||||
// ! Y ./ks-monitor/files/prometheus/prometheus-operator
|
||||
// ~ N ./ks-monitor/files/prometheus/thanos-ruler
|
||||
@@ -1,10 +1,7 @@
|
||||
package plugins
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/prepare"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
)
|
||||
|
||||
@@ -24,40 +21,3 @@ func (t *CopyEmbed) Init() {
|
||||
copyEmbed,
|
||||
}
|
||||
}
|
||||
|
||||
type DeployKsPluginsModule struct {
|
||||
common.KubeModule
|
||||
}
|
||||
|
||||
func (t *DeployKsPluginsModule) Init() {
|
||||
t.Name = "DeployKsPlugins"
|
||||
|
||||
checkNodeState := &task.RemoteTask{
|
||||
Name: "CheckNodeState",
|
||||
Hosts: t.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(CheckNodeState),
|
||||
Parallel: false,
|
||||
Retry: 20,
|
||||
Delay: 10 * time.Second,
|
||||
}
|
||||
|
||||
initNs := &task.RemoteTask{
|
||||
Name: "InitKsNamespace",
|
||||
Hosts: t.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(InitNamespace),
|
||||
Parallel: false,
|
||||
}
|
||||
|
||||
t.Tasks = []task.Interface{
|
||||
checkNodeState,
|
||||
initNs,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
package plugins
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
cc "github.com/beclab/Olares/cli/pkg/core/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/core/prepare"
|
||||
"github.com/beclab/Olares/cli/pkg/core/task"
|
||||
"github.com/beclab/Olares/cli/pkg/core/util"
|
||||
)
|
||||
|
||||
type InstallMonitorDashboardCrd struct {
|
||||
common.KubeAction
|
||||
}
|
||||
|
||||
func (t *InstallMonitorDashboardCrd) Execute(runtime connector.Runtime) error {
|
||||
var kubectlpath, err = util.GetCommand(common.CommandKubectl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("kubectl not found")
|
||||
}
|
||||
|
||||
var p = path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, "ks-monitor", "monitoring-dashboard")
|
||||
var cmd = fmt.Sprintf("%s apply -f %s", kubectlpath, p)
|
||||
if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreateMonitorDashboardModule struct {
|
||||
common.KubeModule
|
||||
}
|
||||
|
||||
func (m *CreateMonitorDashboardModule) Init() {
|
||||
m.Name = "CreateMonitorDashboardModule"
|
||||
|
||||
installMonitorDashboardCrd := &task.RemoteTask{
|
||||
Name: "InstallMonitorDashboardCrd",
|
||||
Hosts: m.Runtime.GetHostsByRole(common.Master),
|
||||
Prepare: &prepare.PrepareCollection{
|
||||
new(common.OnlyFirstMaster),
|
||||
new(NotEqualDesiredVersion),
|
||||
},
|
||||
Action: new(InstallMonitorDashboardCrd),
|
||||
Parallel: false,
|
||||
Retry: 0,
|
||||
}
|
||||
|
||||
m.Tasks = []task.Interface{
|
||||
installMonitorDashboardCrd,
|
||||
}
|
||||
|
||||
}
|
||||
@@ -17,14 +17,8 @@
|
||||
package plugins
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/beclab/Olares/cli/pkg/common"
|
||||
"github.com/beclab/Olares/cli/pkg/core/connector"
|
||||
"github.com/beclab/Olares/cli/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type IsCloudInstance struct {
|
||||
@@ -43,72 +37,3 @@ func (p *IsCloudInstance) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
}
|
||||
return p.Not, nil
|
||||
}
|
||||
|
||||
type CheckStorageClass struct {
|
||||
common.KubePrepare
|
||||
}
|
||||
|
||||
func (p *CheckStorageClass) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
var kubectlpath, _ = p.PipelineCache.GetMustString(common.CacheCommandKubectlPath)
|
||||
if kubectlpath == "" {
|
||||
kubectlpath = path.Join(common.BinDir, common.CommandKubectl)
|
||||
}
|
||||
|
||||
var cmd = fmt.Sprintf("%s get sc | awk '{if(NR>1){print $1}}'", kubectlpath)
|
||||
stdout, err := runtime.GetRunner().SudoCmd(cmd, false, true)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(errors.WithStack(err), "get storageclass failed")
|
||||
}
|
||||
if stdout == "" {
|
||||
return false, fmt.Errorf("no storageclass found")
|
||||
}
|
||||
|
||||
cmd = fmt.Sprintf("%s get sc --no-headers", kubectlpath)
|
||||
stdout, err = runtime.GetRunner().SudoCmd(cmd, false, true)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(errors.WithStack(err), "get storageclass failed")
|
||||
}
|
||||
|
||||
if stdout == "" {
|
||||
return false, fmt.Errorf("no storageclass found")
|
||||
}
|
||||
|
||||
if !strings.Contains(stdout, "(default)") {
|
||||
return false, fmt.Errorf("default storageclass was not found")
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type GenerateRedisPassword struct {
|
||||
common.KubePrepare
|
||||
}
|
||||
|
||||
func (p *GenerateRedisPassword) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
pass, err := utils.GeneratePassword(15)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if pass == "" {
|
||||
return false, fmt.Errorf("failed to generate redis password")
|
||||
}
|
||||
|
||||
p.PipelineCache.Set(common.CacheRedisPassword, pass)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type NotEqualDesiredVersion struct {
|
||||
common.KubePrepare
|
||||
}
|
||||
|
||||
func (n *NotEqualDesiredVersion) PreCheck(runtime connector.Runtime) (bool, error) {
|
||||
ksVersion, ok := n.PipelineCache.GetMustString(common.KubeSphereVersion)
|
||||
if !ok {
|
||||
ksVersion = ""
|
||||
}
|
||||
|
||||
if n.KubeConf.Cluster.KubeSphere.Version == ksVersion {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user