Compare commits

...

85 Commits

Author SHA1 Message Date
hys
dde789646a fix: rm kubesphere-monitoring-federated ns 2025-07-14 23:12:15 +08:00
hys
3a6aca6271 app-service, bfl, cli, authelia,kubesphere: support create user by cr 2025-07-14 22:31:08 +08:00
aby913
ee567c270c fix(files): external delete (#1539)
* fix(files): external delete

* login & system-frontend: update login and system-frontend new version

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-07-12 00:23:59 +08:00
hysyeah
4246bcce06 fix: simplify nat permission request (#1538) 2025-07-12 00:23:10 +08:00
eball
fb73d62bd5 bfl: change unmount-api of file-server (#1537) 2025-07-12 00:22:27 +08:00
eball
209f0d15e3 authelia: send notification in user login phase (#1536)
* authelia: send notification in user login phase

* fix: set cookie nil

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-07-12 00:21:48 +08:00
dkeven
78911d44cf feat(gpu): add more metrics in GPU monitor API (#1535) 2025-07-12 00:20:41 +08:00
salt
d964c33c2d feat: Chinese uses both single-character segmentation and word segmen… (#1534)
feat: Chinese uses both single-character segmentation and word segmentation. Word segmentation is used for easier sorting.

Co-authored-by: ubuntu <you@example.com>
2025-07-11 22:00:14 +08:00
salt
2b54795e10 fix: waiting... Both uppercase and lowercase letters can be searched, include special token (#1533)
fix: Both uppercase and lowercase letters can be searched, and special characters can be searched as well.'

Co-authored-by: ubuntu <you@example.com>
2025-07-11 13:20:31 +08:00
aby913
efb4be4fcf fix(files): deletion and other fixes (#1532)
* fix(files): deletion and other fixes

* feat(files & marker): update files and market new version

* feat: update market worker count

* Update bfl_deploy.yaml

---------

Co-authored-by: qq815776412 <815776412@qq.com>
Co-authored-by: icebergtsn <zyh2433219116@gmail.com>
Co-authored-by: eball <liuy102@hotmail.com>
2025-07-11 00:35:46 +08:00
simon
89575096ba feat(knowledge): knowledge & download refactor (#1531)
* knowledge

* knowledge
2025-07-10 21:36:30 +08:00
dkeven
5edba60295 fix(cli): remove state files of olaresd when uninstalling (#1530) 2025-07-10 16:12:23 +08:00
eball
1aecc3495a ci: add a parameter of the code repository (#1529)
* ci: add a parameter of the code repository

* fix: file name bug

* refactor(cli): adjust local release command for vendor repo path

---------

Co-authored-by: dkeven <dkvvven@gmail.com>
2025-07-10 16:11:03 +08:00
salt
2d5c1fc484 feat: hybrid unigram search for title (#1528)
Co-authored-by: ubuntu <you@example.com>
2025-07-09 23:20:44 +08:00
hysyeah
81355f4a1c authelia: send login message to os.users.<olaresid> (#1527) 2025-07-09 23:20:13 +08:00
lovehunter9
2c4e9fb835 feat: seafile add support for avi, wmv, mkv, flv, rmvb (#1526) 2025-07-09 23:19:32 +08:00
dkeven
4947538e68 fix(daemon): apply filters correctly when listing users (#1525) 2025-07-09 23:18:39 +08:00
Peng Peng
21bb10b72b Revert "gpu: refactor gpu scheduler with cpp (#1475)"
This reverts commit ae3e4e6bb9.
2025-07-09 13:26:41 +08:00
wiy
8064c591f2 feat(files): files supports multiple nodes (#1524)
* feat(system-frontend): update files supports multiple nodes

* feat: add files routing gateway

* feat(media-server): surpport for multiple nodes

* feat(files): update files supports multiple nodes

---------

Co-authored-by: eball <liuy102@hotmail.com>
Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
Co-authored-by: aby913 <aby913@163.com>
2025-07-08 23:11:41 +08:00
Calvin W.
1073575a1d docs: add readmes for Olares components (#1522)
* docs: add readmes for Olares components

* merge with latest upstream
2025-07-08 21:34:05 +08:00
dkeven
4cf977f6df fix(ci): specify repo when checkout code for PR (#1523) 2025-07-08 17:53:46 +08:00
hysyeah
0dda3811c7 bfl, authelia, lldap: change access-token expiry duration, support refresh and revoke user token (#1521)
bfl, authelia, lldap: change access-token expiry duration and support refresh;revoke user token after reset password
2025-07-08 00:03:59 +08:00
hysyeah
2632b45fc2 bfl, app-service, system-frontend/dashboard: remove analytics (#1520)
* bfl, app-service: remove analytics

* fix(system-frontend): remove dashboard analytics

* fix(system-frontend): update system-frontend version

---------

Co-authored-by: yyh <24493052+yongheng2016@users.noreply.github.com>
2025-07-08 00:03:11 +08:00
berg
ae3f3d6a20 market: v1.12 new category and fix some bugs. (#1518)
feat: v1.12 new category and fix some bugs.
2025-07-05 00:55:37 +08:00
eball
4f3b824f48 authelia: update oidc cert (#1516) 2025-07-05 00:54:44 +08:00
hysyeah
9efa6df969 tapr: add default perm for nats subject (#1515)
fix: add default perm for nats subject
2025-07-05 00:54:01 +08:00
dkeven
045dfc11bc perf(ci): ignore more archs when releasing cli (#1514)
* perf(ci): ignore more archs when releasing cli

* Update auth_backend_deploy.yaml

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-07-04 18:45:36 +08:00
hysyeah
9913d29f81 studio-server: move studio server to os-framework (#1513) 2025-07-04 00:42:39 +08:00
berg
0ccf091aff market, settings: fix the problem of theme settings & settings apps status & market terminusInfo error (#1512)
feat: update market frontend and backend version
2025-07-04 00:41:54 +08:00
dkeven
01f3b27b8c feat(upgrade): update sysconf for specific versions (#1511) 2025-07-04 00:41:12 +08:00
dkeven
475faafec4 fix(cli): clear upgrade-related state files when uninstalling (#1510) 2025-07-03 21:01:07 +08:00
berg
31ab286a4b market, profile: fix display error in avatar selector's image list and clear market data when terminusId changed (#1509)
feat: update market frontend and backend version
2025-07-03 00:51:40 +08:00
eball
c9b4a40a1c olares: refactor installation manifest (#1508)
* olares: refactor installation manifest

* fix: file name typo

* fix: add http accept header

* fix: bug

* fix: bug

* fix: import json
2025-07-03 00:50:09 +08:00
simon
da19d00d08 fix(download): fix download task operation & reduce youtube API requests (#1507)
download
2025-07-02 21:49:49 +08:00
dkeven
49d233a55b fix(cli): also update local reserved ports when modifying sysconf (#1506) 2025-07-02 21:49:23 +08:00
dkeven
300aaa0753 fix(daemon): handle empty pid files when check process running (#1505) 2025-07-02 21:48:56 +08:00
berg
962b220440 market: add local chart upload socket event & update menu and add search function (#1504)
* fix: omit to gen entrance url before active

* feat: update market frontend and backend version

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-07-01 23:44:31 +08:00
salt
4da25bca36 fix: when need physical path, miss use frontend_resource_uri (#1500)
* fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search without result 2.when generate_monitor_folder_path_list for convert_from_physical_path_to_frontend_resource_uri not propagate error

* fix: search3 fix when need physical path miss use frontend_resource_ui

* fix: use wrong image

---------

Co-authored-by: ubuntu <you@example.com>
2025-07-01 23:32:34 +08:00
dkeven
42eff16695 feat(cli): config endpoint_pod_names in coredns when installing (#1503) 2025-07-01 20:35:42 +08:00
dkeven
450aa19dfc fix(cli): also reserve local ports for l4-proxied service (#1502) 2025-07-01 20:35:20 +08:00
eball
c750f6f85b infisical: create user error (#1501) 2025-07-01 20:33:18 +08:00
berg
bf57da0fa4 market: waiting for the app-service to start & displays the failed status of the installation button. (#1499)
feat: update market version
2025-06-30 23:57:57 +08:00
0x7fffff92
5df379f286 feat(headscale): let headscale run on the master node like l4-bfl-proxy (#1498)
Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-06-30 21:02:26 +08:00
dkeven
cfb54fb974 feat(cli): auto enable GPU when adding new node (#1497) 2025-06-30 21:02:00 +08:00
eball
9515c05bb6 bfl: do not change owner when restart (#1496) 2025-06-30 21:01:25 +08:00
dkeven
bdcd924e50 chore(cli): remove unused DeleteCache arg and module (#1495) 2025-06-30 21:01:10 +08:00
eball
e9eb218348 olaresd: refresh user expiring certs (#1493)
* feat: refresh user expiring certs

* fix: admin user not found
2025-06-30 21:00:32 +08:00
eball
9746e2c110 infisical: crash when user not found (#1492) 2025-06-30 21:00:14 +08:00
berg
27d9715292 market: multi user multi source (#1490)
* multi user & multi source & pre-render and collect image download progress & custom render variants

* support GlobalEnvs

* feat: release system-frontend: v1.3.88

* feat: app-service, studio-server

* feat: update market backend version

---------

Co-authored-by: Sai <kldtks@live.com>
Co-authored-by: hys <hysyeah@gmail.com>
2025-06-28 16:46:44 +08:00
salt
10d6c2a6fa fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search w… (#1491)
fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search without result 2.when generate_monitor_folder_path_list for convert_from_physical_path_to_frontend_resource_uri not propagate error

Co-authored-by: ubuntu <you@example.com>
2025-06-28 16:46:10 +08:00
eball
57d8a55d8d authelia: add user list api (#1489) 2025-06-27 22:07:27 +08:00
dkeven
b9a227acd7 fix(manifest): update the missed reverse proxy image version (#1488) 2025-06-27 11:27:07 +08:00
wiy
e6115794ce feat(system-frontend): update system-frontend new version to v1.3.86 (#1487) 2025-06-27 11:24:02 +08:00
dkeven
22739c90db fix(manifest): add missing app author label to argo deploy (#1486) 2025-06-27 11:23:29 +08:00
dkeven
6fac46130a perf(gpu): use our fork of dcgm-exporter with lower memory consumption (#1485) 2025-06-27 11:23:07 +08:00
simon
e19e049e7d feat(knowledge): add youtube feed and optimize the file name for aria2 download (#1481)
knowledge v0.12.12
2025-06-26 15:53:40 +08:00
wiy
1d0c20d6ad fix(system-frontend): copy nginx address error (#1484) 2025-06-26 15:16:18 +08:00
dkeven
397590d402 fix(cli): set health host of felix to lo addr explicitly (#1483) 2025-06-26 15:15:53 +08:00
hysyeah
fc1a59b79b ks,cli: remove host_ip label from some metric (#1482)
ks,cli: remove host_ip label from metric
2025-06-26 00:05:10 +08:00
eball
3dea149790 olaresd: network interface api modifed and nvstream mdns bug fix (#1480) 2025-06-26 00:04:10 +08:00
0x7fffff92
9d6834faa1 feat(tailscale): let tailscale run on the node where headscale is run… (#1479)
feat(tailscale): let tailscale run on the node where headscale is running

Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-06-26 00:03:51 +08:00
dkeven
bef61309a3 feat(cli): set explicit image gc policy when installing K8s (#1478) 2025-06-26 00:03:04 +08:00
salt
cf52a59ef7 feat: search3 support multiple node for cache and external, run as daemonset (#1477)
* feat: search3 support multiple node for cache and external, and search3monitor run in daemon set

* fix: fix search3 iniialization fail because of not exist table __diesel_schema_migrations

---------

Co-authored-by: ubuntu <you@example.com>
2025-06-26 00:02:36 +08:00
wiy
80023be159 feat(system-frontend): merge system apps main (#1476)
* feat(system-frontend): merge apps into one image

* fix(system-frontend): update image version to v1.3.85

---------

Co-authored-by: yyh <24493052+yongheng2016@users.noreply.github.com>
2025-06-26 00:02:03 +08:00
eball
ae3e4e6bb9 gpu: refactor gpu scheduler with cpp (#1475) 2025-06-24 23:29:13 +08:00
dkeven
8c9e4d532b fix(daemon): upgrade runc dependency to fix vulnerability (#1473) 2025-06-24 21:33:43 +08:00
eball
3c48afb5b5 olares: move gpu package (#1474)
* olares: move gpu package

* fix: hami webui image
2025-06-24 21:32:37 +08:00
dkeven
3d22a01eef fix(cli): do not wait for recreation of pods without owner when changing ip (#1472) 2025-06-23 23:26:41 +08:00
eball
d6263bacca authelia: remove httponly option from set-cookie (#1471) 2025-06-23 23:25:55 +08:00
hysyeah
3b070ea095 node-exporter: add pcie_version,sata_version label for disk metric (#1470)
node-exporter: add pcie_version,sata_version label for node_disk_smartctl_info metric
2025-06-23 23:25:19 +08:00
dkeven
82b715635b feat: build and use hami-webui images using our own repo (#1469) 2025-06-23 23:24:38 +08:00
Peng Peng
1d4494c8d7 feat(user-service, notification, analytics): put prisma library under node_moudles in dockers (#1468)
feat: add prisma dependency to the docker
2025-06-23 11:22:31 +08:00
simon
56f5c07229 feat(knowledge): add ebook , pdf download and article extractor (#1467)
knowledge v0.12.11
2025-06-21 02:08:19 +08:00
berg
697ac440c7 wise, studio, desktop, dashboard: update system frontend version to v1.3.82 (#1466)
feat: update system frontend version to v1.3.82
2025-06-21 02:07:58 +08:00
eball
f0edbc08a6 gpu: bump libvgpu.so version (#1465) 2025-06-20 20:31:41 +08:00
eball
001607e840 authelia: add SameSite option to set-cookie (#1464) 2025-06-20 20:31:23 +08:00
dkeven
e8f525daca refactor(daemon): new scheme for upgrade APIs and operations (#1463) 2025-06-20 20:30:46 +08:00
salt
6d6f7705c9 feat: return search3 result with standard resource_urri (#1462)
* fix: fix search3 escape error

* feat: for search return resource_uri with standard mode

---------

Co-authored-by: Ubuntu <ubuntu@localhost.localdomain>
2025-06-20 11:18:01 +08:00
wiy
46b7fa0079 feat(system-frontend): update desktop files search; update dashboard chart components; (#1461) 2025-06-20 00:27:06 +08:00
hysyeah
793a62396b lldap,system-server: pub event async; chanage secret ns (#1460)
lldap,system-server: pub event async
2025-06-20 00:26:44 +08:00
eball
7cb4975f5b authelia: replace http session with lldap jwt (#1459)
* authelia: replace http session with lldap jwt

* fix: remove check auth

* fix: set default configuration

* fix: revert pg and nats configuration
2025-06-20 00:26:12 +08:00
eball
bfaf647ad1 tapr, cli:add extension vchord to pg and decrease k3s image fs threshold (#1458)
* tapr, cli:add extension vchord to pg and decrease k3s image fs threshold

* fix: image tag
2025-06-19 23:18:56 +08:00
hysyeah
23d3dc58ed lldap,tapr: add totp api (#1456) 2025-06-19 00:20:18 +08:00
yyh
7bf07f36b7 feat(system-frontend): update dashboard, control hub, and settings image (#1455)
* feat(system-frontend): update dashboard, control hub, and settings images to v1.3.80

* feat(ks_server): add environment variables for NODE_IP and TERMINUSD_HOST
2025-06-19 00:19:17 +08:00
eball
7e7117fc3a cli, daemon: persist the user name to the Olares release file (#1454) 2025-06-19 00:18:38 +08:00
243 changed files with 3808 additions and 4020 deletions

View File

@@ -65,6 +65,7 @@ jobs:
with:
version: ${{ needs.test-version.outputs.version }}
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
upload-daemon:
needs: test-version
@@ -73,6 +74,7 @@ jobs:
with:
version: ${{ needs.test-version.outputs.version }}
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
push-image:
runs-on: ubuntu-latest
@@ -132,6 +134,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.test-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -156,6 +159,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.test-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64

View File

@@ -11,27 +11,13 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
coscmd config -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -42,28 +28,12 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
coscmd config -m 10 -p 10 -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64

View File

@@ -11,22 +11,6 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
coscmd config -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
@@ -42,23 +26,6 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
coscmd config -m 10 -p 10 -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View File

@@ -8,7 +8,17 @@ on:
required: true
ref:
type: string
repository:
type: string
workflow_dispatch:
inputs:
version:
type: string
required: true
ref:
type: string
repository:
type: string
jobs:
goreleaser:
runs-on: ubuntu-22.04
@@ -18,6 +28,7 @@ jobs:
with:
fetch-depth: 1
ref: ${{ inputs.ref }}
repository: ${{ inputs.repository }}
- name: Add Local Git Tag For GoReleaser
run: git tag ${{ inputs.version }}
@@ -51,6 +62,5 @@ jobs:
AWS_DEFAULT_REGION: "us-east-1"
run: |
cd cli/output && for file in *.tar.gz; do
aws s3 cp "$file" s3://terminus-os-install/$file --acl=public-read
# coscmd upload $file /$file
aws s3 cp "$file" s3://terminus-os-install${{ secrets.REPO_PATH }}${file} --acl=public-read
done

View File

@@ -8,7 +8,17 @@ on:
required: true
ref:
type: string
repository:
type: string
workflow_dispatch:
inputs:
version:
type: string
required: true
ref:
type: string
repository:
type: string
jobs:
goreleaser:
@@ -19,6 +29,7 @@ jobs:
with:
fetch-depth: 1
ref: ${{ inputs.ref }}
repository: ${{ inputs.repository }}
- name: Add Local Git Tag For GoReleaser
run: git tag ${{ inputs.version }}
@@ -54,5 +65,5 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
cd daemon/output && for file in *.tar.gz; do
aws s3 cp "$file" s3://terminus-os-install/$file --acl=public-read
aws s3 cp "$file" s3://terminus-os-install${{ secrets.REPO_PATH }}${file} --acl=public-read
done

View File

@@ -77,6 +77,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.daily-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -94,6 +95,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.daily-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64
@@ -121,8 +123,8 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
md5sum install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz > install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt s3://terminus-os-install/install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz s3://terminus-os-install/install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz --acl=public-read && \
echo "md5sum=$(awk '{print $1}' install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt)" >> "$GITHUB_OUTPUT"

View File

@@ -80,8 +80,8 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
md5sum install-wizard-v${{ github.event.inputs.tags }}.tar.gz > install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt s3://terminus-os-install/install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.tar.gz s3://terminus-os-install/install-wizard-v${{ github.event.inputs.tags }}.tar.gz --acl=public-read
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.tar.gz s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.tar.gz --acl=public-read
release:
runs-on: ubuntu-latest
@@ -101,7 +101,7 @@ jobs:
- name: Get checksum
id: vars
run: |
echo "version_md5sum=$(curl -sSfL https://dc3p1870nn3cj.cloudfront.net/install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt|awk '{print $1}')" >> $GITHUB_OUTPUT
echo "version_md5sum=$(curl -sSfL https://dc3p1870nn3cj.cloudfront.net${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt|awk '{print $1}')" >> $GITHUB_OUTPUT
- name: Update checksum
uses: eball/write-tag-to-version-file@latest

View File

@@ -108,20 +108,15 @@ Olares has been tested and verified on the following Linux platforms:
To get started with Olares on your own device, follow the [Getting Started Guide](https://docs.olares.com/manual/get-started/) for step-by-step instructions.
## Project navigation
> [!NOTE]
> We are currently consolidating Olares subproject code into this repository. This process may take a few months. Once finished, you will get a comprehensive view of the entire Olares system here.
This section lists the main directories in the Olares repository:
* **`apps`**: Contains the code for system applications, primarily for `larepass`.
* **`cli`**: Contains the code for `olares-cli`, the command-line interface tool for Olares.
* **`daemon`**: Contains the code for `olaresd`, the system daemon process.
* **[`apps`](./apps)**: Contains the code for system applications, primarily for `larepass`.
* **[`cli`](./cli)**: Contains the code for `olares-cli`, the command-line interface tool for Olares.
* **[`daemon`](./daemon)**: Contains the code for `olaresd`, the system daemon process.
* **`docs`**: Contains documentation for the project.
* **`framework`**: Contains the Olares system services.
* **`infrastructure`**: Contains code related to infrastructure components such as computing, storage, networking, and GPUs.
* **`platform`**: Contains code for cloud-native components like databases and message queues.
* **[`framework`](./framework)**: Contains the Olares system services.
* **[`infrastructure`](./infrastructure)**: Contains code related to infrastructure components such as computing, storage, networking, and GPUs.
* **[`platform`](./platform)**: Contains code for cloud-native components like databases and message queues.
* **`vendor`**: Contains code from third-party hardware vendors.
## Contributing to Olares

View File

@@ -110,19 +110,15 @@ Olares 已在以下 Linux 平台完成测试与验证:
参考[快速上手指南](https://docs.olares.cn/zh/manual/get-started/)安装并激活 Olares。
## 项目目录
> [!NOTE]
> 我们正将 Olares 子项目的代码移动到当前仓库。此过程可能会持续数月。届时您就可以通过本仓库了解 Olares 系统的全貌。
Olares 代码库中的主要目录如下:
* **`apps`**: 用于存放系统应用,主要是 `larepass` 的代码。
* **`cli`**: 用于存放 `olares-cli`Olares 的命令行界面工具)的代码。
* **`daemon`**: 用于存放 `olaresd`(系统守护进程)的代码。
* **[`apps`](./apps)**: 用于存放系统应用,主要是 `larepass` 的代码。
* **[`cli`](./cli)**: 用于存放 `olares-cli`Olares 的命令行界面工具)的代码。
* **[`daemon`](./daemon)**: 用于存放 `olaresd`(系统守护进程)的代码。
* **`docs`**: 用于存放 Olares 项目的文档。
* **`framework`**: 用来存放 Olares 系统服务代码。
* **`infrastructure`**: 用于存放计算存储网络GPU 等基础设施的代码。
* **`platform`**: 用于存放数据库、消息队列等云原生组件的代码。
* **[`framework`](./framework)**: 用来存放 Olares 系统服务代码。
* **[`infrastructure`](./infrastructure)**: 用于存放计算存储网络GPU 等基础设施的代码。
* **[`platform`](./platform)**: 用于存放数据库、消息队列等云原生组件的代码。
* **`vendor`**: 用于存放来自第三方硬件供应商的代码。
## 社区贡献

View File

@@ -110,18 +110,15 @@ Olaresは以下のLinuxプラットフォームで動作検証を完了してい
## プロジェクトナビゲーション
> [!NOTE]
> 現在、Olaresのサブプロジェクトのコードを当リポジトリへ移行する作業を進めています。この作業が完了するまでには数ヶ月を要する見込みです。完了後には、当リポジトリを通じてOlaresシステムの全貌をご覧いただけるようになります。
このセクションでは、Olares リポジトリ内の主要なディレクトリをリストアップしています:
* **`apps`**: システムアプリケーションのコードが含まれており、主に `larepass` 用です。
* **`cli`**: Olares のコマンドラインインターフェースツールである `olares-cli` のコードが含まれています。
* **`daemon`**: システムデーモンプロセスである `olaresd` のコードが含まれています。
* **[`apps`](./apps)**: システムアプリケーションのコードが含まれており、主に `larepass` 用です。
* **[`cli`](./cli)**: Olares のコマンドラインインターフェースツールである `olares-cli` のコードが含まれています。
* **[`daemon`](./daemon)**: システムデーモンプロセスである `olaresd` のコードが含まれています。
* **`docs`**: プロジェクトのドキュメントが含まれています。
* **`framework`**: Olares システムサービスが含まれています。
* **`infrastructure`**: コンピューティング、ストレージ、ネットワーキング、GPU などのインフラストラクチャコンポーネントに関連するコードが含まれています。
* **`platform`**: データベースやメッセージキューなどのクラウドネイティブコンポーネントのコードが含まれています。
* **[`framework`](./framework)**: Olares システムサービスが含まれています。
* **[`infrastructure`](./infrastructure)**: コンピューティング、ストレージ、ネットワーキング、GPU などのインフラストラクチャコンポーネントに関連するコードが含まれています。
* **[`platform`](./platform)**: データベースやメッセージキューなどのクラウドネイティブコンポーネントのコードが含まれています。
* **`vendor`**: サードパーティのハードウェアベンダーからのコードが含まれています。
## Olaresへの貢献

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,26 +0,0 @@
apiVersion: v2
name: appstore
description: A Helm chart for Kubernetes
maintainers:
- name: bytetrade
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "appstore.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "appstore.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "appstore.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "appstore.labels" -}}
helm.sh/chart: {{ include "appstore.chart" . }}
{{ include "appstore.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "appstore.selectorLabels" -}}
app.kubernetes.io/name: {{ include "appstore.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "appstore.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "appstore.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,353 +0,0 @@
{{- $market_secret := (lookup "v1" "Secret" .Release.Namespace "market-secrets") -}}
{{- $redis_password := "" -}}
{{ if $market_secret -}}
{{ $redis_password = (index $market_secret "data" "redis-passwords") }}
{{ else -}}
{{ $redis_password = randAlphaNum 16 | b64enc }}
{{- end -}}
{{- $market_backend_nats_secret := (lookup "v1" "Secret" .Release.Namespace "market-backend-nats-secret") -}}
{{- $nats_password := "" -}}
{{ if $market_backend_nats_secret -}}
{{ $nats_password = (index $market_backend_nats_secret "data" "nats_password") }}
{{ else -}}
{{ $nats_password = randAlphaNum 16 | b64enc }}
{{- end -}}
---
apiVersion: v1
kind: Secret
metadata:
name: market-backend-nats-secret
namespace: {{ .Release.Namespace }}
type: Opaque
data:
nats_password: {{ $nats_password }}
---
apiVersion: v1
kind: Secret
metadata:
name: market-secrets
namespace: {{ .Release.Namespace }}
type: Opaque
data:
redis-passwords: {{ $redis_password }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: market-deployment
namespace: {{ .Release.Namespace }}
labels:
app: appstore
applications.app.bytetrade.io/author: bytetrade.io
spec:
replicas: 1
selector:
matchLabels:
app: appstore
template:
metadata:
labels:
app: appstore
io.bytetrade.app: "true"
annotations:
instrumentation.opentelemetry.io/inject-go: "olares-instrumentation"
instrumentation.opentelemetry.io/go-container-names: "appstore-backend"
instrumentation.opentelemetry.io/otel-go-auto-target-exe: "/opt/app/market"
spec:
priorityClassName: "system-cluster-critical"
initContainers:
- args:
- -it
- authelia-backend.os-framework:9091
image: owncloudci/wait-for:latest
imagePullPolicy: IfNotPresent
name: check-auth
- name: terminus-sidecar-init
image: openservicemesh/init:v1.2.3
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
capabilities:
add:
- NET_ADMIN
runAsNonRoot: false
runAsUser: 0
command:
- /bin/sh
- -c
- |
iptables-restore --noflush <<EOF
# sidecar interception rules
*nat
:PROXY_IN_REDIRECT - [0:0]
:PROXY_INBOUND - [0:0]
-A PROXY_IN_REDIRECT -p tcp -j REDIRECT --to-port 15003
-A PROXY_INBOUND -p tcp --dport 15000 -j RETURN
-A PROXY_INBOUND -p tcp -j PROXY_IN_REDIRECT
-A PREROUTING -p tcp -j PROXY_INBOUND
COMMIT
EOF
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
containers:
- name: appstore-backend
image: beclab/market-backend:v0.3.12
imagePullPolicy: IfNotPresent
ports:
- containerPort: 81
env:
- name: OS_SYSTEM_SERVER
value: system-server.user-system-{{ .Values.bfl.username }}
- name: OS_APP_SECRET
value: '{{ .Values.os.appstore.appSecret }}'
- name: OS_APP_KEY
value: {{ .Values.os.appstore.appKey }}
- name: APP_SOTRE_SERVICE_SERVICE_HOST
value: appstore-server-prod.bttcdn.com
- name: MARKET_PROVIDER
value: '{{ .Values.os.appstore.marketProvider }}'
- name: APP_SOTRE_SERVICE_SERVICE_PORT
value: '443'
- name: APP_SERVICE_SERVICE_HOST
value: app-service.os-framework
- name: APP_SERVICE_SERVICE_PORT
value: '6755'
- name: REPO_URL_PORT
value: "82"
- name: REDIS_ADDRESS
value: 'redis-cluster-proxy.user-system-{{ .Values.bfl.username }}:6379'
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: market-secrets
key: redis-passwords
- name: REDIS_DB_NUMBER
value: '0'
- name: REPO_URL_HOST
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NATS_HOST
value: nats.user-system-{{ .Values.bfl.username }}
- name: NATS_PORT
value: '4222'
- name: NATS_USERNAME
value: os-market-backend
- name: NATS_PASSWORD
valueFrom:
secretKeyRef:
name: market-backend-nats-secret
key: nats_password
- name: NATS_SUBJECT_USER_APPLICATION
value: terminus.user.application.{{ .Values.bfl.username}}
volumeMounts:
- name: opt-data
mountPath: /opt/app/data
- name: terminus-envoy-sidecar
image: bytetrade/envoy:v1.25.11
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
runAsUser: 1000
ports:
- name: proxy-admin
containerPort: 15000
- name: proxy-inbound
containerPort: 15003
volumeMounts:
- name: terminus-sidecar-config
readOnly: true
mountPath: /etc/envoy/envoy.yaml
subPath: envoy.yaml
command:
- /usr/local/bin/envoy
- --log-level
- debug
- -c
- /etc/envoy/envoy.yaml
env:
- name: POD_UID
valueFrom:
fieldRef:
fieldPath: metadata.uid
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: terminus-ws-sidecar
image: 'beclab/ws-gateway:v1.0.5'
command:
- /ws-gateway
env:
- name: WS_PORT
value: '81'
- name: WS_URL
value: /app-store/v1/websocket/message
resources: { }
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
volumes:
- name: terminus-sidecar-config
configMap:
name: sidecar-ws-configs
items:
- key: envoy.yaml
path: envoy.yaml
- name: opt-data
hostPath:
path: '{{ .Values.userspace.appData}}/appstore/data'
type: DirectoryOrCreate
- name: app
emptyDir: {}
- name: nginx-confd
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: appstore-service
namespace: {{ .Release.Namespace }}
spec:
selector:
app: appstore
type: ClusterIP
ports:
- protocol: TCP
name: appstore-backend
port: 81
targetPort: 81
---
apiVersion: sys.bytetrade.io/v1alpha1
kind: ApplicationPermission
metadata:
name: appstore
namespace: user-system-{{ .Values.bfl.username }}
spec:
app: appstore
appid: appstore
key: {{ .Values.os.appstore.appKey }}
secret: {{ .Values.os.appstore.appSecret }}
permissions:
- dataType: event
group: message-disptahcer.system-server
ops:
- Create
version: v1
- dataType: app
group: service.bfl
ops:
- UserApps
version: v1
status:
state: active
---
apiVersion: sys.bytetrade.io/v1alpha1
kind: ProviderRegistry
metadata:
name: appstore-backend-provider
namespace: user-system-{{ .Values.bfl.username }}
spec:
dataType: app
deployment: market
description: app store provider
endpoint: appstore-service.{{ .Release.Namespace }}:81
group: service.appstore
kind: provider
namespace: {{ .Release.Namespace }}
opApis:
- name: InstallDevApp
uri: /app-store/v1/applications/provider/installdev
- name: UninstallDevApp
uri: /app-store/v1/applications/provider/uninstalldev
version: v1
status:
state: active
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: market-redis
namespace: {{ .Release.Namespace }}
spec:
app: market
appNamespace: {{ .Release.Namespace }}
middleware: redis
redis:
password:
valueFrom:
secretKeyRef:
key: redis-passwords
name: market-secrets
namespace: market
---
apiVersion: v1
kind: Service
metadata:
name: appstore-svc
namespace: {{ .Release.Namespace }}
spec:
type: ClusterIP
selector:
app: appstore
ports:
- name: "appstore-backend"
protocol: TCP
port: 81
targetPort: 81
- name: "appstore-websocket"
protocol: TCP
port: 40010
targetPort: 40010
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: market-backend-nats
namespace: {{ .Release.Namespace }}
spec:
app: market-backend
appNamespace: os
middleware: nats
nats:
password:
valueFrom:
secretKeyRef:
key: nats_password
name: market-backend-nats-secret
refs:
- appName: user-service
appNamespace: os
subjects:
- name: "application.*"
perm:
- pub
- sub
- appName: user-service
appNamespace: os
subjects:
- name: "market.*"
perm:
- pub
- sub
user: os-market-backend

View File

@@ -1,44 +0,0 @@
bfl:
nodeport: 30883
nodeport_ingress_http: 30083
nodeport_ingress_https: 30082
username: 'test'
url: 'test'
nodeName: test
pvc:
userspace: test
userspace:
userData: test/Home
appData: test/Data
appCache: test
dbdata: test
docs:
nodeport: 30881
desktop:
nodeport: 30180
os:
portfolio:
appKey: '${ks[0]}'
appSecret: test
vault:
appKey: '${ks[0]}'
appSecret: test
desktop:
appKey: '${ks[0]}'
appSecret: test
message:
appKey: '${ks[0]}'
appSecret: test
rss:
appKey: '${ks[0]}'
appSecret: test
search:
appKey: '${ks[0]}'
appSecret: test
search2:
appKey: '${ks[0]}'
appSecret: test
appstore:
marketProvider: ''
kubesphere:
redis_password: ""

View File

@@ -1,294 +1,13 @@
{{- $namespace := printf "%s%s" "user-system-" .Values.bfl.username -}}
{{- $studio_secret := (lookup "v1" "Secret" $namespace "studio-secrets") -}}
{{- $pg_password := "" -}}
{{ if $studio_secret -}}
{{ $pg_password = (index $studio_secret "data" "pg_password") }}
{{ else -}}
{{ $pg_password = randAlphaNum 16 | b64enc }}
{{- end -}}
---
apiVersion: v1
kind: Secret
metadata:
name: studio-secrets
namespace: user-system-{{ .Values.bfl.username }}
type: Opaque
data:
pg_password: {{ $pg_password }}
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: studio-pg
namespace: user-system-{{ .Values.bfl.username }}
spec:
app: studio
appNamespace: {{ .Release.Namespace }}
middleware: postgres
postgreSQL:
user: studio_{{ .Values.bfl.username }}
password:
valueFrom:
secretKeyRef:
key: pg_password
name: studio-secrets
databases:
- name: studio
---
apiVersion: v1
kind: Service
metadata:
name: studio-server
namespace: {{ .Release.Namespace }}
namespace: user-space-{{ .Values.bfl.username }}
spec:
selector:
app: studio-server
type: ExternalName
externalName: studio-server.os-framework.svc.cluster.local
ports:
- protocol: TCP
name: studio-server
port: 8080
targetPort: 8088
name: http
- protocol: TCP
port: 8083
targetPort: 8083
name: https
---
kind: Service
apiVersion: v1
metadata:
name: chartmuseum-studio
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8888
selector:
app: studio-server
---
apiVersion: v1
kind: ConfigMap
metadata:
name: studio-san-cnf
namespace: {{ .Release.Namespace }}
data:
san.cnf: |
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no
[req_distinguished_name]
countryName = CN
stateOrProvinceName = Beijing
localityName = Beijing
0.organizationName = bytetrade
commonName = studio-server.{{ .Release.Namespace }}.svc
[v3_req]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @bytetrade
[bytetrade]
DNS.1 = studio-server.{{ .Release.Namespace }}.svc
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: studio-server
namespace: {{ .Release.Namespace }}
labels:
app: studio-server
applications.app.bytetrade.io/author: bytetrade.io
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: studio-server
template:
metadata:
labels:
app: studio-server
spec:
serviceAccountName: bytetrade-controller
volumes:
- name: chart
hostPath:
type: DirectoryOrCreate
path: '{{ .Values.userspace.appData}}/studio/Chart'
- name: data
hostPath:
type: DirectoryOrCreate
path: '{{ .Values.userspace.appData }}/studio/Data'
- name: storage-volume
hostPath:
path: '{{ .Values.userspace.appData }}/studio/helm-repo-dev'
type: DirectoryOrCreate
- name: config-san
configMap:
name: studio-san-cnf
items:
- key: san.cnf
path: san.cnf
- name: certs
emptyDir: {}
initContainers:
- name: init-chmod-data
image: busybox:1.28
imagePullPolicy: IfNotPresent
command:
- sh
- '-c'
- |
chown -R 1000:1000 /home/coder
chown -R 65532:65532 /charts
chown -R 65532:65532 /data
securityContext:
runAsUser: 0
resources: { }
volumeMounts:
- name: storage-volume
mountPath: /home/coder
- name: chart
mountPath: /charts
- name: data
mountPath: /data
- name: generate-certs
image: beclab/openssl:v3
imagePullPolicy: IfNotPresent
command: [ "/bin/sh", "-c" ]
args:
- |
openssl genrsa -out /etc/certs/ca.key 2048
openssl req -new -x509 -days 3650 -key /etc/certs/ca.key -out /etc/certs/ca.crt \
-subj "/CN=bytetrade CA/O=bytetrade/C=CN"
openssl req -new -newkey rsa:2048 -nodes \
-keyout /etc/certs/server.key -out /etc/certs/server.csr \
-config /etc/san/san.cnf
openssl x509 -req -days 3650 -in /etc/certs/server.csr \
-CA /etc/certs/ca.crt -CAkey /etc/certs/ca.key \
-CAcreateserial -out /etc/certs/server.crt \
-extensions v3_req -extfile /etc/san/san.cnf
chown -R 65532 /etc/certs/*
volumeMounts:
- name: config-san
mountPath: /etc/san
- name: certs
mountPath: /etc/certs
containers:
- name: studio
image: beclab/studio-server:v0.1.51
imagePullPolicy: IfNotPresent
args:
- server
ports:
- name: port
containerPort: 8088
protocol: TCP
- name: ssl-port
containerPort: 8083
protocol: TCP
volumeMounts:
- name: chart
mountPath: /charts
- name: data
mountPath: /data
- mountPath: /etc/certs
name: certs
lifecycle:
preStop:
exec:
command:
- "/studio"
- "clean"
env:
- name: BASE_DIR
value: /charts
- name: OS_API_KEY
value: {{ .Values.os.studio.appKey }}
- name: OS_API_SECRET
value: {{ .Values.os.studio.appSecret }}
- name: OS_SYSTEM_SERVER
value: system-server.user-system-{{ .Values.bfl.username }}
- name: NAME_SPACE
value: {{ .Release.Namespace }}
- name: OWNER
value: '{{ .Values.bfl.username }}'
- name: DB_HOST
value: citus-master-svc.user-system-{{ .Values.bfl.username }}
- name: DB_USERNAME
value: studio_{{ .Values.bfl.username }}
- name: DB_PASSWORD
value: "{{ $pg_password | b64dec }}"
- name: DB_NAME
value: user_space_{{ .Values.bfl.username }}_studio
- name: DB_PORT
value: "5432"
resources:
requests:
cpu: "50m"
memory: 100Mi
limits:
cpu: "0.5"
memory: 1000Mi
- name: chartmuseum
image: aboveos/helm-chartmuseum:v0.15.0
args:
- '--port=8888'
- '--storage-local-rootdir=/storage'
ports:
- name: http
containerPort: 8888
protocol: TCP
env:
- name: CHART_POST_FORM_FIELD_NAME
value: chart
- name: DISABLE_API
value: 'false'
- name: LOG_JSON
value: 'true'
- name: PROV_POST_FORM_FIELD_NAME
value: prov
- name: STORAGE
value: local
resources:
requests:
cpu: "50m"
memory: 100Mi
limits:
cpu: 1000m
memory: 512Mi
volumeMounts:
- name: storage-volume
mountPath: /storage
livenessProbe:
httpGet:
path: /health
port: http
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: http
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
targetPort: 8080

View File

@@ -0,0 +1,20 @@
# Olares Apps
## Overview
This directory contains the code for system applications, primarily for LarePass. The following are the pre-installed system applications that offer tools for managing files, knowledge, passwords, and the system itself.
## System Applications Overview
| Application | Description |
| --- | --- |
| Files | A file management app that manages and synchronizes files across devices and sources, enabling seamless sharing and access. |
| Wise | A local-first and AI-native modern reader that helps to collect, read, and manage information from various platforms. Users can run self-hosted recommendation algorithms to filter and sort online content. |
| Vault | A secure password manager for storing and managing sensitive information across devices. |
| Market | A decentralized and permissionless app store for installing, uninstalling, and updating applications and recommendation algorithms. |
| Desktop | A hub for managing and interacting with installed applications. File and application searching are also supported. |
| Profile | An app to customize the user's profile page. |
| Settings | A system configuration application. |
| Dashboard | An app for monitoring system resource usage. |
| Control Hub | The console for Olares, providing precise and autonomous control over the system and its environment. |
| DevBox | A development tool for building and deploying Olares applications. |

View File

@@ -6,7 +6,7 @@ metadata:
annotations:
iam.kubesphere.io/uninitialized: "true"
helm.sh/resource-policy: keep
bytetrade.io/owner-role: platform-admin
bytetrade.io/owner-role: owner
bytetrade.io/terminus-name: "{{.Values.user.terminus_name}}"
bytetrade.io/launcher-auth-policy: two_factor
bytetrade.io/launcher-access-level: "1"
@@ -23,4 +23,4 @@ spec:
groups:
- lldap_admin
status:
state: Active
state: Created

View File

@@ -24,6 +24,7 @@ cp ${BASE_DIR}/.dependencies/components ${BASE_DIR}/.manifest/.
cp ${BASE_DIR}/.dependencies/components ${BASE_DIR}/.manifest/.
pushd ${BASE_DIR}.manifest
bash ${BASE_DIR}/build-manifest.sh ${BASE_DIR}/../.manifest/installation.manifest
python3 ${BASE_DIR}/build-manifest.py ${BASE_DIR}/../.manifest/installation.manifest
popd

162
build/build-manifest.py Normal file
View File

@@ -0,0 +1,162 @@
#!/usr/bin/env python3
import argparse
import hashlib
import os
import requests
import sys
import json
CDN_URL = "https://dc3p1870nn3cj.cloudfront.net"
def download_checksum(name):
    """Download the checksum for the artifact identified by ``name``.

    Fetches ``<CDN_URL>/<name>.checksum.txt`` and returns the first
    whitespace-separated token (the checksum value itself).
    Exits the process on any network or HTTP error — this script is a
    build step, so failing fast is intentional.
    """
    url = f"{CDN_URL}/{name}.checksum.txt"
    try:
        # Fix: a timeout prevents the build from hanging indefinitely on
        # a stalled CDN connection (requests has no default timeout).
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.text.split()[0]
    except requests.exceptions.RequestException as e:
        print(f"Error getting checksum for {name} from {url}: {e}", file=sys.stderr)
        sys.exit(1)
def get_image_manifest(name):
    """Download the image manifest JSON for the artifact ``name``.

    Fetches ``<CDN_URL>/<name>.manifest.json`` and returns the decoded
    JSON object. Exits the process on any network or HTTP error.
    """
    url = f"{CDN_URL}/{name}.manifest.json"
    try:
        # Fix: bound the request so a dead CDN endpoint cannot stall the
        # whole manifest build (requests has no default timeout).
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error getting manifest for {name} from {url}: {e}", file=sys.stderr)
        sys.exit(1)
def main():
    """Build per-architecture installation manifests.

    Reads the ``components`` file and the image list (``images.mf``),
    resolves each entry's CDN checksum (and, for images, the OCI image
    manifest), and writes ``<manifest_file>.amd64`` and
    ``<manifest_file>.arm64`` as indented JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("manifest_file", help="The manifest file to write to.")
    args = parser.parse_args()
    manifest_file = args.manifest_file
    # Placeholders in the components file are substituted from the env.
    version = os.environ.get("VERSION", "")
    repo_path = os.environ.get("REPO_PATH", "/")
    manifest_amd64_data = {}
    manifest_arm64_data = {}
    # Process components
    try:
        with open("components", "r") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                # Replace version placeholder
                if version:
                    line = line.replace("#__VERSION__", version)
                # Replace repo path placeholder
                if repo_path:
                    line = line.replace("#__REPO_PATH__", repo_path)
                fields = line.split(",")
                if len(fields) < 5:
                    print(f"Format error in components file: {line}", file=sys.stderr)
                    sys.exit(1)
                filename, path, deps, _, fileid = fields[:5]
                # Fix: log the actual component name instead of the
                # literal "(unknown)" (lost f-string placeholder).
                print(f"Downloading file checksum for {filename}")
                # CDN objects are keyed by the md5 hex digest of the name.
                name = hashlib.md5(filename.encode()).hexdigest()
                url_amd64 = name
                url_arm64 = f"arm64/{name}"
                checksum_amd64 = download_checksum(url_amd64)
                checksum_arm64 = download_checksum(url_arm64)
                manifest_amd64_data[filename] = {
                    "type": "component",
                    "path": path,
                    "deps": deps,
                    "url_amd64": url_amd64,
                    "checksum_amd64": checksum_amd64,
                    "fileid": fileid
                }
                manifest_arm64_data[filename] = {
                    "type": "component",
                    "path": path,
                    "deps": deps,
                    "url_arm64": url_arm64,
                    "checksum_arm64": checksum_arm64,
                    "fileid": fileid
                }
    except FileNotFoundError:
        print("Error: 'components' file not found.", file=sys.stderr)
        sys.exit(1)
    # Process images
    path = "images"
    for deps_file in ["images.mf"]:
        try:
            with open(deps_file, "r") as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    print(f"Downloading file checksum for {line}")
                    name = hashlib.md5(line.encode()).hexdigest()
                    url_amd64 = f"{name}.tar.gz"
                    url_arm64 = f"arm64/{name}.tar.gz"
                    # Checksums and manifests are stored under the bare
                    # digest key, without the .tar.gz suffix.
                    checksum_amd64 = download_checksum(name)
                    checksum_arm64 = download_checksum(f"arm64/{name}")
                    # Get the image manifest
                    image_manifest_amd64 = get_image_manifest(name)
                    image_manifest_arm64 = get_image_manifest(f"arm64/{name}")
                    filename = f"{name}.tar.gz"
                    manifest_amd64_data[filename] = {
                        "type": "image",
                        "path": path,
                        "deps": deps_file,
                        "url_amd64": url_amd64,
                        "checksum_amd64": checksum_amd64,
                        "fileid": line,
                        "manifest": image_manifest_amd64
                    }
                    manifest_arm64_data[filename] = {
                        "type": "image",
                        "path": path,
                        "deps": deps_file,
                        "url_arm64": url_arm64,
                        "checksum_arm64": checksum_arm64,
                        "fileid": line,
                        "manifest": image_manifest_arm64
                    }
        except FileNotFoundError:
            # Fix: the handler exits, so report an error — the previous
            # "Warning: ... skipping" message contradicted the exit(1).
            print(f"Error: '{deps_file}' not found.", file=sys.stderr)
            sys.exit(1)
    # Write one manifest file per architecture.
    amd64_manifest_file = f"{manifest_file}.amd64"
    with open(amd64_manifest_file, "w") as mf:
        json.dump(manifest_amd64_data, mf, indent=2)
    arm64_manifest_file = f"{manifest_file}.arm64"
    with open(arm64_manifest_file, "w") as mf:
        json.dump(manifest_arm64_data, mf, indent=2)
    # TODO: compress the manifest files
if __name__ == "__main__":
    main()

View File

@@ -46,6 +46,9 @@ while read line; do
done < components
sed -i "s/#__VERSION__/${VERSION}/g" $manifest_file
path="${REPO_PATH:-/}"
sed -i "s|#__REPO_PATH__|${path}|g" $manifest_file
path="images"
for deps in "images.mf"; do
while read line; do

View File

@@ -16,6 +16,7 @@ rm -rf ${BASE_DIR}/../.dependencies
set -e
pushd ${BASE_DIR}/../.manifest
bash ${BASE_DIR}/build-manifest.sh ${BASE_DIR}/../.manifest/installation.manifest
python3 ${BASE_DIR}/build-manifest.py ${BASE_DIR}/../.manifest/installation.manifest
popd
pushd $DIST_PATH

View File

@@ -77,3 +77,5 @@ find $BASE_DIR/../ -type f -name Olares.yaml | while read f; do
done
sed -i "s/#__VERSION__/${VERSION}/g" ${manifest}
path="${REPO_PATH:-/}"
sed -i "s|#__REPO_PATH__|${path}|g" ${manifest}

200
build/get-manifest.py Normal file
View File

@@ -0,0 +1,200 @@
#!/usr/bin/env python3
import requests
import json
import argparse
import re
import sys
import platform
def parse_image_name(image_name):
    """
    Parse a full image name into (registry, repository, reference).

    Handles Docker Hub defaults ("ubuntu" -> registry-1.docker.io,
    library/ubuntu, latest) and digest references ("repo@sha256:...").

    Fix: a ":" that appears before the last "/" belongs to a registry
    port (e.g. "localhost:5000/img"), not to a tag. The previous code
    blindly rsplit on ":", turning "5000/img" into the reference.
    """
    if "@" in image_name:
        # Digest reference: everything after "@" is the reference.
        repo_part, reference = image_name.rsplit("@", 1)
    else:
        # A ":" only separates a tag if it occurs after the final "/";
        # otherwise it is part of a "host:port" registry prefix.
        slash = image_name.rfind("/")
        colon = image_name.rfind(":")
        if colon > slash:
            repo_part, reference = image_name.rsplit(":", 1)
        else:
            # No tag or digest specified: default to 'latest'.
            repo_part, reference = image_name, "latest"
    # Determine registry and repository
    if "/" not in repo_part:
        # This is an official Docker Hub image, e.g., "ubuntu"
        registry = "registry-1.docker.io"
        repository = f"library/{repo_part}"
    else:
        parts = repo_part.split("/")
        # If the first part looks like a domain name (has a dot or a
        # port), it's the registry host.
        if "." in parts[0] or ":" in parts[0]:
            registry = parts[0]
            repository = "/".join(parts[1:])
        else:
            # A scoped Docker Hub image, e.g., "bitnami/nginx"
            registry = "registry-1.docker.io"
            repository = repo_part
    return registry, repository, reference
def get_auth_token(registry, repository):
    """
    Gets an authentication token from the registry's auth service.

    Probes ``https://<registry>/v2/``; if the registry answers 401,
    parses the Www-Authenticate challenge (realm/service) and requests
    a pull-scoped bearer token from the realm. Returns the token string,
    or None when the probe did not demand authentication. Exits the
    process on connection, parsing, or token-request failures.
    """
    # First, probe the registry to get the auth challenge
    try:
        probe_url = f"https://{registry}/v2/"
        response = requests.get(probe_url, timeout=10)
    except requests.exceptions.RequestException as e:
        print(f"Error: Could not connect to registry at {probe_url}. Details: {e}", file=sys.stderr)
        sys.exit(1)
    if response.status_code != 401:
        # Either public or something is wrong, we can try without a token
        return None
    auth_header = response.headers.get("Www-Authenticate")
    if not auth_header:
        print(f"Error: Registry {registry} returned 401 but did not provide Www-Authenticate header.", file=sys.stderr)
        sys.exit(1)
    # Parse the Www-Authenticate header to find realm, service, and scope.
    # re.search(...).group(1) raises AttributeError on a missing field,
    # which the except below converts into a clean error exit.
    try:
        realm = re.search('realm="([^"]+)"', auth_header).group(1)
        service = re.search('service="([^"]+)"', auth_header).group(1)
        # Scope for the specific repository is needed
        scope = f"repository:{repository}:pull"
    except AttributeError:
        print(f"Error: Could not parse Www-Authenticate header: {auth_header}", file=sys.stderr)
        sys.exit(1)
    # Request the actual token from the auth realm
    auth_params = {
        "service": service,
        "scope": scope
    }
    try:
        auth_response = requests.get(realm, params=auth_params, timeout=10)
        auth_response.raise_for_status()
        # NOTE(review): some auth servers return "access_token" instead of
        # "token"; .get() then yields None — verify against the registries
        # actually in use.
        return auth_response.json().get("token")
    except requests.exceptions.RequestException as e:
        print(f"Error: Failed to get auth token from {realm}. Details: {e}", file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Failed to decode JSON response from auth server: {auth_response.text}", file=sys.stderr)
        sys.exit(1)
def get_manifest(registry, repository, reference, token):
    """
    Fetches the image manifest from the registry.

    ``reference`` may be a tag or a digest. Sends an Accept header
    covering OCI and Docker v2 manifest (and index/list) media types so
    the registry returns whichever it has. On a 401 received without a
    token, retries once after acquiring a token via get_auth_token().
    Returns the decoded manifest JSON; exits the process on failure.
    """
    manifest_url = f"https://{registry}/v2/{repository}/manifests/{reference}"
    headers = {
        # Request multiple manifest types, the registry will return the correct one
        "Accept": "application/vnd.oci.image.index.v1+json, application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json"
    }
    if token:
        headers["Authorization"] = f"Bearer {token}"
    try:
        response = requests.get(manifest_url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 401 and not token:
            print("Error: Received 401 Unauthorized. Attempting to get a token...", file=sys.stderr)
            # The initial probe might have passed, but manifest access requires auth.
            # We re-run the token acquisition logic.
            new_token = get_auth_token(registry, repository)
            if new_token:
                # Single-level retry: recurses at most once, since the
                # second call passes a non-None token.
                return get_manifest(registry, repository, reference, new_token)
        print(f"Error: Failed to fetch manifest from {manifest_url}. Status: {e.response.status_code}", file=sys.stderr)
        print(f"Response: {e.response.text}", file=sys.stderr)
        sys.exit(1)
    except requests.exceptions.RequestException as e:
        print(f"Error: A network error occurred. Details: {e}", file=sys.stderr)
        sys.exit(1)
def main():
    """CLI entry point: resolve an image reference to its manifest.

    Parses the image name, obtains a pull token if required, fetches
    the manifest, and — when the result is a multi-platform index —
    selects the sub-manifest matching this machine's architecture.
    Prints the final manifest to stdout or writes it to --output-file.
    """
    parser = argparse.ArgumentParser(
        description="Fetch an OCI/Docker image manifest from a container registry.",
        epilog="""Examples:
  python get_manifest.py ubuntu:22.04
  python get_manifest.py quay.io/brancz/kube-rbac-proxy:v0.18.1 -o manifest.json
  python get_manifest.py gcr.io/google-containers/pause:3.9""",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument("image_name", help="Full name of the container image (e.g., 'ubuntu:latest' or 'quay.io/prometheus/node-exporter:v1.7.0')")
    parser.add_argument("-o", "--output-file", help="Optional. Path to write the final manifest JSON to. If not provided, prints to stdout.")
    args = parser.parse_args()
    registry, repository, reference = parse_image_name(args.image_name)
    # Suppress informational prints if writing to a file
    verbose_print = print if not args.output_file else lambda *a, **k: None
    verbose_print(f"Registry: {registry}")
    verbose_print(f"Repository: {repository}")
    verbose_print(f"Reference: {reference}", end='\n\n', flush=True)
    token = get_auth_token(registry, repository)
    if not token and not args.output_file:
        print("No authentication token needed or could be retrieved. Proceeding without token...", file=sys.stderr)
    manifest = get_manifest(registry, repository, reference, token)
    final_manifest = None
    media_type = manifest.get("mediaType", "")
    # A manifest list / image index points at per-platform manifests;
    # pick the one matching this machine and fetch it by digest.
    if "manifest.list" in media_type or "image.index" in media_type:
        verbose_print("Detected a multi-platform image index. Finding manifest for current architecture...")
        system_arch = platform.machine()
        # Map uname-style arch names to OCI platform names.
        arch_map = {"x86_64": "amd64", "aarch64": "arm64"}
        target_arch = arch_map.get(system_arch, system_arch)
        verbose_print(f"System architecture: {system_arch} -> Target: linux/{target_arch}")
        target_digest = None
        for m in manifest.get("manifests", []):
            plat = m.get("platform", {})
            if plat.get("os") == "linux" and plat.get("architecture") == target_arch:
                target_digest = m.get("digest")
                break
        if target_digest:
            verbose_print(f"Found manifest for linux/{target_arch} with digest: {target_digest}\n")
            final_manifest = get_manifest(registry, repository, target_digest, token)
        else:
            print(f"Error: Could not find a manifest for 'linux/{target_arch}' in the index.", file=sys.stderr)
            if not args.output_file:
                print("Available platforms:", file=sys.stderr)
                for m in manifest.get("manifests", []):
                    print(f" - {m.get('platform', {}).get('os')}/{m.get('platform', {}).get('architecture')}", file=sys.stderr)
            sys.exit(1)
    else:
        # Already a single-platform manifest; use it as-is.
        final_manifest = manifest
    if final_manifest:
        if args.output_file:
            try:
                with open(args.output_file, 'w') as f:
                    json.dump(final_manifest, f, indent=2)
                print(f"Successfully wrote manifest to {args.output_file}")
            except IOError as e:
                print(f"Error: Could not write to file {args.output_file}. Details: {e}", file=sys.stderr)
                sys.exit(1)
        else:
            print(json.dumps(final_manifest, indent=2))
if __name__ == "__main__":
    main()

View File

@@ -74,6 +74,6 @@ echo "packaging launcher ..."
run_cmd "cp -rf framework/bfl/.olares/config/launcher ${DIST}/wizard/config/"
echo "packaging gpu ..."
run_cmd "cp -rf framework/gpu/.olares/config/gpu ${DIST}/wizard/config/"
run_cmd "cp -rf infrastructure/gpu/.olares/config/gpu ${DIST}/wizard/config/"
echo "packaging completed"

View File

@@ -23,26 +23,28 @@ while read line; do
continue
fi
bash ${BASE_DIR}/download-deps.sh $PLATFORM $line
if [ $? -ne 0 ]; then
exit -1
fi
filename=$(echo "$line"|awk -F"," '{print $1}')
echo "if exists $filename ... "
name=$(echo -n "$filename"|md5sum|awk '{print $1}')
checksum="$name.checksum.txt"
md5sum $name > $checksum
backup_file=$(awk '{print $1}' $checksum)
if [ x"$backup_file" == x"" ]; then
echo "invalid checksum"
exit 1
fi
echo "if exists $filename ... "
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$name > /dev/null
if [ $? -ne 0 ]; then
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$name.tar.gz)
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$name)
if [ $code -eq 403 ]; then
bash ${BASE_DIR}/download-deps.sh $PLATFORM $line
if [ $? -ne 0 ]; then
exit -1
fi
md5sum $name > $checksum
backup_file=$(awk '{print $1}' $checksum)
if [ x"$backup_file" == x"" ]; then
echo "invalid checksum"
exit 1
fi
set -ex
aws s3 cp $name s3://terminus-os-install/$path$name --acl=public-read
aws s3 cp $name s3://terminus-os-install/backup/$path$backup_file --acl=public-read

View File

@@ -10,6 +10,7 @@ cat $1|while read image; do
echo "if exists $image ... "
name=$(echo -n "$image"|md5sum|awk '{print $1}')
checksum="$name.checksum.txt"
manifest="$name.manifest.json"
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$name.tar.gz > /dev/null
if [ $? -ne 0 ]; then
@@ -68,48 +69,29 @@ cat $1|while read image; do
set +ex
else
if [ $code -ne 200 ]; then
echo "failed to check image"
echo "failed to check image checksum"
exit -1
fi
fi
fi
# upload to tencent cloud cos
# curl -fsSLI https://cdn.joinolares.cn/$path$name.tar.gz > /dev/null
# if [ $? -ne 0 ]; then
# set -e
# docker pull $image
# docker save $image -o $name.tar
# gzip $name.tar
# md5sum $name.tar.gz > $checksum
# coscmd upload ./$name.tar.gz /$path$name.tar.gz
# coscmd upload ./$checksum /$path$checksum
# echo "upload $name to cos completed"
# set +e
# fi
# # re-upload checksum.txt
# curl -fsSLI https://cdn.joinolares.cn/$path$checksum > /dev/null
# if [ $? -ne 0 ]; then
# set -e
# docker pull $image
# docker save $image -o $name.tar
# gzip $name.tar
# md5sum $name.tar.gz > $checksum
# coscmd upload ./$name.tar.gz /$path$name.tar.gz
# coscmd upload ./$checksum /$path$checksum
# echo "upload $name to cos completed"
# set +e
# fi
# upload manifest.json
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$manifest > /dev/null
if [ $? -ne 0 ]; then
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$manifest)
if [ $code -eq 403 ]; then
set -ex
BASE_DIR=$(dirname $(realpath -s $0))
python3 $BASE_DIR/get-manifest.py $image -o $manifest
aws s3 cp $manifest s3://terminus-os-install/$path$manifest --acl=public-read
echo "upload $name manifest completed"
set +ex
else
if [ $code -ne 200 ]; then
echo "failed to check image manifest"
exit -1
fi
fi
fi
done

View File

@@ -17,8 +17,12 @@ builds:
ignore:
- goos: darwin
goarch: arm
- goos: darwin
goarch: amd64
- goos: windows
goarch: arm
- goos: windows
goarch: arm64
ldflags:
- -s
- -w

View File

@@ -1 +1,92 @@
# installer
# Olares CLI
This directory contains the code for **olares-cli**, the official command-line interface for administering an **Olares** cluster. It provides a modular, pipeline-based architecture for orchestrating complex system operations. See the full [Olares CLI Documentation](https://docs.olares.com/developer/install/cli-1.12/olares-cli.html) for command reference and tutorials.
Key responsibilities include:
- **Cluster management**: Installing, upgrading, restarting, and maintaining an Olares cluster.
- **Node management**: Adding to or removing nodes from an Olares cluster.
## Execution Model
For most of the commands, `olares-cli` is executed through a four-tier hierarchy:
```
Pipeline ➜ Module ➜ Task ➜ Action
```
### Example: `install-olares` Pipeline
```text
Pipeline: Install Olares
├── ...other modules
└── Module: Bootstrap OS
├── ...other tasks
├── Task: Check Prerequisites
│ └── Action: run-precheck.sh
└── Task: Configure System
└── Action: apply-sysctl
```
## Repository layout
```text
cli/
├── cmd/ # Cobra command definitions
│ ├── main.go # CLI entry point
│ └── ctl/
│ ├── root.go
│ ├── os/ # OS-level maintenance commands
│ ├── node/ # Cluster node operations
│ └── gpu/ # GPU management
└── pkg/
├── core/
│ ├── action/ # Re-usable action primitives
│ ├── module/ # Module abstractions
│ ├── pipeline/ # Pipeline abstractions
│ └── task/ # Task abstractions
└── pipelines/ # Pre-built pipelines
        └── ...        # actual modules and tasks for various commands and components
```
## Build from source
### Prerequisites
* **Go 1.24+**
* **GoReleaser** (optional, for cross-compiling and packaging)
### Sample commands
```bash
# Clone the repo and enter the CLI folder
cd cli
# 1) Build for the host OS/ARCH
go build -o olares-cli ./cmd/main.go
# 2) Cross-compile for Linux amd64 (from macOS, for example)
GOOS=linux GOARCH=amd64 go build -o olares-cli ./cmd/main.go
# 3) Produce multi-platform artifacts (tar.gz, checksums, etc.)
goreleaser release --snapshot --clean
```
---
## Development workflow
### Add a new command
1. Create the command file in `cmd/ctl/<category>/`.
2. Define a pipeline in `pkg/pipelines/`.
3. Implement modules & tasks inside the relevant `pkg/` sub-packages.
### Test your build
1. Upload the self-built `olares-cli` binary to a machine that's running Olares.
2. Replace the existing `olares-cli` binary on the machine using `sudo cp -f olares-cli /usr/local/bin`.
3. Execute arbitrary commands using `olares-cli`

View File

@@ -60,7 +60,7 @@ echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767,46800-50000' >> /etc/sysctl.conf
echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
@@ -84,7 +84,7 @@ sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /et
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-iptables ?= ?(0|1)@net.bridge.bridge-nf-call-iptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767,46800-50000@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.max_map_count ?= ?([0-9]{1,})@vm.max_map_count = 262144@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.inotify.max_user_instances ?= ?([0-9]{1,})@fs.inotify.max_user_instances = 524288@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.pid_max ?= ?([0-9]{1,})@kernel.pid_max = 65535@g" /etc/sysctl.conf

View File

@@ -265,7 +265,7 @@ const (
CacheAppServicePod = "app_service_pod_name"
CacheAppValues = "app_built_in_values"
CacheCountPodsUsingHostIP = "count_pods_using_host_ip"
CacheCountPodsWaitForRecreation = "count_pods_wait_for_recreation"
CacheUpgradeUsers = "upgrade_users"
CacheUpgradeAdminUser = "upgrade_admin_user"

View File

@@ -73,7 +73,6 @@ type Argument struct {
ImagesDir string `json:"images_dir"`
Namespace string `json:"namespace"`
DeleteCRI bool `json:"delete_cri"`
DeleteCache bool `json:"delete_cache"`
Role string `json:"role"`
Type string `json:"type"`
Kubetype string `json:"kube_type"`
@@ -322,10 +321,26 @@ func (a *Argument) SaveReleaseInfo() error {
if a.OlaresVersion == "" {
return errors.New("invalid: empty olares version")
}
releaseInfoMap := map[string]string{
ENV_OLARES_BASE_DIR: a.BaseDir,
ENV_OLARES_VERSION: a.OlaresVersion,
}
if a.User != nil {
releaseInfoMap["OLARES_NAME"] = fmt.Sprintf("%s@%s", a.User.UserName, a.User.DomainName)
} else {
if util.IsExist(OlaresReleaseFile) {
// if the user is not set, try to load the user name from the release file
envs, err := godotenv.Read(OlaresReleaseFile)
if err == nil {
if userName, ok := envs["OLARES_NAME"]; ok {
releaseInfoMap["OLARES_NAME"] = userName
}
}
}
}
if !util.IsExist(filepath.Dir(OlaresReleaseFile)) {
if err := os.MkdirAll(filepath.Dir(OlaresReleaseFile), 0755); err != nil {
return fmt.Errorf("failed to create directory %s: %v", filepath.Dir(OlaresReleaseFile), err)
@@ -395,10 +410,6 @@ func (a *Argument) SetRegistryMirrors(registryMirrors string) {
a.RegistryMirrors = registryMirrors
}
func (a *Argument) SetDeleteCache(deleteCache bool) {
a.DeleteCache = deleteCache
}
func (a *Argument) SetDeleteCRI(deleteCRI bool) {
a.DeleteCRI = deleteCRI
}

View File

@@ -1,17 +1,16 @@
package common
const (
NamespaceDefault = "default"
NamespaceKubeNodeLease = "kube-node-lease"
NamespaceKubePublic = "kube-public"
NamespaceKubeSystem = "kube-system"
NamespaceKubekeySystem = "kubekey-system"
NamespaceKubesphereControlsSystem = "kubesphere-controls-system"
NamespaceKubesphereMonitoringFederated = "kubesphere-monitoring-federated"
NamespaceKubesphereMonitoringSystem = "kubesphere-monitoring-system"
NamespaceKubesphereSystem = "kubesphere-system"
NamespaceOsFramework = "os-framework"
NamespaceOsPlatform = "os-platform"
NamespaceDefault = "default"
NamespaceKubeNodeLease = "kube-node-lease"
NamespaceKubePublic = "kube-public"
NamespaceKubeSystem = "kube-system"
NamespaceKubekeySystem = "kubekey-system"
NamespaceKubesphereControlsSystem = "kubesphere-controls-system"
NamespaceKubesphereMonitoringSystem = "kubesphere-monitoring-system"
NamespaceKubesphereSystem = "kubesphere-system"
NamespaceOsFramework = "os-framework"
NamespaceOsPlatform = "os-platform"
ChartNameRedis = "redis"
ChartNameSnapshotController = "snapshot-controller"

View File

@@ -133,8 +133,11 @@ type DisableTerminusdService struct {
}
func (s *DisableTerminusdService) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("systemctl disable --now olaresd", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "disable olaresd failed")
stdout, _ := runtime.GetRunner().SudoCmd("systemctl is-active olaresd", false, false)
if stdout == "active" {
if _, err := runtime.GetRunner().SudoCmd("systemctl disable --now olaresd", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "disable olaresd failed")
}
}
return nil
}
@@ -144,10 +147,18 @@ type UninstallTerminusd struct {
}
func (r *UninstallTerminusd) Execute(runtime connector.Runtime) error {
var olaresdFiles []string
svcpath := filepath.Join("/etc/systemd/system", templates.TerminusdService.Name())
svcenvpath := filepath.Join("/etc/systemd/system", templates.TerminusdEnv.Name())
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("rm -rf %s && rm -rf %s && rm -rf /usr/local/bin/olaresd", svcpath, svcenvpath), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "remove olaresd failed")
binPath := "/usr/local/bin/olaresd"
olaresdFiles = append(olaresdFiles, svcpath, svcenvpath, binPath)
for _, pidFile := range []string{"installing.pid", "changingip.pid"} {
olaresdFiles = append(olaresdFiles, filepath.Join(runtime.GetBaseDir(), pidFile))
}
for _, f := range olaresdFiles {
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("rm -rf %s", f), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "remove olaresd failed")
}
}
return nil
}

View File

@@ -263,30 +263,25 @@ type NodeLabelingModule struct {
func (l *NodeLabelingModule) Init() {
l.Name = "NodeLabeling"
updateNode := &task.RemoteTask{
Name: "UpdateNode",
Hosts: l.Runtime.GetHostsByRole(common.Master),
updateNode := &task.LocalTask{
Name: "UpdateNode",
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(CudaInstalled),
new(K8sNodeInstalled),
},
Action: new(UpdateNodeLabels),
Parallel: false,
Retry: 1,
Action: new(UpdateNodeLabels),
Retry: 1,
}
restartPlugin := &task.RemoteTask{
Name: "RestartPlugin",
Hosts: l.Runtime.GetHostsByRole(common.Master),
restartPlugin := &task.LocalTask{
Name: "RestartPlugin",
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(CudaInstalled),
new(K8sNodeInstalled),
},
Action: new(RestartPlugin),
Parallel: false,
Retry: 1,
Action: new(RestartPlugin),
Retry: 1,
}
l.Tasks = []task.Interface{

View File

@@ -195,11 +195,13 @@ func (g *GenerateK3sService) Execute(runtime connector.Runtime) error {
defaultKubeletArs := map[string]string{
"kube-reserved": "cpu=200m,memory=250Mi,ephemeral-storage=1Gi",
"system-reserved": "cpu=200m,memory=250Mi,ephemeral-storage=1Gi",
"eviction-hard": "memory.available<5%,nodefs.available<10%",
"eviction-hard": "memory.available<5%,nodefs.available<10%,imagefs.available<10%",
"config": "/etc/rancher/k3s/kubelet.config",
"containerd": container.DefaultContainerdCRISocket,
"cgroup-driver": "systemd",
"runtime-request-timeout": "5m",
"image-gc-high-threshold": "91",
"image-gc-low-threshold": "90",
}
defaultKubeProxyArgs := map[string]string{
"proxy-mode": "ipvs",

View File

@@ -307,6 +307,8 @@ func GetKubeletConfiguration(runtime connector.Runtime, kubeConf *common.KubeCon
"evictionPressureTransitionPeriod": "30s",
"featureGates": FeatureGatesDefaultConfiguration,
"runtimeRequestTimeout": "5m",
"imageGCHighThresholdPercent": 91,
"imageGCLowThresholdPercent": 90,
}
if securityEnhancement {

View File

@@ -47,24 +47,6 @@ func (m *DeleteKubeSphereCachesModule) Init() {
}
}
type DeleteCacheModule struct {
common.KubeModule
}
func (m *DeleteCacheModule) Init() {
m.Name = "DeleteCache"
deleteCache := &task.LocalTask{
Name: "DeleteCache",
Prepare: new(ShouldDeleteCache),
Action: new(DeleteCache),
}
m.Tasks = []task.Interface{
deleteCache,
}
}
type DeployModule struct {
common.KubeModule
Skip bool

File diff suppressed because one or more lines are too long

View File

@@ -4,8 +4,6 @@
image:
# Overrides the image tag whose default is the chart appVersion.
ks_controller_manager_repo: kubesphere/ks-controller-manager
ks_controller_manager_tag: "v3.3.0"
ks_apiserver_repo: beclab/ks-apiserver
ks_apiserver_tag: "v3.3.0-ext-3"

View File

@@ -32,7 +32,7 @@ spec:
- command:
- ks-apiserver
- --logtostderr=true
image: beclab/ks-apiserver:0.0.20
image: beclab/ks-apiserver:0.0.21
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: ks-apiserver
ports:

View File

@@ -1,121 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ks-controller-manager
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-controller-manager
spec:
strategy:
rollingUpdate:
maxSurge: 0
type: RollingUpdate
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
template:
metadata:
labels:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- command:
- controller-manager
- --logtostderr=true
- --leader-elect=false
image: beclab/ks-controller-manager:0.0.20
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: ks-controller-manager
ports:
- containerPort: 8080
protocol: TCP
resources:
{{- toYaml .Values.controller.resources | nindent 12 }}
volumeMounts:
- mountPath: /etc/kubesphere/
name: kubesphere-config
- mountPath: /etc/localtime
name: host-time
readOnly: true
{{- if .Values.controller.extraVolumeMounts }}
{{- toYaml .Values.controller.extraVolumeMounts | nindent 8 }}
{{- end }}
env:
{{- if .Values.env }}
{{- toYaml .Values.env | nindent 8 }}
{{- end }}
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
terminationGracePeriodSeconds: 30
volumes:
- name: kubesphere-config
configMap:
name: kubesphere-config
defaultMode: 420
- hostPath:
path: /etc/localtime
type: ""
name: host-time
{{- if .Values.controller.extraVolumes }}
{{ toYaml .Values.controller.extraVolumes | nindent 6 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- ks-controller-manager
namespaces:
- kubesphere-system
{{- with .Values.nodeAffinity }}
nodeAffinity:
{{ toYaml . | indent 10 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ks-controller-manager
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-controller-manager
spec:
ports:
- port: 443
protocol: TCP
targetPort: 8443
selector:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
sessionAffinity: None
type: ClusterIP

View File

@@ -4,8 +4,6 @@
image:
# Overrides the image tag whose default is the chart appVersion.
ks_controller_manager_repo: kubesphere/ks-controller-manager
ks_controller_manager_tag: "v3.3.0"
ks_apiserver_repo: beclab/ks-apiserver
ks_apiserver_tag: "v3.3.0-ext-3"

View File

@@ -748,12 +748,12 @@ spec:
sum (node_cpu_seconds_total{job="node-exporter", mode=~"user|nice|system|iowait|irq|softirq"}) by (cpu, instance, job, namespace, pod)
record: node_cpu_used_seconds_total
- expr: |
max(kube_pod_info{job="kube-state-metrics"} * on(node) group_left(role) kube_node_role{job="kube-state-metrics", role="master"} or on(pod, namespace) kube_pod_info{job="kube-state-metrics"}) by (node, namespace, host_ip, role, pod)
max(kube_pod_info{job="kube-state-metrics"} * on(node) group_left(role) kube_node_role{job="kube-state-metrics", role="master"} or on(pod, namespace) kube_pod_info{job="kube-state-metrics"}) by (node, namespace, role, pod)
record: 'node_namespace_pod:kube_pod_info:'
- expr: |
count by (node, host_ip, role) (sum by (node, cpu, host_ip, role) (
count by (node, role) (sum by (node, cpu, role) (
node_cpu_seconds_total{job="node-exporter"}
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
))
record: node:node_num_cpu:sum
@@ -761,27 +761,27 @@ spec:
avg(irate(node_cpu_used_seconds_total{job="node-exporter"}[5m]))
record: :node_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_used_seconds_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"user"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_user_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"system"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_system_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"iowait"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_iowait_cpu_utilisation:avg1m
- expr: |
@@ -806,9 +806,9 @@ spec:
label_replace(node_memory_Cached_bytes, "node", "$1", "instance", "(.*)")
record: node:node_memory_Cached_bytes
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(node_memory_Slab_bytes{job="node-exporter"} + node_memory_KernelStack_bytes{job="node-exporter"} + node_memory_PageTables_bytes{job="node-exporter"}+ node_memory_HardwareCorrupted_bytes{job="node-exporter"}+node_memory_Bounce_bytes{job="node-exporter"}-node_memory_SReclaimable_bytes{job="node-exporter"})
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_system_reserved
@@ -825,16 +825,16 @@ spec:
sum(node_memory_MemTotal_bytes{job="node-exporter"})
record: ':node_memory_utilisation:'
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"} + node_memory_SReclaimable_bytes{job="node-exporter"})
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_bytes_available:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
node_memory_MemTotal_bytes{job="node-exporter"}
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_bytes_total:sum
@@ -842,30 +842,30 @@ spec:
1 - (node:node_memory_bytes_available:sum / node:node_memory_bytes_total:sum)
record: 'node:node_memory_utilisation:'
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_reads_completed_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_iops_reads:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_writes_completed_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_iops_writes:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_read_bytes_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_throughput_bytes_read:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_written_bytes_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_throughput_bytes_written:sum
@@ -874,74 +874,74 @@ spec:
sum(irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m]))
record: :node_net_utilisation:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[5m]) +
irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m]))
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_utilisation:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_bytes_transmitted:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_bytes_received:sum_irate
- expr: |
sum by(node, host_ip, role) (sum(max(node_filesystem_files{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by(node, role) (sum(max(node_filesystem_files{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: 'node:node_inodes_total:'
- expr: |
sum by(node, host_ip, role) (sum(max(node_filesystem_files_free{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by(node, role) (sum(max(node_filesystem_files_free{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: 'node:node_inodes_free:'
- expr: |
sum by (node, host_ip, role) (node_load1{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load1{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load1:ratio
- expr: |
sum by (node, host_ip, role) (node_load5{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load5{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load5:ratio
- expr: |
sum by (node, host_ip, role) (node_load15{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load15{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load15:ratio
- expr: |
sum by (node, host_ip, role) ((kube_pod_status_scheduled{job="kube-state-metrics", condition="true"} > 0) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by (node, role) ((kube_pod_status_scheduled{job="kube-state-metrics", condition="true"} > 0) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: node:pod_count:sum
- expr: |
(sum(kube_node_status_capacity{resource="pods", job="kube-state-metrics"}) by (node) * on(node) group_left(host_ip, role) max by(node, host_ip, role) (node_namespace_pod:kube_pod_info:{node!="",host_ip!=""}))
(sum(kube_node_status_capacity{resource="pods", job="kube-state-metrics"}) by (node) * on(node) group_left(role) max by(node, role) (node_namespace_pod:kube_pod_info:{node!=""}))
record: node:pod_capacity:sum
- expr: |
node:pod_running:count / node:pod_capacity:sum
record: node:pod_utilization:ratio
- expr: |
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Succeeded"} > 0)) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Succeeded"} > 0)) by (node, role)
record: node:pod_running:count
- expr: |
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Running"} > 0)) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Running"} > 0)) by (node, role)
record: node:pod_succeeded:count
- expr: |
count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0) by (node, role)
record: node:pod_abnormal:count
- expr: |
(count by(namespace, cluster) (kube_pod_info{job="kube-state-metrics"} unless on(pod, namespace, cluster) (kube_pod_status_phase{job="kube-state-metrics",phase="Succeeded"} > 0) unless on(pod, namespace, cluster) ((kube_pod_status_ready{condition="true",job="kube-state-metrics"} > 0) and on(pod, namespace, cluster) (kube_pod_status_phase{job="kube-state-metrics",phase="Running"} > 0)) unless on(pod, namespace, cluster) kube_pod_container_status_waiting_reason{job="kube-state-metrics",reason="ContainerCreating"} > 0) or on(namespace, cluster) (group by(namespace, cluster) (kube_pod_info{job="kube-state-metrics"}) * 0)) * on(namespace, cluster) group_left(user) (kube_namespace_labels{job="kube-state-metrics"}) > 0
record: user:pod_abnormal:count
- expr: |
node:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, host_ip, role)
node:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, role)
record: node:pod_abnormal:ratio
- expr: |
user:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, host_ip, role)
user:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, role)
record: user:pod_abnormal:ratio
- expr: |
sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role)
sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role)
record: 'node:disk_space_available:'
- expr: |
1- sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role) / sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role)
1- sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role) / sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role)
record: node:disk_space_utilization:ratio
- expr: |
(1 - (node:node_inodes_free: / node:node_inodes_total:))

View File

@@ -42,7 +42,7 @@ spec:
- --collector.netdev.address-info
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
image: beclab/node-exporter:0.0.2
image: beclab/node-exporter:0.0.3
name: node-exporter
securityContext:
privileged: true

View File

@@ -58,12 +58,12 @@ var kscorecrds = []map[string]string{
"resource": "default-http-backend",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "secrets",
"resource": "ks-controller-manager-webhook-cert",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "secrets",
// "resource": "ks-controller-manager-webhook-cert",
// "release": "ks-core",
//},
{
"ns": "kubesphere-system",
"kind": "serviceaccounts",
@@ -100,24 +100,24 @@ var kscorecrds = []map[string]string{
"resource": "ks-apiserver",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "services",
"resource": "ks-controller-manager",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "services",
// "resource": "ks-controller-manager",
// "release": "ks-core",
//},
{
"ns": "kubesphere-system",
"kind": "deployments",
"resource": "ks-apiserver",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "deployments",
"resource": "ks-controller-manager",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "deployments",
// "resource": "ks-controller-manager",
// "release": "ks-core",
//},
//{
// "ns": "kubesphere-system",
// "kind": "validatingwebhookconfigurations",

View File

@@ -65,7 +65,7 @@ func (t *InitNamespace) Execute(runtime connector.Runtime) error {
kubectlpath = path.Join(common.BinDir, common.CommandKubectl)
}
for _, ns := range []string{common.NamespaceKubesphereControlsSystem, common.NamespaceKubesphereMonitoringFederated} {
for _, ns := range []string{common.NamespaceKubesphereControlsSystem} {
if stdout, err := runtime.GetRunner().Cmd(fmt.Sprintf("%s create ns %s", kubectlpath, ns), false, true); err != nil {
if !strings.Contains(stdout, "already exists") {
logger.Errorf("create ns %s failed: %v", ns, err)
@@ -98,8 +98,6 @@ func (t *InitNamespace) Execute(runtime connector.Runtime) error {
common.NamespaceKubeSystem,
common.NamespaceKubekeySystem,
common.NamespaceKubesphereControlsSystem,
common.NamespaceKubesphereMonitoringFederated,
common.NamespaceKubesphereMonitoringSystem,
common.NamespaceKubesphereSystem,
}

View File

@@ -23,17 +23,6 @@ import (
versionutil "k8s.io/apimachinery/pkg/util/version"
)
type ShouldDeleteCache struct {
common.KubePrepare
}
func (p *ShouldDeleteCache) PreCheck(runtime connector.Runtime) (bool, error) {
if p.KubeConf.Arg.DeleteCache {
return true, nil
}
return false, nil
}
type VersionBelowV3 struct {
common.KubePrepare
}

View File

@@ -52,19 +52,6 @@ func (d *DeleteKubeSphereCaches) Execute(runtime connector.Runtime) error {
return nil
}
type DeleteCache struct {
common.KubeAction
}
func (t *DeleteCache) Execute(runtime connector.Runtime) error {
// var cacheDir = path.Join(runtime.GetBaseDir(), cc.ImagesDir)
// if err := util.RemoveDir(cacheDir); err != nil {
// return err
// }
// logger.Debugf("delete caches success")
return nil
}
type AddInstallerConfig struct {
common.KubeAction
}
@@ -368,7 +355,7 @@ func (c *Check) Execute(runtime connector.Runtime) error {
return fmt.Errorf("kubectl not found")
}
var labels = []string{"app=ks-apiserver", "app=ks-controller-manager"}
var labels = []string{"app=ks-apiserver"}
for _, label := range labels {
var cmd = fmt.Sprintf("%s get pod -n %s -l '%s' -o jsonpath='{.items[0].status.phase}'", kubectlpath, common.NamespaceKubesphereSystem, label)

View File

@@ -6,6 +6,7 @@ import (
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/core/module"
"github.com/beclab/Olares/cli/pkg/core/pipeline"
"github.com/beclab/Olares/cli/pkg/gpu"
"github.com/beclab/Olares/cli/pkg/k3s"
"github.com/beclab/Olares/cli/pkg/kubernetes"
"github.com/beclab/Olares/cli/pkg/manifest"
@@ -75,6 +76,7 @@ func (m *AddNodeModule) Init() {
&k3s.JoinNodesModule{},
}
}
m.underlyingModules = append(m.underlyingModules, &gpu.NodeLabelingModule{})
for _, underlyingModule := range m.underlyingModules {
underlyingModule.Default(m.Runtime, m.PipelineCache, m.ModuleCache)
underlyingModule.AutoAssert()

View File

@@ -105,6 +105,7 @@ func (p *phaseBuilder) phaseInstall() *phaseBuilder {
&certs.UninstallCertsFilesModule{},
&storage.DeleteUserDataModule{},
&terminus.DeleteWizardFilesModule{},
&terminus.DeleteUpgradeFilesModule{},
&storage.RemoveJuiceFSModule{},
&storage.DeletePhaseFlagModule{
PhaseFile: common.TerminusStateFileInstalled,
@@ -132,33 +133,13 @@ func (p *phaseBuilder) phasePrepare() *phaseBuilder {
PhaseFile: common.TerminusStateFilePrepared,
BaseDir: p.runtime.GetBaseDir(),
},
&daemon.UninstallTerminusdModule{},
&terminus.RemoveReleaseFileModule{},
)
}
return p
}
func (p *phaseBuilder) phaseDownload() *phaseBuilder {
terminusdAction := &daemon.CheckTerminusdService{}
err := terminusdAction.Execute()
if p.convert() >= PhaseDownload {
if err == nil {
p.modules = append(p.modules, &daemon.UninstallTerminusdModule{})
}
p.modules = append(p.modules,
&kubesphere.DeleteCacheModule{},
)
if p.runtime.Arg.DeleteCache {
p.modules = append(p.modules, &storage.DeleteCacheModule{
BaseDir: p.runtime.GetBaseDir(),
})
}
}
return p
}
func (p *phaseBuilder) phaseMacos() {
p.modules = []module.Module{
&precheck.GreetingsModule{},
@@ -168,9 +149,6 @@ func (p *phaseBuilder) phaseMacos() {
}
if p.convert() >= PhaseDownload {
p.modules = append(p.modules, &kubesphere.DeleteKubeSphereCachesModule{})
if p.runtime.Arg.DeleteCache {
p.modules = append(p.modules, &kubesphere.DeleteCacheModule{})
}
}
}
@@ -189,8 +167,7 @@ func UninstallTerminus(phase string, runtime *common.KubeRuntime) pipeline.Pipel
builder.
phaseInstall().
phaseStorage().
phasePrepare().
phaseDownload()
phasePrepare()
}
return pipeline.Pipeline{

View File

@@ -65,6 +65,7 @@ data:
health
ready
kubernetes {{ .DNSDomain }} in-addr.arpa ip6.arpa {
endpoint_pod_names
pods insecure
fallthrough in-addr.arpa ip6.arpa
}

View File

@@ -5993,6 +5993,8 @@ spec:
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Never"
- name: FELIX_HEALTHHOST
value: 127.0.0.1
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:

View File

@@ -117,7 +117,7 @@ func (m *Manager) packageLauncher() error {
func (m *Manager) packageGPU() error {
fmt.Println("packaging gpu ...")
return util.CopyDirectory(
filepath.Join(m.olaresRepoRoot, "framework/gpu/.olares/config/gpu"),
filepath.Join(m.olaresRepoRoot, "infrastructure/gpu/.olares/config/gpu"),
filepath.Join(m.distPath, "wizard/config/gpu"),
)
}

View File

@@ -11,6 +11,7 @@ import (
type Builder struct {
olaresRepoRoot string
vendorRepoPath string
distPath string
version string
manifestManager *manifest.Manager
@@ -19,8 +20,13 @@ type Builder struct {
func NewBuilder(olaresRepoRoot, version, cdnURL string, ignoreMissingImages bool) *Builder {
distPath := filepath.Join(olaresRepoRoot, ".dist/install-wizard")
vendorRepoPath := os.Getenv("OLARES_VENDOR_REPO_PATH")
if vendorRepoPath == "" {
vendorRepoPath = "/"
}
return &Builder{
olaresRepoRoot: olaresRepoRoot,
vendorRepoPath: vendorRepoPath,
distPath: distPath,
version: version,
manifestManager: manifest.NewManager(olaresRepoRoot, distPath, cdnURL, ignoreMissingImages),
@@ -68,6 +74,9 @@ func (b *Builder) archive() (string, error) {
if err := util.ReplaceInFile(file, "#__VERSION__", b.version); err != nil {
return "", err
}
if err := util.ReplaceInFile(file, "#__REPO_PATH__", b.vendorRepoPath); err != nil {
return "", err
}
}
tarFile := filepath.Join(b.olaresRepoRoot, fmt.Sprintf("install-wizard-%s.tar.gz", versionStr))

View File

@@ -214,29 +214,6 @@ func (m *DeletePhaseFlagModule) Init() {
}
}
// DeleteCacheModule registers the DeleteCaches action as a single
// remote task executed (non-parallel, one retry) on the master hosts.
type DeleteCacheModule struct {
	common.KubeModule
	BaseDir string
}

// Init names the module and wires up its single remote task.
func (m *DeleteCacheModule) Init() {
	m.Name = "DeleteCaches"
	m.Tasks = []task.Interface{
		&task.RemoteTask{
			Name:     "DeleteCaches",
			Hosts:    m.Runtime.GetHostsByRole(common.Master),
			Action:   &DeleteCaches{BaseDir: m.BaseDir},
			Parallel: false,
			Retry:    1,
		},
	}
}
type DeleteUserDataModule struct {
common.KubeModule
}

View File

@@ -325,38 +325,6 @@ func (t *DeletePhaseFlagFile) Execute(runtime connector.Runtime) error {
return nil
}
// DeleteCaches removes every first-level subdirectory of BaseDir on the
// target host, treating each one as a disposable cache directory.
type DeleteCaches struct {
	common.KubeAction
	BaseDir string
}

// Execute enumerates the immediate child directories of t.BaseDir and
// best-effort removes each of them; individual failures are logged and
// skipped so one bad directory does not abort the rest of the cleanup.
// It always returns nil to preserve the action's best-effort contract.
func (t *DeleteCaches) Execute(runtime connector.Runtime) error {
	var cacheDirs []string
	walkErr := filepath.WalkDir(t.BaseDir, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			// Bug fix: the previous version ignored err and dereferenced
			// d, which is nil when the root itself cannot be stat'ed.
			return err
		}
		if p != t.BaseDir && d.IsDir() {
			cacheDirs = append(cacheDirs, p)
			// Collect only first-level directories; do not descend.
			return filepath.SkipDir
		}
		return nil
	})
	if walkErr != nil {
		// Log instead of returning, keeping the best-effort behavior.
		logger.Errorf("walk %s failed %v", t.BaseDir, walkErr)
	}
	for _, dir := range cacheDirs {
		if util.IsExist(dir) {
			if err := util.RemoveDir(dir); err != nil {
				logger.Errorf("remove %s failed %v", dir, err)
			}
		}
	}
	return nil
}
type DeleteTerminusUserData struct {
common.KubeAction
}

View File

@@ -199,6 +199,23 @@ func (m *InstalledModule) Init() {
}
}
// DeleteUpgradeFilesModule wires the DeleteUpgradeFiles action into the
// pipeline as a single local task.
type DeleteUpgradeFilesModule struct {
	common.KubeModule
}

// Init names the module and registers its one local task.
func (d *DeleteUpgradeFilesModule) Init() {
	d.Name = "DeleteUpgradeFiles"
	d.Tasks = []task.Interface{
		&task.LocalTask{
			Name:   "DeleteUpgradeFiles",
			Action: new(DeleteUpgradeFiles),
		},
	}
}
type DeleteWizardFilesModule struct {
common.KubeModule
}

View File

@@ -86,6 +86,12 @@ func (t *InstallOsSystem) Execute(runtime connector.Runtime) error {
// TODO: wait for the platform to be ready
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsFramework)
if err != nil {
return err
}
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
var frameworkPath = path.Join(runtime.GetInstallerDir(), "wizard", "config", "os-framework")
if err := utils.UpgradeCharts(ctx, actionConfig, settings, common.ChartNameOSFramework, frameworkPath, "", common.NamespaceOsFramework, vals, false); err != nil {
return err

View File

@@ -296,6 +296,30 @@ func (t *InstallFinished) Execute(runtime connector.Runtime) error {
return nil
}
// DeleteUpgradeFiles removes leftover upgrade artifacts — entries whose
// names start with "upgrade." — from the runtime base directory.
type DeleteUpgradeFiles struct {
	common.KubeAction
}

// Execute scans the base directory and best-effort deletes matching
// entries; individual deletion failures are only logged, and a missing
// entry is not treated as an error.
func (d *DeleteUpgradeFiles) Execute(runtime connector.Runtime) error {
	baseDir := runtime.GetBaseDir()
	entries, err := os.ReadDir(baseDir)
	if err != nil {
		return errors.Wrapf(err, "failed to read directory %s", baseDir)
	}
	for _, entry := range entries {
		if !strings.HasPrefix(entry.Name(), "upgrade.") {
			continue
		}
		target := path.Join(baseDir, entry.Name())
		if removeErr := os.RemoveAll(target); removeErr != nil && !os.IsNotExist(removeErr) {
			logger.Warnf("failed to delete %s: %v", target, removeErr)
		}
	}
	return nil
}
type DeleteWizardFiles struct {
common.KubeAction
}
@@ -453,14 +477,21 @@ func (a *DeletePodsUsingHostIP) Execute(runtime connector.Runtime) error {
if err != nil {
return errors.Wrap(err, "failed to get pods using host IP")
}
a.PipelineCache.Set(common.CacheCountPodsUsingHostIP, len(targetPods))
var waitRecreationPodsCount int
for _, pod := range targetPods {
logger.Infof("restarting pod %s/%s that's using host IP", pod.Namespace, pod.Name)
err = kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
if err != nil && !kerrors.IsNotFound(err) {
return errors.Wrap(err, "failed to delete pod")
}
// pods not created by any owner resource
// may not be recreated immediately and should not be waited
if len(pod.OwnerReferences) > 0 {
waitRecreationPodsCount++
}
}
a.PipelineCache.Set(common.CacheCountPodsWaitForRecreation, waitRecreationPodsCount)
// try our best to wait for the pods to be actually deleted
// to avoid the next module getting the pods with a still running phase
@@ -479,7 +510,7 @@ type WaitForPodsUsingHostIPRecreate struct {
}
func (a *WaitForPodsUsingHostIPRecreate) Execute(runtime connector.Runtime) error {
count, ok := a.PipelineCache.GetMustInt(common.CacheCountPodsUsingHostIP)
count, ok := a.PipelineCache.GetMustInt(common.CacheCountPodsWaitForRecreation)
if !ok {
return errors.New("failed to get the count of pods using host IP")
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path"
"strings"
"time"
"github.com/beclab/Olares/cli/pkg/common"
@@ -197,7 +198,7 @@ func (u *UpgradeSystemComponents) Execute(runtime connector.Runtime) error {
return err
}
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsPlatform)
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsFramework)
if err != nil {
return err
}
@@ -221,3 +222,67 @@ func (u *UpgradeSystemComponents) Execute(runtime connector.Runtime) error {
}
return nil
}
// UpdateSysctlReservedPorts ensures that the kernel parameter
// net.ipv4.ip_local_reserved_ports in /etc/sysctl.conf is set to the
// port ranges required by Olares, rewriting the file and reloading
// sysctl only when the value is missing or different.
type UpdateSysctlReservedPorts struct {
	common.KubeAction
}

// Execute reads /etc/sysctl.conf, reconciles the reserved-ports key via
// ensureSysctlKeyValue, and — when a change was needed — writes the file
// back and runs "sysctl -p" through the runtime's runner.
func (u *UpdateSysctlReservedPorts) Execute(runtime connector.Runtime) error {
	const sysctlFile = "/etc/sysctl.conf"
	const reservedPortsKey = "net.ipv4.ip_local_reserved_ports"
	const expectedValue = "30000-32767,46800-50000"

	content, err := os.ReadFile(sysctlFile)
	if err != nil {
		return fmt.Errorf("failed to read sysctl.conf: %v", err)
	}

	updatedLines, needUpdate := ensureSysctlKeyValue(strings.Split(string(content), "\n"), reservedPortsKey, expectedValue)
	if !needUpdate {
		logger.Debugf("%s already has the expected value: %s", reservedPortsKey, expectedValue)
		return nil
	}
	if err := os.WriteFile(sysctlFile, []byte(strings.Join(updatedLines, "\n")), 0644); err != nil {
		return fmt.Errorf("failed to write updated sysctl.conf: %v", err)
	}
	if _, err := runtime.GetRunner().SudoCmd("sysctl -p", false, false); err != nil {
		return fmt.Errorf("failed to reload sysctl: %v", err)
	}
	logger.Infof("updated and reloaded sysctl configuration")
	return nil
}

// ensureSysctlKeyValue returns a copy of lines in which "key=value" is
// guaranteed to appear, plus whether anything had to change.
// Bug fix vs. the previous inline logic: lines are matched on the exact
// key (left of "="), not a bare prefix, so unrelated keys that merely
// share the prefix are left untouched, and a malformed line without "="
// no longer suppresses adding the correct entry.
func ensureSysctlKeyValue(lines []string, key, value string) ([]string, bool) {
	var found, changed bool
	updated := make([]string, 0, len(lines)+1)
	for _, line := range lines {
		k, v, hasEq := strings.Cut(strings.TrimSpace(line), "=")
		if hasEq && strings.TrimSpace(k) == key {
			found = true
			if current := strings.TrimSpace(v); current != value {
				logger.Infof("updating %s from %s to %s", key, current, value)
				updated = append(updated, fmt.Sprintf("%s=%s", key, value))
				changed = true
				continue
			}
		}
		updated = append(updated, line)
	}
	if !found {
		logger.Infof("key %s not found in sysctl.conf, adding it", key)
		updated = append(updated, fmt.Sprintf("%s=%s", key, value))
		changed = true
	}
	return updated, changed
}

View File

@@ -18,7 +18,16 @@ type UpgradeModule struct {
}
var (
preTasks []*upgradeTask
preTasks = []*upgradeTask{
{
Task: &task.LocalTask{
Name: "UpdateSysctlReservedPorts",
Action: new(UpdateSysctlReservedPorts),
},
Current: &explicitVersionMatcher{max: semver.New(1, 12, 0, "20250701", "")},
Target: anyVersion,
},
}
coreTasks = []*upgradeTask{
{

View File

@@ -5,5 +5,5 @@ output:
-
id: olaresd
name: olaresd-v#__VERSION__.tar.gz
amd64: https://dc3p1870nn3cj.cloudfront.net/olaresd-v#__VERSION__-linux-amd64.tar.gz
arm64: https://dc3p1870nn3cj.cloudfront.net/olaresd-v#__VERSION__-linux-arm64.tar.gz
amd64: https://dc3p1870nn3cj.cloudfront.net#__REPO_PATH__olaresd-v#__VERSION__-linux-amd64.tar.gz
arm64: https://dc3p1870nn3cj.cloudfront.net#__REPO_PATH__olaresd-v#__VERSION__-linux-arm64.tar.gz

View File

@@ -1,4 +1,6 @@
current_dir := $(dir $(abspath $(firstword $(MAKEFILE_LIST))))
.PHONY: all tidy fmt vet build
all: tidy build
@@ -17,3 +19,11 @@ build: fmt vet ;$(info $(M)...Begin to build terminusd.) @
build-linux: fmt vet ;$(info $(M)...Begin to build terminusd (linux version).) @
CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/olaresd cmd/terminusd/main.go
build-linux-in-docker:
docker run -it --platform linux/amd64 --rm \
-v $(current_dir):/olaresd \
-w /olaresd \
-e DEBIAN_FRONTEND=noninteractive \
golang:1.24 \
sh -c "apt-get -y update; apt-get -y install libudev-dev; make build-linux"

170
daemon/README.md Normal file
View File

@@ -0,0 +1,170 @@
# Olares System Daemon (`olaresd`)
`olaresd` is the foundational process that boots on every Olares node. It runs as a `systemd` service on port `18088`, exposing a secure REST API for hardware abstraction, network orchestration, storage management, and turnkey cluster operations—all before Kubernetes starts.
Olaresd is installed as a systemd service in `/etc/systemd/system/olaresd.service`.
## Key features
- **System monitoring**: Continuous health checks of cluster and node status.
- **Cluster lifecycle management**: Automated install, upgrade, IP-switching, restart, and maintenance operations.
- **Hardware Abstraction**: USB auto-mounting, storage provisioning, and management.
- **Network Management**: mDNS service discovery, WiFi onboarding, and network interface control.
## REST API reference
The daemon provides an authenticated REST API (using signature-based auth):
**Base URL**: `http://<node-ip>:18088`
### System commands `/command/`
**Lifecycle operations**
| Method | Endpoint | Description |
|--------|-----------------------------|------------------------------|
| POST | `/command/install` | Install Olares |
| POST | `/command/uninstall` | Uninstall Olares |
| POST | `/command/upgrade` | Upgrade Olares |
| DELETE | `/command/upgrade` | Cancel upgrade |
| POST | `/command/reboot` | Reboot node |
| POST | `/command/shutdown` | Shutdown node |
**Network configuration**
| Method | Endpoint | Description |
|--------|-----------------------------|------------------------------|
| POST | `/command/connect-wifi` | Connect to WiFi |
| POST | `/command/change-host` | Change Olares IP binding |
**Storage management**
| Method | Endpoint | Description |
|--------|-----------------------------------|------------------------------------|
| POST | `/command/mount-samba` | Mount SMB shares |
| POST | `/command/v2/mount-samba` | Enhanced SMB mounting |
| POST | `/command/umount-samba` | Unmount SMB shares |
| POST | `/command/umount-samba-incluster` | Cluster-wide SMB unmount |
| POST | `/command/umount-usb` | Unmount USB device |
| POST | `/command/umount-usb-incluster` | Cluster-wide USB unmount |
**System Maintenance**
| Method | Endpoint | Description |
|--------|-----------------------------|-------------------------------------|
| POST | `/command/collect-logs` | Collect system logs for diagnostics |
---
### System information (`/system/`)
**System status**
| Method | Endpoint | Description |
|--------|--------------------------|-----------------------------|
| GET | `/system/status` | Get full system status |
| GET | `/system/ifs` | List network interfaces |
| GET | `/system/hosts-file` | View `/etc/hosts` |
| POST | `/system/hosts-file` | Update `/etc/hosts` |
**Mount information**
| Method | Endpoint | Description |
|--------|---------------------------------|--------------------------------|
| GET | `/system/mounted-usb` | Mounted USB devices |
| GET | `/system/mounted-hdd` | Mounted hard drives |
| GET | `/system/mounted-smb` | Mounted SMB shares |
| GET | `/system/mounted-path` | All mount points |
**Cluster-wide mounts**
| Method | Endpoint | Description |
|--------|--------------------------------------|----------------------------------|
| GET | `/system/mounted-usb-incluster` | USB mounts in cluster |
| GET | `/system/mounted-hdd-incluster` | HDD mounts in cluster |
| GET | `/system/mounted-smb-incluster` | SMB mounts in cluster |
| GET | `/system/mounted-path-incluster` | All cluster mounts |
---
### Container management (`/containerd/`)
**Registry Management**
| Method | Endpoint | Description |
|--------|-------------------------------------------|-------------------------------------|
| GET | `/containerd/registries` | List registries |
| GET | `/containerd/registry/mirrors/` | List registry mirrors |
| GET | `/containerd/registry/mirrors/:registry` | Get specific registry mirror |
| PUT | `/containerd/registry/mirrors/:registry` | Update registry mirror |
| DELETE | `/containerd/registry/mirrors/:registry` | Delete registry mirror |
**Image Management**
| Method | Endpoint | Description |
|--------|----------------------------------|--------------------------------|
| GET | `/containerd/images/` | List container images |
| DELETE | `/containerd/images/:image` | Delete specific image |
| POST | `/containerd/images/prune` | Remove unused images |
## Build from source
### Prerequisites
* Go 1.24+
* GoReleaser (Optional, for creating release artifacts)
### Steps
1. **Navigate to the daemon directory:**
```bash
cd daemon
```
2. **Build for your host OS/architecture:**
```bash
go build -o olaresd ./cmd/olaresd/main.go
```
3. **Cross-compile for another target (e.g., Linux AMD64):**
```bash
GOOS=linux GOARCH=amd64 go build -o olaresd ./cmd/olaresd/main.go
```
4. **Produce release artifacts (optional):**
```bash
goreleaser release --snapshot --clean
```
## Extend `olaresd`
To add a new command API:
1. **Define command**: Add a new command struct in `pkg/commands/`.
2. **Implement handler**: Create the corresponding HTTP handler logic in `internal/apiserver/handlers/`.
3. **Register route**: Register the new API route in `internal/apiserver/server.go`.
4. **Update state**: If the command modifies the cluster's state, ensure you update the logic in `pkg/cluster/state/`.
5. **Validate**: Run `go vet ./... && go test ./...` to check for issues and ensure all tests pass before opening a pull request.
### Test a custom build
1. Copy the binary to your Olares node.
2. On the node, replace the existing binary:
```bash
# Move the new binary into place
sudo cp -f /tmp/olaresd /usr/local/bin/
3. Restart the daemon to apply changes:
```
sudo systemctl restart olaresd
```

View File

@@ -14,6 +14,7 @@ import (
"github.com/beclab/Olares/daemon/internel/ble"
"github.com/beclab/Olares/daemon/internel/mdns"
"github.com/beclab/Olares/daemon/internel/watcher"
"github.com/beclab/Olares/daemon/internel/watcher/cert"
"github.com/beclab/Olares/daemon/internel/watcher/system"
"github.com/beclab/Olares/daemon/internel/watcher/upgrade"
"github.com/beclab/Olares/daemon/internel/watcher/usb"
@@ -96,6 +97,7 @@ func main() {
// usb.NewUsbWatcher(),
usb.NewUmountWatcher(),
upgrade.NewUpgradeWatcher(),
cert.NewCertWatcher(),
}, func() {
if s != nil {
if err := s.Restart(); err != nil {

View File

@@ -6,6 +6,7 @@ toolchain go1.24.4
replace (
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.2.33
bytetrade.io/web3os/backups-sdk => github.com/Above-Os/backups-sdk v0.1.17
bytetrade.io/web3os/bfl => github.com/beclab/bfl v0.3.36
k8s.io/api => k8s.io/api v0.31.0
k8s.io/apimachinery => k8s.io/apimachinery v0.31.0
@@ -79,7 +80,6 @@ require (
github.com/containerd/platforms v0.2.1 // indirect
github.com/containerd/ttrpc v1.2.7 // indirect
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/distribution/reference v0.6.0 // indirect
@@ -123,7 +123,7 @@ require (
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/mountinfo v0.7.1 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/signal v0.7.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect
@@ -134,9 +134,9 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/runc v1.1.13 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
github.com/opencontainers/runc v1.3.0 // indirect
github.com/opencontainers/runtime-spec v1.2.1 // indirect
github.com/opencontainers/selinux v1.11.1 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect

View File

@@ -63,8 +63,6 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq
github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -120,7 +118,6 @@ github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofiber/fiber/v2 v2.52.5 h1:tWoP1MJQjGEe4GB5TUGOi7P2E0ZMMRx5ZTG4rT+yGMo=
@@ -233,8 +230,8 @@ github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
@@ -273,12 +270,12 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstuUhmRs=
github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
@@ -440,7 +437,6 @@ golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@@ -80,22 +80,32 @@ func (h *handlers) GetNetIfs(ctx *fiber.Ctx) error {
}
}
if test == "true" {
r.InternetConnected = ptr.To(utils.CheckInterfaceIPv4Connectivity(ctx.Context(), i.Iface.Name))
devices, err := utils.GetAllDevice(ctx.Context())
if err != nil {
klog.Error("get all devices error, ", err)
return h.ErrJSON(ctx, http.StatusServiceUnavailable, err.Error())
}
devices, err := utils.GetAllDevice(ctx.Context())
if err != nil {
klog.Error("get all devices error, ", err)
return h.ErrJSON(ctx, http.StatusServiceUnavailable, err.Error())
}
if d, ok := devices[r.Iface]; ok {
r.Ipv4Gateway = &d.Ipv4Gateway
r.Ipv6Gateway = &d.Ipv6Gateway
r.Ipv4DNS = &d.Ipv4DNS
r.Ipv6DNS = &d.Ipv6DNS
r.Ipv6Address = &d.Ipv6Address
r.Ipv4Mask = &d.Ipv4Mask
r.Method = &d.Method
if d, ok := devices[r.Iface]; ok {
r.Ipv4Gateway = &d.Ipv4Gateway
r.Ipv6Gateway = &d.Ipv6Gateway
r.Ipv4DNS = &d.Ipv4DNS
r.Ipv6DNS = &d.Ipv6DNS
r.Ipv6Address = &d.Ipv6Address
r.Ipv4Mask = &d.Ipv4Mask
r.Method = &d.Method
}
if rx, tx, err := utils.GetInterfaceTraffic(r.Iface); err == nil {
r.RxRate = ptr.To(rx)
r.TxRate = ptr.To(tx)
} else {
klog.Error("get interface rx/tx rate error, ", err)
}
if test == "true" {
if r.IP != "" {
r.InternetConnected = ptr.To(utils.CheckInterfaceIPv4Connectivity(ctx.Context(), i.Iface.Name))
}
if r.Ipv6Address != nil && *r.Ipv6Address != "" {
@@ -104,12 +114,6 @@ func (h *handlers) GetNetIfs(ctx *fiber.Ctx) error {
r.Ipv6Connectivity = &connected
}
if rx, tx, err := utils.GetInterfaceTraffic(r.Iface); err == nil {
r.RxRate = ptr.To(rx)
r.TxRate = ptr.To(tx)
} else {
klog.Error("get interface rx/tx rate error, ", err)
}
}
res = append(res, r)

View File

@@ -8,12 +8,14 @@ import (
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/beclab/Olares/daemon/pkg/commands"
"github.com/beclab/Olares/daemon/pkg/commands/upgrade"
"github.com/gofiber/fiber/v2"
"k8s.io/klog/v2"
)
type UpgradeReq struct {
Version string `json:"version"`
Version string `json:"version"`
DownloadOnly bool `json:"downloadOnly,omitempty"` // false means download-and-upgrade
}
func (r *UpgradeReq) Check() error {
@@ -43,10 +45,18 @@ func (h *handlers) RequestOlaresUpgrade(ctx *fiber.Ctx, cmd commands.Interface)
return h.ErrJSON(ctx, http.StatusBadRequest, err.Error())
}
if _, err := cmd.Execute(ctx.Context(), req.Version); err != nil {
upgradeReq := upgrade.UpgradeRequest{
Version: req.Version,
DownloadOnly: req.DownloadOnly,
}
if _, err := cmd.Execute(ctx.Context(), upgradeReq); err != nil {
return h.ErrJSON(ctx, http.StatusBadRequest, err.Error())
}
if req.DownloadOnly {
return h.OkJSON(ctx, "successfully created download target")
}
return h.OkJSON(ctx, "successfully created upgrade target")
}
@@ -55,5 +65,5 @@ func (h *handlers) CancelOlaresUpgrade(ctx *fiber.Ctx, cmd commands.Interface) e
return h.ErrJSON(ctx, http.StatusBadRequest, err.Error())
}
return h.OkJSON(ctx, "successfully removed upgrade target")
return h.OkJSON(ctx, "successfully cancelled upgrade/download")
}

View File

@@ -50,10 +50,10 @@ func (s *server) Start() error {
cmd.Post("/upgrade", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.RequestOlaresUpgrade, upgrade.NewCreateTarget))))
s.handlers.RunCommand(s.handlers.RequestOlaresUpgrade, upgrade.NewCreateUpgradeTarget))))
cmd.Delete("/upgrade", s.handlers.RequireSignature(
s.handlers.RunCommand(s.handlers.CancelOlaresUpgrade, upgrade.NewRemoveTarget)))
s.handlers.RunCommand(s.handlers.CancelOlaresUpgrade, upgrade.NewRemoveUpgradeTarget)))
cmd.Post("/reboot", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(

View File

@@ -43,6 +43,7 @@ func (s *server) Close() {
if s.server != nil {
klog.Info("mDNS server shutdown ")
s.server.Shutdown()
s.registeredIP = "" // clear the registered IP
}
}
@@ -88,11 +89,11 @@ func (s *server) Restart() error {
}
if s.registeredIP != ip {
s.registeredIP = ip
if s.server != nil {
s.Close()
}
s.registeredIP = ip
instanceName := s.name
if instanceName == "" {
instanceName = hostname

View File

@@ -0,0 +1,141 @@
package cert
import (
"context"
"fmt"
"time"
"github.com/beclab/Olares/daemon/internel/watcher"
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/beclab/Olares/daemon/pkg/utils"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
kubeErr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
)
// Compile-time check that userCertWatcher satisfies watcher.Watcher.
var _ watcher.Watcher = &userCertWatcher{}

// userCertWatcher periodically inspects every user's TLS certificate
// expiry and triggers a re-download Job when one is close to expiring.
type userCertWatcher struct {
}

// NewCertWatcher returns a stateless userCertWatcher.
func NewCertWatcher() *userCertWatcher {
	return &userCertWatcher{}
}
// Watch implements watcher.Watcher.
func (u *userCertWatcher) Watch(ctx context.Context) {
if state.CurrentState.TerminusState != state.TerminusRunning {
return
}
kubeClient, err := utils.GetKubeClient()
if err != nil {
klog.Error("failed to get kube client, ", err)
return
}
dynamicClient, err := utils.GetDynamicClient()
if err != nil {
klog.Error("failed to get dynamic client, ", err)
return
}
users, err := utils.ListUsers(ctx, dynamicClient)
if err != nil {
klog.Error("failed to list users, ", err)
return
}
for _, user := range users {
namespace := fmt.Sprintf("user-space-%s", user.GetName())
config, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(ctx, "zone-ssl-config", metav1.GetOptions{})
if err != nil {
klog.Error("failed to get user config map, ", err, ", namespace: ", namespace)
continue
}
if expired, ok := config.Data["expired_at"]; ok {
expiredTime, err := time.Parse("2006-01-02T15:04:05Z", expired)
if err != nil {
klog.Error("failed to parse expired_at, ", err)
continue
}
// Check if the certificate will expire within 10 days
if expiredTime.Before(time.Now().Add(10 * 24 * time.Hour)) {
klog.Info("user cert expired, ", user.GetName())
err = createOrUpdateJob(ctx, kubeClient, namespace)
if err != nil {
klog.Error("failed to create or update job for user cert, ", err, ", namespace: ", namespace)
} else {
klog.Info("job created for user cert download, ", user.GetName(), ", namespace: ", namespace)
}
}
}
}
}
// createOrUpdateJob ensures a fresh "download-user-cert" Job runs in the
// given user namespace:
//   - if a previous Job is still running (or has recorded failures and
//     may still be retrying), nothing is done;
//   - if a previous Job completed, it is deleted first;
//   - then a new Job is created from the jobDownloadUserCert template,
//     with its request URL rewritten to target this namespace's BFL
//     service (the template's URL hard-codes a fixed namespace).
func createOrUpdateJob(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error {
	currentJob, err := kubeClient.BatchV1().Jobs(namespace).Get(ctx, jobDownloadUserCert.Name, metav1.GetOptions{})
	if err != nil && !kubeErr.IsNotFound(err) {
		return fmt.Errorf("failed to get job: %w", err)
	}
	if err == nil {
		// A job exists: leave it alone unless it completed successfully.
		if currentJob.Status.Succeeded == 0 || currentJob.Status.Failed > 0 {
			klog.Info("job is still running, skip creating a new one")
			return nil
		}
		// Completed job: remove it so a new run can be submitted.
		klog.Info("delete existing job: ", currentJob.Name)
		if err := kubeClient.BatchV1().Jobs(namespace).Delete(ctx, currentJob.Name, metav1.DeleteOptions{}); err != nil {
			return fmt.Errorf("failed to delete job: %w", err)
		}
	}

	job := jobDownloadUserCert.DeepCopy()
	job.Namespace = namespace
	// Bug fix: point the download request at the BFL service of THIS
	// user's namespace instead of the fixed one baked into the template.
	if containers := job.Spec.Template.Spec.Containers; len(containers) > 0 && len(containers[0].Command) > 0 {
		cmd := containers[0].Command
		cmd[len(cmd)-1] = fmt.Sprintf("http://bfl.%s/bfl/backend/v1/re-download-cert", namespace)
	}

	if _, err := kubeClient.BatchV1().Jobs(job.Namespace).Create(ctx, job, metav1.CreateOptions{}); err != nil {
		return fmt.Errorf("failed to create job: %w", err)
	}
	klog.Info("Job created: ", job.Name)
	return nil
}
// jobDownloadUserCert is the template Job that triggers a re-download of a
// user's zone SSL certificate by calling the bfl backend. createOrUpdateJob
// deep-copies it and fills in the target user's namespace.
var jobDownloadUserCert = batchv1.Job{
	ObjectMeta: metav1.ObjectMeta{
		Name: "download-user-cert",
	},
	Spec: batchv1.JobSpec{
		BackoffLimit: ptr.To[int32](5),
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				RestartPolicy: corev1.RestartPolicyOnFailure,
				Containers: []corev1.Container{
					{
						Name:  "download-user-cert",
						Image: "busybox:1.28",
						// BUGFIX: "-qSO -" was passed as a single argv element;
						// wget then treats the remainder after -O (" -") as the
						// output file name instead of stdout. Split the flag
						// cluster and its "-" argument into separate elements.
						// BUGFIX: the URL was hard-coded to the developer
						// namespace "user-space-pengpeng9"; the pod runs inside
						// the target user's namespace, so the plain in-namespace
						// service name "bfl" resolves correctly for every user.
						Command: []string{
							"wget",
							"--header",
							"X-FROM-CRONJOB: true",
							"-qSO",
							"-",
							"http://bfl/bfl/backend/v1/re-download-cert",
						},
					},
				},
			},
		},
	},
}

View File

@@ -3,10 +3,13 @@ package upgrade
import (
"context"
"fmt"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/daemon/pkg/utils"
"math"
"os"
"path/filepath"
"sync"
"time"
"github.com/beclab/Olares/daemon/internel/watcher"
"github.com/beclab/Olares/daemon/pkg/cluster/state"
@@ -19,6 +22,9 @@ type upgradeWatcher struct {
watcher.Watcher
sync.Mutex
upgrading bool
// Internal retry state
retryCount int
nextRetryTime *time.Time
}
func NewUpgradeWatcher() watcher.Watcher {
@@ -27,19 +33,72 @@ func NewUpgradeWatcher() watcher.Watcher {
}
func (w *upgradeWatcher) Watch(ctx context.Context) {
switch state.CurrentState.TerminusState {
// indicates an upgrade target exists
case state.Upgrading:
// if the upgrade process is running, just wait for it to finish
if !w.isUpgrading() {
go func() {
w.startUpgrading()
defer w.stopUpgrading()
if err := doUpgrade(ctx); err != nil {
klog.Errorf("upgrading error: %v", err)
}
}()
targetVersion, err := state.GetOlaresUpgradeTarget()
if err != nil {
klog.Errorf("failed to check upgrade target: %v", err)
return
}
if targetVersion == nil {
w.resetRetryState()
state.TerminusStateMu.Lock()
state.CurrentState.UpgradingState = ""
state.CurrentState.UpgradingTarget = ""
state.CurrentState.UpgradingRetryNum = 0
state.CurrentState.UpgradingNextRetryAt = nil
state.CurrentState.UpgradingStep = ""
state.CurrentState.UpgradingProgressNum = 0
state.CurrentState.UpgradingProgress = ""
state.CurrentState.UpgradingError = ""
state.CurrentState.UpgradingDownloadState = ""
state.CurrentState.UpgradingDownloadStep = ""
state.CurrentState.UpgradingDownloadProgressNum = 0
state.CurrentState.UpgradingDownloadProgress = ""
state.CurrentState.UpgradingDownloadError = ""
state.TerminusStateMu.Unlock()
return
}
dynamicClient, err := utils.GetDynamicClient()
if err != nil {
return
}
currentVersionStr, err := utils.GetTerminusVersion(ctx, dynamicClient)
if err != nil {
klog.Error("failed to get current version, skip upgrading check: ", err)
return
}
if currentVersionStr == nil {
klog.Error("current version is nil, skip upgrading check")
return
}
currentVersion, err := semver.NewVersion(*currentVersionStr)
if err != nil || currentVersion.LessThan(targetVersion) {
state.CurrentState.UpgradingTarget = targetVersion.Original()
} else {
err = upgrade.RemoveUpgradeFiles()
if err != nil {
klog.Error("failed to remove upgrade files: ", err)
}
return
}
if !w.isUpgrading() {
if !w.isTimeToRetry() {
return
}
go func() {
w.startUpgrading()
defer w.stopUpgrading()
if err := w.doUpgradeWithRetry(ctx); err != nil {
klog.Errorf("upgrading error: %v", err)
}
}()
}
}
@@ -61,52 +120,195 @@ func (w *upgradeWatcher) stopUpgrading() {
w.upgrading = false
}
// isTimeToRetry reports whether the next scheduled retry time has been
// reached, or whether no retry is scheduled at all.
func (w *upgradeWatcher) isTimeToRetry() bool {
	w.Lock()
	defer w.Unlock()
	next := w.nextRetryTime
	if next == nil {
		return true
	}
	if remaining := time.Until(*next); remaining > 0 {
		klog.V(2).Infof("upgrade retry scheduled for %v (in %v)",
			*next,
			remaining)
		return false
	}
	return true
}
// resetRetryState clears the retry counter and any scheduled retry time.
func (w *upgradeWatcher) resetRetryState() {
	w.Lock()
	w.retryCount = 0
	w.nextRetryTime = nil
	w.Unlock()
}
// incrementRetry bumps the retry counter and schedules the next retry time
// using the shared exponential-backoff policy.
func (w *upgradeWatcher) incrementRetry() {
	w.Lock()
	w.retryCount++
	next := state.CalculateNextRetryTime(w.retryCount)
	w.nextRetryTime = &next
	w.Unlock()
}
// getRetryCount returns the current retry counter under the lock.
func (w *upgradeWatcher) getRetryCount() int {
	w.Lock()
	count := w.retryCount
	w.Unlock()
	return count
}
// doUpgradeWithRetry runs a single upgrade attempt and, on failure, records
// the retry bookkeeping (count and next scheduled attempt) in the global
// state and clears the attempt's log files so a stale log cannot wedge the
// next retry.
func (w *upgradeWatcher) doUpgradeWithRetry(ctx context.Context) error {
	err := doUpgrade(ctx)
	if err != nil {
		w.incrementRetry()
		// BUGFIX: snapshot retryCount and nextRetryTime under the lock in one
		// shot; the original read w.nextRetryTime directly without holding the
		// mutex that guards it (data race with resetRetryState/incrementRetry).
		w.Lock()
		retryCount := w.retryCount
		nextRetryTime := w.nextRetryTime
		w.Unlock()
		state.CurrentState.UpgradingRetryNum = retryCount
		state.CurrentState.UpgradingNextRetryAt = nextRetryTime
		klog.Errorf("upgrade attempt %d failed: %v. Next retry scheduled for %v",
			retryCount, err, *nextRetryTime)
		// clear this attempt's logs so the progress parsers of the next
		// attempt start from a clean slate
		targetVersionDir := filepath.Join(commands.TERMINUS_BASE_DIR, "versions", "v"+state.CurrentState.UpgradingTarget)
		prepareLogFile := filepath.Join(targetVersionDir, "install.log")
		upgradeLogFile := filepath.Join(targetVersionDir, "upgrade.log")
		for _, logFile := range []string{prepareLogFile, upgradeLogFile} {
			if err := os.Remove(logFile); err != nil && !os.IsNotExist(err) {
				klog.Errorf("failed to clear log file %s: %v", logFile, err)
			}
		}
	}
	return err
}
// upgradePhase describes one step of the download/upgrade pipeline and the
// slice of the overall progress bar it accounts for.
type upgradePhase struct {
	// newCMD constructs the command that executes this phase.
	newCMD func() commands.Interface
	// progressOffset is the overall progress value (in percent) at which
	// this phase starts.
	progressOffset int
	// progressSpan is how many percentage points of the overall progress
	// this phase covers.
	progressSpan int
}
// todo: add a phase to upgrade olares-cli after the version of olares-cli and olares has been unified
// downloadPhases are executed in order by doDownloadPhases; each entry maps
// its phase's 0-100 progress onto [progressOffset, progressOffset+progressSpan]
// of the overall download progress.
// NOTE(review): the offsets/spans end at 30+40 = 70, not 100, so these
// phases alone can never drive the download progress to 100%; also the
// wizard phase spans 20 points (10..30) but the next offset is 30 — confirm
// whether the last span should be 70.
var downloadPhases = []upgradePhase{
	{upgrade.NewDownloadCLI, 0, 10},
	{upgrade.NewDownloadWizard, 10, 20},
	{upgrade.NewDownloadComponent, 30, 40},
}
var upgradePhases = []upgradePhase{
{upgrade.NewUpgradeCli, 0, 5},
{upgrade.NewDownloadWizard, 5, 10},
{upgrade.NewVersionCompatibilityCheck, 15, 0},
{upgrade.NewHealthCheck, 15, 0},
{upgrade.NewDownloadComponent, 15, 30},
{upgrade.NewPrepareImages, 45, 30},
{upgrade.NewPrepareOlaresd, 75, 5},
{upgrade.NewUpgrade, 80, 15},
{upgrade.NewVersionCompatibilityCheck, 0, 5},
{upgrade.NewHealthCheck, 5, 5},
{upgrade.NewInstallCLI, 10, 10},
{upgrade.NewImportImages, 20, 30},
{upgrade.NewInstallOlaresd, 50, 10},
{upgrade.NewUpgrade, 60, 35},
{upgrade.NewRemoveTarget, 95, 5},
}
// doUpgrade runs the full upgrade flow: the download phases first (unless a
// prior run already completed them), then — unless the user requested a
// download-only run — the actual upgrade phases.
func doUpgrade(ctx context.Context) (err error) {
	downloaded, err := state.IsUpgradeDownloadCompleted()
	if err != nil {
		return fmt.Errorf("failed to check download status: %v", err)
	}
	if downloaded {
		// a previous run already fetched everything; just publish 100%
		klog.Info("download already completed, skipping download phases")
		state.CurrentState.UpgradingDownloadState = state.Completed
		state.CurrentState.UpgradingDownloadProgress = "100%"
		state.CurrentState.UpgradingDownloadProgressNum = 100
	} else if err := doDownloadPhases(ctx); err != nil {
		return err
	}
	downloadOnly, err := state.IsUpgradeDownloadOnly()
	if err != nil {
		return fmt.Errorf("failed to check download-only status: %v", err)
	}
	if !downloadOnly {
		return doUpgradePhases(ctx)
	}
	// download-only mode: park until the user removes the marker file
	state.CurrentState.UpgradingState = "WaitingForUserConfirm"
	klog.Info("download completed, waiting for user request to remove upgrade.downloadonly file to proceed with upgrade")
	return nil
}
// doDownloadPhases executes the download phases sequentially, streaming each
// phase's progress into the global daemon state.
// On success it persists the upgrade.downloaded marker file so a restarted
// daemon can skip the download; on failure (including context cancellation)
// the download state is marked failed and no marker is written.
func doDownloadPhases(ctx context.Context) (err error) {
	defer func() {
		if err != nil {
			state.CurrentState.UpgradingDownloadState = state.Failed
			state.CurrentState.UpgradingDownloadError = err.Error()
			klog.Errorf("download phases failed: %v", err)
		} else {
			state.CurrentState.UpgradingDownloadState = state.Completed
			state.CurrentState.UpgradingDownloadError = ""
			if err := createUpgradeDownloadedFile(); err != nil {
				klog.Errorf("failed to create upgrade.downloaded file: %v", err)
			}
			klog.Info("download phases completed successfully")
		}
	}()
	state.CurrentState.UpgradingDownloadState = state.InProgress
	state.CurrentState.UpgradingDownloadError = ""
	for _, phase := range downloadPhases {
		phaseCMD := phase.newCMD()
		state.CurrentState.UpgradingDownloadStep = string(phaseCMD.OperationName())
		res, err := phaseCMD.Execute(ctx, state.CurrentState.UpgradingTarget)
		if err != nil {
			return fmt.Errorf("error: download phase %s: %v", phaseCMD.OperationName(), err)
		}
		executionRes, ok := res.(upgrade.ExecutionRes)
		if !ok {
			return fmt.Errorf("unexpected result type for download phase %s", phaseCMD.OperationName())
		}
		// a phase that finished synchronously has no progress to track
		if executionRes.Finished() {
			continue
		}
		var phaseProgress int
		for phaseProgress < 100 {
			select {
			case <-ctx.Done():
				// BUGFIX: returning nil here made the deferred success path run,
				// falsely marking the download Completed and writing the
				// upgrade.downloaded marker even though a phase was interrupted.
				return ctx.Err()
			case p, ok := <-executionRes.Progress():
				// the command completed and the progress channel is closed
				if !ok {
					if phaseProgress != commands.ProgressNumFinished {
						return fmt.Errorf("error: download phase %s: command execution did not succeed", phaseCMD.OperationName())
					}
				} else if p > phaseProgress {
					// BUGFIX: update before logging; the original logged the
					// stale value of phaseProgress
					phaseProgress = p
					klog.Infof("refreshing download phase %s, progress: %d", phaseCMD.OperationName(), phaseProgress)
				}
			}
			refreshDownloadProgressFromPhase(phase, phaseProgress)
		}
	}
	return nil
}
func doUpgradePhases(ctx context.Context) (err error) {
defer func() {
if err != nil {
state.CurrentState.UpgradingState = state.Failed
state.CurrentState.UpgradingError = err.Error()
// clear logs after every failed attempt
// in case any under layer change that bypassed olaresd, e.g., manual removal of files
// is causing the upgrade retry to stuck forever
targetVersionDir := filepath.Join(commands.TERMINUS_BASE_DIR, "versions", "v"+state.CurrentState.UpgradingTarget)
prepareLogFile := filepath.Join(targetVersionDir, "install.log")
upgradeLogFile := filepath.Join(targetVersionDir, "upgrade.log")
for _, logFile := range []string{prepareLogFile, upgradeLogFile} {
if err := os.Remove(logFile); err != nil && !os.IsNotExist(err) {
klog.Errorf("failed to clear log file %s of current upgrade attempt (%d): %v", logFile, state.CurrentState.UpgradingRetryNum, err)
}
}
}
}()
state.CurrentState.UpgradingState = state.InProgress
state.CurrentState.UpgradingError = ""
state.CurrentState.UpgradingRetryNum += 1
state.StateTrigger <- struct{}{}
for _, phase := range upgradePhases {
phaseCMD := phase.newCMD()
state.CurrentState.UpgradingStep = string(phaseCMD.OperationName())
res, err := phaseCMD.Execute(ctx, state.CurrentState.UpgradingTarget)
if err != nil {
return fmt.Errorf("error: upgrade phase %s: %v", phaseCMD.OperationName(), err)
@@ -116,9 +318,6 @@ func doUpgrade(ctx context.Context) (err error) {
return fmt.Errorf("unexpected result type for upgrade phase %s", phaseCMD.OperationName())
}
if executionRes.Finished() {
// for now, do not update progress here
// as it may revert back the progress
// todo: if the retry num will be presented by the frontend to user, maybe we can update progress here
continue
}
var phaseProgress int
@@ -127,7 +326,6 @@ func doUpgrade(ctx context.Context) (err error) {
case <-ctx.Done():
return nil
case p, ok := <-executionRes.Progress():
// the command completed and the progress channel is closed
if !ok {
if phaseProgress != commands.ProgressNumFinished {
return fmt.Errorf("error: upgrade phase %s: command execution did not succeed", phaseCMD.OperationName())
@@ -140,8 +338,6 @@ func doUpgrade(ctx context.Context) (err error) {
refreshUpgradeProgressFromPhase(phase, phaseProgress)
}
}
// if the upgrade succeeded, the upgrade target will be removed
// and the upgrade status cleared
return nil
}
@@ -154,3 +350,17 @@ func refreshUpgradeProgressFromPhase(phase upgradePhase, phaseProgress int) {
state.CurrentState.UpgradingProgressNum = newProgress
state.CurrentState.UpgradingProgress = fmt.Sprintf("%d%%", state.CurrentState.UpgradingProgressNum)
}
// refreshDownloadProgressFromPhase maps a single phase's progress
// (0..ProgressNumFinished) onto that phase's slice of the overall download
// progress and publishes it, never moving the overall value backwards.
func refreshDownloadProgressFromPhase(phase upgradePhase, phaseProgress int) {
	span := float64(phase.progressSpan)
	scaled := math.Min(float64(phaseProgress)*span/float64(commands.ProgressNumFinished), span)
	total := phase.progressOffset + int(math.Round(scaled))
	if total > state.CurrentState.UpgradingDownloadProgressNum {
		state.CurrentState.UpgradingDownloadProgressNum = total
		state.CurrentState.UpgradingDownloadProgress = fmt.Sprintf("%d%%", state.CurrentState.UpgradingDownloadProgressNum)
	}
}
// createUpgradeDownloadedFile persists the empty upgrade.downloaded marker
// file so a restarted daemon knows the download phases already completed.
func createUpgradeDownloadedFile() error {
	return os.WriteFile(commands.UPGRADE_DOWNLOADED_FILE, []byte(""), 0644)
}

View File

@@ -16,7 +16,6 @@ import (
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"github.com/Masterminds/semver/v3"
cpu "github.com/klauspost/cpuid/v2"
"github.com/pbnjay/memory"
)
@@ -59,12 +58,19 @@ type state struct {
UninstallingProgressNum int `json:"-"`
UpgradingTarget string `json:"upgradingTarget"`
UpgradingRetryNum int `json:"upgradingRetryNum"`
UpgradingNextRetryAt *time.Time `json:"upgradingNextRetryAt,omitempty"`
UpgradingState ProcessingState `json:"upgradingState"`
UpgradingStep string `json:"upgradingStep"`
UpgradingProgress string `json:"upgradingProgress"`
UpgradingProgressNum int `json:"-"`
UpgradingError string `json:"upgradingError"`
UpgradingDownloadState ProcessingState `json:"upgradingDownloadState"`
UpgradingDownloadStep string `json:"upgradingDownloadStep"`
UpgradingDownloadProgress string `json:"upgradingDownloadProgress"`
UpgradingDownloadProgressNum int `json:"-"`
UpgradingDownloadError string `json:"upgradingDownloadError"`
CollectingLogsState ProcessingState `json:"collectingLogsState"`
CollectingLogsError string `json:"collectingLogsError"`
@@ -97,6 +103,13 @@ func bToGb(b uint64) string {
func CheckCurrentStatus(ctx context.Context) error {
TerminusStateMu.Lock()
name, err := utils.GetOlaresNameFromReleaseFile()
if err != nil {
klog.Error("get olares name from release file error, ", err)
} else {
CurrentState.TerminusName = &name
}
var currentTerminusState TerminusState = CurrentState.TerminusState
defer func() {
CurrentState.TerminusState = currentTerminusState
@@ -151,6 +164,7 @@ func CheckCurrentStatus(ctx context.Context) error {
// get network info
ips, err := nets.GetInternalIpv4Addr()
if err != nil {
currentTerminusState = NetworkNotReady
return err
}
@@ -293,7 +307,6 @@ func CheckCurrentStatus(ctx context.Context) error {
currentTerminusState = NotInstalled
CurrentState.InstallingProgress = ""
CurrentState.InstallingState = ""
CurrentState.TerminusName = nil
CurrentState.InstalledTime = nil
CurrentState.InitializedTime = nil
@@ -332,33 +345,24 @@ func CheckCurrentStatus(ctx context.Context) error {
}
}
targetVersion, err := GetOlaresUpgradeTarget()
// only set system state to Upgrading if actual upgrade should be in progress
// (not during download phase)
upgradeTarget, err := GetOlaresUpgradeTarget()
if err != nil {
return err
return fmt.Errorf("error getting Olares upgrade target: %v", err.Error())
}
if targetVersion != nil {
// get current version and compare with target version
currentVersionStr, err := utils.GetTerminusVersion(ctx, dynamicClient)
if err != nil {
klog.Error("failed to get current version: ", err)
return err
}
currentVersion, err := semver.NewVersion(*currentVersionStr)
if err != nil || currentVersion.LessThan(targetVersion) {
CurrentState.UpgradingTarget = targetVersion.Original()
currentTerminusState = Upgrading
return nil
}
upgradeDownloadCompleted, err := IsUpgradeDownloadCompleted()
if err != nil {
return fmt.Errorf("error checking if upgrade download completed: %v", err.Error())
}
upgradeDownloadOnly, err := IsUpgradeDownloadOnly()
if err != nil {
return fmt.Errorf("error checking if upgrade download only: %v", err.Error())
}
if upgradeTarget != nil && upgradeDownloadCompleted && !upgradeDownloadOnly {
currentTerminusState = Upgrading
return nil
}
// not upgrading, reset upgrading status
CurrentState.UpgradingState = ""
CurrentState.UpgradingTarget = ""
CurrentState.UpgradingRetryNum = 0
CurrentState.UpgradingStep = ""
CurrentState.UpgradingProgressNum = 0
CurrentState.UpgradingProgress = ""
CurrentState.UpgradingError = ""
if tmsrunning, err := utils.IsTerminusRunning(ctx, kubeClient); err != nil {
currentTerminusState = SystemError

View File

@@ -0,0 +1,41 @@
package state
import (
"math"
"math/rand"
"time"
)
// Retry backoff policy: delays start at retryBaseDelay and double each
// attempt (retryBackoffFactor) up to retryMaxDelay.
const (
	retryBaseDelay     = 5 * time.Second
	retryMaxDelay      = 10 * time.Minute
	retryBackoffFactor = 2.0
)

// calculateNextRetryDelay returns the exponential-backoff delay for the
// given retry attempt: base*factor^retryNum, capped at retryMaxDelay, with
// a uniform ±25% random jitter applied; a (defensive) negative result falls
// back to the base delay.
func calculateNextRetryDelay(retryNum int) time.Duration {
	capped := math.Min(
		float64(retryBaseDelay)*math.Pow(retryBackoffFactor, float64(retryNum)),
		float64(retryMaxDelay),
	)
	// jitter factor drawn uniformly from [0.75, 1.25)
	jittered := float64(time.Duration(capped)) * (1 + 0.25*(rand.Float64()*2-1))
	if result := time.Duration(jittered); result >= 0 {
		return result
	}
	return retryBaseDelay
}
// calculateNextRetryTime returns the wall-clock instant of the next retry
// attempt, i.e. now plus the backoff delay for retryNum.
func calculateNextRetryTime(retryNum int) time.Time {
	return time.Now().Add(calculateNextRetryDelay(retryNum))
}
// CalculateNextRetryTime is the exported wrapper around
// calculateNextRetryTime for callers outside this package (e.g. the
// upgrade watcher's retry scheduling).
func CalculateNextRetryTime(retryNum int) time.Time {
	return calculateNextRetryTime(retryNum)
}

View File

@@ -37,6 +37,7 @@ const (
Shutdown TerminusState = "shutdown"
Restarting TerminusState = "restarting"
Checking TerminusState = "checking"
NetworkNotReady TerminusState = "network-not-ready"
)
func (s TerminusState) String() string {

View File

@@ -88,6 +88,10 @@ func isProcessRunning(pidfile string) (bool, error) {
return false, err
}
if len(strings.TrimSpace(string(pidData))) == 0 {
return false, nil
}
pid, err := strconv.Atoi(string(pidData))
if err != nil {
return false, err
@@ -143,6 +147,28 @@ func GetOlaresUpgradeTarget() (*semver.Version, error) {
return version, nil
}
// IsUpgradeDownloadOnly reports whether the upgrade.downloadonly marker
// file exists, i.e. the user asked to only download the upgrade assets.
func IsUpgradeDownloadOnly() (bool, error) {
	switch _, err := os.Stat(commands.UPGRADE_DOWNLOADONLY_FILE); {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// IsUpgradeDownloadCompleted reports whether the upgrade.downloaded marker
// file exists, i.e. the download phases already finished successfully.
func IsUpgradeDownloadCompleted() (bool, error) {
	switch _, err := os.Stat(commands.UPGRADE_DOWNLOADED_FILE); {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
func IsIpChangeRunning() (bool, error) {
running, err := isProcessRunning(commands.CHANGINGIP_PID_FILE)
if err != nil {

View File

@@ -63,6 +63,11 @@ func (i *collectLogs) Execute(ctx context.Context, p any) (res any, err error) {
return
}
if adminUser == nil {
errStr = "admin user not found"
return
}
hostPath, err := utils.GetUserspacePvcHostPath(ctx, adminUser.GetName(), kubeClient)
if err != nil {
errStr = fmt.Sprintf("get admin user host path error, %v", err)

View File

@@ -16,20 +16,22 @@ var (
COMMAND_BASE_DIR = "" // deprecated shell command base dir
CDN_URL = "https://dc3p1870nn3cj.cloudfront.net"
OS_ROOT_DIR = "/olares"
INSTALLING_PID_FILE = "installing.pid"
UNINSTALLING_PID_FILE = "uninstalling.pid"
CHANGINGIP_PID_FILE = "changingip.pid"
UPGRADE_TARGET_FILE = "upgrade.target"
PREV_IP_TO_CHANGE_FILE = ".prev_ip"
PREV_IP_CHANGE_FAILED = ".ip_change_failed"
INSTALL_LOCK = ".installed"
LOG_FILE = "install.log"
TERMINUS_BASE_DIR = ""
MOUNT_BASE_DIR = path.Join(OS_ROOT_DIR, "share")
PREPARE_LOCK = ".prepared"
REDIS_CONF = OS_ROOT_DIR + "/data/redis/etc/redis.conf"
EXPORT_POD_LOGS_DIR = "Home/pod_logs"
OS_ROOT_DIR = "/olares"
INSTALLING_PID_FILE = "installing.pid"
UNINSTALLING_PID_FILE = "uninstalling.pid"
CHANGINGIP_PID_FILE = "changingip.pid"
UPGRADE_TARGET_FILE = "upgrade.target"
UPGRADE_DOWNLOADONLY_FILE = "upgrade.downloadonly"
UPGRADE_DOWNLOADED_FILE = "upgrade.downloaded"
PREV_IP_TO_CHANGE_FILE = ".prev_ip"
PREV_IP_CHANGE_FAILED = ".ip_change_failed"
INSTALL_LOCK = ".installed"
LOG_FILE = "install.log"
TERMINUS_BASE_DIR = ""
MOUNT_BASE_DIR = path.Join(OS_ROOT_DIR, "share")
PREPARE_LOCK = ".prepared"
REDIS_CONF = OS_ROOT_DIR + "/data/redis/etc/redis.conf"
EXPORT_POD_LOGS_DIR = "Home/pod_logs"
ProgressNumFinished = 100
)
@@ -45,6 +47,8 @@ func Init() {
UNINSTALLING_PID_FILE = filepath.Join(baseDir, UNINSTALLING_PID_FILE)
CHANGINGIP_PID_FILE = filepath.Join(baseDir, CHANGINGIP_PID_FILE)
UPGRADE_TARGET_FILE = filepath.Join(baseDir, UPGRADE_TARGET_FILE)
UPGRADE_DOWNLOADONLY_FILE = filepath.Join(baseDir, UPGRADE_DOWNLOADONLY_FILE)
UPGRADE_DOWNLOADED_FILE = filepath.Join(baseDir, UPGRADE_DOWNLOADED_FILE)
INSTALL_LOCK = filepath.Join(baseDir, INSTALL_LOCK)
PREPARE_LOCK = filepath.Join(baseDir, PREPARE_LOCK)
PREV_IP_TO_CHANGE_FILE = filepath.Join(baseDir, PREV_IP_TO_CHANGE_FILE)

View File

@@ -19,14 +19,15 @@ const (
Uninstall Operations = "uninstall"
CreateUpgradeTarget Operations = "createUpgradeTarget"
RemoveUpgradeTarget Operations = "removeUpgradeTarget"
DownloadCLI Operations = "downloadCLI"
DownloadWizard Operations = "downloadWizard"
VersionCompatibilityCheck Operations = "versionCompatibilityCheck"
UpgradeHealthCheck Operations = "upgradeHealthCheck"
DownloadComponent Operations = "downloadComponent"
PrepareImages Operations = "prepareImages"
PrepareOlaresd Operations = "prepareOlaresd"
ImportImages Operations = "importImages"
InstallOlaresd Operations = "installOlaresd"
Upgrade Operations = "upgrade"
UpgradeCli Operations = "upgradeCli"
InstallCLI Operations = "installCLI"
Reboot Operations = "reboot"
Shutdown Operations = "shutdown"
ConnectWifi Operations = "connectWifi"

View File

@@ -15,6 +15,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/klog/v2"
)
type versionCompatibilityCheck struct {
@@ -91,6 +92,19 @@ func NewHealthCheck() commands.Interface {
}
func (i *healthCheck) Execute(ctx context.Context, _ any) (res any, err error) {
klog.Info("Starting upgrade health check")
const minAvailableSpace = 100 * 1024 * 1024 * 1024 // 100GB in bytes
availableSpace, err := utils.GetDiskAvailableSpace("/")
if err != nil {
return nil, fmt.Errorf("error checking disk space: %s", err)
}
klog.Infof("Root partition available space: %.2fGB", float64(availableSpace)/(1024*1024*1024))
if availableSpace < minAvailableSpace {
return nil, fmt.Errorf("insufficient disk space: %.2fGB available, minimum 100GB required",
float64(availableSpace)/(1024*1024*1024))
}
client, err := utils.GetKubeClient()
if err != nil {
return nil, fmt.Errorf("error getting kubernetes client: %s", err)
@@ -132,5 +146,33 @@ func (i *healthCheck) Execute(ctx context.Context, _ any) (res any, err error) {
}
}
criticalNamespaces := []string{"os-platform", "os-framework"}
for _, namespace := range criticalNamespaces {
pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("error listing pods in namespace %s: %s", namespace, err)
}
for _, pod := range pods.Items {
if pod.Status.Phase == corev1.PodSucceeded {
continue
}
podStatus := utils.GetPodStatus(&pod)
if podStatus != "Running" && podStatus != "Completed" {
klog.Errorf("Pod %s/%s is not healthy: %s", namespace, pod.Name, podStatus)
return nil, fmt.Errorf("pod %s/%s is not healthy: %s", namespace, pod.Name, podStatus)
}
if !utils.IsPodReady(&pod) && pod.Status.Phase == corev1.PodRunning {
klog.Warningf("Pod %s/%s is running but not ready", namespace, pod.Name)
return nil, fmt.Errorf("pod %s/%s is running but not ready", namespace, pod.Name)
}
}
}
klog.Info("health checks passed for upgrade")
return newExecutionRes(true, nil), nil
}

View File

@@ -0,0 +1,84 @@
package upgrade
import (
"context"
"errors"
"fmt"
"os"
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/beclab/Olares/daemon/pkg/commands"
)
// UpgradeRequest is the payload for creating an upgrade target.
type UpgradeRequest struct {
	// Version is the target Olares version to upgrade to.
	Version string `json:"version"`
	// DownloadOnly, when true, only downloads the upgrade assets and then
	// waits for user confirmation before the actual upgrade runs.
	DownloadOnly bool `json:"downloadOnly"`
}
// createUpgradeTarget is the command that records a new upgrade target (and
// optionally the download-only marker) on disk.
type createUpgradeTarget struct {
	commands.Operation
}

// compile-time interface check
var _ commands.Interface = &createUpgradeTarget{}

// NewCreateUpgradeTarget returns the createUpgradeTarget command.
func NewCreateUpgradeTarget() commands.Interface {
	return &createUpgradeTarget{
		Operation: commands.Operation{
			Name: commands.CreateUpgradeTarget,
		},
	}
}
// Execute validates the request against any in-flight or conflicting
// upgrade, writes the upgrade.target marker (and creates or removes the
// upgrade.downloadonly marker to match the request), then signals a state
// refresh. p must be an UpgradeRequest.
func (i *createUpgradeTarget) Execute(ctx context.Context, p any) (res any, err error) {
	req, ok := p.(UpgradeRequest)
	if !ok {
		return nil, errors.New("invalid param")
	}
	if err := checkVersionConflicts(req.Version); err != nil {
		return nil, err
	}
	if err := createUpgradeTargetFile(req.Version); err != nil {
		return nil, fmt.Errorf("failed to create upgrade target: %v", err)
	}
	switch {
	case req.DownloadOnly:
		if err := createUpgradeDownloadOnlyFile(); err != nil {
			return nil, fmt.Errorf("failed to create upgrade downloadonly file: %v", err)
		}
	default:
		if err := removeUpgradeDownloadOnlyFile(); err != nil && !os.IsNotExist(err) {
			return nil, fmt.Errorf("failed to remove upgrade downloadonly file: %v", err)
		}
	}
	state.StateTrigger <- struct{}{}
	return NewExecutionRes(true, nil), nil
}
// checkVersionConflicts rejects a new upgrade target when an upgrade is
// already in progress or a different target version is already recorded.
func checkVersionConflicts(version string) error {
	if state.CurrentState.UpgradingState == state.InProgress {
		return fmt.Errorf("system is currently upgrading")
	}
	upgradeTarget, err := state.GetOlaresUpgradeTarget()
	// BUGFIX: the original silently swallowed this error (the conflict check
	// was skipped whenever the existing target could not be read), allowing a
	// conflicting target to be overwritten.
	if err != nil {
		return fmt.Errorf("checking existing upgrade target: %v", err)
	}
	if upgradeTarget != nil && upgradeTarget.Original() != version {
		return fmt.Errorf("different upgrade version %s already exists, please cancel it first", upgradeTarget.Original())
	}
	return nil
}
// createUpgradeTargetFile writes the target version into the upgrade.target
// marker file.
// NOTE(review): 0755 is an odd mode for a plain data file (0644 would be
// conventional) — confirm nothing relies on it before changing.
func createUpgradeTargetFile(version string) error {
	return os.WriteFile(commands.UPGRADE_TARGET_FILE, []byte(version), 0755)
}

// createUpgradeDownloadOnlyFile creates the empty upgrade.downloadonly
// marker file.
func createUpgradeDownloadOnlyFile() error {
	return os.WriteFile(commands.UPGRADE_DOWNLOADONLY_FILE, []byte(""), 0755)
}

// removeUpgradeDownloadOnlyFile deletes the upgrade.downloadonly marker;
// callers are expected to tolerate os.IsNotExist.
func removeUpgradeDownloadOnlyFile() error {
	return os.Remove(commands.UPGRADE_DOWNLOADONLY_FILE)
}

View File

@@ -0,0 +1,85 @@
package upgrade
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/daemon/pkg/commands"
"k8s.io/klog/v2"
)
type downloadCLI struct {
commands.Operation
}
var _ commands.Interface = &downloadCLI{}
func NewDownloadCLI() commands.Interface {
return &downloadCLI{
Operation: commands.Operation{
Name: commands.DownloadCLI,
},
}
}
// Execute downloads the olares-cli release tarball for the given version,
// extracts it into TERMINUS_BASE_DIR/pkg/components and stores the binary
// as a versioned file (olares-cli-v<version>) for later installation.
// p is the target version string. The download is skipped when the
// currently installed CLI already satisfies the target version.
func (i *downloadCLI) Execute(ctx context.Context, p any) (res any, err error) {
	version, ok := p.(string)
	if !ok {
		return nil, errors.New("invalid param")
	}
	targetVersion, err := semver.NewVersion(version)
	if err != nil {
		return nil, fmt.Errorf("invalid target version %s: %v", version, err)
	}
	currentVersion, err := getCurrentCliVersion()
	if err != nil {
		// if we can't get the current version, assume we need to download
		klog.Warningf("Failed to get current olares-cli version: %v, proceeding with download", err)
	} else if !currentVersion.LessThan(targetVersion) {
		// already up to date, nothing to do
		return newExecutionRes(true, nil), nil
	}
	// BUGFIX: runtime.GOARCH is "arm64" on 64-bit ARM, never "arm"; the old
	// check (== "arm") made arm64 hosts download the amd64 build. Keep the
	// "arm" mapping for 32-bit ARM hosts that run the arm64 userland build.
	arch := "amd64"
	if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" {
		arch = "arm64"
	}
	destDir := filepath.Join(commands.TERMINUS_BASE_DIR, "pkg", "components")
	if err := os.MkdirAll(destDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create components directory: %v", err)
	}
	downloadURL := fmt.Sprintf("%s/olares-cli-v%s_linux_%s.tar.gz", commands.CDN_URL, version, arch)
	tarFile := filepath.Join(destDir, fmt.Sprintf("olares-cli-v%s.tar.gz", version))
	if err := downloadFile(downloadURL, tarFile); err != nil {
		return nil, fmt.Errorf("failed to download olares-cli: %v", err)
	}
	if err := extractTarGz(tarFile, destDir); err != nil {
		return nil, fmt.Errorf("failed to extract olares-cli: %v", err)
	}
	// keep the binary under a versioned name so multiple versions can coexist
	binaryPath := filepath.Join(destDir, "olares-cli")
	versionedPath := filepath.Join(destDir, fmt.Sprintf("olares-cli-v%s", version))
	if err := os.Rename(binaryPath, versionedPath); err != nil {
		return nil, fmt.Errorf("failed to rename olares-cli binary: %v", err)
	}
	if err := os.Chmod(versionedPath, 0755); err != nil {
		return nil, fmt.Errorf("failed to make olares-cli executable: %v", err)
	}
	// best-effort cleanup of the downloaded archive (was silently ignored)
	if err := os.Remove(tarFile); err != nil {
		klog.Warningf("failed to remove downloaded archive %s: %v", tarFile, err)
	}
	return newExecutionRes(true, nil), nil
}

View File

@@ -26,10 +26,10 @@ type prepareImages struct {
var _ commands.Interface = &prepareImages{}
func NewPrepareImages() commands.Interface {
func NewImportImages() commands.Interface {
return &prepareImages{
Operation: commands.Operation{
Name: commands.PrepareImages,
Name: commands.ImportImages,
},
progressKeywords: []progressKeyword{
{"Preload Container Images execute successfully", commands.ProgressNumFinished},
@@ -119,7 +119,7 @@ func (i *prepareImages) refreshProgress() error {
if strings.Contains(line.Text, p.KeyWord) {
lineProgress = p.ProgressNum
} else {
lineProgress = parseProgressFromItemProgress(line.Text)
lineProgress = parseImagePrepareProgressByItemProgress(line.Text)
}
if i.progress < lineProgress {
i.progress = lineProgress

View File

@@ -0,0 +1,67 @@
package upgrade
import (
"context"
"errors"
"fmt"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/daemon/pkg/commands"
"k8s.io/klog/v2"
"os"
"os/exec"
"path/filepath"
)
type installCLI struct {
commands.Operation
}
var _ commands.Interface = &installCLI{}
func NewInstallCLI() commands.Interface {
return &installCLI{
Operation: commands.Operation{
Name: commands.InstallCLI,
},
}
}
// Execute installs the pre-downloaded olares-cli binary for the given
// version into /usr/local/bin, skipping the install when the currently
// installed CLI is already at or above the target version.
// p is the target version string.
func (i *installCLI) Execute(ctx context.Context, p any) (res any, err error) {
	version, ok := p.(string)
	if !ok {
		return nil, errors.New("invalid param")
	}
	targetVersion, err := semver.NewVersion(version)
	if err != nil {
		return nil, fmt.Errorf("invalid target version %s: %v", version, err)
	}
	currentVersion, err := getCurrentCliVersion()
	if err != nil {
		// unknown current version: install anyway
		klog.Warningf("Failed to get current olares-cli version: %v, proceeding with installation", err)
	} else if !currentVersion.LessThan(targetVersion) {
		// already up to date
		return newExecutionRes(true, nil), nil
	}
	preDownloadedPath := filepath.Join(commands.TERMINUS_BASE_DIR, "pkg", "components", fmt.Sprintf("olares-cli-v%s", version))
	if _, err := os.Stat(preDownloadedPath); err != nil {
		klog.Warningf("Failed to find pre-downloaded binary path %s: %v", preDownloadedPath, err)
		return newExecutionRes(false, nil), err
	}
	// BUGFIX: use CommandContext so the copy is aborted when ctx is
	// cancelled (exec.Command ignored ctx entirely).
	cmd := exec.CommandContext(ctx, "cp", "-f", preDownloadedPath, "/usr/local/bin/olares-cli")
	if err := cmd.Run(); err != nil {
		klog.Warningf("Failed to install olares-cli: %v", err)
		return newExecutionRes(false, nil), err
	}
	if err := os.Chmod("/usr/local/bin/olares-cli", 0755); err != nil {
		return nil, fmt.Errorf("failed to make olares-cli executable: %v", err)
	}
	return newExecutionRes(true, nil), nil
}

View File

@@ -10,6 +10,7 @@ import (
"strings"
"time"
semver "github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/daemon/pkg/cli"
"github.com/beclab/Olares/daemon/pkg/commands"
"github.com/nxadm/tail"
@@ -26,10 +27,10 @@ type prepareOlaresd struct {
var _ commands.Interface = &prepareOlaresd{}
func NewPrepareOlaresd() commands.Interface {
func NewInstallOlaresd() commands.Interface {
return &prepareOlaresd{
Operation: commands.Operation{
Name: commands.PrepareOlaresd,
Name: commands.InstallOlaresd,
},
progressKeywords: []progressKeyword{
{"ReplaceOlaresdBinary success", 30},
@@ -52,6 +53,19 @@ func (i *prepareOlaresd) Execute(ctx context.Context, p any) (res any, err error
if !ok {
return nil, errors.New("invalid param")
}
targetVersion, err := semver.NewVersion(version)
if err != nil {
return nil, fmt.Errorf("invalid target version %s: %v", version, err)
}
currentVersion, err := getCurrentDaemonVersion()
if err != nil {
klog.Warningf("Failed to get current olaresd version: %v, proceeding with installation", err)
} else {
if !currentVersion.LessThan(targetVersion) {
return newExecutionRes(true, nil), nil
}
}
i.logFile = filepath.Join(commands.TERMINUS_BASE_DIR, "versions", "v"+version, "logs", "install.log")
if err := i.refreshProgress(); err != nil {

View File

@@ -0,0 +1,57 @@
package upgrade
import (
"context"
"os"
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/beclab/Olares/daemon/pkg/commands"
)
type removeUpgradeTarget struct {
commands.Operation
}
var _ commands.Interface = &removeUpgradeTarget{}
func NewRemoveUpgradeTarget() commands.Interface {
return &removeUpgradeTarget{
Operation: commands.Operation{
Name: commands.RemoveUpgradeTarget,
},
}
}
// Execute removes all upgrade marker files, clears the download-related
// fields of the global state, and signals a state refresh.
func (i *removeUpgradeTarget) Execute(ctx context.Context, p any) (res any, err error) {
	if err := RemoveUpgradeFiles(); err != nil {
		return nil, err
	}
	cur := &state.CurrentState
	cur.UpgradingDownloadState = ""
	cur.UpgradingDownloadStep = ""
	cur.UpgradingDownloadProgress = ""
	cur.UpgradingDownloadProgressNum = 0
	cur.UpgradingDownloadError = ""
	state.StateTrigger <- struct{}{}
	return NewExecutionRes(true, nil), nil
}
// RemoveUpgradeFiles deletes every upgrade marker file; missing files are
// not an error, so the call is idempotent.
func RemoveUpgradeFiles() error {
	for _, file := range []string{
		commands.UPGRADE_TARGET_FILE,
		commands.UPGRADE_DOWNLOADONLY_FILE,
		commands.UPGRADE_DOWNLOADED_FILE,
	} {
		if err := os.Remove(file); err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	return nil
}

View File

@@ -31,6 +31,10 @@ func newExecutionRes(finished bool, progressChan <-chan int) ExecutionRes {
}
}
func NewExecutionRes(finished bool, progressChan <-chan int) ExecutionRes {
return newExecutionRes(finished, progressChan)
}
type progressKeyword struct {
KeyWord string
ProgressNum int
@@ -41,7 +45,7 @@ var itemProcessProgressRE = regexp.MustCompile(`\((\d+)/(\d+)\)`)
func parseProgressFromItemProgress(line string) int {
matches := itemProcessProgressRE.FindAllStringSubmatch(line, 2)
if len(matches) != 3 {
if len(matches) != 1 || len(matches[0]) != 3 {
return 0
}
indexStr, totalStr := matches[0][1], matches[0][2]
@@ -53,5 +57,5 @@ func parseProgressFromItemProgress(line string) int {
if total == 0 || err != nil {
return 0
}
return int(math.Round(index / total * 90))
return int(math.Round((index / total) * 90.0))
}

View File

@@ -11,7 +11,6 @@ import (
"time"
"github.com/beclab/Olares/daemon/pkg/cli"
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/beclab/Olares/daemon/pkg/commands"
"github.com/nxadm/tail"
"k8s.io/klog/v2"
@@ -155,41 +154,6 @@ func (i *upgrade) refreshProgress() error {
return nil
}
// createTarget is the daemon command that records which version a
// subsequent upgrade should target.
type createTarget struct {
	commands.Operation
}

// compile-time check that createTarget satisfies commands.Interface
var _ commands.Interface = &createTarget{}

// NewCreateTarget builds the create-upgrade-target command.
func NewCreateTarget() commands.Interface {
	return &createTarget{
		Operation: commands.Operation{
			Name: commands.CreateUpgradeTarget,
		},
	}
}
// Execute writes the requested upgrade version to the upgrade target
// file and notifies state watchers.
// p must be the version string; otherwise an "invalid param" error is
// returned.
func (i *createTarget) Execute(ctx context.Context, p any) (res any, err error) {
	version, ok := p.(string)
	if !ok {
		err = errors.New("invalid param")
		return
	}
	if err := createUpgradeTarget(version); err != nil {
		return nil, fmt.Errorf("failed to create upgrade target: %v", err)
	}
	// wake up the state broadcaster so clients observe the new target
	state.StateTrigger <- struct{}{}
	return nil, nil
}
// createUpgradeTarget persists the target version string to the
// upgrade target marker file.
// NOTE(review): mode 0755 marks a plain data file executable; 0644
// would be conventional — confirm before changing.
func createUpgradeTarget(version string) error {
	return os.WriteFile(commands.UPGRADE_TARGET_FILE, []byte(version), 0755)
}
type removeTarget struct {
commands.Operation
}
@@ -204,10 +168,7 @@ func NewRemoveTarget() commands.Interface {
}
}
func (i *removeTarget) Execute(_ context.Context, _ any) (res any, err error) {
if err := os.Remove(commands.UPGRADE_TARGET_FILE); err != nil && !os.IsNotExist(err) {
return nil, err
}
return newExecutionRes(true, nil), nil
func (i *removeTarget) Execute(ctx context.Context, p any) (res any, err error) {
upgradeRemove := NewRemoveUpgradeTarget()
return upgradeRemove.Execute(ctx, p)
}

View File

@@ -1,135 +0,0 @@
package upgrade
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/daemon/pkg/commands"
"k8s.io/klog/v2"
)
// upgradeCli is the daemon command that upgrades the olares-cli binary
// installed at /usr/local/bin to a requested version.
type upgradeCli struct {
	commands.Operation
}

// compile-time check that upgradeCli satisfies commands.Interface
var _ commands.Interface = &upgradeCli{}

// NewUpgradeCli builds the upgrade-cli command.
func NewUpgradeCli() commands.Interface {
	return &upgradeCli{
		Operation: commands.Operation{
			Name: commands.UpgradeCli,
		},
	}
}
// Execute upgrades the olares-cli binary at /usr/local/bin/olares-cli
// to the version given in p (a version string), downloading the
// release tarball for the host architecture from the CDN.
// It is a no-op (finished result) when the installed version is
// already at or beyond the target.
func (i *upgradeCli) Execute(ctx context.Context, p any) (res any, err error) {
	version, ok := p.(string)
	if !ok {
		return nil, errors.New("invalid param")
	}
	targetVersion, err := semver.NewVersion(version)
	if err != nil {
		return nil, fmt.Errorf("invalid target version %s: %v", version, err)
	}
	currentVersion, err := getCurrentCliVersion()
	if err != nil {
		// if we can't get the current version, assume we need to upgrade
		klog.Warningf("Failed to get current olares-cli version: %v, proceeding with upgrade", err)
	} else if !currentVersion.LessThan(targetVersion) {
		// already at or beyond the target version; nothing to do
		return newExecutionRes(true, nil), nil
	}
	// Fix: pick the tarball architecture from the actual GOARCH.
	// The previous check compared GOARCH against "arm", which selected
	// the arm64 tarball on 32-bit arm hosts and the amd64 tarball on
	// real arm64 hosts (whose GOARCH is "arm64").
	arch := "amd64"
	if runtime.GOARCH == "arm64" {
		arch = "arm64"
	}
	tmpDir, err := os.MkdirTemp("", "olares-cli-upgrade-*")
	if err != nil {
		return nil, fmt.Errorf("failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	downloadURL := fmt.Sprintf("%s/olares-cli-v%s_linux_%s.tar.gz", commands.CDN_URL, version, arch)
	tarFile := filepath.Join(tmpDir, "olares-cli.tar.gz")
	if err := downloadFile(downloadURL, tarFile); err != nil {
		return nil, fmt.Errorf("failed to download olares-cli: %v", err)
	}
	if err := extractTarGz(tarFile, tmpDir); err != nil {
		return nil, fmt.Errorf("failed to extract olares-cli: %v", err)
	}
	binaryPath := filepath.Join(tmpDir, "olares-cli")
	// NOTE(review): os.Rename fails with EXDEV when the temp dir and
	// /usr/local/bin live on different filesystems — confirm whether
	// that can happen on target hosts and fall back to a copy if so.
	if err := os.Rename(binaryPath, "/usr/local/bin/olares-cli"); err != nil {
		return nil, fmt.Errorf("failed to move olares-cli to /usr/local/bin: %v", err)
	}
	if err := os.Chmod("/usr/local/bin/olares-cli", 0755); err != nil {
		return nil, fmt.Errorf("failed to make olares-cli executable: %v", err)
	}
	return newExecutionRes(true, nil), nil
}
// getCurrentCliVersion runs "olares-cli -v" and parses the reported
// semantic version.
// Expected output format: "olares-cli version ${VERSION}".
func getCurrentCliVersion() (*semver.Version, error) {
	cmd := exec.Command("olares-cli", "-v")
	output, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("failed to execute olares-cli -v: %v", err)
	}
	// parse version from output
	// expected format: "olares-cli version ${VERSION}"
	parts := strings.Split(string(output), " ")
	if len(parts) != 3 {
		return nil, fmt.Errorf("unexpected version output format: %s", string(output))
	}
	// Fix: trim surrounding whitespace — command output ends with a
	// newline, and "1.2.3\n" does not parse as a semantic version.
	version, err := semver.NewVersion(strings.TrimSpace(parts[2]))
	if err != nil {
		return nil, fmt.Errorf("invalid version format: %v", err)
	}
	return version, nil
}
func downloadFile(url, filepath string) error {
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("bad status: %s", resp.Status)
}
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, resp.Body)
return err
}
// extractTarGz unpacks the gzip-compressed tarball tarFile into
// destDir by shelling out to the system tar binary.
func extractTarGz(tarFile, destDir string) error {
	return exec.Command("tar", "-xzf", tarFile, "-C", destDir).Run()
}

View File

@@ -0,0 +1,99 @@
package upgrade
import (
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"github.com/Masterminds/semver/v3"
)
// getCurrentCliVersion reports the semantic version of the installed
// olares-cli binary by invoking "olares-cli -v".
// Expected output format: "olares-cli version ${VERSION}".
func getCurrentCliVersion() (*semver.Version, error) {
	out, err := exec.Command("olares-cli", "-v").Output()
	if err != nil {
		return nil, fmt.Errorf("failed to execute olares-cli -v: %v", err)
	}
	fields := strings.Split(string(out), " ")
	if len(fields) != 3 {
		return nil, fmt.Errorf("unexpected version output format: %s", string(out))
	}
	// the third field carries the version; trim the trailing newline
	v, err := semver.NewVersion(strings.TrimSpace(fields[2]))
	if err != nil {
		return nil, fmt.Errorf("invalid version format: %v", err)
	}
	return v, nil
}
// getCurrentDaemonVersion reports the semantic version of the
// installed olaresd binary by invoking "olaresd --version".
// Expected output format: "olaresd version: v${VERSION}".
func getCurrentDaemonVersion() (*semver.Version, error) {
	out, err := exec.Command("olaresd", "--version").Output()
	if err != nil {
		return nil, fmt.Errorf("failed to execute olaresd --version: %v", err)
	}
	fields := strings.Split(string(out), " ")
	if len(fields) != 3 {
		return nil, fmt.Errorf("unexpected version output format: %s", string(out))
	}
	// strip surrounding whitespace and the leading "v" before parsing
	v, err := semver.NewVersion(strings.TrimPrefix(strings.TrimSpace(fields[2]), "v"))
	if err != nil {
		return nil, fmt.Errorf("invalid version format: %v", err)
	}
	return v, nil
}
func downloadFile(url, filepath string) error {
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("bad status: %s", resp.Status)
}
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, resp.Body)
return err
}
// extractTarGz unpacks the gzip-compressed tarball tarFile into
// destDir by shelling out to the system tar binary.
func extractTarGz(tarFile, destDir string) error {
	return exec.Command("tar", "-xzf", tarFile, "-C", destDir).Run()
}
func copyFile(src, dst string) error {
sourceFile, err := os.Open(src)
if err != nil {
return err
}
defer sourceFile.Close()
destFile, err := os.Create(dst)
if err != nil {
return err
}
defer destFile.Close()
_, err = io.Copy(destFile, sourceFile)
return err
}

View File

@@ -16,3 +16,15 @@ func GetDiskSize() (uint64, error) {
size := fs.Blocks * uint64(fs.Bsize)
return size, nil
}
// GetDiskAvailableSpace returns the number of bytes available to
// unprivileged users on the filesystem containing path.
func GetDiskAvailableSpace(path string) (uint64, error) {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(path, &stat); err != nil {
		klog.Error("get disk available space error, ", err)
		return 0, err
	}
	// available blocks (for non-root) times the block size
	return stat.Bavail * uint64(stat.Bsize), nil
}

View File

@@ -89,7 +89,7 @@ func detectdStorageDevices(ctx context.Context, bus string) (usbDevs []storageDe
token := strings.Split(syspath, "/")
devPath := filepath.Join("/dev", token[len(token)-1])
klog.Info("device path:", device.Properties())
klog.V(8).Info("device path:", device.Properties())
vender := device.Properties()["ID_VENDOR"]
if vender == "" {
vender = device.Properties()["ID_USB_VENDOR"]
@@ -162,7 +162,7 @@ func getMountedPath(devs []storageDevice) ([]string, error) {
var paths []string
for _, m := range list {
if slices.ContainsFunc(devs, func(u storageDevice) bool { return u.DevPath == m.Device }) {
klog.Infof("mount: %v, %v, %v", m.Path, m.Device, devs)
klog.V(8).Infof("mount: %v, %v, %v", m.Path, m.Device, devs)
paths = append(paths, m.Path)
}
}

View File

@@ -278,25 +278,53 @@ func GetAdminUserTerminusName(ctx context.Context, client dynamic.Interface) (st
}
type Filter func(u *unstructured.Unstructured) bool
func GetAdminUser(ctx context.Context, client dynamic.Interface) (*unstructured.Unstructured, error) {
u, err := ListUsers(ctx, client, func(u *unstructured.Unstructured) bool {
role, ok := u.GetAnnotations()[bflconst.UserAnnotationOwnerRole]
if !ok {
return false
}
return role == bflconst.RolePlatformAdmin
})
if err != nil {
klog.Error("list user error, ", err)
return nil, err
}
if len(u) == 0 {
klog.Info("admin user not found")
return nil, nil
}
return u[0], nil
}
func ListUsers(ctx context.Context, client dynamic.Interface, filters ...Filter) ([]*unstructured.Unstructured, error) {
users, err := client.Resource(UserGVR).List(ctx, metav1.ListOptions{})
if err != nil {
klog.Error("list user error, ", err)
return nil, err
}
var userList []*unstructured.Unstructured
for _, u := range users.Items {
role, ok := u.GetAnnotations()[bflconst.UserAnnotationOwnerRole]
if !ok {
var skip bool
for _, filter := range filters {
if !filter(&u) {
skip = true
break
}
}
if skip {
continue
}
if role == bflconst.RolePlatformAdmin {
return &u, nil
}
userList = append(userList, &u)
}
return nil, nil
return userList, nil
}
func isKeyPod(pod *corev1.Pod) bool {

View File

@@ -11,7 +11,7 @@ const CHECK_CONNECTIVITY_URL = "http://connectivity-check.ubuntu.com/"
func CheckInterfaceIPv4Connectivity(ctx context.Context, interfaceName string) bool {
// try to connect to the CHECK_CONNECTIVITY_URL using the specified interface
cmd := exec.CommandContext(ctx, "curl", "--interface", interfaceName, "--connect-timeout", "5", "-s", "-o", "/dev/null", CHECK_CONNECTIVITY_URL)
cmd := exec.CommandContext(ctx, "curl", "-4", "--interface", interfaceName, "--connect-timeout", "5", "-s", "-o", "/dev/null", CHECK_CONNECTIVITY_URL)
if err := cmd.Run(); err == nil {
return true
}

View File

@@ -81,7 +81,16 @@ func GetWifiDevice(ctx context.Context) (map[string]Device, error) {
}
func GetAllDevice(ctx context.Context) (map[string]Device, error) {
return deviceStatus(ctx, func(d *Device) bool { return true })
return deviceStatus(ctx, func(d *Device) bool {
managedByOthers := []string{"cali", "kube", "tun", "tailscale"}
for _, devPrefix := range managedByOthers {
if strings.HasPrefix(d.Name, devPrefix) {
return false
}
}
return true
})
}
func ManagedAllDevices(ctx context.Context) (map[string]Device, error) {
@@ -102,7 +111,6 @@ func ManagedAllDevices(ctx context.Context) (map[string]Device, error) {
cmd := exec.CommandContext(ctx, nmcli, "device", "set", d.Name, "managed", "yes")
cmd.Env = os.Environ()
output, err := cmd.CombinedOutput()
klog.Info(string(output))
if err != nil {
klog.Error("exec cmd error, ", err, ", nmcli device set ", d.Name, " managed yes")
return false
@@ -252,17 +260,17 @@ func showDeviceByNM(ctx context.Context, deviceName string, device *Device) erro
switch key {
case "IP4.ADDRESS[1]":
ipAndMask := strings.Split(value, "/")
if len(ipAndMask) > 2 {
if len(ipAndMask) > 1 {
device.Ipv4Address = ipAndMask[0]
cidr, err := strconv.Atoi(ipAndMask[1])
if err != nil {
klog.Error("convert cidr error, ", err)
return err
continue
}
mask, err := MaskFromCIDR(cidr)
if err != nil {
klog.Error("get mask from cidr error, ", err)
return err
continue
}
device.Ipv4Mask = mask
}
@@ -279,7 +287,7 @@ func showDeviceByNM(ctx context.Context, deviceName string, device *Device) erro
case "GENERAL.CONNECTION":
err := showConnectionByNM(ctx, value, device)
if err != nil {
klog.Error("get connection method error, ", err)
klog.V(8).Info("get connection method error, ", err, ", connection name: ", value)
}
default:
continue

View File

@@ -0,0 +1,96 @@
package utils
import (
"fmt"
corev1 "k8s.io/api/core/v1"
)
// GetPodStatus returns a kubectl-like status string for a pod
// because the pod.Status.Phase field is unreliable.
//
// Precedence of checks:
//  1. pods being deleted ("Terminating", or "Unknown" on node loss)
//  2. the first init container that has not completed successfully
//  3. app-container waiting/terminated reasons
//  4. a not-ready PodReady condition reason
//  5. the raw pod phase, then pod.Status.Reason, then "Unknown"
func GetPodStatus(pod *corev1.Pod) string {
	// A set deletion timestamp means deletion is in progress; "NodeLost"
	// is the reason reported when the node hosting the pod went away.
	if pod.DeletionTimestamp != nil {
		if pod.Status.Reason == "NodeLost" {
			return "Unknown"
		}
		return "Terminating"
	}
	// Walk init containers in order; the first one that has not exited
	// with code 0 determines the whole pod's status.
	for i, container := range pod.Status.InitContainerStatuses {
		if container.State.Terminated != nil && container.State.Terminated.ExitCode == 0 {
			// this init container completed successfully; check the next
			continue
		}
		if container.State.Terminated != nil {
			// failed init container: prefer its reason, then signal, then exit code
			if container.State.Terminated.Reason != "" {
				return fmt.Sprintf("Init:%s", container.State.Terminated.Reason)
			}
			if container.State.Terminated.Signal != 0 {
				return fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
			}
			return fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
		}
		if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
			return fmt.Sprintf("Init:%s", container.State.Waiting.Reason)
		}
		// still initializing: report progress as completed/total
		return fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
	}
	// All init containers are done; inspect the app containers.
	hasRunning := false
	for _, container := range pod.Status.ContainerStatuses {
		if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
			return container.State.Waiting.Reason
		}
		if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
			return container.State.Terminated.Reason
		}
		if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
			// terminated without a reason: report signal or exit code
			if container.State.Terminated.Signal != 0 {
				return fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
			}
			return fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
		}
		if container.State.Running != nil && container.Ready {
			hasRunning = true
		}
	}
	// No container-level status stood out; fall back to the pod's
	// Ready condition reason, if it is explicitly not ready.
	for _, condition := range pod.Status.Conditions {
		if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionFalse && condition.Reason != "" {
			return condition.Reason
		}
	}
	if pod.Status.Phase == corev1.PodRunning && hasRunning {
		return "Running"
	}
	if pod.Status.Phase != "" {
		return string(pod.Status.Phase)
	}
	if pod.Status.Reason != "" {
		return pod.Status.Reason
	}
	return "Unknown"
}
// IsPodReady reports whether a pod is fully ready: its phase is
// Running and its PodReady condition is True. A pod with no PodReady
// condition is treated as not ready.
func IsPodReady(pod *corev1.Pod) bool {
	if pod.Status.Phase != corev1.PodRunning {
		return false
	}
	for i := range pod.Status.Conditions {
		cond := &pod.Status.Conditions[i]
		if cond.Type == corev1.PodReady {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}

Some files were not shown because too many files have changed in this diff Show More