Compare commits

...

94 Commits

Author SHA1 Message Date
lovehunter9
05d14de4fe fix: files sync paste dir out bug 2025-07-15 21:16:34 +08:00
wiy
058cf31e44 system-frontend&user-service: update user-service & system-frontend new version (#1544)
* feat(user-service): update dataStore use redis

* feat(wise): remove from system-frontend
fix(settings): some bugs
fix(files): some bugs

* knowledge: remove knowledge, rss, argo

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-07-15 00:39:01 +08:00
hysyeah
72a5b2c6a2 app-service, bfl, cli, authelia,kubesphere: support create user from user cr (#1543)
* app-service, bfl, cli, authelia,kubesphere: support create user by cr

* fix: rm kubesphere-monitoring-federated ns
2025-07-14 23:48:53 +08:00
eball
f78890b01b otel: disable telemetry by default (#1542) 2025-07-14 23:48:18 +08:00
eball
13df294653 olaresd: refactor api server (#1541) 2025-07-14 23:47:55 +08:00
0x7fffff92
2af86e161a fix(headscale): Make the Affinity Rule Strict (#1540)
* fix(headscale): Make the Affinity Rule Strict

* fix(headscale): make ci happy

---------

Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-07-14 23:47:25 +08:00
aby913
ee567c270c fix(files): external delete (#1539)
* fix(files): external delete

* login & system-frontend: update login and system-frontend new version

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-07-12 00:23:59 +08:00
hysyeah
4246bcce06 fix: simplify nat permission request (#1538) 2025-07-12 00:23:10 +08:00
eball
fb73d62bd5 bfl: change unmount-api of file-server (#1537) 2025-07-12 00:22:27 +08:00
eball
209f0d15e3 authelia: send notification in user login phase (#1536)
* authelia: send notification in user login phase

* fix: set cookie nil

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-07-12 00:21:48 +08:00
dkeven
78911d44cf feat(gpu): add more metrics in GPU monitor API (#1535) 2025-07-12 00:20:41 +08:00
salt
d964c33c2d feat: Chinese uses both single-character segmentation and word segmen… (#1534)
feat: Chinese uses both single-character segmentation and word segmentation. Word segmentation is used for easier sorting.

Co-authored-by: ubuntu <you@example.com>
2025-07-11 22:00:14 +08:00
salt
2b54795e10 fix: waiting... Both uppercase and lowercase letters can be searched, include special token (#1533)
fix: Both uppercase and lowercase letters can be searched, and special characters can be searched as well.'

Co-authored-by: ubuntu <you@example.com>
2025-07-11 13:20:31 +08:00
aby913
efb4be4fcf fix(files): deletion and other fixes (#1532)
* fix(files): deletion and other fixes

* feat(files & marker): update files and market new version

* feat: update market worker count

* Update bfl_deploy.yaml

---------

Co-authored-by: qq815776412 <815776412@qq.com>
Co-authored-by: icebergtsn <zyh2433219116@gmail.com>
Co-authored-by: eball <liuy102@hotmail.com>
2025-07-11 00:35:46 +08:00
simon
89575096ba feat(knowledge): knowledge & download refactor (#1531)
* knowledge

* knowledge
2025-07-10 21:36:30 +08:00
dkeven
5edba60295 fix(cli): remove state files of olaresd when uninstalling (#1530) 2025-07-10 16:12:23 +08:00
eball
1aecc3495a ci: add a parameter of the code repository (#1529)
* ci: add a parameter of the code repository

* fix: file name bug

* refactor(cli): adjust local release command for vendor repo path

---------

Co-authored-by: dkeven <dkvvven@gmail.com>
2025-07-10 16:11:03 +08:00
salt
2d5c1fc484 feat: hybrid unigram search for title (#1528)
Co-authored-by: ubuntu <you@example.com>
2025-07-09 23:20:44 +08:00
hysyeah
81355f4a1c authelia: send login message to os.users.<olaresid> (#1527) 2025-07-09 23:20:13 +08:00
lovehunter9
2c4e9fb835 feat: seafile add support for avi, wmv, mkv, flv, rmvb (#1526) 2025-07-09 23:19:32 +08:00
dkeven
4947538e68 fix(daemon): apply filters correctly when listing users (#1525) 2025-07-09 23:18:39 +08:00
Peng Peng
21bb10b72b Revert "gpu: refactor gpu scheduler with cpp (#1475)"
This reverts commit ae3e4e6bb9.
2025-07-09 13:26:41 +08:00
wiy
8064c591f2 feat(files): files supports multiple nodes (#1524)
* feat(system-frontend): update files supports multiple nodes

* feat: add files routing gateway

* feat(media-server): surpport for multiple nodes

* feat(files): update files supports multiple nodes

---------

Co-authored-by: eball <liuy102@hotmail.com>
Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
Co-authored-by: aby913 <aby913@163.com>
2025-07-08 23:11:41 +08:00
Calvin W.
1073575a1d docs: add readmes for Olares components (#1522)
* docs: add readmes for Olares components

* merge with latest upstream
2025-07-08 21:34:05 +08:00
dkeven
4cf977f6df fix(ci): specify repo when checkout code for PR (#1523) 2025-07-08 17:53:46 +08:00
hysyeah
0dda3811c7 bfl, authelia, lldap: change access-token expiry duration, support refresh and revoke user token (#1521)
bfl, authelia, lldap: change access-token expiry duration and support refresh;revoke user token after reset password
2025-07-08 00:03:59 +08:00
hysyeah
2632b45fc2 bfl, app-service, system-frontend/dashboard: remove analytics (#1520)
* bfl, app-service: remove analytics

* fix(system-frontend): remove dashboard analytics

* fix(system-frontend): update system-frontend version

---------

Co-authored-by: yyh <24493052+yongheng2016@users.noreply.github.com>
2025-07-08 00:03:11 +08:00
berg
ae3f3d6a20 market: v1.12 new category and fix some bugs. (#1518)
feat: v1.12 new category and fix some bugs.
2025-07-05 00:55:37 +08:00
eball
4f3b824f48 authelia: update oidc cert (#1516) 2025-07-05 00:54:44 +08:00
hysyeah
9efa6df969 tapr: add default perm for nats subject (#1515)
fix: add default perm for nats subject
2025-07-05 00:54:01 +08:00
dkeven
045dfc11bc perf(ci): ignore more archs when releasing cli (#1514)
* perf(ci): ignore more archs when releasing cli

* Update auth_backend_deploy.yaml

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-07-04 18:45:36 +08:00
hysyeah
9913d29f81 studio-server: move studio server to os-framework (#1513) 2025-07-04 00:42:39 +08:00
berg
0ccf091aff market, settings: fix the problem of theme settings & settings apps status & market terminusInfo error (#1512)
feat: update market frontend and backend version
2025-07-04 00:41:54 +08:00
dkeven
01f3b27b8c feat(upgrade): update sysconf for specific versions (#1511) 2025-07-04 00:41:12 +08:00
dkeven
475faafec4 fix(cli): clear upgrade-related state files when uninstalling (#1510) 2025-07-03 21:01:07 +08:00
berg
31ab286a4b market, profile: fix display error in avatar selector's image list and clear market data when terminusId changed (#1509)
feat: update market frontend and backend version
2025-07-03 00:51:40 +08:00
eball
c9b4a40a1c olares: refactor installation manifest (#1508)
* olares: refactor installation manifest

* fix: file name typo

* fix: add http accept header

* fix: bug

* fix: bug

* fix: import json
2025-07-03 00:50:09 +08:00
simon
da19d00d08 fix(download): fix download task operation & reduce youtube API requests (#1507)
download
2025-07-02 21:49:49 +08:00
dkeven
49d233a55b fix(cli): also update local reserved ports when modifying sysconf (#1506) 2025-07-02 21:49:23 +08:00
dkeven
300aaa0753 fix(daemon): handle empty pid files when check process running (#1505) 2025-07-02 21:48:56 +08:00
berg
962b220440 market: add local chart upload socket event & update menu and add search function (#1504)
* fix: omit to gen entrance url before active

* feat: update market frontend and backend version

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-07-01 23:44:31 +08:00
salt
4da25bca36 fix: when need physical path, miss use frontend_resource_uri (#1500)
* fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search without result 2.when generate_monitor_folder_path_list for convert_from_physical_path_to_frontend_resource_uri not propagate error

* fix: search3 fix when need physical path miss use frontend_resource_ui

* fix: use wrong image

---------

Co-authored-by: ubuntu <you@example.com>
2025-07-01 23:32:34 +08:00
dkeven
42eff16695 feat(cli): config endpoint_pod_names in coredns when installing (#1503) 2025-07-01 20:35:42 +08:00
dkeven
450aa19dfc fix(cli): also reserve local ports for l4-proxied service (#1502) 2025-07-01 20:35:20 +08:00
eball
c750f6f85b infisical: create user error (#1501) 2025-07-01 20:33:18 +08:00
berg
bf57da0fa4 market: waiting for the app-service to start & displays the failed status of the installation button. (#1499)
feat: update market version
2025-06-30 23:57:57 +08:00
0x7fffff92
5df379f286 feat(headscale): let headscale run on the master node like l4-bfl-proxy (#1498)
Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-06-30 21:02:26 +08:00
dkeven
cfb54fb974 feat(cli): auto enable GPU when adding new node (#1497) 2025-06-30 21:02:00 +08:00
eball
9515c05bb6 bfl: do not change owner when restart (#1496) 2025-06-30 21:01:25 +08:00
dkeven
bdcd924e50 chore(cli): remove unused DeleteCache arg and module (#1495) 2025-06-30 21:01:10 +08:00
eball
e9eb218348 olaresd: refresh user expiring certs (#1493)
* feat: refresh user expiring certs

* fix: admin user not found
2025-06-30 21:00:32 +08:00
eball
9746e2c110 infisical: crash when user not found (#1492) 2025-06-30 21:00:14 +08:00
berg
27d9715292 market: multi user multi source (#1490)
* multi user & multi source & pre-render and collect image download progress & custom render variants

* support GlobalEnvs

* feat: release system-frontend: v1.3.88

* feat: app-service, studio-server

* feat: update market backend version

---------

Co-authored-by: Sai <kldtks@live.com>
Co-authored-by: hys <hysyeah@gmail.com>
2025-06-28 16:46:44 +08:00
salt
10d6c2a6fa fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search w… (#1491)
fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search without result 2.when generate_monitor_folder_path_list for convert_from_physical_path_to_frontend_resource_uri not propagate error

Co-authored-by: ubuntu <you@example.com>
2025-06-28 16:46:10 +08:00
eball
57d8a55d8d authelia: add user list api (#1489) 2025-06-27 22:07:27 +08:00
dkeven
b9a227acd7 fix(manifest): update the missed reverse proxy image version (#1488) 2025-06-27 11:27:07 +08:00
wiy
e6115794ce feat(system-frontend): update system-frontend new version to v1.3.86 (#1487) 2025-06-27 11:24:02 +08:00
dkeven
22739c90db fix(manifest): add missing app author label to argo deploy (#1486) 2025-06-27 11:23:29 +08:00
dkeven
6fac46130a perf(gpu): use our fork of dcgm-exporter with lower memory consumption (#1485) 2025-06-27 11:23:07 +08:00
simon
e19e049e7d feat(knowledge): add youtube feed and optimize the file name for aria2 download (#1481)
knowledge v0.12.12
2025-06-26 15:53:40 +08:00
wiy
1d0c20d6ad fix(system-frontend): copy nginx address error (#1484) 2025-06-26 15:16:18 +08:00
dkeven
397590d402 fix(cli): set health host of felix to lo addr explicitly (#1483) 2025-06-26 15:15:53 +08:00
hysyeah
fc1a59b79b ks,cli: remove host_ip label from some metric (#1482)
ks,cli: remove host_ip label from metric
2025-06-26 00:05:10 +08:00
eball
3dea149790 olaresd: network interface api modifed and nvstream mdns bug fix (#1480) 2025-06-26 00:04:10 +08:00
0x7fffff92
9d6834faa1 feat(tailscale): let tailscale run on the node where headscale is run… (#1479)
feat(tailscale): let tailscale run on the node where headscale is running

Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-06-26 00:03:51 +08:00
dkeven
bef61309a3 feat(cli): set explicit image gc policy when installing K8s (#1478) 2025-06-26 00:03:04 +08:00
salt
cf52a59ef7 feat: search3 support multiple node for cache and external, run as daemonset (#1477)
* feat: search3 support multiple node for cache and external, and search3monitor run in daemon set

* fix: fix search3 iniialization fail because of not exist table __diesel_schema_migrations

---------

Co-authored-by: ubuntu <you@example.com>
2025-06-26 00:02:36 +08:00
wiy
80023be159 feat(system-frontend): merge system apps main (#1476)
* feat(system-frontend): merge apps into one image

* fix(system-frontend): update image version to v1.3.85

---------

Co-authored-by: yyh <24493052+yongheng2016@users.noreply.github.com>
2025-06-26 00:02:03 +08:00
eball
ae3e4e6bb9 gpu: refactor gpu scheduler with cpp (#1475) 2025-06-24 23:29:13 +08:00
dkeven
8c9e4d532b fix(daemon): upgrade runc dependency to fix vulnerability (#1473) 2025-06-24 21:33:43 +08:00
eball
3c48afb5b5 olares: move gpu package (#1474)
* olares: move gpu package

* fix: hami webui image
2025-06-24 21:32:37 +08:00
dkeven
3d22a01eef fix(cli): do not wait for recreation of pods without owner when changing ip (#1472) 2025-06-23 23:26:41 +08:00
eball
d6263bacca authelia: remove httponly option from set-cookie (#1471) 2025-06-23 23:25:55 +08:00
hysyeah
3b070ea095 node-exporter: add pcie_version,sata_version label for disk metric (#1470)
node-exporter: add pcie_version,sata_version label for node_disk_smartctl_info metric
2025-06-23 23:25:19 +08:00
dkeven
82b715635b feat: build and use hami-webui images using our own repo (#1469) 2025-06-23 23:24:38 +08:00
Peng Peng
1d4494c8d7 feat(user-service, notification, analytics): put prisma library under node_moudles in dockers (#1468)
feat: add prisma dependency to the docker
2025-06-23 11:22:31 +08:00
simon
56f5c07229 feat(knowledge): add ebook , pdf download and article extractor (#1467)
knowledge v0.12.11
2025-06-21 02:08:19 +08:00
berg
697ac440c7 wise, studio, desktop, dashboard: update system frontend version to v1.3.82 (#1466)
feat: update system frontend version to v1.3.82
2025-06-21 02:07:58 +08:00
eball
f0edbc08a6 gpu: bump libvgpu.so version (#1465) 2025-06-20 20:31:41 +08:00
eball
001607e840 authelia: add SameSite option to set-cookie (#1464) 2025-06-20 20:31:23 +08:00
dkeven
e8f525daca refactor(daemon): new scheme for upgrade APIs and operations (#1463) 2025-06-20 20:30:46 +08:00
salt
6d6f7705c9 feat: return search3 result with standard resource_urri (#1462)
* fix: fix search3 escape error

* feat: for search return resource_uri with standard mode

---------

Co-authored-by: Ubuntu <ubuntu@localhost.localdomain>
2025-06-20 11:18:01 +08:00
wiy
46b7fa0079 feat(system-frontend): update desktop files search; update dashboard chart components; (#1461) 2025-06-20 00:27:06 +08:00
hysyeah
793a62396b lldap,system-server: pub event async; chanage secret ns (#1460)
lldap,system-server: pub event async
2025-06-20 00:26:44 +08:00
eball
7cb4975f5b authelia: replace http session with lldap jwt (#1459)
* authelia: replace http session with lldap jwt

* fix: remove check auth

* fix: set default configuration

* fix: revert pg and nats configuration
2025-06-20 00:26:12 +08:00
eball
bfaf647ad1 tapr, cli:add extension vchord to pg and decrease k3s image fs threshold (#1458)
* tapr, cli:add extension vchord to pg and decrease k3s image fs threshold

* fix: image tag
2025-06-19 23:18:56 +08:00
hysyeah
23d3dc58ed lldap,tapr: add totp api (#1456) 2025-06-19 00:20:18 +08:00
yyh
7bf07f36b7 feat(system-frontend): update dashboard, control hub, and settings image (#1455)
* feat(system-frontend): update dashboard, control hub, and settings images to v1.3.80

* feat(ks_server): add environment variables for NODE_IP and TERMINUSD_HOST
2025-06-19 00:19:17 +08:00
eball
7e7117fc3a cli, daemon: persist the user name to the Olares release file (#1454) 2025-06-19 00:18:38 +08:00
hysyeah
ff159c7a29 tapr: change nats subjet name (#1452) 2025-06-17 23:38:39 +08:00
yyh
92b84ab70b feat(system-frontend/ks_server): update apps image and monitoring server versions (#1451)
* feat: update apps image  and monitoring server versions

* fix(system-frontend): update files-frontend image version to v1.3.79
2025-06-17 23:38:03 +08:00
dkeven
561d4ba93c refactor(cli): unify local release with daily build (#1450) 2025-06-17 23:37:29 +08:00
aby913
2089e42c32 files: fix files, gateway image (#1449)
files: fix files, appdata-gateway image
2025-06-17 23:37:02 +08:00
eball
b50139af5d authelia: wrong lldap service namespace configuration (#1448)
* authelia: wrong lldap service namespace configuration

* fix: change lldap secret namespace

* fix: nats namespace

* bfl: fix lldap namespace bug

* fix: app-service lldap secret

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-06-17 23:36:37 +08:00
293 changed files with 4256 additions and 6698 deletions

View File

@@ -65,6 +65,7 @@ jobs:
with:
version: ${{ needs.test-version.outputs.version }}
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
upload-daemon:
needs: test-version
@@ -73,6 +74,7 @@ jobs:
with:
version: ${{ needs.test-version.outputs.version }}
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
push-image:
runs-on: ubuntu-latest
@@ -132,6 +134,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.test-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -156,6 +159,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.test-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64

View File

@@ -11,27 +11,13 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
coscmd config -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -42,28 +28,12 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
coscmd config -m 10 -p 10 -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64

View File

@@ -11,22 +11,6 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
coscmd config -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
@@ -42,23 +26,6 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
coscmd config -m 10 -p 10 -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View File

@@ -8,7 +8,17 @@ on:
required: true
ref:
type: string
repository:
type: string
workflow_dispatch:
inputs:
version:
type: string
required: true
ref:
type: string
repository:
type: string
jobs:
goreleaser:
runs-on: ubuntu-22.04
@@ -18,6 +28,7 @@ jobs:
with:
fetch-depth: 1
ref: ${{ inputs.ref }}
repository: ${{ inputs.repository }}
- name: Add Local Git Tag For GoReleaser
run: git tag ${{ inputs.version }}
@@ -51,6 +62,5 @@ jobs:
AWS_DEFAULT_REGION: "us-east-1"
run: |
cd cli/output && for file in *.tar.gz; do
aws s3 cp "$file" s3://terminus-os-install/$file --acl=public-read
# coscmd upload $file /$file
aws s3 cp "$file" s3://terminus-os-install${{ secrets.REPO_PATH }}${file} --acl=public-read
done

View File

@@ -8,7 +8,17 @@ on:
required: true
ref:
type: string
repository:
type: string
workflow_dispatch:
inputs:
version:
type: string
required: true
ref:
type: string
repository:
type: string
jobs:
goreleaser:
@@ -19,6 +29,7 @@ jobs:
with:
fetch-depth: 1
ref: ${{ inputs.ref }}
repository: ${{ inputs.repository }}
- name: Add Local Git Tag For GoReleaser
run: git tag ${{ inputs.version }}
@@ -54,5 +65,5 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
cd daemon/output && for file in *.tar.gz; do
aws s3 cp "$file" s3://terminus-os-install/$file --acl=public-read
aws s3 cp "$file" s3://terminus-os-install${{ secrets.REPO_PATH }}${file} --acl=public-read
done

View File

@@ -77,6 +77,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.daily-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -94,6 +95,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.daily-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64
@@ -121,8 +123,8 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
md5sum install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz > install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt s3://terminus-os-install/install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz s3://terminus-os-install/install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz --acl=public-read && \
echo "md5sum=$(awk '{print $1}' install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt)" >> "$GITHUB_OUTPUT"

View File

@@ -80,8 +80,8 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
md5sum install-wizard-v${{ github.event.inputs.tags }}.tar.gz > install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt s3://terminus-os-install/install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.tar.gz s3://terminus-os-install/install-wizard-v${{ github.event.inputs.tags }}.tar.gz --acl=public-read
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.tar.gz s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.tar.gz --acl=public-read
release:
runs-on: ubuntu-latest
@@ -101,7 +101,7 @@ jobs:
- name: Get checksum
id: vars
run: |
echo "version_md5sum=$(curl -sSfL https://dc3p1870nn3cj.cloudfront.net/install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt|awk '{print $1}')" >> $GITHUB_OUTPUT
echo "version_md5sum=$(curl -sSfL https://dc3p1870nn3cj.cloudfront.net${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt|awk '{print $1}')" >> $GITHUB_OUTPUT
- name: Update checksum
uses: eball/write-tag-to-version-file@latest

View File

@@ -108,20 +108,15 @@ Olares has been tested and verified on the following Linux platforms:
To get started with Olares on your own device, follow the [Getting Started Guide](https://docs.olares.com/manual/get-started/) for step-by-step instructions.
## Project navigation
> [!NOTE]
> We are currently consolidating Olares subproject code into this repository. This process may take a few months. Once finished, you will get a comprehensive view of the entire Olares system here.
This section lists the main directories in the Olares repository:
* **`apps`**: Contains the code for system applications, primarily for `larepass`.
* **`cli`**: Contains the code for `olares-cli`, the command-line interface tool for Olares.
* **`daemon`**: Contains the code for `olaresd`, the system daemon process.
* **[`apps`](./apps)**: Contains the code for system applications, primarily for `larepass`.
* **[`cli`](./cli)**: Contains the code for `olares-cli`, the command-line interface tool for Olares.
* **[`daemon`](./daemon)**: Contains the code for `olaresd`, the system daemon process.
* **`docs`**: Contains documentation for the project.
* **`framework`**: Contains the Olares system services.
* **`infrastructure`**: Contains code related to infrastructure components such as computing, storage, networking, and GPUs.
* **`platform`**: Contains code for cloud-native components like databases and message queues.
* **[`framework`](./framework)**: Contains the Olares system services.
* **[`infrastructure`](./infrastructure)**: Contains code related to infrastructure components such as computing, storage, networking, and GPUs.
* **[`platform`](./platform)**: Contains code for cloud-native components like databases and message queues.
* **`vendor`**: Contains code from third-party hardware vendors.
## Contributing to Olares

View File

@@ -110,19 +110,15 @@ Olares 已在以下 Linux 平台完成测试与验证:
参考[快速上手指南](https://docs.olares.cn/zh/manual/get-started/)安装并激活 Olares。
## 项目目录
> [!NOTE]
> 我们正将 Olares 子项目的代码移动到当前仓库。此过程可能会持续数月。届时您就可以通过本仓库了解 Olares 系统的全貌。
Olares 代码库中的主要目录如下:
* **`apps`**: 用于存放系统应用,主要是 `larepass` 的代码。
* **`cli`**: 用于存放 `olares-cli`Olares 的命令行界面工具)的代码。
* **`daemon`**: 用于存放 `olaresd`(系统守护进程)的代码。
* **[`apps`](./apps)**: 用于存放系统应用,主要是 `larepass` 的代码。
* **[`cli`](./cli)**: 用于存放 `olares-cli`Olares 的命令行界面工具)的代码。
* **[`daemon`](./daemon)**: 用于存放 `olaresd`(系统守护进程)的代码。
* **`docs`**: 用于存放 Olares 项目的文档。
* **`framework`**: 用来存放 Olares 系统服务代码。
* **`infrastructure`**: 用于存放计算存储网络GPU 等基础设施的代码。
* **`platform`**: 用于存放数据库、消息队列等云原生组件的代码。
* **[`framework`](./framework)**: 用来存放 Olares 系统服务代码。
* **[`infrastructure`](./infrastructure)**: 用于存放计算存储网络GPU 等基础设施的代码。
* **[`platform`](./platform)**: 用于存放数据库、消息队列等云原生组件的代码。
* **`vendor`**: 用于存放来自第三方硬件供应商的代码。
## 社区贡献

View File

@@ -110,18 +110,15 @@ Olaresは以下のLinuxプラットフォームで動作検証を完了してい
## プロジェクトナビゲーション
> [!NOTE]
> 現在、Olaresのサブプロジェクトのコードを当リポジトリへ移行する作業を進めています。この作業が完了するまでには数ヶ月を要する見込みです。完了後には、当リポジトリを通じてOlaresシステムの全貌をご覧いただけるようになります。
このセクションでは、Olares リポジトリ内の主要なディレクトリをリストアップしています:
* **`apps`**: システムアプリケーションのコードが含まれており、主に `larepass` 用です。
* **`cli`**: Olares のコマンドラインインターフェースツールである `olares-cli` のコードが含まれています。
* **`daemon`**: システムデーモンプロセスである `olaresd` のコードが含まれています。
* **[`apps`](./apps)**: システムアプリケーションのコードが含まれており、主に `larepass` 用です。
* **[`cli`](./cli)**: Olares のコマンドラインインターフェースツールである `olares-cli` のコードが含まれています。
* **[`daemon`](./daemon)**: システムデーモンプロセスである `olaresd` のコードが含まれています。
* **`docs`**: プロジェクトのドキュメントが含まれています。
* **`framework`**: Olares システムサービスが含まれています。
* **`infrastructure`**: コンピューティング、ストレージ、ネットワーキング、GPU などのインフラストラクチャコンポーネントに関連するコードが含まれています。
* **`platform`**: データベースやメッセージキューなどのクラウドネイティブコンポーネントのコードが含まれています。
* **[`framework`](./framework)**: Olares システムサービスが含まれています。
* **[`infrastructure`](./infrastructure)**: コンピューティング、ストレージ、ネットワーキング、GPU などのインフラストラクチャコンポーネントに関連するコードが含まれています。
* **[`platform`](./platform)**: データベースやメッセージキューなどのクラウドネイティブコンポーネントのコードが含まれています。
* **`vendor`**: サードパーティのハードウェアベンダーからのコードが含まれています。
## Olaresへの貢献

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,26 +0,0 @@
apiVersion: v2
name: appstore
description: A Helm chart for Kubernetes
maintainers:
- name: bytetrade
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "appstore.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "appstore.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "appstore.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "appstore.labels" -}}
helm.sh/chart: {{ include "appstore.chart" . }}
{{ include "appstore.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "appstore.selectorLabels" -}}
app.kubernetes.io/name: {{ include "appstore.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "appstore.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "appstore.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,353 +0,0 @@
{{- $market_secret := (lookup "v1" "Secret" .Release.Namespace "market-secrets") -}}
{{- $redis_password := "" -}}
{{ if $market_secret -}}
{{ $redis_password = (index $market_secret "data" "redis-passwords") }}
{{ else -}}
{{ $redis_password = randAlphaNum 16 | b64enc }}
{{- end -}}
{{- $market_backend_nats_secret := (lookup "v1" "Secret" .Release.Namespace "market-backend-nats-secret") -}}
{{- $nats_password := "" -}}
{{ if $market_backend_nats_secret -}}
{{ $nats_password = (index $market_backend_nats_secret "data" "nats_password") }}
{{ else -}}
{{ $nats_password = randAlphaNum 16 | b64enc }}
{{- end -}}
---
apiVersion: v1
kind: Secret
metadata:
name: market-backend-nats-secret
namespace: {{ .Release.Namespace }}
type: Opaque
data:
nats_password: {{ $nats_password }}
---
apiVersion: v1
kind: Secret
metadata:
name: market-secrets
namespace: {{ .Release.Namespace }}
type: Opaque
data:
redis-passwords: {{ $redis_password }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: market-deployment
namespace: {{ .Release.Namespace }}
labels:
app: appstore
applications.app.bytetrade.io/author: bytetrade.io
spec:
replicas: 1
selector:
matchLabels:
app: appstore
template:
metadata:
labels:
app: appstore
io.bytetrade.app: "true"
annotations:
instrumentation.opentelemetry.io/inject-go: "olares-instrumentation"
instrumentation.opentelemetry.io/go-container-names: "appstore-backend"
instrumentation.opentelemetry.io/otel-go-auto-target-exe: "/opt/app/market"
spec:
priorityClassName: "system-cluster-critical"
initContainers:
- args:
- -it
- authelia-backend.os-framework:9091
image: owncloudci/wait-for:latest
imagePullPolicy: IfNotPresent
name: check-auth
- name: terminus-sidecar-init
image: openservicemesh/init:v1.2.3
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
capabilities:
add:
- NET_ADMIN
runAsNonRoot: false
runAsUser: 0
command:
- /bin/sh
- -c
- |
iptables-restore --noflush <<EOF
# sidecar interception rules
*nat
:PROXY_IN_REDIRECT - [0:0]
:PROXY_INBOUND - [0:0]
-A PROXY_IN_REDIRECT -p tcp -j REDIRECT --to-port 15003
-A PROXY_INBOUND -p tcp --dport 15000 -j RETURN
-A PROXY_INBOUND -p tcp -j PROXY_IN_REDIRECT
-A PREROUTING -p tcp -j PROXY_INBOUND
COMMIT
EOF
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
containers:
- name: appstore-backend
image: beclab/market-backend:v0.3.12
imagePullPolicy: IfNotPresent
ports:
- containerPort: 81
env:
- name: OS_SYSTEM_SERVER
value: system-server.user-system-{{ .Values.bfl.username }}
- name: OS_APP_SECRET
value: '{{ .Values.os.appstore.appSecret }}'
- name: OS_APP_KEY
value: {{ .Values.os.appstore.appKey }}
- name: APP_SOTRE_SERVICE_SERVICE_HOST
value: appstore-server-prod.bttcdn.com
- name: MARKET_PROVIDER
value: '{{ .Values.os.appstore.marketProvider }}'
- name: APP_SOTRE_SERVICE_SERVICE_PORT
value: '443'
- name: APP_SERVICE_SERVICE_HOST
value: app-service.os-framework
- name: APP_SERVICE_SERVICE_PORT
value: '6755'
- name: REPO_URL_PORT
value: "82"
- name: REDIS_ADDRESS
value: 'redis-cluster-proxy.user-system-{{ .Values.bfl.username }}:6379'
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: market-secrets
key: redis-passwords
- name: REDIS_DB_NUMBER
value: '0'
- name: REPO_URL_HOST
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NATS_HOST
value: nats.user-system-{{ .Values.bfl.username }}
- name: NATS_PORT
value: '4222'
- name: NATS_USERNAME
value: market-backend-{{ .Values.bfl.username}}
- name: NATS_PASSWORD
valueFrom:
secretKeyRef:
name: market-backend-nats-secret
key: nats_password
- name: NATS_SUBJECT_USER_APPLICATION
value: terminus.user.application.{{ .Values.bfl.username}}
volumeMounts:
- name: opt-data
mountPath: /opt/app/data
- name: terminus-envoy-sidecar
image: bytetrade/envoy:v1.25.11
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
runAsUser: 1000
ports:
- name: proxy-admin
containerPort: 15000
- name: proxy-inbound
containerPort: 15003
volumeMounts:
- name: terminus-sidecar-config
readOnly: true
mountPath: /etc/envoy/envoy.yaml
subPath: envoy.yaml
command:
- /usr/local/bin/envoy
- --log-level
- debug
- -c
- /etc/envoy/envoy.yaml
env:
- name: POD_UID
valueFrom:
fieldRef:
fieldPath: metadata.uid
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: terminus-ws-sidecar
image: 'beclab/ws-gateway:v1.0.5'
command:
- /ws-gateway
env:
- name: WS_PORT
value: '81'
- name: WS_URL
value: /app-store/v1/websocket/message
resources: { }
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
volumes:
- name: terminus-sidecar-config
configMap:
name: sidecar-ws-configs
items:
- key: envoy.yaml
path: envoy.yaml
- name: opt-data
hostPath:
path: '{{ .Values.userspace.appData}}/appstore/data'
type: DirectoryOrCreate
- name: app
emptyDir: {}
- name: nginx-confd
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: appstore-service
namespace: {{ .Release.Namespace }}
spec:
selector:
app: appstore
type: ClusterIP
ports:
- protocol: TCP
name: appstore-backend
port: 81
targetPort: 81
---
apiVersion: sys.bytetrade.io/v1alpha1
kind: ApplicationPermission
metadata:
name: appstore
namespace: user-system-{{ .Values.bfl.username }}
spec:
app: appstore
appid: appstore
key: {{ .Values.os.appstore.appKey }}
secret: {{ .Values.os.appstore.appSecret }}
permissions:
- dataType: event
group: message-disptahcer.system-server
ops:
- Create
version: v1
- dataType: app
group: service.bfl
ops:
- UserApps
version: v1
status:
state: active
---
apiVersion: sys.bytetrade.io/v1alpha1
kind: ProviderRegistry
metadata:
name: appstore-backend-provider
namespace: user-system-{{ .Values.bfl.username }}
spec:
dataType: app
deployment: market
description: app store provider
endpoint: appstore-service.{{ .Release.Namespace }}:81
group: service.appstore
kind: provider
namespace: {{ .Release.Namespace }}
opApis:
- name: InstallDevApp
uri: /app-store/v1/applications/provider/installdev
- name: UninstallDevApp
uri: /app-store/v1/applications/provider/uninstalldev
version: v1
status:
state: active
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: market-redis
namespace: {{ .Release.Namespace }}
spec:
app: market
appNamespace: {{ .Release.Namespace }}
middleware: redis
redis:
password:
valueFrom:
secretKeyRef:
key: redis-passwords
name: market-secrets
namespace: market
---
apiVersion: v1
kind: Service
metadata:
name: appstore-svc
namespace: {{ .Release.Namespace }}
spec:
type: ClusterIP
selector:
app: appstore
ports:
- name: "appstore-backend"
protocol: TCP
port: 81
targetPort: 81
- name: "appstore-websocket"
protocol: TCP
port: 40010
targetPort: 40010
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: market-backend-nats
namespace: {{ .Release.Namespace }}
spec:
app: market-backend
appNamespace: user
middleware: nats
nats:
password:
valueFrom:
secretKeyRef:
key: nats_password
name: market-backend-nats-secret
refs:
- appName: user-service
appNamespace: user
subjects:
- name: "application.*"
perm:
- pub
- sub
- appName: user-service
appNamespace: user
subjects:
- name: "market.*"
perm:
- pub
- sub
user: market-backend-{{ .Values.bfl.username}}

View File

@@ -1,44 +0,0 @@
bfl:
nodeport: 30883
nodeport_ingress_http: 30083
nodeport_ingress_https: 30082
username: 'test'
url: 'test'
nodeName: test
pvc:
userspace: test
userspace:
userData: test/Home
appData: test/Data
appCache: test
dbdata: test
docs:
nodeport: 30881
desktop:
nodeport: 30180
os:
portfolio:
appKey: '${ks[0]}'
appSecret: test
vault:
appKey: '${ks[0]}'
appSecret: test
desktop:
appKey: '${ks[0]}'
appSecret: test
message:
appKey: '${ks[0]}'
appSecret: test
rss:
appKey: '${ks[0]}'
appSecret: test
search:
appKey: '${ks[0]}'
appSecret: test
search2:
appKey: '${ks[0]}'
appSecret: test
appstore:
marketProvider: ''
kubesphere:
redis_password: ""

View File

@@ -1,294 +1,13 @@
{{- $namespace := printf "%s%s" "user-system-" .Values.bfl.username -}}
{{- $studio_secret := (lookup "v1" "Secret" $namespace "studio-secrets") -}}
{{- $pg_password := "" -}}
{{ if $studio_secret -}}
{{ $pg_password = (index $studio_secret "data" "pg_password") }}
{{ else -}}
{{ $pg_password = randAlphaNum 16 | b64enc }}
{{- end -}}
---
apiVersion: v1
kind: Secret
metadata:
name: studio-secrets
namespace: user-system-{{ .Values.bfl.username }}
type: Opaque
data:
pg_password: {{ $pg_password }}
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: studio-pg
namespace: user-system-{{ .Values.bfl.username }}
spec:
app: studio
appNamespace: {{ .Release.Namespace }}
middleware: postgres
postgreSQL:
user: studio_{{ .Values.bfl.username }}
password:
valueFrom:
secretKeyRef:
key: pg_password
name: studio-secrets
databases:
- name: studio
---
apiVersion: v1
kind: Service
metadata:
name: studio-server
namespace: {{ .Release.Namespace }}
namespace: user-space-{{ .Values.bfl.username }}
spec:
selector:
app: studio-server
type: ExternalName
externalName: studio-server.os-framework.svc.cluster.local
ports:
- protocol: TCP
name: studio-server
port: 8080
targetPort: 8088
name: http
- protocol: TCP
port: 8083
targetPort: 8083
name: https
---
kind: Service
apiVersion: v1
metadata:
name: chartmuseum-studio
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8888
selector:
app: studio-server
---
apiVersion: v1
kind: ConfigMap
metadata:
name: studio-san-cnf
namespace: {{ .Release.Namespace }}
data:
san.cnf: |
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no
[req_distinguished_name]
countryName = CN
stateOrProvinceName = Beijing
localityName = Beijing
0.organizationName = bytetrade
commonName = studio-server.{{ .Release.Namespace }}.svc
[v3_req]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @bytetrade
[bytetrade]
DNS.1 = studio-server.{{ .Release.Namespace }}.svc
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: studio-server
namespace: {{ .Release.Namespace }}
labels:
app: studio-server
applications.app.bytetrade.io/author: bytetrade.io
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: studio-server
template:
metadata:
labels:
app: studio-server
spec:
serviceAccountName: bytetrade-controller
volumes:
- name: chart
hostPath:
type: DirectoryOrCreate
path: '{{ .Values.userspace.appData}}/studio/Chart'
- name: data
hostPath:
type: DirectoryOrCreate
path: '{{ .Values.userspace.appData }}/studio/Data'
- name: storage-volume
hostPath:
path: '{{ .Values.userspace.appData }}/studio/helm-repo-dev'
type: DirectoryOrCreate
- name: config-san
configMap:
name: studio-san-cnf
items:
- key: san.cnf
path: san.cnf
- name: certs
emptyDir: {}
initContainers:
- name: init-chmod-data
image: busybox:1.28
imagePullPolicy: IfNotPresent
command:
- sh
- '-c'
- |
chown -R 1000:1000 /home/coder
chown -R 65532:65532 /charts
chown -R 65532:65532 /data
securityContext:
runAsUser: 0
resources: { }
volumeMounts:
- name: storage-volume
mountPath: /home/coder
- name: chart
mountPath: /charts
- name: data
mountPath: /data
- name: generate-certs
image: beclab/openssl:v3
imagePullPolicy: IfNotPresent
command: [ "/bin/sh", "-c" ]
args:
- |
openssl genrsa -out /etc/certs/ca.key 2048
openssl req -new -x509 -days 3650 -key /etc/certs/ca.key -out /etc/certs/ca.crt \
-subj "/CN=bytetrade CA/O=bytetrade/C=CN"
openssl req -new -newkey rsa:2048 -nodes \
-keyout /etc/certs/server.key -out /etc/certs/server.csr \
-config /etc/san/san.cnf
openssl x509 -req -days 3650 -in /etc/certs/server.csr \
-CA /etc/certs/ca.crt -CAkey /etc/certs/ca.key \
-CAcreateserial -out /etc/certs/server.crt \
-extensions v3_req -extfile /etc/san/san.cnf
chown -R 65532 /etc/certs/*
volumeMounts:
- name: config-san
mountPath: /etc/san
- name: certs
mountPath: /etc/certs
containers:
- name: studio
image: beclab/studio-server:v0.1.51
imagePullPolicy: IfNotPresent
args:
- server
ports:
- name: port
containerPort: 8088
protocol: TCP
- name: ssl-port
containerPort: 8083
protocol: TCP
volumeMounts:
- name: chart
mountPath: /charts
- name: data
mountPath: /data
- mountPath: /etc/certs
name: certs
lifecycle:
preStop:
exec:
command:
- "/studio"
- "clean"
env:
- name: BASE_DIR
value: /charts
- name: OS_API_KEY
value: {{ .Values.os.studio.appKey }}
- name: OS_API_SECRET
value: {{ .Values.os.studio.appSecret }}
- name: OS_SYSTEM_SERVER
value: system-server.user-system-{{ .Values.bfl.username }}
- name: NAME_SPACE
value: {{ .Release.Namespace }}
- name: OWNER
value: '{{ .Values.bfl.username }}'
- name: DB_HOST
value: citus-master-svc.user-system-{{ .Values.bfl.username }}
- name: DB_USERNAME
value: studio_{{ .Values.bfl.username }}
- name: DB_PASSWORD
value: "{{ $pg_password | b64dec }}"
- name: DB_NAME
value: user_space_{{ .Values.bfl.username }}_studio
- name: DB_PORT
value: "5432"
resources:
requests:
cpu: "50m"
memory: 100Mi
limits:
cpu: "0.5"
memory: 1000Mi
- name: chartmuseum
image: aboveos/helm-chartmuseum:v0.15.0
args:
- '--port=8888'
- '--storage-local-rootdir=/storage'
ports:
- name: http
containerPort: 8888
protocol: TCP
env:
- name: CHART_POST_FORM_FIELD_NAME
value: chart
- name: DISABLE_API
value: 'false'
- name: LOG_JSON
value: 'true'
- name: PROV_POST_FORM_FIELD_NAME
value: prov
- name: STORAGE
value: local
resources:
requests:
cpu: "50m"
memory: 100Mi
limits:
cpu: 1000m
memory: 512Mi
volumeMounts:
- name: storage-volume
mountPath: /storage
livenessProbe:
httpGet:
path: /health
port: http
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: http
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
targetPort: 8080

View File

@@ -0,0 +1,20 @@
# Olares Apps
## Overview
This directory contains the code for system applications, primarily for LarePass. The table below lists the pre-installed system applications, which offer tools for managing files, knowledge, passwords, and the system itself.
## System Applications Overview
| Application | Description |
| --- | --- |
| Files | A file management app that manages and synchronizes files across devices and sources, enabling seamless sharing and access. |
| Wise | A local-first and AI-native modern reader that helps to collect, read, and manage information from various platforms. Users can run self-hosted recommendation algorithms to filter and sort online content. |
| Vault | A secure password manager for storing and managing sensitive information across devices. |
| Market | A decentralized and permissionless app store for installing, uninstalling, and updating applications and recommendation algorithms. |
| Desktop | A hub for managing and interacting with installed applications. File and application searching are also supported. |
| Profile | An app to customize the user's profile page. |
| Settings | A system configuration application. |
| Dashboard | An app for monitoring system resource usage. |
| Control Hub | The console for Olares, providing precise and autonomous control over the system and its environment. |
| DevBox | A development tool for building and deploying Olares applications. |

View File

@@ -6,7 +6,7 @@ metadata:
annotations:
iam.kubesphere.io/uninitialized: "true"
helm.sh/resource-policy: keep
bytetrade.io/owner-role: platform-admin
bytetrade.io/owner-role: owner
bytetrade.io/terminus-name: "{{.Values.user.terminus_name}}"
bytetrade.io/launcher-auth-policy: two_factor
bytetrade.io/launcher-access-level: "1"
@@ -23,4 +23,4 @@ spec:
groups:
- lldap_admin
status:
state: Active
state: Created

View File

@@ -5,7 +5,7 @@ metadata:
spec:
lldap:
name: ldap
url: "http://lldap-service.os-framework:17170"
url: "http://lldap-service.os-platform:17170"
userBlacklist:
- admin
- terminus
@@ -15,4 +15,4 @@ spec:
credentialsSecret:
kind: Secret
name: lldap-credentials
namespace: os-framework
namespace: os-platform

View File

@@ -24,6 +24,7 @@ cp ${BASE_DIR}/.dependencies/components ${BASE_DIR}/.manifest/.
cp ${BASE_DIR}/.dependencies/components ${BASE_DIR}/.manifest/.
pushd ${BASE_DIR}.manifest
bash ${BASE_DIR}/build-manifest.sh ${BASE_DIR}/../.manifest/installation.manifest
python3 ${BASE_DIR}/build-manifest.py ${BASE_DIR}/../.manifest/installation.manifest
popd

162
build/build-manifest.py Normal file
View File

@@ -0,0 +1,162 @@
#!/usr/bin/env python3
import argparse
import hashlib
import os
import requests
import sys
import json
CDN_URL = "https://dc3p1870nn3cj.cloudfront.net"
def download_checksum(name):
    """Fetch the published checksum for ``name`` from the CDN.

    The CDN stores one ``<name>.checksum.txt`` per artifact whose first
    whitespace-separated token is the digest; only that token is returned.
    Exits the process with status 1 on any HTTP or network failure.
    """
    checksum_url = f"{CDN_URL}/{name}.checksum.txt"
    try:
        resp = requests.get(checksum_url)
        resp.raise_for_status()
        # "<digest> <filename>" — keep just the digest.
        return resp.text.split()[0]
    except requests.exceptions.RequestException as exc:
        print(f"Error getting checksum for {name} from {checksum_url}: {exc}", file=sys.stderr)
        sys.exit(1)
def get_image_manifest(name):
    """Fetch and decode the ``<name>.manifest.json`` document from the CDN.

    Returns the parsed JSON object. Exits the process with status 1 on any
    HTTP, network, or JSON-decoding failure (requests' JSON errors are
    RequestException subclasses and are caught by the same handler).
    """
    manifest_url = f"{CDN_URL}/{name}.manifest.json"
    try:
        resp = requests.get(manifest_url)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as exc:
        print(f"Error getting manifest for {name} from {manifest_url}: {exc}", file=sys.stderr)
        sys.exit(1)
def _substitute_placeholders(line, version, repo_path):
    """Fill the #__VERSION__ / #__REPO_PATH__ placeholders in a components line."""
    if version:
        line = line.replace("#__VERSION__", version)
    if repo_path:
        line = line.replace("#__REPO_PATH__", repo_path)
    return line


def main():
    """Build per-architecture installation manifests.

    Reads the ``components`` and ``images.mf`` files from the current
    directory, downloads the CDN checksum for every entry (plus the OCI
    image manifest for images), and writes ``<manifest_file>.amd64`` and
    ``<manifest_file>.arm64`` JSON files.

    Exits with status 1 if either input file is missing or any download
    fails (both inputs are required by the build).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("manifest_file", help="The manifest file to write to.")
    args = parser.parse_args()
    manifest_file = args.manifest_file

    # Placeholder values come from the build environment.
    version = os.environ.get("VERSION", "")
    repo_path = os.environ.get("REPO_PATH", "/")

    manifest_amd64_data = {}
    manifest_arm64_data = {}

    # Process components
    try:
        with open("components", "r") as f:
            for raw in f:
                line = _substitute_placeholders(raw.strip(), version, repo_path)
                if not line:
                    continue
                fields = line.split(",")
                if len(fields) < 5:
                    print(f"Format error in components file: {line}", file=sys.stderr)
                    sys.exit(1)
                filename, path, deps, _, fileid = fields[:5]
                # Fix: report the actual file instead of the literal "(unknown)".
                print(f"Downloading file checksum for {filename}")
                # CDN objects are keyed by the md5 of the artifact name.
                name = hashlib.md5(filename.encode()).hexdigest()
                url_amd64 = name
                url_arm64 = f"arm64/{name}"
                checksum_amd64 = download_checksum(url_amd64)
                checksum_arm64 = download_checksum(url_arm64)
                manifest_amd64_data[filename] = {
                    "type": "component",
                    "path": path,
                    "deps": deps,
                    "url_amd64": url_amd64,
                    "checksum_amd64": checksum_amd64,
                    "fileid": fileid
                }
                manifest_arm64_data[filename] = {
                    "type": "component",
                    "path": path,
                    "deps": deps,
                    "url_arm64": url_arm64,
                    "checksum_arm64": checksum_arm64,
                    "fileid": fileid
                }
    except FileNotFoundError:
        print("Error: 'components' file not found.", file=sys.stderr)
        sys.exit(1)

    # Process images
    path = "images"
    for deps_file in ["images.mf"]:
        try:
            with open(deps_file, "r") as f:
                for raw in f:
                    line = raw.strip()
                    if not line:
                        continue
                    print(f"Downloading file checksum for {line}")
                    name = hashlib.md5(line.encode()).hexdigest()
                    url_amd64 = f"{name}.tar.gz"
                    url_arm64 = f"arm64/{name}.tar.gz"
                    checksum_amd64 = download_checksum(name)
                    checksum_arm64 = download_checksum(f"arm64/{name}")
                    # Images additionally carry their OCI manifest document.
                    image_manifest_amd64 = get_image_manifest(name)
                    image_manifest_arm64 = get_image_manifest(f"arm64/{name}")
                    filename = f"{name}.tar.gz"
                    manifest_amd64_data[filename] = {
                        "type": "image",
                        "path": path,
                        "deps": deps_file,
                        "url_amd64": url_amd64,
                        "checksum_amd64": checksum_amd64,
                        "fileid": line,
                        "manifest": image_manifest_amd64
                    }
                    manifest_arm64_data[filename] = {
                        "type": "image",
                        "path": path,
                        "deps": deps_file,
                        "url_arm64": url_arm64,
                        "checksum_arm64": checksum_arm64,
                        "fileid": line,
                        "manifest": image_manifest_arm64
                    }
        except FileNotFoundError:
            # Fix: the old message said "skipping" while the script actually
            # aborts; make the message match the (fatal) behavior.
            print(f"Error: '{deps_file}' not found.", file=sys.stderr)
            sys.exit(1)

    # Write one manifest per architecture.
    amd64_manifest_file = f"{manifest_file}.amd64"
    with open(amd64_manifest_file, "w") as mf:
        json.dump(manifest_amd64_data, mf, indent=2)
    arm64_manifest_file = f"{manifest_file}.arm64"
    with open(arm64_manifest_file, "w") as mf:
        json.dump(manifest_arm64_data, mf, indent=2)
    # TODO: compress the manifest files


if __name__ == "__main__":
    main()

View File

@@ -46,6 +46,9 @@ while read line; do
done < components
sed -i "s/#__VERSION__/${VERSION}/g" $manifest_file
path="${REPO_PATH:-/}"
sed -i "s|#__REPO_PATH__|${path}|g" $manifest_file
path="images"
for deps in "images.mf"; do
while read line; do

View File

@@ -16,6 +16,7 @@ rm -rf ${BASE_DIR}/../.dependencies
set -e
pushd ${BASE_DIR}/../.manifest
bash ${BASE_DIR}/build-manifest.sh ${BASE_DIR}/../.manifest/installation.manifest
python3 ${BASE_DIR}/build-manifest.py ${BASE_DIR}/../.manifest/installation.manifest
popd
pushd $DIST_PATH

View File

@@ -77,3 +77,5 @@ find $BASE_DIR/../ -type f -name Olares.yaml | while read f; do
done
sed -i "s/#__VERSION__/${VERSION}/g" ${manifest}
path="${REPO_PATH:-/}"
sed -i "s|#__REPO_PATH__|${path}|g" ${manifest}

200
build/get-manifest.py Normal file
View File

@@ -0,0 +1,200 @@
#!/usr/bin/env python3
import requests
import json
import argparse
import re
import sys
import platform
def parse_image_name(image_name):
    """
    Parses a full image name into registry, repository, and reference (tag/digest).

    Handles Docker Hub defaults: bare names ("ubuntu") map to the
    registry-1.docker.io registry under the "library/" namespace, and a
    missing tag/digest defaults to "latest".

    Returns:
        (registry, repository, reference) tuple of strings.
    """
    # Default to 'latest' only when the LAST path component has no tag and no
    # digest is present. Checking the whole string would misread a registry
    # port as a tag, e.g. "localhost:5000/img" previously parsed as
    # registry "localhost" with reference "5000/img".
    if "@" not in image_name:
        last_component = image_name.rsplit("/", 1)[-1]
        if ":" not in last_component:
            image_name += ":latest"

    # Split repository from reference (tag or digest)
    if "@" in image_name:
        repo_part, reference = image_name.rsplit("@", 1)
    else:
        repo_part, reference = image_name.rsplit(":", 1)

    # Determine registry and repository
    if "/" not in repo_part:
        # This is an official Docker Hub image, e.g., "ubuntu"
        registry = "registry-1.docker.io"
        repository = f"library/{repo_part}"
    else:
        parts = repo_part.split("/")
        # If the first part looks like a host (contains a dot or a port),
        # it's the registry.
        if "." in parts[0] or ":" in parts[0]:
            registry = parts[0]
            repository = "/".join(parts[1:])
        else:
            # A scoped Docker Hub image, e.g., "bitnami/nginx"
            registry = "registry-1.docker.io"
            repository = repo_part
    return registry, repository, reference
def get_auth_token(registry, repository):
    """
    Obtain a pull-scoped bearer token for `repository` from the registry's
    auth service.

    Returns None when the registry's /v2/ probe does not answer 401 (no
    authentication challenge). Exits the process with status 1 on network
    failures or an unparseable challenge.
    """
    probe_url = f"https://{registry}/v2/"
    try:
        probe = requests.get(probe_url, timeout=10)
    except requests.exceptions.RequestException as e:
        print(f"Error: Could not connect to registry at {probe_url}. Details: {e}", file=sys.stderr)
        sys.exit(1)

    # Anything other than 401 means no token challenge was issued; callers
    # may proceed anonymously.
    if probe.status_code != 401:
        return None

    auth_header = probe.headers.get("Www-Authenticate")
    if not auth_header:
        print(f"Error: Registry {registry} returned 401 but did not provide Www-Authenticate header.", file=sys.stderr)
        sys.exit(1)

    # Pull realm and service out of the challenge; a failed re.search returns
    # None, whose .group() raises AttributeError.
    try:
        realm = re.search('realm="([^"]+)"', auth_header).group(1)
        service = re.search('service="([^"]+)"', auth_header).group(1)
        # Scope for the specific repository is needed
        scope = f"repository:{repository}:pull"
    except AttributeError:
        print(f"Error: Could not parse Www-Authenticate header: {auth_header}", file=sys.stderr)
        sys.exit(1)

    # Exchange the challenge for an actual token at the auth realm.
    try:
        auth_response = requests.get(realm, params={"service": service, "scope": scope}, timeout=10)
        auth_response.raise_for_status()
        return auth_response.json().get("token")
    except requests.exceptions.RequestException as e:
        print(f"Error: Failed to get auth token from {realm}. Details: {e}", file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Failed to decode JSON response from auth server: {auth_response.text}", file=sys.stderr)
        sys.exit(1)
def get_manifest(registry, repository, reference, token):
    """
    Fetch the image manifest for repository:reference from the registry.

    Accepts OCI and Docker manifest (and index/list) media types via the
    Accept header. If an unauthenticated request is rejected with 401, a
    token is acquired and the fetch retried once. Exits the process with
    status 1 on unrecoverable HTTP or network errors.
    """
    manifest_url = f"https://{registry}/v2/{repository}/manifests/{reference}"
    headers = {
        # Request multiple manifest types, the registry will return the correct one
        "Accept": "application/vnd.oci.image.index.v1+json, application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json"
    }
    if token:
        headers["Authorization"] = f"Bearer {token}"

    try:
        resp = requests.get(manifest_url, headers=headers, timeout=10)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 401 and not token:
            print("Error: Received 401 Unauthorized. Attempting to get a token...", file=sys.stderr)
            # The initial probe might have passed, but manifest access still
            # requires auth — acquire a token and retry once.
            new_token = get_auth_token(registry, repository)
            if new_token:
                return get_manifest(registry, repository, reference, new_token)
        print(f"Error: Failed to fetch manifest from {manifest_url}. Status: {e.response.status_code}", file=sys.stderr)
        print(f"Response: {e.response.text}", file=sys.stderr)
        sys.exit(1)
    except requests.exceptions.RequestException as e:
        print(f"Error: A network error occurred. Details: {e}", file=sys.stderr)
        sys.exit(1)
def main():
    # CLI entry point: parse arguments, resolve the image name, fetch its
    # manifest (resolving multi-arch indexes to this host's architecture),
    # then print the result or write it to the requested file.
    parser = argparse.ArgumentParser(
        description="Fetch an OCI/Docker image manifest from a container registry.",
        epilog="""Examples:
python get_manifest.py ubuntu:22.04
python get_manifest.py quay.io/brancz/kube-rbac-proxy:v0.18.1 -o manifest.json
python get_manifest.py gcr.io/google-containers/pause:3.9""",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument("image_name", help="Full name of the container image (e.g., 'ubuntu:latest' or 'quay.io/prometheus/node-exporter:v1.7.0')")
    parser.add_argument("-o", "--output-file", help="Optional. Path to write the final manifest JSON to. If not provided, prints to stdout.")
    args = parser.parse_args()

    registry, repository, reference = parse_image_name(args.image_name)

    # Suppress informational prints if writing to a file
    verbose_print = print if not args.output_file else lambda *a, **k: None
    verbose_print(f"Registry: {registry}")
    verbose_print(f"Repository: {repository}")
    verbose_print(f"Reference: {reference}", end='\n\n', flush=True)

    token = get_auth_token(registry, repository)
    if not token and not args.output_file:
        print("No authentication token needed or could be retrieved. Proceeding without token...", file=sys.stderr)

    manifest = get_manifest(registry, repository, reference, token)

    final_manifest = None
    media_type = manifest.get("mediaType", "")
    # A manifest list / image index points at per-platform manifests; pick
    # the entry matching this machine's architecture.
    if "manifest.list" in media_type or "image.index" in media_type:
        verbose_print("Detected a multi-platform image index. Finding manifest for current architecture...")
        system_arch = platform.machine()
        # Map uname-style architecture names to OCI platform names.
        arch_map = {"x86_64": "amd64", "aarch64": "arm64"}
        target_arch = arch_map.get(system_arch, system_arch)
        verbose_print(f"System architecture: {system_arch} -> Target: linux/{target_arch}")
        target_digest = None
        for m in manifest.get("manifests", []):
            plat = m.get("platform", {})
            if plat.get("os") == "linux" and plat.get("architecture") == target_arch:
                target_digest = m.get("digest")
                break
        if target_digest:
            verbose_print(f"Found manifest for linux/{target_arch} with digest: {target_digest}\n")
            # Re-fetch by digest to get the concrete per-platform manifest.
            final_manifest = get_manifest(registry, repository, target_digest, token)
        else:
            print(f"Error: Could not find a manifest for 'linux/{target_arch}' in the index.", file=sys.stderr)
            if not args.output_file:
                print("Available platforms:", file=sys.stderr)
                for m in manifest.get("manifests", []):
                    print(f" - {m.get('platform', {}).get('os')}/{m.get('platform', {}).get('architecture')}", file=sys.stderr)
            sys.exit(1)
    else:
        # Already a single-platform manifest; use it as-is.
        final_manifest = manifest

    if final_manifest:
        if args.output_file:
            try:
                with open(args.output_file, 'w') as f:
                    json.dump(final_manifest, f, indent=2)
                print(f"Successfully wrote manifest to {args.output_file}")
            except IOError as e:
                print(f"Error: Could not write to file {args.output_file}. Details: {e}", file=sys.stderr)
                sys.exit(1)
        else:
            print(json.dumps(final_manifest, indent=2))


if __name__ == "__main__":
    main()

View File

@@ -74,6 +74,6 @@ echo "packaging launcher ..."
run_cmd "cp -rf framework/bfl/.olares/config/launcher ${DIST}/wizard/config/"
echo "packaging gpu ..."
run_cmd "cp -rf framework/gpu/.olares/config/gpu ${DIST}/wizard/config/"
run_cmd "cp -rf infrastructure/gpu/.olares/config/gpu ${DIST}/wizard/config/"
echo "packaging completed"

View File

@@ -23,26 +23,28 @@ while read line; do
continue
fi
bash ${BASE_DIR}/download-deps.sh $PLATFORM $line
if [ $? -ne 0 ]; then
exit -1
fi
filename=$(echo "$line"|awk -F"," '{print $1}')
echo "if exists $filename ... "
name=$(echo -n "$filename"|md5sum|awk '{print $1}')
checksum="$name.checksum.txt"
md5sum $name > $checksum
backup_file=$(awk '{print $1}' $checksum)
if [ x"$backup_file" == x"" ]; then
echo "invalid checksum"
exit 1
fi
echo "if exists $filename ... "
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$name > /dev/null
if [ $? -ne 0 ]; then
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$name.tar.gz)
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$name)
if [ $code -eq 403 ]; then
bash ${BASE_DIR}/download-deps.sh $PLATFORM $line
if [ $? -ne 0 ]; then
exit -1
fi
md5sum $name > $checksum
backup_file=$(awk '{print $1}' $checksum)
if [ x"$backup_file" == x"" ]; then
echo "invalid checksum"
exit 1
fi
set -ex
aws s3 cp $name s3://terminus-os-install/$path$name --acl=public-read
aws s3 cp $name s3://terminus-os-install/backup/$path$backup_file --acl=public-read

View File

@@ -10,6 +10,7 @@ cat $1|while read image; do
echo "if exists $image ... "
name=$(echo -n "$image"|md5sum|awk '{print $1}')
checksum="$name.checksum.txt"
manifest="$name.manifest.json"
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$name.tar.gz > /dev/null
if [ $? -ne 0 ]; then
@@ -68,48 +69,29 @@ cat $1|while read image; do
set +ex
else
if [ $code -ne 200 ]; then
echo "failed to check image"
echo "failed to check image checksum"
exit -1
fi
fi
fi
# upload to tencent cloud cos
# curl -fsSLI https://cdn.joinolares.cn/$path$name.tar.gz > /dev/null
# if [ $? -ne 0 ]; then
# set -e
# docker pull $image
# docker save $image -o $name.tar
# gzip $name.tar
# md5sum $name.tar.gz > $checksum
# coscmd upload ./$name.tar.gz /$path$name.tar.gz
# coscmd upload ./$checksum /$path$checksum
# echo "upload $name to cos completed"
# set +e
# fi
# # re-upload checksum.txt
# curl -fsSLI https://cdn.joinolares.cn/$path$checksum > /dev/null
# if [ $? -ne 0 ]; then
# set -e
# docker pull $image
# docker save $image -o $name.tar
# gzip $name.tar
# md5sum $name.tar.gz > $checksum
# coscmd upload ./$name.tar.gz /$path$name.tar.gz
# coscmd upload ./$checksum /$path$checksum
# echo "upload $name to cos completed"
# set +e
# fi
# upload manifest.json
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$manifest > /dev/null
if [ $? -ne 0 ]; then
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$manifest)
if [ $code -eq 403 ]; then
set -ex
BASE_DIR=$(dirname $(realpath -s $0))
python3 $BASE_DIR/get-manifest.py $image -o $manifest
aws s3 cp $manifest s3://terminus-os-install/$path$manifest --acl=public-read
echo "upload $name manifest completed"
set +ex
else
if [ $code -ne 200 ]; then
echo "failed to check image manifest"
exit -1
fi
fi
fi
done

View File

@@ -17,8 +17,12 @@ builds:
ignore:
- goos: darwin
goarch: arm
- goos: darwin
goarch: amd64
- goos: windows
goarch: arm
- goos: windows
goarch: arm64
ldflags:
- -s
- -w

View File

@@ -1 +1,92 @@
# installer
# Olares CLI
This directory contains the code for **olares-cli**, the official command-line interface for administering an **Olares** cluster. It provides a modular, pipeline-based architecture for orchestrating complex system operations. See the full [Olares CLI Documentation](https://docs.olares.com/developer/install/cli-1.12/olares-cli.html) for command reference and tutorials.
Key responsibilities include:
- **Cluster management**: Installing, upgrading, restarting, and maintaining an Olares cluster.
- **Node management**: Adding to or removing nodes from an Olares cluster.
## Execution Model
For most of the commands, `olares-cli` is executed through a four-tier hierarchy:
```
Pipeline ➜ Module ➜ Task ➜ Action
````
### Example: `install-olares` Pipeline
```text
Pipeline: Install Olares
├── ...other modules
└── Module: Bootstrap OS
├── ...other tasks
├── Task: Check Prerequisites
│ └── Action: run-precheck.sh
└── Task: Configure System
└── Action: apply-sysctl
````
## Repository layout
```text
cli/
├── cmd/ # Cobra command definitions
│ ├── main.go # CLI entry point
│ └── ctl/
│ ├── root.go
│ ├── os/ # OS-level maintenance commands
│ ├── node/ # Cluster node operations
│ └── gpu/ # GPU management
└── pkg/
├── core/
│ ├── action/ # Re-usable action primitives
│ ├── module/ # Module abstractions
│ ├── pipeline/ # Pipeline abstractions
│ └── task/ # Task abstractions
└── pipelines/ # Pre-built pipelines
│ ├── ... # actual modules and tasks for various commands and components
```
## Build from source
### Prerequisites
* **Go 1.24+**
* **GoReleaser** (optional, for cross-compiling and packaging)
### Sample commands
```bash
# Clone the repo and enter the CLI folder
cd cli
# 1) Build for the host OS/ARCH
go build -o olares-cli ./cmd/main.go
# 2) Cross-compile for Linux amd64 (from macOS, for example)
GOOS=linux GOARCH=amd64 go build -o olares-cli ./cmd/main.go
# 3) Produce multi-platform artifacts (tar.gz, checksums, etc.)
goreleaser release --snapshot --clean
```
---
## Development workflow
### Add a new command
1. Create the command file in `cmd/ctl/<category>/`.
2. Define a pipeline in `pkg/pipelines/`.
3. Implement modules & tasks inside the relevant `pkg/` sub-packages.
### Test your build
1. Upload the self-built `olares-cli` binary to a machine that's running Olares.
2. Replace the existing `olares-cli` binary on the machine using `sudo cp -f olares-cli /usr/local/bin`.
3. Execute arbitrary commands using `olares-cli`

View File

@@ -60,7 +60,7 @@ echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767,46800-50000' >> /etc/sysctl.conf
echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
@@ -84,7 +84,7 @@ sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /et
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-iptables ?= ?(0|1)@net.bridge.bridge-nf-call-iptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767,46800-50000@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.max_map_count ?= ?([0-9]{1,})@vm.max_map_count = 262144@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.inotify.max_user_instances ?= ?([0-9]{1,})@fs.inotify.max_user_instances = 524288@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.pid_max ?= ?([0-9]{1,})@kernel.pid_max = 65535@g" /etc/sysctl.conf

View File

@@ -265,7 +265,7 @@ const (
CacheAppServicePod = "app_service_pod_name"
CacheAppValues = "app_built_in_values"
CacheCountPodsUsingHostIP = "count_pods_using_host_ip"
CacheCountPodsWaitForRecreation = "count_pods_wait_for_recreation"
CacheUpgradeUsers = "upgrade_users"
CacheUpgradeAdminUser = "upgrade_admin_user"

View File

@@ -73,7 +73,6 @@ type Argument struct {
ImagesDir string `json:"images_dir"`
Namespace string `json:"namespace"`
DeleteCRI bool `json:"delete_cri"`
DeleteCache bool `json:"delete_cache"`
Role string `json:"role"`
Type string `json:"type"`
Kubetype string `json:"kube_type"`
@@ -322,10 +321,26 @@ func (a *Argument) SaveReleaseInfo() error {
if a.OlaresVersion == "" {
return errors.New("invalid: empty olares version")
}
releaseInfoMap := map[string]string{
ENV_OLARES_BASE_DIR: a.BaseDir,
ENV_OLARES_VERSION: a.OlaresVersion,
}
if a.User != nil {
releaseInfoMap["OLARES_NAME"] = fmt.Sprintf("%s@%s", a.User.UserName, a.User.DomainName)
} else {
if util.IsExist(OlaresReleaseFile) {
// if the user is not set, try to load the user name from the release file
envs, err := godotenv.Read(OlaresReleaseFile)
if err == nil {
if userName, ok := envs["OLARES_NAME"]; ok {
releaseInfoMap["OLARES_NAME"] = userName
}
}
}
}
if !util.IsExist(filepath.Dir(OlaresReleaseFile)) {
if err := os.MkdirAll(filepath.Dir(OlaresReleaseFile), 0755); err != nil {
return fmt.Errorf("failed to create directory %s: %v", filepath.Dir(OlaresReleaseFile), err)
@@ -395,10 +410,6 @@ func (a *Argument) SetRegistryMirrors(registryMirrors string) {
a.RegistryMirrors = registryMirrors
}
func (a *Argument) SetDeleteCache(deleteCache bool) {
a.DeleteCache = deleteCache
}
func (a *Argument) SetDeleteCRI(deleteCRI bool) {
a.DeleteCRI = deleteCRI
}

View File

@@ -1,17 +1,16 @@
package common
const (
NamespaceDefault = "default"
NamespaceKubeNodeLease = "kube-node-lease"
NamespaceKubePublic = "kube-public"
NamespaceKubeSystem = "kube-system"
NamespaceKubekeySystem = "kubekey-system"
NamespaceKubesphereControlsSystem = "kubesphere-controls-system"
NamespaceKubesphereMonitoringFederated = "kubesphere-monitoring-federated"
NamespaceKubesphereMonitoringSystem = "kubesphere-monitoring-system"
NamespaceKubesphereSystem = "kubesphere-system"
NamespaceOsFramework = "os-framework"
NamespaceOsPlatform = "os-platform"
NamespaceDefault = "default"
NamespaceKubeNodeLease = "kube-node-lease"
NamespaceKubePublic = "kube-public"
NamespaceKubeSystem = "kube-system"
NamespaceKubekeySystem = "kubekey-system"
NamespaceKubesphereControlsSystem = "kubesphere-controls-system"
NamespaceKubesphereMonitoringSystem = "kubesphere-monitoring-system"
NamespaceKubesphereSystem = "kubesphere-system"
NamespaceOsFramework = "os-framework"
NamespaceOsPlatform = "os-platform"
ChartNameRedis = "redis"
ChartNameSnapshotController = "snapshot-controller"

View File

@@ -133,8 +133,11 @@ type DisableTerminusdService struct {
}
func (s *DisableTerminusdService) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("systemctl disable --now olaresd", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "disable olaresd failed")
stdout, _ := runtime.GetRunner().SudoCmd("systemctl is-active olaresd", false, false)
if stdout == "active" {
if _, err := runtime.GetRunner().SudoCmd("systemctl disable --now olaresd", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "disable olaresd failed")
}
}
return nil
}
@@ -144,10 +147,18 @@ type UninstallTerminusd struct {
}
func (r *UninstallTerminusd) Execute(runtime connector.Runtime) error {
var olaresdFiles []string
svcpath := filepath.Join("/etc/systemd/system", templates.TerminusdService.Name())
svcenvpath := filepath.Join("/etc/systemd/system", templates.TerminusdEnv.Name())
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("rm -rf %s && rm -rf %s && rm -rf /usr/local/bin/olaresd", svcpath, svcenvpath), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "remove olaresd failed")
binPath := "/usr/local/bin/olaresd"
olaresdFiles = append(olaresdFiles, svcpath, svcenvpath, binPath)
for _, pidFile := range []string{"installing.pid", "changingip.pid"} {
olaresdFiles = append(olaresdFiles, filepath.Join(runtime.GetBaseDir(), pidFile))
}
for _, f := range olaresdFiles {
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("rm -rf %s", f), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "remove olaresd failed")
}
}
return nil
}

View File

@@ -263,30 +263,25 @@ type NodeLabelingModule struct {
func (l *NodeLabelingModule) Init() {
l.Name = "NodeLabeling"
updateNode := &task.RemoteTask{
Name: "UpdateNode",
Hosts: l.Runtime.GetHostsByRole(common.Master),
updateNode := &task.LocalTask{
Name: "UpdateNode",
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(CudaInstalled),
new(K8sNodeInstalled),
},
Action: new(UpdateNodeLabels),
Parallel: false,
Retry: 1,
Action: new(UpdateNodeLabels),
Retry: 1,
}
restartPlugin := &task.RemoteTask{
Name: "RestartPlugin",
Hosts: l.Runtime.GetHostsByRole(common.Master),
restartPlugin := &task.LocalTask{
Name: "RestartPlugin",
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(CudaInstalled),
new(K8sNodeInstalled),
},
Action: new(RestartPlugin),
Parallel: false,
Retry: 1,
Action: new(RestartPlugin),
Retry: 1,
}
l.Tasks = []task.Interface{

View File

@@ -195,11 +195,13 @@ func (g *GenerateK3sService) Execute(runtime connector.Runtime) error {
defaultKubeletArs := map[string]string{
"kube-reserved": "cpu=200m,memory=250Mi,ephemeral-storage=1Gi",
"system-reserved": "cpu=200m,memory=250Mi,ephemeral-storage=1Gi",
"eviction-hard": "memory.available<5%,nodefs.available<10%",
"eviction-hard": "memory.available<5%,nodefs.available<10%,imagefs.available<10%",
"config": "/etc/rancher/k3s/kubelet.config",
"containerd": container.DefaultContainerdCRISocket,
"cgroup-driver": "systemd",
"runtime-request-timeout": "5m",
"image-gc-high-threshold": "91",
"image-gc-low-threshold": "90",
}
defaultKubeProxyArgs := map[string]string{
"proxy-mode": "ipvs",

View File

@@ -307,6 +307,8 @@ func GetKubeletConfiguration(runtime connector.Runtime, kubeConf *common.KubeCon
"evictionPressureTransitionPeriod": "30s",
"featureGates": FeatureGatesDefaultConfiguration,
"runtimeRequestTimeout": "5m",
"imageGCHighThresholdPercent": 91,
"imageGCLowThresholdPercent": 90,
}
if securityEnhancement {

View File

@@ -47,24 +47,6 @@ func (m *DeleteKubeSphereCachesModule) Init() {
}
}
type DeleteCacheModule struct {
common.KubeModule
}
func (m *DeleteCacheModule) Init() {
m.Name = "DeleteCache"
deleteCache := &task.LocalTask{
Name: "DeleteCache",
Prepare: new(ShouldDeleteCache),
Action: new(DeleteCache),
}
m.Tasks = []task.Interface{
deleteCache,
}
}
type DeployModule struct {
common.KubeModule
Skip bool

File diff suppressed because one or more lines are too long

View File

@@ -4,8 +4,6 @@
image:
# Overrides the image tag whose default is the chart appVersion.
ks_controller_manager_repo: kubesphere/ks-controller-manager
ks_controller_manager_tag: "v3.3.0"
ks_apiserver_repo: beclab/ks-apiserver
ks_apiserver_tag: "v3.3.0-ext-3"

View File

@@ -32,7 +32,7 @@ spec:
- command:
- ks-apiserver
- --logtostderr=true
image: beclab/ks-apiserver:0.0.19
image: beclab/ks-apiserver:0.0.21
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: ks-apiserver
ports:

View File

@@ -1,121 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ks-controller-manager
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-controller-manager
spec:
strategy:
rollingUpdate:
maxSurge: 0
type: RollingUpdate
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
template:
metadata:
labels:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- command:
- controller-manager
- --logtostderr=true
- --leader-elect=false
image: beclab/ks-controller-manager:0.0.19
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: ks-controller-manager
ports:
- containerPort: 8080
protocol: TCP
resources:
{{- toYaml .Values.controller.resources | nindent 12 }}
volumeMounts:
- mountPath: /etc/kubesphere/
name: kubesphere-config
- mountPath: /etc/localtime
name: host-time
readOnly: true
{{- if .Values.controller.extraVolumeMounts }}
{{- toYaml .Values.controller.extraVolumeMounts | nindent 8 }}
{{- end }}
env:
{{- if .Values.env }}
{{- toYaml .Values.env | nindent 8 }}
{{- end }}
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
terminationGracePeriodSeconds: 30
volumes:
- name: kubesphere-config
configMap:
name: kubesphere-config
defaultMode: 420
- hostPath:
path: /etc/localtime
type: ""
name: host-time
{{- if .Values.controller.extraVolumes }}
{{ toYaml .Values.controller.extraVolumes | nindent 6 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- ks-controller-manager
namespaces:
- kubesphere-system
{{- with .Values.nodeAffinity }}
nodeAffinity:
{{ toYaml . | indent 10 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ks-controller-manager
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-controller-manager
spec:
ports:
- port: 443
protocol: TCP
targetPort: 8443
selector:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
sessionAffinity: None
type: ClusterIP

View File

@@ -4,8 +4,6 @@
image:
# Overrides the image tag whose default is the chart appVersion.
ks_controller_manager_repo: kubesphere/ks-controller-manager
ks_controller_manager_tag: "v3.3.0"
ks_apiserver_repo: beclab/ks-apiserver
ks_apiserver_tag: "v3.3.0-ext-3"

View File

@@ -748,12 +748,12 @@ spec:
sum (node_cpu_seconds_total{job="node-exporter", mode=~"user|nice|system|iowait|irq|softirq"}) by (cpu, instance, job, namespace, pod)
record: node_cpu_used_seconds_total
- expr: |
max(kube_pod_info{job="kube-state-metrics"} * on(node) group_left(role) kube_node_role{job="kube-state-metrics", role="master"} or on(pod, namespace) kube_pod_info{job="kube-state-metrics"}) by (node, namespace, host_ip, role, pod)
max(kube_pod_info{job="kube-state-metrics"} * on(node) group_left(role) kube_node_role{job="kube-state-metrics", role="master"} or on(pod, namespace) kube_pod_info{job="kube-state-metrics"}) by (node, namespace, role, pod)
record: 'node_namespace_pod:kube_pod_info:'
- expr: |
count by (node, host_ip, role) (sum by (node, cpu, host_ip, role) (
count by (node, role) (sum by (node, cpu, role) (
node_cpu_seconds_total{job="node-exporter"}
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
))
record: node:node_num_cpu:sum
@@ -761,27 +761,27 @@ spec:
avg(irate(node_cpu_used_seconds_total{job="node-exporter"}[5m]))
record: :node_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_used_seconds_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"user"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_user_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"system"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_system_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"iowait"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_iowait_cpu_utilisation:avg1m
- expr: |
@@ -806,9 +806,9 @@ spec:
label_replace(node_memory_Cached_bytes, "node", "$1", "instance", "(.*)")
record: node:node_memory_Cached_bytes
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(node_memory_Slab_bytes{job="node-exporter"} + node_memory_KernelStack_bytes{job="node-exporter"} + node_memory_PageTables_bytes{job="node-exporter"}+ node_memory_HardwareCorrupted_bytes{job="node-exporter"}+node_memory_Bounce_bytes{job="node-exporter"}-node_memory_SReclaimable_bytes{job="node-exporter"})
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_system_reserved
@@ -825,16 +825,16 @@ spec:
sum(node_memory_MemTotal_bytes{job="node-exporter"})
record: ':node_memory_utilisation:'
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"} + node_memory_SReclaimable_bytes{job="node-exporter"})
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_bytes_available:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
node_memory_MemTotal_bytes{job="node-exporter"}
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_bytes_total:sum
@@ -842,30 +842,30 @@ spec:
1 - (node:node_memory_bytes_available:sum / node:node_memory_bytes_total:sum)
record: 'node:node_memory_utilisation:'
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_reads_completed_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_iops_reads:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_writes_completed_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_iops_writes:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_read_bytes_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_throughput_bytes_read:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_written_bytes_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_throughput_bytes_written:sum
@@ -874,74 +874,74 @@ spec:
sum(irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m]))
record: :node_net_utilisation:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[5m]) +
irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m]))
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_utilisation:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_bytes_transmitted:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_bytes_received:sum_irate
- expr: |
sum by(node, host_ip, role) (sum(max(node_filesystem_files{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by(node, role) (sum(max(node_filesystem_files{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: 'node:node_inodes_total:'
- expr: |
sum by(node, host_ip, role) (sum(max(node_filesystem_files_free{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by(node, role) (sum(max(node_filesystem_files_free{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: 'node:node_inodes_free:'
- expr: |
sum by (node, host_ip, role) (node_load1{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load1{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load1:ratio
- expr: |
sum by (node, host_ip, role) (node_load5{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load5{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load5:ratio
- expr: |
sum by (node, host_ip, role) (node_load15{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load15{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load15:ratio
- expr: |
sum by (node, host_ip, role) ((kube_pod_status_scheduled{job="kube-state-metrics", condition="true"} > 0) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by (node, role) ((kube_pod_status_scheduled{job="kube-state-metrics", condition="true"} > 0) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: node:pod_count:sum
- expr: |
(sum(kube_node_status_capacity{resource="pods", job="kube-state-metrics"}) by (node) * on(node) group_left(host_ip, role) max by(node, host_ip, role) (node_namespace_pod:kube_pod_info:{node!="",host_ip!=""}))
(sum(kube_node_status_capacity{resource="pods", job="kube-state-metrics"}) by (node) * on(node) group_left(role) max by(node, role) (node_namespace_pod:kube_pod_info:{node!=""}))
record: node:pod_capacity:sum
- expr: |
node:pod_running:count / node:pod_capacity:sum
record: node:pod_utilization:ratio
- expr: |
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Succeeded"} > 0)) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Succeeded"} > 0)) by (node, role)
record: node:pod_running:count
- expr: |
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Running"} > 0)) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Running"} > 0)) by (node, role)
record: node:pod_succeeded:count
- expr: |
count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0) by (node, role)
record: node:pod_abnormal:count
- expr: |
(count by(namespace, cluster) (kube_pod_info{job="kube-state-metrics"} unless on(pod, namespace, cluster) (kube_pod_status_phase{job="kube-state-metrics",phase="Succeeded"} > 0) unless on(pod, namespace, cluster) ((kube_pod_status_ready{condition="true",job="kube-state-metrics"} > 0) and on(pod, namespace, cluster) (kube_pod_status_phase{job="kube-state-metrics",phase="Running"} > 0)) unless on(pod, namespace, cluster) kube_pod_container_status_waiting_reason{job="kube-state-metrics",reason="ContainerCreating"} > 0) or on(namespace, cluster) (group by(namespace, cluster) (kube_pod_info{job="kube-state-metrics"}) * 0)) * on(namespace, cluster) group_left(user) (kube_namespace_labels{job="kube-state-metrics"}) > 0
record: user:pod_abnormal:count
- expr: |
node:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, host_ip, role)
node:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, role)
record: node:pod_abnormal:ratio
- expr: |
user:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, host_ip, role)
user:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, role)
record: user:pod_abnormal:ratio
- expr: |
sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role)
sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role)
record: 'node:disk_space_available:'
- expr: |
1- sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role) / sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role)
1- sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role) / sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role)
record: node:disk_space_utilization:ratio
- expr: |
(1 - (node:node_inodes_free: / node:node_inodes_total:))

View File

@@ -42,7 +42,7 @@ spec:
- --collector.netdev.address-info
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
image: beclab/node-exporter:0.0.2
image: beclab/node-exporter:0.0.3
name: node-exporter
securityContext:
privileged: true

View File

@@ -58,12 +58,12 @@ var kscorecrds = []map[string]string{
"resource": "default-http-backend",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "secrets",
"resource": "ks-controller-manager-webhook-cert",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "secrets",
// "resource": "ks-controller-manager-webhook-cert",
// "release": "ks-core",
//},
{
"ns": "kubesphere-system",
"kind": "serviceaccounts",
@@ -100,24 +100,24 @@ var kscorecrds = []map[string]string{
"resource": "ks-apiserver",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "services",
"resource": "ks-controller-manager",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "services",
// "resource": "ks-controller-manager",
// "release": "ks-core",
//},
{
"ns": "kubesphere-system",
"kind": "deployments",
"resource": "ks-apiserver",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "deployments",
"resource": "ks-controller-manager",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "deployments",
// "resource": "ks-controller-manager",
// "release": "ks-core",
//},
//{
// "ns": "kubesphere-system",
// "kind": "validatingwebhookconfigurations",

View File

@@ -65,7 +65,7 @@ func (t *InitNamespace) Execute(runtime connector.Runtime) error {
kubectlpath = path.Join(common.BinDir, common.CommandKubectl)
}
for _, ns := range []string{common.NamespaceKubesphereControlsSystem, common.NamespaceKubesphereMonitoringFederated} {
for _, ns := range []string{common.NamespaceKubesphereControlsSystem} {
if stdout, err := runtime.GetRunner().Cmd(fmt.Sprintf("%s create ns %s", kubectlpath, ns), false, true); err != nil {
if !strings.Contains(stdout, "already exists") {
logger.Errorf("create ns %s failed: %v", ns, err)
@@ -98,8 +98,6 @@ func (t *InitNamespace) Execute(runtime connector.Runtime) error {
common.NamespaceKubeSystem,
common.NamespaceKubekeySystem,
common.NamespaceKubesphereControlsSystem,
common.NamespaceKubesphereMonitoringFederated,
common.NamespaceKubesphereMonitoringSystem,
common.NamespaceKubesphereSystem,
}

View File

@@ -23,17 +23,6 @@ import (
versionutil "k8s.io/apimachinery/pkg/util/version"
)
// ShouldDeleteCache is a prepare-phase gate that reports whether the user
// asked for cache deletion via the DeleteCache argument.
type ShouldDeleteCache struct {
	common.KubePrepare
}

// PreCheck returns true exactly when the DeleteCache flag is set, letting
// the dependent tasks run; it never returns an error.
func (p *ShouldDeleteCache) PreCheck(runtime connector.Runtime) (bool, error) {
	return p.KubeConf.Arg.DeleteCache, nil
}
type VersionBelowV3 struct {
common.KubePrepare
}

View File

@@ -52,19 +52,6 @@ func (d *DeleteKubeSphereCaches) Execute(runtime connector.Runtime) error {
return nil
}
// DeleteCache is a cluster action whose cache-removal body is currently
// disabled; Execute is a deliberate no-op.
type DeleteCache struct {
	common.KubeAction
}

// Execute does nothing. The original removal logic is kept below,
// commented out, for reference; re-enable it to actually purge the
// images cache directory.
func (t *DeleteCache) Execute(runtime connector.Runtime) error {
	// var cacheDir = path.Join(runtime.GetBaseDir(), cc.ImagesDir)
	// if err := util.RemoveDir(cacheDir); err != nil {
	// return err
	// }
	// logger.Debugf("delete caches success")
	return nil
}
type AddInstallerConfig struct {
common.KubeAction
}
@@ -368,7 +355,7 @@ func (c *Check) Execute(runtime connector.Runtime) error {
return fmt.Errorf("kubectl not found")
}
var labels = []string{"app=ks-apiserver", "app=ks-controller-manager"}
var labels = []string{"app=ks-apiserver"}
for _, label := range labels {
var cmd = fmt.Sprintf("%s get pod -n %s -l '%s' -o jsonpath='{.items[0].status.phase}'", kubectlpath, common.NamespaceKubesphereSystem, label)

View File

@@ -6,6 +6,7 @@ import (
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/core/module"
"github.com/beclab/Olares/cli/pkg/core/pipeline"
"github.com/beclab/Olares/cli/pkg/gpu"
"github.com/beclab/Olares/cli/pkg/k3s"
"github.com/beclab/Olares/cli/pkg/kubernetes"
"github.com/beclab/Olares/cli/pkg/manifest"
@@ -75,6 +76,7 @@ func (m *AddNodeModule) Init() {
&k3s.JoinNodesModule{},
}
}
m.underlyingModules = append(m.underlyingModules, &gpu.NodeLabelingModule{})
for _, underlyingModule := range m.underlyingModules {
underlyingModule.Default(m.Runtime, m.PipelineCache, m.ModuleCache)
underlyingModule.AutoAssert()

View File

@@ -105,6 +105,7 @@ func (p *phaseBuilder) phaseInstall() *phaseBuilder {
&certs.UninstallCertsFilesModule{},
&storage.DeleteUserDataModule{},
&terminus.DeleteWizardFilesModule{},
&terminus.DeleteUpgradeFilesModule{},
&storage.RemoveJuiceFSModule{},
&storage.DeletePhaseFlagModule{
PhaseFile: common.TerminusStateFileInstalled,
@@ -132,33 +133,13 @@ func (p *phaseBuilder) phasePrepare() *phaseBuilder {
PhaseFile: common.TerminusStateFilePrepared,
BaseDir: p.runtime.GetBaseDir(),
},
&daemon.UninstallTerminusdModule{},
&terminus.RemoveReleaseFileModule{},
)
}
return p
}
// phaseDownload appends the cleanup modules belonging to the "download"
// phase: removal of the olaresd service (when detected), the KubeSphere
// cache module, and — only when --delete-cache is set — the on-disk
// download cache under the base directory. Returns p for chaining.
func (p *phaseBuilder) phaseDownload() *phaseBuilder {
	// Probe olaresd up-front; the result is only consulted when the
	// download phase is actually part of this run.
	terminusdAction := &daemon.CheckTerminusdService{}
	err := terminusdAction.Execute()
	if p.convert() >= PhaseDownload {
		// A nil error from the check presumably means the olaresd service
		// is present, so queue its uninstall module — TODO confirm the
		// CheckTerminusdService contract.
		if err == nil {
			p.modules = append(p.modules, &daemon.UninstallTerminusdModule{})
		}
		p.modules = append(p.modules,
			&kubesphere.DeleteCacheModule{},
		)
		// Only wipe downloaded artifacts when the user explicitly asked.
		if p.runtime.Arg.DeleteCache {
			p.modules = append(p.modules, &storage.DeleteCacheModule{
				BaseDir: p.runtime.GetBaseDir(),
			})
		}
	}
	return p
}
func (p *phaseBuilder) phaseMacos() {
p.modules = []module.Module{
&precheck.GreetingsModule{},
@@ -168,9 +149,6 @@ func (p *phaseBuilder) phaseMacos() {
}
if p.convert() >= PhaseDownload {
p.modules = append(p.modules, &kubesphere.DeleteKubeSphereCachesModule{})
if p.runtime.Arg.DeleteCache {
p.modules = append(p.modules, &kubesphere.DeleteCacheModule{})
}
}
}
@@ -189,8 +167,7 @@ func UninstallTerminus(phase string, runtime *common.KubeRuntime) pipeline.Pipel
builder.
phaseInstall().
phaseStorage().
phasePrepare().
phaseDownload()
phasePrepare()
}
return pipeline.Pipeline{

View File

@@ -65,6 +65,7 @@ data:
health
ready
kubernetes {{ .DNSDomain }} in-addr.arpa ip6.arpa {
endpoint_pod_names
pods insecure
fallthrough in-addr.arpa ip6.arpa
}

View File

@@ -5993,6 +5993,8 @@ spec:
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Never"
- name: FELIX_HEALTHHOST
value: 127.0.0.1
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:

View File

@@ -30,6 +30,17 @@ func (m *Manager) Package() error {
return err
}
osChartTemplatePath := "wizard/config/os-chart-template"
for _, osm := range []string{"os-platform", "os-framework"} {
if err := util.CopyDirectory(filepath.Join(buildTemplate, osChartTemplatePath), filepath.Join(m.distPath, fmt.Sprintf("/wizard/config/%s", osm))); err != nil {
return err
}
}
if err := util.RemoveDir(filepath.Join(m.distPath, osChartTemplatePath)); err != nil {
return err
}
// Package modules
for _, mod := range modules {
if err := m.packageModule(mod); err != nil {
@@ -50,6 +61,13 @@ func (m *Manager) Package() error {
}
func (m *Manager) packageModule(mod string) error {
var distDeployType string
switch mod {
case "platform":
distDeployType = "os-platform"
case "framework":
distDeployType = "os-framework"
}
modPath := filepath.Join(m.olaresRepoRoot, mod)
err := filepath.Walk(modPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
@@ -78,7 +96,7 @@ func (m *Manager) packageModule(mod string) error {
// Package cluster deployments
deployPath := filepath.Join(path, "config/cluster/deploy")
if err := util.CopyDirectoryIfExists(deployPath, filepath.Join(m.distPath, "wizard/config/system/templates/deploy")); err != nil {
if err := util.CopyDirectoryIfExists(deployPath, filepath.Join(m.distPath, fmt.Sprintf("wizard/config/%s/templates/deploy", distDeployType))); err != nil {
return err
}
@@ -99,7 +117,7 @@ func (m *Manager) packageLauncher() error {
func (m *Manager) packageGPU() error {
fmt.Println("packaging gpu ...")
return util.CopyDirectory(
filepath.Join(m.olaresRepoRoot, "framework/gpu/.olares/config/gpu"),
filepath.Join(m.olaresRepoRoot, "infrastructure/gpu/.olares/config/gpu"),
filepath.Join(m.distPath, "wizard/config/gpu"),
)
}

View File

@@ -2,16 +2,16 @@ package builder
import (
"fmt"
"os"
"path/filepath"
"github.com/beclab/Olares/cli/pkg/core/util"
"github.com/beclab/Olares/cli/pkg/release/app"
"github.com/beclab/Olares/cli/pkg/release/manifest"
"os"
"path/filepath"
)
type Builder struct {
olaresRepoRoot string
vendorRepoPath string
distPath string
version string
manifestManager *manifest.Manager
@@ -20,8 +20,13 @@ type Builder struct {
func NewBuilder(olaresRepoRoot, version, cdnURL string, ignoreMissingImages bool) *Builder {
distPath := filepath.Join(olaresRepoRoot, ".dist/install-wizard")
vendorRepoPath := os.Getenv("OLARES_VENDOR_REPO_PATH")
if vendorRepoPath == "" {
vendorRepoPath = "/"
}
return &Builder{
olaresRepoRoot: olaresRepoRoot,
vendorRepoPath: vendorRepoPath,
distPath: distPath,
version: version,
manifestManager: manifest.NewManager(olaresRepoRoot, distPath, cdnURL, ignoreMissingImages),
@@ -69,6 +74,9 @@ func (b *Builder) archive() (string, error) {
if err := util.ReplaceInFile(file, "#__VERSION__", b.version); err != nil {
return "", err
}
if err := util.ReplaceInFile(file, "#__REPO_PATH__", b.vendorRepoPath); err != nil {
return "", err
}
}
tarFile := filepath.Join(b.olaresRepoRoot, fmt.Sprintf("install-wizard-%s.tar.gz", versionStr))

View File

@@ -4,10 +4,12 @@ import (
"bufio"
"crypto/md5"
"fmt"
"github.com/Masterminds/semver/v3"
dockerref "github.com/containerd/containerd/reference/docker"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"sigs.k8s.io/kustomize/kyaml/yaml"
@@ -265,11 +267,55 @@ func (m *Manager) scan() error {
m.extractedImages = sortedImages
for _, component := range uniqueComponents {
component, err = m.patchComponent(component)
if err != nil {
return err
}
m.extractedComponents = append(m.extractedComponents, component)
}
return nil
}
// getLatestDailyBuildTag lists the git tags of the Olares repo and returns
// the newest one that looks like a daily build, e.g. "1.12.0-20250701".
// Tags are ordered by semver, where the trailing date is a numeric
// pre-release identifier, so a later build date compares greater.
func (m *Manager) getLatestDailyBuildTag() (string, error) {
	cmd := exec.Command("git", "tag", "-l")
	cmd.Dir = m.olaresRepoRoot
	output, err := cmd.CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("failed to get git tags: %v", err)
	}

	tags := strings.Split(strings.TrimSpace(string(output)), "\n")
	if len(tags) == 0 || (len(tags) == 1 && tags[0] == "") {
		return "", fmt.Errorf("no git tags found")
	}

	// A daily build tag has the shape <major>.<minor>.<patch>-<YYYYMMDD>.
	// The patch component uses \d+ — the previous pattern used a bare \d,
	// which only matched single-digit patch versions and silently dropped
	// tags like 1.12.10-20250701.
	var dailyTags []string
	dailyBuildRegex := regexp.MustCompile(`^\d+\.\d+\.\d+-\d{8}$`)
	for _, tag := range tags {
		tag = strings.TrimSpace(tag)
		if dailyBuildRegex.MatchString(tag) {
			dailyTags = append(dailyTags, tag)
		}
	}

	if len(dailyTags) == 0 {
		return "", fmt.Errorf("no daily build tags found")
	}

	// Sort ascending by semver; unparsable tags sink to the front so the
	// last element is always the newest parsable daily build.
	sort.Slice(dailyTags, func(i, j int) bool {
		iv, err := semver.NewVersion(dailyTags[i])
		if err != nil {
			return true
		}
		jv, err := semver.NewVersion(dailyTags[j])
		if err != nil {
			return false
		}
		return iv.LessThan(jv)
	})

	return dailyTags[len(dailyTags)-1], nil
}
// Helper function to patch extracted image name
// before validating it
@@ -298,3 +344,22 @@ func (m *Manager) patchImage(image string) (string, error) {
image = strings.ReplaceAll(image, backupServerImageVersionTpl, backupVersion)
return image, nil
}
// patchComponent rewrites the "#__VERSION__" placeholder in the olaresd
// component's name and download URLs with the latest daily-build git tag.
// Components other than olaresd are returned untouched. On failure to
// resolve the tag, a zero BinaryOutput and an error are returned.
func (m *Manager) patchComponent(component BinaryOutput) (BinaryOutput, error) {
	if component.ID != "olaresd" {
		return component, nil
	}
	latestDailyBuildTag, err := m.getLatestDailyBuildTag()
	if err != nil {
		return BinaryOutput{}, fmt.Errorf("failed to get latest daily build tag (required to replace olaresd version): %v", err)
	}
	fmt.Printf("patching olaresd version to %s\n", latestDailyBuildTag)
	// Substitute the placeholder in every field that embeds the version.
	for _, field := range []*string{&component.Name, &component.AMD64, &component.ARM64} {
		*field = strings.ReplaceAll(*field, "#__VERSION__", latestDailyBuildTag)
	}
	return component, nil
}

View File

@@ -214,29 +214,6 @@ func (m *DeletePhaseFlagModule) Init() {
}
}
// DeleteCacheModule wires up the remote task that removes cached
// artifacts under BaseDir on every master node.
type DeleteCacheModule struct {
	common.KubeModule
	BaseDir string
}

// Init names the module and registers its single "DeleteCaches" remote
// task, executed serially with one retry.
func (m *DeleteCacheModule) Init() {
	m.Name = "DeleteCaches"
	m.Tasks = []task.Interface{
		&task.RemoteTask{
			Name:  "DeleteCaches",
			Hosts: m.Runtime.GetHostsByRole(common.Master),
			Action: &DeleteCaches{
				BaseDir: m.BaseDir,
			},
			Parallel: false,
			Retry:    1,
		},
	}
}
type DeleteUserDataModule struct {
common.KubeModule
}

View File

@@ -325,38 +325,6 @@ func (t *DeletePhaseFlagFile) Execute(runtime connector.Runtime) error {
return nil
}
// DeleteCaches removes every first-level subdirectory of BaseDir on the
// target host (i.e. each per-component cache directory), best-effort.
type DeleteCaches struct {
	common.KubeAction
	BaseDir string
}

// Execute collects the immediate child directories of BaseDir and removes
// each of them. All failures are logged but never abort the action, so a
// partially-unreadable cache tree does not fail the pipeline.
func (t *DeleteCaches) Execute(runtime connector.Runtime) error {
	var cachesDirs []string
	filepath.WalkDir(t.BaseDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			// Previously this error was silently ignored; log it and skip
			// the entry so the rest of the walk still proceeds.
			logger.Errorf("walk %s failed %v", path, err)
			return nil
		}
		if path != t.BaseDir && d.IsDir() {
			cachesDirs = append(cachesDirs, path)
			// Do not descend: top-level cache dirs are removed whole.
			return filepath.SkipDir
		}
		return nil
	})
	// len() on a nil slice is 0, so no separate nil check is needed.
	for _, cachesDir := range cachesDirs {
		if util.IsExist(cachesDir) {
			if err := util.RemoveDir(cachesDir); err != nil {
				logger.Errorf("remove %s failed %v", cachesDir, err)
			}
		}
	}
	return nil
}
type DeleteTerminusUserData struct {
common.KubeAction
}

View File

@@ -199,6 +199,23 @@ func (m *InstalledModule) Init() {
}
}
// DeleteUpgradeFilesModule registers the local task that cleans up
// leftover "upgrade.*" files from the base directory.
type DeleteUpgradeFilesModule struct {
	common.KubeModule
}

// Init names the module and queues its single local task.
func (d *DeleteUpgradeFilesModule) Init() {
	d.Name = "DeleteUpgradeFiles"
	d.Tasks = []task.Interface{
		&task.LocalTask{
			Name:   "DeleteUpgradeFiles",
			Action: &DeleteUpgradeFiles{},
		},
	}
}
type DeleteWizardFilesModule struct {
common.KubeModule
}

View File

@@ -86,6 +86,12 @@ func (t *InstallOsSystem) Execute(runtime connector.Runtime) error {
// TODO: wait for the platform to be ready
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsFramework)
if err != nil {
return err
}
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
var frameworkPath = path.Join(runtime.GetInstallerDir(), "wizard", "config", "os-framework")
if err := utils.UpgradeCharts(ctx, actionConfig, settings, common.ChartNameOSFramework, frameworkPath, "", common.NamespaceOsFramework, vals, false); err != nil {
return err

View File

@@ -296,6 +296,30 @@ func (t *InstallFinished) Execute(runtime connector.Runtime) error {
return nil
}
// DeleteUpgradeFiles removes every entry named "upgrade.*" from the
// runtime base directory.
type DeleteUpgradeFiles struct {
	common.KubeAction
}

// Execute scans the base directory and best-effort deletes each entry
// whose name starts with "upgrade."; individual deletion failures are
// only logged, and the action fails solely when the directory itself
// cannot be read.
func (d *DeleteUpgradeFiles) Execute(runtime connector.Runtime) error {
	baseDir := runtime.GetBaseDir()
	entries, err := os.ReadDir(baseDir)
	if err != nil {
		return errors.Wrapf(err, "failed to read directory %s", baseDir)
	}
	for _, entry := range entries {
		name := entry.Name()
		if !strings.HasPrefix(name, "upgrade.") {
			continue
		}
		target := path.Join(baseDir, name)
		if err := os.RemoveAll(target); err != nil && !os.IsNotExist(err) {
			logger.Warnf("failed to delete %s: %v", target, err)
		}
	}
	return nil
}
type DeleteWizardFiles struct {
common.KubeAction
}
@@ -453,14 +477,21 @@ func (a *DeletePodsUsingHostIP) Execute(runtime connector.Runtime) error {
if err != nil {
return errors.Wrap(err, "failed to get pods using host IP")
}
a.PipelineCache.Set(common.CacheCountPodsUsingHostIP, len(targetPods))
var waitRecreationPodsCount int
for _, pod := range targetPods {
logger.Infof("restarting pod %s/%s that's using host IP", pod.Namespace, pod.Name)
err = kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
if err != nil && !kerrors.IsNotFound(err) {
return errors.Wrap(err, "failed to delete pod")
}
// pods not created by any owner resource
// may not be recreated immediately and should not be waited
if len(pod.OwnerReferences) > 0 {
waitRecreationPodsCount++
}
}
a.PipelineCache.Set(common.CacheCountPodsWaitForRecreation, waitRecreationPodsCount)
// try our best to wait for the pods to be actually deleted
// to avoid the next module getting the pods with a still running phase
@@ -479,7 +510,7 @@ type WaitForPodsUsingHostIPRecreate struct {
}
func (a *WaitForPodsUsingHostIPRecreate) Execute(runtime connector.Runtime) error {
count, ok := a.PipelineCache.GetMustInt(common.CacheCountPodsUsingHostIP)
count, ok := a.PipelineCache.GetMustInt(common.CacheCountPodsWaitForRecreation)
if !ok {
return errors.New("failed to get the count of pods using host IP")
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path"
"strings"
"time"
"github.com/beclab/Olares/cli/pkg/common"
@@ -197,7 +198,7 @@ func (u *UpgradeSystemComponents) Execute(runtime connector.Runtime) error {
return err
}
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsPlatform)
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsFramework)
if err != nil {
return err
}
@@ -221,3 +222,67 @@ func (u *UpgradeSystemComponents) Execute(runtime connector.Runtime) error {
}
return nil
}
// UpdateSysctlReservedPorts ensures net.ipv4.ip_local_reserved_ports in
// /etc/sysctl.conf is set to the expected range, rewriting the file and
// reloading sysctl only when the on-disk value actually differs.
type UpdateSysctlReservedPorts struct {
	common.KubeAction
}

// Execute reads /etc/sysctl.conf line by line, replaces (or appends) the
// reserved-ports key with the expected value, then writes the file back
// and runs "sysctl -p" if anything changed.
func (u *UpdateSysctlReservedPorts) Execute(runtime connector.Runtime) error {
	const sysctlFile = "/etc/sysctl.conf"
	const reservedPortsKey = "net.ipv4.ip_local_reserved_ports"
	const expectedValue = "30000-32767,46800-50000"

	content, err := os.ReadFile(sysctlFile)
	if err != nil {
		return fmt.Errorf("failed to read sysctl.conf: %v", err)
	}

	lines := strings.Split(string(content), "\n")
	var foundKey bool
	var needUpdate bool
	var updatedLines []string
	for _, line := range lines {
		trimmedLine := strings.TrimSpace(line)
		// Match the key exactly ("key=value", tolerating whitespace around
		// "="). The previous bare HasPrefix check would also rewrite any
		// unrelated key that merely starts with the same text.
		parts := strings.SplitN(trimmedLine, "=", 2)
		if len(parts) == 2 && strings.TrimSpace(parts[0]) == reservedPortsKey {
			foundKey = true
			currentValue := strings.TrimSpace(parts[1])
			if currentValue != expectedValue {
				logger.Infof("updating %s from %s to %s", reservedPortsKey, currentValue, expectedValue)
				updatedLines = append(updatedLines, fmt.Sprintf("%s=%s", reservedPortsKey, expectedValue))
				needUpdate = true
			} else {
				updatedLines = append(updatedLines, line)
			}
		} else {
			updatedLines = append(updatedLines, line)
		}
	}

	if !foundKey {
		logger.Infof("key %s not found in sysctl.conf, adding it", reservedPortsKey)
		updatedLines = append(updatedLines, fmt.Sprintf("%s=%s", reservedPortsKey, expectedValue))
		needUpdate = true
	}

	if needUpdate {
		updatedContent := strings.Join(updatedLines, "\n")
		if err := os.WriteFile(sysctlFile, []byte(updatedContent), 0644); err != nil {
			return fmt.Errorf("failed to write updated sysctl.conf: %v", err)
		}
		// Apply the new configuration immediately.
		if _, err := runtime.GetRunner().SudoCmd("sysctl -p", false, false); err != nil {
			return fmt.Errorf("failed to reload sysctl: %v", err)
		}
		logger.Infof("updated and reloaded sysctl configuration")
	} else {
		logger.Debugf("%s already has the expected value: %s", reservedPortsKey, expectedValue)
	}
	return nil
}

View File

@@ -18,7 +18,16 @@ type UpgradeModule struct {
}
var (
preTasks []*upgradeTask
preTasks = []*upgradeTask{
{
Task: &task.LocalTask{
Name: "UpdateSysctlReservedPorts",
Action: new(UpdateSysctlReservedPorts),
},
Current: &explicitVersionMatcher{max: semver.New(1, 12, 0, "20250701", "")},
Target: anyVersion,
},
}
coreTasks = []*upgradeTask{
{

View File

@@ -5,5 +5,5 @@ output:
-
id: olaresd
name: olaresd-v#__VERSION__.tar.gz
amd64: https://dc3p1870nn3cj.cloudfront.net/olaresd-v#__VERSION__-linux-amd64.tar.gz
arm64: https://dc3p1870nn3cj.cloudfront.net/olaresd-v#__VERSION__-linux-arm64.tar.gz
amd64: https://dc3p1870nn3cj.cloudfront.net#__REPO_PATH__olaresd-v#__VERSION__-linux-amd64.tar.gz
arm64: https://dc3p1870nn3cj.cloudfront.net#__REPO_PATH__olaresd-v#__VERSION__-linux-arm64.tar.gz

View File

@@ -1,4 +1,6 @@
current_dir := $(dir $(abspath $(firstword $(MAKEFILE_LIST))))
.PHONY: all tidy fmt vet build
all: tidy build
@@ -17,3 +19,11 @@ build: fmt vet ;$(info $(M)...Begin to build terminusd.) @
build-linux: fmt vet ;$(info $(M)...Begin to build terminusd (linux version).) @
CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/olaresd cmd/terminusd/main.go
build-linux-in-docker:
docker run -it --platform linux/amd64 --rm \
-v $(current_dir):/olaresd \
-w /olaresd \
-e DEBIAN_FRONTEND=noninteractive \
golang:1.24 \
sh -c "apt-get -y update; apt-get -y install libudev-dev; make build-linux"

170
daemon/README.md Normal file
View File

@@ -0,0 +1,170 @@
# Olares System Daemon (`olaresd`)
`olaresd` is the foundational process that boots on every Olares node. It runs as a `systemd` service on port `18088`, exposing a secure REST API for hardware abstraction, network orchestration, storage management, and turnkey cluster operations—all before Kubernetes starts.
`olaresd` is installed as a systemd service via the unit file at `/etc/systemd/system/olaresd.service`.
## Key features
- **System monitoring**: Continuous health checks of cluster and node status.
- **Cluster lifecycle management**: Automated install, upgrade, IP-switching, restart, and maintenance operations.
- **Hardware Abstraction**: USB auto-mounting, storage provisioning, and management.
- **Network Management**: mDNS service discovery, WiFi onboarding, and network interface control.
## REST API reference
The daemon provides an authenticated REST API (using signature-based auth):
**Base URL**: `http://<node-ip>:18088`
### System commands (`/command/`)
**Lifecycle operations**
| Method | Endpoint | Description |
|--------|-----------------------------|------------------------------|
| POST | `/command/install` | Install Olares |
| POST | `/command/uninstall` | Uninstall Olares |
| POST | `/command/upgrade` | Upgrade Olares |
| DELETE | `/command/upgrade` | Cancel upgrade |
| POST | `/command/reboot` | Reboot node |
| POST | `/command/shutdown` | Shutdown node |
**Network configuration**
| Method | Endpoint | Description |
|--------|-----------------------------|------------------------------|
| POST | `/command/connect-wifi` | Connect to WiFi |
| POST | `/command/change-host` | Change Olares IP binding |
**Storage management**
| Method | Endpoint | Description |
|--------|-----------------------------------|------------------------------------|
| POST | `/command/mount-samba` | Mount SMB shares |
| POST | `/command/v2/mount-samba` | Enhanced SMB mounting |
| POST | `/command/umount-samba` | Unmount SMB shares |
| POST | `/command/umount-samba-incluster` | Cluster-wide SMB unmount |
| POST | `/command/umount-usb` | Unmount USB device |
| POST | `/command/umount-usb-incluster` | Cluster-wide USB unmount |
**System Maintenance**
| Method | Endpoint | Description |
|--------|-----------------------------|-------------------------------------|
| POST | `/command/collect-logs` | Collect system logs for diagnostics |
---
### System information (`/system/`)
**System status**
| Method | Endpoint | Description |
|--------|--------------------------|-----------------------------|
| GET | `/system/status` | Get full system status |
| GET | `/system/ifs` | List network interfaces |
| GET | `/system/hosts-file` | View `/etc/hosts` |
| POST | `/system/hosts-file` | Update `/etc/hosts` |
**Mount information**
| Method | Endpoint | Description |
|--------|---------------------------------|--------------------------------|
| GET | `/system/mounted-usb` | Mounted USB devices |
| GET | `/system/mounted-hdd` | Mounted hard drives |
| GET | `/system/mounted-smb` | Mounted SMB shares |
| GET | `/system/mounted-path` | All mount points |
**Cluster-wide mounts**
| Method | Endpoint | Description |
|--------|--------------------------------------|----------------------------------|
| GET | `/system/mounted-usb-incluster` | USB mounts in cluster |
| GET | `/system/mounted-hdd-incluster` | HDD mounts in cluster |
| GET | `/system/mounted-smb-incluster` | SMB mounts in cluster |
| GET | `/system/mounted-path-incluster` | All cluster mounts |
---
### Container management (`/containerd/`)
**Registry Management**
| Method | Endpoint | Description |
|--------|-------------------------------------------|-------------------------------------|
| GET | `/containerd/registries` | List registries |
| GET | `/containerd/registry/mirrors/` | List registry mirrors |
| GET | `/containerd/registry/mirrors/:registry` | Get specific registry mirror |
| PUT | `/containerd/registry/mirrors/:registry` | Update registry mirror |
| DELETE | `/containerd/registry/mirrors/:registry` | Delete registry mirror |
**Image Management**
| Method | Endpoint | Description |
|--------|----------------------------------|--------------------------------|
| GET | `/containerd/images/` | List container images |
| DELETE | `/containerd/images/:image` | Delete specific image |
| POST | `/containerd/images/prune` | Remove unused images |
## Build from source
### Prerequisites
* Go 1.24+
* GoReleaser (Optional, for creating release artifacts)
### Steps
1. **Navigate to the daemon directory:**
```bash
cd daemon
```
2. **Build for your host OS/architecture:**
```bash
go build -o olaresd ./cmd/olaresd/main.go
```
3. **Cross-compile for another target (e.g., Linux AMD64):**
```bash
GOOS=linux GOARCH=amd64 go build -o olaresd ./cmd/olaresd/main.go
```
4. **Produce release artifacts (optional):**
```bash
goreleaser release --snapshot --clean
```
## Extend `olaresd`
To add a new command API:
1. **Define command**: Add a new command struct in `pkg/commands/`.
2. **Implement handler**: Create the corresponding HTTP handler logic in `internal/apiserver/handlers/`.
3. **Register route**: Register the new API route in `internal/apiserver/server.go`.
4. **Update state**: If the command modifies the cluster's state, ensure you update the logic in `pkg/cluster/state/`.
5. **Validate**: Run `go vet ./... && go test ./...` to check for issues and ensure all tests pass before opening a pull request.
### Test a custom build
1. Copy the binary to your Olares node.
2. On the node, replace the existing binary:
```bash
# Move the new binary into place
sudo cp -f /tmp/olaresd /usr/local/bin/
```
3. Restart the daemon to apply changes:
```bash
sudo systemctl restart olaresd
```

View File

@@ -14,6 +14,7 @@ import (
"github.com/beclab/Olares/daemon/internel/ble"
"github.com/beclab/Olares/daemon/internel/mdns"
"github.com/beclab/Olares/daemon/internel/watcher"
"github.com/beclab/Olares/daemon/internel/watcher/cert"
"github.com/beclab/Olares/daemon/internel/watcher/system"
"github.com/beclab/Olares/daemon/internel/watcher/upgrade"
"github.com/beclab/Olares/daemon/internel/watcher/usb"
@@ -48,10 +49,7 @@ func main() {
mainCtx, cancel := context.WithCancel(context.Background())
apis, err := apiserver.NewServer(mainCtx, port)
if err != nil {
panic(err)
}
apis := apiserver.NewServer(mainCtx, port)
if err := state.CheckCurrentStatus(mainCtx); err != nil {
klog.Error(err)
@@ -96,6 +94,7 @@ func main() {
// usb.NewUsbWatcher(),
usb.NewUmountWatcher(),
upgrade.NewUpgradeWatcher(),
cert.NewCertWatcher(),
}, func() {
if s != nil {
if err := s.Restart(); err != nil {

View File

@@ -6,6 +6,7 @@ toolchain go1.24.4
replace (
bytetrade.io/web3os/app-service => github.com/beclab/app-service v0.2.33
bytetrade.io/web3os/backups-sdk => github.com/Above-Os/backups-sdk v0.1.17
bytetrade.io/web3os/bfl => github.com/beclab/bfl v0.3.36
k8s.io/api => k8s.io/api v0.31.0
k8s.io/apimachinery => k8s.io/apimachinery v0.31.0
@@ -79,7 +80,6 @@ require (
github.com/containerd/platforms v0.2.1 // indirect
github.com/containerd/ttrpc v1.2.7 // indirect
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/distribution/reference v0.6.0 // indirect
@@ -123,7 +123,7 @@ require (
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/mountinfo v0.7.1 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/signal v0.7.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect
@@ -134,9 +134,9 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/runc v1.1.13 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
github.com/opencontainers/runc v1.3.0 // indirect
github.com/opencontainers/runtime-spec v1.2.1 // indirect
github.com/opencontainers/selinux v1.11.1 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect

View File

@@ -63,8 +63,6 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq
github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -120,7 +118,6 @@ github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofiber/fiber/v2 v2.52.5 h1:tWoP1MJQjGEe4GB5TUGOi7P2E0ZMMRx5ZTG4rT+yGMo=
@@ -233,8 +230,8 @@ github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
@@ -273,12 +270,12 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstuUhmRs=
github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
@@ -440,7 +437,6 @@ golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@@ -0,0 +1,83 @@
package handlers
import (
"github.com/beclab/Olares/daemon/internel/apiserver/server"
changehost "github.com/beclab/Olares/daemon/pkg/commands/change_host"
collectlogs "github.com/beclab/Olares/daemon/pkg/commands/collect_logs"
connectwifi "github.com/beclab/Olares/daemon/pkg/commands/connect_wifi"
"github.com/beclab/Olares/daemon/pkg/commands/install"
mountsmb "github.com/beclab/Olares/daemon/pkg/commands/mount_smb"
"github.com/beclab/Olares/daemon/pkg/commands/reboot"
"github.com/beclab/Olares/daemon/pkg/commands/shutdown"
umountsmb "github.com/beclab/Olares/daemon/pkg/commands/umount_smb"
umountusb "github.com/beclab/Olares/daemon/pkg/commands/umount_usb"
"github.com/beclab/Olares/daemon/pkg/commands/uninstall"
"github.com/beclab/Olares/daemon/pkg/commands/upgrade"
"k8s.io/klog/v2"
)
func init() {
s := server.API
cmd := s.App.Group("command")
cmd.Post("/install", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostTerminusInit, install.New))))
cmd.Post("/uninstall", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostTerminusUninstall, uninstall.New))))
cmd.Post("/upgrade", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.RequestOlaresUpgrade, upgrade.NewCreateUpgradeTarget))))
cmd.Delete("/upgrade", handlers.RequireSignature(
handlers.RunCommand(handlers.CancelOlaresUpgrade, upgrade.NewRemoveUpgradeTarget)))
cmd.Post("/reboot", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostReboot, reboot.New))))
cmd.Post("/shutdown", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostShutdown, shutdown.New))))
cmd.Post("/connect-wifi", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostConnectWifi, connectwifi.New))))
cmd.Post("/change-host", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostChangeHost, changehost.New))))
cmd.Post("/umount-usb", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostUmountUsb, umountusb.New))))
cmd.Post("/umount-usb-incluster", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostUmountUsbInCluster, umountusb.New))))
cmd.Post("/collect-logs", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostCollectLogs, collectlogs.New))))
cmd.Post("/mount-samba", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostMountSambaDriver, mountsmb.New))))
cmd.Post("/umount-samba", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostUmountSmb, umountsmb.New))))
cmd.Post("/umount-samba-incluster", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostUmountSmbInCluster, umountsmb.New))))
cmdv2 := cmd.Group("v2")
cmdv2.Post("/mount-samba", handlers.RequireSignature(
handlers.WaitServerRunning(
handlers.RunCommand(handlers.PostMountSambaDriverV2, mountsmb.New))))
klog.Info("command handlers initialized")
}

View File

@@ -0,0 +1,28 @@
package handlers
import (
"github.com/beclab/Olares/daemon/internel/apiserver/server"
"k8s.io/klog/v2"
)
func init() {
s := server.API
containerd := s.App.Group("containerd")
containerd.Get("/registries", handlers.RequireSignature(handlers.ListRegistries))
registry := containerd.Group("registry")
mirrors := registry.Group("mirrors")
mirrors.Get("/", handlers.RequireSignature(handlers.GetRegistryMirrors))
mirrors.Get("/:registry", handlers.RequireSignature(handlers.GetRegistryMirror))
mirrors.Put("/:registry", handlers.RequireSignature(handlers.UpdateRegistryMirror))
mirrors.Delete("/:registry", handlers.RequireSignature(handlers.DeleteRegistryMirror))
image := containerd.Group("images")
image.Get("/", handlers.RequireSignature(handlers.ListImages))
image.Delete("/:image", handlers.RequireSignature(handlers.DeleteImage))
image.Post("/prune", handlers.RequireSignature(handlers.PruneImages))
klog.Info("containerd handlers initialized")
}

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -13,7 +13,7 @@ type ChangeHostReq struct {
IP string `json:"ip"`
}
func (h *handlers) PostChangeHost(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostChangeHost(ctx *fiber.Ctx, cmd commands.Interface) error {
var req ChangeHostReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -8,7 +8,7 @@ import (
"k8s.io/klog/v2"
)
func (h *handlers) PostCollectLogs(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostCollectLogs(ctx *fiber.Ctx, cmd commands.Interface) error {
_, err := cmd.Execute(ctx.Context(), nil)
if err != nil {
klog.Error("execute command error, ", err, ", ", cmd.OperationName().Stirng())

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -14,7 +14,7 @@ type ConnectWifiReq struct {
SSID string `json:"ssid"`
}
func (h *handlers) PostConnectWifi(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostConnectWifi(ctx *fiber.Ctx, cmd commands.Interface) error {
var req ConnectWifiReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -8,7 +8,7 @@ import (
"k8s.io/klog/v2"
)
func (h *handlers) ListRegistries(ctx *fiber.Ctx) error {
func (h *Handlers) ListRegistries(ctx *fiber.Ctx) error {
images, err := containerd.ListRegistries(ctx)
if err != nil {
klog.Error("list registries error, ", err)
@@ -17,7 +17,7 @@ func (h *handlers) ListRegistries(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "success", images)
}
func (h *handlers) GetRegistryMirrors(ctx *fiber.Ctx) error {
func (h *Handlers) GetRegistryMirrors(ctx *fiber.Ctx) error {
mirrors, err := containerd.GetRegistryMirrors(ctx)
if err != nil {
klog.Error("get registry mirrors error, ", err)
@@ -27,7 +27,7 @@ func (h *handlers) GetRegistryMirrors(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "success", mirrors)
}
func (h *handlers) GetRegistryMirror(ctx *fiber.Ctx) error {
func (h *Handlers) GetRegistryMirror(ctx *fiber.Ctx) error {
mirror, err := containerd.GetRegistryMirror(ctx)
if err != nil {
klog.Error("get registry mirror error, ", err)
@@ -37,7 +37,7 @@ func (h *handlers) GetRegistryMirror(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "success", mirror)
}
func (h *handlers) UpdateRegistryMirror(ctx *fiber.Ctx) error {
func (h *Handlers) UpdateRegistryMirror(ctx *fiber.Ctx) error {
mirror, err := containerd.UpdateRegistryMirror(ctx)
if err != nil {
klog.Error("update registry mirror error, ", err)
@@ -47,7 +47,7 @@ func (h *handlers) UpdateRegistryMirror(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "success", mirror)
}
func (h *handlers) DeleteRegistryMirror(ctx *fiber.Ctx) error {
func (h *Handlers) DeleteRegistryMirror(ctx *fiber.Ctx) error {
if err := containerd.DeleteRegistryMirror(ctx); err != nil {
klog.Error("delete registry mirror error, ", err)
return h.ErrJSON(ctx, http.StatusInternalServerError, err.Error())
@@ -56,7 +56,7 @@ func (h *handlers) DeleteRegistryMirror(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "success")
}
func (h *handlers) ListImages(ctx *fiber.Ctx) error {
func (h *Handlers) ListImages(ctx *fiber.Ctx) error {
registry := ctx.Query("registry")
images, err := containerd.ListImages(ctx, registry)
if err != nil {
@@ -66,7 +66,7 @@ func (h *handlers) ListImages(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "success", images)
}
func (h *handlers) DeleteImage(ctx *fiber.Ctx) error {
func (h *Handlers) DeleteImage(ctx *fiber.Ctx) error {
if err := containerd.DeleteImage(ctx); err != nil {
klog.Error("delete image error, ", err)
return h.ErrJSON(ctx, http.StatusInternalServerError, err.Error())
@@ -74,7 +74,7 @@ func (h *handlers) DeleteImage(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "success")
}
func (h *handlers) PruneImages(ctx *fiber.Ctx) error {
func (h *Handlers) PruneImages(ctx *fiber.Ctx) error {
res, err := containerd.PruneImages(ctx)
if err != nil {
klog.Error("prune images error, ", err)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -9,7 +9,7 @@ import (
"k8s.io/klog/v2"
)
func (h *handlers) GetHostsfile(ctx *fiber.Ctx) error {
func (h *Handlers) GetHostsfile(ctx *fiber.Ctx) error {
items, err := nets.GetHostsFile()
if err != nil {
return h.ErrJSON(ctx, http.StatusServiceUnavailable, err.Error())
@@ -22,7 +22,7 @@ type writeHostsfileReq struct {
Items []*nets.HostsItem `json:"items"`
}
func (h *handlers) PostHostsfile(ctx *fiber.Ctx) error {
func (h *Handlers) PostHostsfile(ctx *fiber.Ctx) error {
var req writeHostsfileReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -21,6 +21,7 @@ type NetIf struct {
Strength *int `json:"strength,omitempty"`
MTU int `json:"mtu,omitempty"`
InternetConnected *bool `json:"internetConnected,omitempty"`
Hostname string `json:"hostname,omitempty"` // Hostname of the device
Ipv4Gateway *string `json:"ipv4Gateway,omitempty"`
Ipv6Gateway *string `json:"ipv6Gateway,omitempty"`
@@ -34,7 +35,7 @@ type NetIf struct {
TxRate *float64 `json:"txRate,omitempty"` // in bytes per second
}
func (h *handlers) GetNetIfs(ctx *fiber.Ctx) error {
func (h *Handlers) GetNetIfs(ctx *fiber.Ctx) error {
test := ctx.Query("testConnectivity", "false")
ifaces, err := nets.GetInternalIpv4Addr(test != "true")
@@ -65,6 +66,7 @@ func (h *handlers) GetNetIfs(ctx *fiber.Ctx) error {
IP: i.IP,
IsHostIp: i.IP == hostip,
MTU: i.Iface.MTU,
Hostname: host,
}
if wifiDevs != nil {
@@ -80,22 +82,32 @@ func (h *handlers) GetNetIfs(ctx *fiber.Ctx) error {
}
}
if test == "true" {
r.InternetConnected = ptr.To(utils.CheckInterfaceIPv4Connectivity(ctx.Context(), i.Iface.Name))
devices, err := utils.GetAllDevice(ctx.Context())
if err != nil {
klog.Error("get all devices error, ", err)
return h.ErrJSON(ctx, http.StatusServiceUnavailable, err.Error())
}
devices, err := utils.GetAllDevice(ctx.Context())
if err != nil {
klog.Error("get all devices error, ", err)
return h.ErrJSON(ctx, http.StatusServiceUnavailable, err.Error())
}
if d, ok := devices[r.Iface]; ok {
r.Ipv4Gateway = &d.Ipv4Gateway
r.Ipv6Gateway = &d.Ipv6Gateway
r.Ipv4DNS = &d.Ipv4DNS
r.Ipv6DNS = &d.Ipv6DNS
r.Ipv6Address = &d.Ipv6Address
r.Ipv4Mask = &d.Ipv4Mask
r.Method = &d.Method
if d, ok := devices[r.Iface]; ok {
r.Ipv4Gateway = &d.Ipv4Gateway
r.Ipv6Gateway = &d.Ipv6Gateway
r.Ipv4DNS = &d.Ipv4DNS
r.Ipv6DNS = &d.Ipv6DNS
r.Ipv6Address = &d.Ipv6Address
r.Ipv4Mask = &d.Ipv4Mask
r.Method = &d.Method
}
if rx, tx, err := utils.GetInterfaceTraffic(r.Iface); err == nil {
r.RxRate = ptr.To(rx)
r.TxRate = ptr.To(tx)
} else {
klog.Error("get interface rx/tx rate error, ", err)
}
if test == "true" {
if r.IP != "" {
r.InternetConnected = ptr.To(utils.CheckInterfaceIPv4Connectivity(ctx.Context(), i.Iface.Name))
}
if r.Ipv6Address != nil && *r.Ipv6Address != "" {
@@ -104,12 +116,6 @@ func (h *handlers) GetNetIfs(ctx *fiber.Ctx) error {
r.Ipv6Connectivity = &connected
}
if rx, tx, err := utils.GetInterfaceTraffic(r.Iface); err == nil {
r.RxRate = ptr.To(rx)
r.TxRate = ptr.To(tx)
} else {
klog.Error("get interface rx/tx rate error, ", err)
}
}
res = append(res, r)
@@ -133,8 +139,8 @@ func (h *handlers) GetNetIfs(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "", res)
}
func (h *handlers) findAp(ssid string) *ble.AccessPoint {
for _, ap := range h.apList {
func (h *Handlers) findAp(ssid string) *ble.AccessPoint {
for _, ap := range h.ApList {
if ap.SSID == ssid {
return &ap
}

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -15,7 +15,7 @@ type MountReq struct {
Password string `json:"password"`
}
func (h *handlers) PostMountSambaDriver(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostMountSambaDriver(ctx *fiber.Ctx, cmd commands.Interface) error {
var req MountReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -17,7 +17,7 @@ type ListSmbResponse struct {
Mounted bool `json:"mounted"`
}
func (h *handlers) PostMountSambaDriverV2(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostMountSambaDriverV2(ctx *fiber.Ctx, cmd commands.Interface) error {
var req MountReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -9,7 +9,7 @@ import (
"k8s.io/klog/v2"
)
func (h *handlers) getMountedHdd(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *disk.UsageStat) error {
func (h *Handlers) getMountedHdd(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *disk.UsageStat) error {
paths, err := utils.MountedHddPath(ctx.Context())
if err != nil {
return h.ErrJSON(ctx, http.StatusInternalServerError, err.Error())
@@ -35,11 +35,11 @@ func (h *handlers) getMountedHdd(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *d
return h.OkJSON(ctx, "success", res)
}
func (h *handlers) GetMountedHdd(ctx *fiber.Ctx) error {
func (h *Handlers) GetMountedHdd(ctx *fiber.Ctx) error {
return h.getMountedHdd(ctx, nil)
}
func (h *handlers) GetMountedHddInCluster(ctx *fiber.Ctx) error {
func (h *Handlers) GetMountedHddInCluster(ctx *fiber.Ctx) error {
return h.getMountedHdd(ctx, func(us *disk.UsageStat) *disk.UsageStat {
us.Path = nodePathToClusterPath(us.Path)
return us

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -20,7 +20,7 @@ type mountedPath struct {
ReadOnly bool `json:"read_only"`
}
func (h *handlers) getMountedPath(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *disk.UsageStat) error {
func (h *Handlers) getMountedPath(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *disk.UsageStat) error {
paths, err := utils.MountedPath(ctx.Context())
if err != nil {
return h.ErrJSON(ctx, http.StatusInternalServerError, err.Error())
@@ -58,11 +58,11 @@ func (h *handlers) getMountedPath(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *
return h.OkJSON(ctx, "success", res)
}
func (h *handlers) GetMountedPath(ctx *fiber.Ctx) error {
func (h *Handlers) GetMountedPath(ctx *fiber.Ctx) error {
return h.getMountedPath(ctx, nil)
}
func (h *handlers) GetMountedPathInCluster(ctx *fiber.Ctx) error {
func (h *Handlers) GetMountedPathInCluster(ctx *fiber.Ctx) error {
return h.getMountedPath(ctx, func(us *disk.UsageStat) *disk.UsageStat {
us.Path = nodePathToClusterPath(us.Path)
return us

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -15,7 +15,7 @@ type mountedSmbPathResponse struct {
Device string `json:"device"`
}
func (h *handlers) getMountedSmb(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *disk.UsageStat) error {
func (h *Handlers) getMountedSmb(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *disk.UsageStat) error {
paths, err := utils.MountedSambaPath(ctx.Context())
if err != nil {
return h.ErrJSON(ctx, http.StatusInternalServerError, err.Error())
@@ -41,11 +41,11 @@ func (h *handlers) getMountedSmb(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *d
return h.OkJSON(ctx, "success", res)
}
func (h *handlers) GetMountedSmb(ctx *fiber.Ctx) error {
func (h *Handlers) GetMountedSmb(ctx *fiber.Ctx) error {
return h.getMountedSmb(ctx, nil)
}
func (h *handlers) GetMountedSmbInCluster(ctx *fiber.Ctx) error {
func (h *Handlers) GetMountedSmbInCluster(ctx *fiber.Ctx) error {
return h.getMountedSmb(ctx, func(us *disk.UsageStat) *disk.UsageStat {
us.Path = nodePathToClusterPath(us.Path)
return us

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -9,7 +9,7 @@ import (
"k8s.io/klog/v2"
)
func (h *handlers) getMountedUsb(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *disk.UsageStat) error {
func (h *Handlers) getMountedUsb(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *disk.UsageStat) error {
paths, err := utils.MountedUsbPath(ctx.Context())
if err != nil {
return h.ErrJSON(ctx, http.StatusInternalServerError, err.Error())
@@ -33,11 +33,11 @@ func (h *handlers) getMountedUsb(ctx *fiber.Ctx, mutate func(*disk.UsageStat) *d
return h.OkJSON(ctx, "success", res)
}
func (h *handlers) GetMountedUsb(ctx *fiber.Ctx) error {
func (h *Handlers) GetMountedUsb(ctx *fiber.Ctx) error {
return h.getMountedUsb(ctx, nil)
}
func (h *handlers) GetMountedUsbInCluster(ctx *fiber.Ctx) error {
func (h *Handlers) GetMountedUsbInCluster(ctx *fiber.Ctx) error {
return h.getMountedUsb(ctx, func(us *disk.UsageStat) *disk.UsageStat {
us.Path = nodePathToClusterPath(us.Path)
return us

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"fmt"
@@ -8,12 +8,14 @@ import (
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/beclab/Olares/daemon/pkg/commands"
"github.com/beclab/Olares/daemon/pkg/commands/upgrade"
"github.com/gofiber/fiber/v2"
"k8s.io/klog/v2"
)
type UpgradeReq struct {
Version string `json:"version"`
Version string `json:"version"`
DownloadOnly bool `json:"downloadOnly,omitempty"` // false means download-and-upgrade
}
func (r *UpgradeReq) Check() error {
@@ -33,7 +35,7 @@ func (r *UpgradeReq) Check() error {
return nil
}
func (h *handlers) RequestOlaresUpgrade(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) RequestOlaresUpgrade(ctx *fiber.Ctx, cmd commands.Interface) error {
var req UpgradeReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)
@@ -43,17 +45,25 @@ func (h *handlers) RequestOlaresUpgrade(ctx *fiber.Ctx, cmd commands.Interface)
return h.ErrJSON(ctx, http.StatusBadRequest, err.Error())
}
if _, err := cmd.Execute(ctx.Context(), req.Version); err != nil {
upgradeReq := upgrade.UpgradeRequest{
Version: req.Version,
DownloadOnly: req.DownloadOnly,
}
if _, err := cmd.Execute(ctx.Context(), upgradeReq); err != nil {
return h.ErrJSON(ctx, http.StatusBadRequest, err.Error())
}
if req.DownloadOnly {
return h.OkJSON(ctx, "successfully created download target")
}
return h.OkJSON(ctx, "successfully created upgrade target")
}
func (h *handlers) CancelOlaresUpgrade(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) CancelOlaresUpgrade(ctx *fiber.Ctx, cmd commands.Interface) error {
if _, err := cmd.Execute(ctx.Context(), nil); err != nil {
return h.ErrJSON(ctx, http.StatusBadRequest, err.Error())
}
return h.OkJSON(ctx, "successfully removed upgrade target")
return h.OkJSON(ctx, "successfully cancelled upgrade/download")
}

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -8,7 +8,7 @@ import (
"k8s.io/klog/v2"
)
func (h *handlers) PostReboot(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostReboot(ctx *fiber.Ctx, cmd commands.Interface) error {
_, err := cmd.Execute(ctx.Context(), nil)
if err != nil {
klog.Error("execute command error, ", err, ", ", cmd.OperationName().Stirng())

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -8,7 +8,7 @@ import (
"k8s.io/klog/v2"
)
func (h *handlers) PostShutdown(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostShutdown(ctx *fiber.Ctx, cmd commands.Interface) error {
_, err := cmd.Execute(ctx.Context(), nil)
if err != nil {
klog.Error("execute command error, ", err, ", ", cmd.OperationName().Stirng())

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -16,7 +16,7 @@ type TerminusInitReq struct {
Domain string `json:"domain"`
}
func (h *handlers) PostTerminusInit(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostTerminusInit(ctx *fiber.Ctx, cmd commands.Interface) error {
var req TerminusInitReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)

View File

@@ -1,10 +1,10 @@
package apiserver
package handlers
import (
"github.com/beclab/Olares/daemon/pkg/cluster/state"
"github.com/gofiber/fiber/v2"
)
func (h *handlers) GetTerminusState(ctx *fiber.Ctx) error {
func (h *Handlers) GetTerminusState(ctx *fiber.Ctx) error {
return h.OkJSON(ctx, "success", state.CurrentState)
}

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -8,7 +8,7 @@ import (
"k8s.io/klog/v2"
)
func (h *handlers) PostTerminusUninstall(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostTerminusUninstall(ctx *fiber.Ctx, cmd commands.Interface) error {
// run in background
_, err := cmd.Execute(h.mainCtx, nil)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -13,7 +13,7 @@ type UmountSmbReq struct {
Path string ``
}
func (h *handlers) umountSmbInNode(ctx *fiber.Ctx, cmd commands.Interface, pathInNode string) error {
func (h *Handlers) umountSmbInNode(ctx *fiber.Ctx, cmd commands.Interface, pathInNode string) error {
_, err := cmd.Execute(ctx.Context(), &umountsmb.Param{
MountPath: pathInNode,
})
@@ -25,7 +25,7 @@ func (h *handlers) umountSmbInNode(ctx *fiber.Ctx, cmd commands.Interface, pathI
return h.OkJSON(ctx, "success to umount")
}
func (h *handlers) PostUmountSmb(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostUmountSmb(ctx *fiber.Ctx, cmd commands.Interface) error {
var req UmountSmbReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)
@@ -38,7 +38,7 @@ func (h *handlers) PostUmountSmb(ctx *fiber.Ctx, cmd commands.Interface) error {
return h.umountSmbInNode(ctx, cmd, req.Path)
}
func (h *handlers) PostUmountSmbInCluster(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostUmountSmbInCluster(ctx *fiber.Ctx, cmd commands.Interface) error {
var req UmountSmbReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -13,7 +13,7 @@ type UmountReq struct {
Path string ``
}
func (h *handlers) umountUsbInNode(ctx *fiber.Ctx, cmd commands.Interface, pathInNode string) error {
func (h *Handlers) umountUsbInNode(ctx *fiber.Ctx, cmd commands.Interface, pathInNode string) error {
_, err := cmd.Execute(ctx.Context(), &umountusb.Param{
Path: pathInNode,
})
@@ -25,7 +25,7 @@ func (h *handlers) umountUsbInNode(ctx *fiber.Ctx, cmd commands.Interface, pathI
return h.OkJSON(ctx, "success to umount")
}
func (h *handlers) PostUmountUsb(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostUmountUsb(ctx *fiber.Ctx, cmd commands.Interface) error {
var req UmountReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)
@@ -38,7 +38,7 @@ func (h *handlers) PostUmountUsb(ctx *fiber.Ctx, cmd commands.Interface) error {
return h.umountUsbInNode(ctx, cmd, req.Path)
}
func (h *handlers) PostUmountUsbInCluster(ctx *fiber.Ctx, cmd commands.Interface) error {
func (h *Handlers) PostUmountUsbInCluster(ctx *fiber.Ctx, cmd commands.Interface) error {
var req UmountReq
if err := h.ParseBody(ctx, &req); err != nil {
klog.Error("parse request error, ", err)

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"context"
@@ -10,12 +10,19 @@ import (
"github.com/gofiber/fiber/v2"
)
type handlers struct {
type Handlers struct {
mainCtx context.Context
apList []ble.AccessPoint
ApList []ble.AccessPoint
}
func (h *handlers) ParseBody(ctx *fiber.Ctx, value any) error {
var handlers *Handlers = &Handlers{}
func NewHandlers(ctx context.Context) *Handlers {
handlers.mainCtx = ctx
return handlers
}
func (h *Handlers) ParseBody(ctx *fiber.Ctx, value any) error {
err := ctx.BodyParser(value)
if err != nil {
@@ -35,7 +42,7 @@ func (h *handlers) ParseBody(ctx *fiber.Ctx, value any) error {
return nil
}
func (h *handlers) ErrJSON(ctx *fiber.Ctx, code int, message string, data ...interface{}) error {
func (h *Handlers) ErrJSON(ctx *fiber.Ctx, code int, message string, data ...interface{}) error {
switch len(data) {
case 0:
return ctx.Status(code).JSON(fiber.Map{
@@ -58,10 +65,10 @@ func (h *handlers) ErrJSON(ctx *fiber.Ctx, code int, message string, data ...int
}
func (h *handlers) OkJSON(ctx *fiber.Ctx, message string, data ...interface{}) error {
func (h *Handlers) OkJSON(ctx *fiber.Ctx, message string, data ...interface{}) error {
return h.ErrJSON(ctx, http.StatusOK, message, data...)
}
func (h *handlers) NeedChoiceJSON(ctx *fiber.Ctx, message string, data ...interface{}) error {
func (h *Handlers) NeedChoiceJSON(ctx *fiber.Ctx, message string, data ...interface{}) error {
return h.ErrJSON(ctx, http.StatusMultipleChoices, message, data...)
}

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"path/filepath"

View File

@@ -1,4 +1,4 @@
package apiserver
package handlers
import (
"net/http"
@@ -13,7 +13,7 @@ const (
SIGNATURE_HEADER = "X-Signature"
)
func (h *handlers) WaitServerRunning(next func(ctx *fiber.Ctx) error) func(ctx *fiber.Ctx) error {
func (h *Handlers) WaitServerRunning(next func(ctx *fiber.Ctx) error) func(ctx *fiber.Ctx) error {
return func(ctx *fiber.Ctx) error {
if state.CurrentState.TerminusdState != state.Running {
return h.ErrJSON(ctx, http.StatusForbidden, "server is not running, please wait and retry again later")
@@ -23,7 +23,7 @@ func (h *handlers) WaitServerRunning(next func(ctx *fiber.Ctx) error) func(ctx *
}
}
func (h *handlers) RequireSignature(next func(ctx *fiber.Ctx) error) func(ctx *fiber.Ctx) error {
func (h *Handlers) RequireSignature(next func(ctx *fiber.Ctx) error) func(ctx *fiber.Ctx) error {
return func(ctx *fiber.Ctx) error {
headers := ctx.GetReqHeaders()
signature, ok := headers[SIGNATURE_HEADER]
@@ -42,7 +42,7 @@ func (h *handlers) RequireSignature(next func(ctx *fiber.Ctx) error) func(ctx *f
}
}
func (h *handlers) RunCommand(next func(ctx *fiber.Ctx, cmd commands.Interface) error,
func (h *Handlers) RunCommand(next func(ctx *fiber.Ctx, cmd commands.Interface) error,
cmdNew func() commands.Interface) func(ctx *fiber.Ctx) error {
return func(ctx *fiber.Ctx) error {

View File

@@ -0,0 +1,25 @@
package handlers
import (
"github.com/beclab/Olares/daemon/internel/apiserver/server"
"k8s.io/klog/v2"
)
func init() {
s := server.API
system := s.App.Group("system")
system.Get("/status", handlers.RequireSignature(handlers.GetTerminusState))
system.Get("/ifs", handlers.RequireSignature(handlers.GetNetIfs))
system.Get("/hosts-file", handlers.RequireSignature(handlers.GetHostsfile))
system.Post("/hosts-file", handlers.RequireSignature(handlers.PostHostsfile))
system.Get("/mounted-usb", handlers.RequireSignature(handlers.GetMountedUsb))
system.Get("/mounted-hdd", handlers.RequireSignature(handlers.GetMountedHdd))
system.Get("/mounted-smb", handlers.RequireSignature(handlers.GetMountedSmb))
system.Get("/mounted-path", handlers.RequireSignature(handlers.GetMountedPath))
system.Get("/mounted-usb-incluster", handlers.RequireSignature(handlers.GetMountedUsbInCluster))
system.Get("/mounted-hdd-incluster", handlers.RequireSignature(handlers.GetMountedHddInCluster))
system.Get("/mounted-smb-incluster", handlers.RequireSignature(handlers.GetMountedSmbInCluster))
system.Get("/mounted-path-incluster", handlers.RequireSignature(handlers.GetMountedPathInCluster))
klog.Info("system handlers initialized")
}

View File

@@ -2,146 +2,26 @@ package apiserver
import (
"context"
"fmt"
"github.com/beclab/Olares/daemon/internel/apiserver/handlers"
"github.com/beclab/Olares/daemon/internel/apiserver/server"
"github.com/beclab/Olares/daemon/internel/ble"
changehost "github.com/beclab/Olares/daemon/pkg/commands/change_host"
collectlogs "github.com/beclab/Olares/daemon/pkg/commands/collect_logs"
connectwifi "github.com/beclab/Olares/daemon/pkg/commands/connect_wifi"
"github.com/beclab/Olares/daemon/pkg/commands/install"
mountsmb "github.com/beclab/Olares/daemon/pkg/commands/mount_smb"
"github.com/beclab/Olares/daemon/pkg/commands/reboot"
"github.com/beclab/Olares/daemon/pkg/commands/shutdown"
umountsmb "github.com/beclab/Olares/daemon/pkg/commands/umount_smb"
umountusb "github.com/beclab/Olares/daemon/pkg/commands/umount_usb"
"github.com/beclab/Olares/daemon/pkg/commands/uninstall"
"github.com/beclab/Olares/daemon/pkg/commands/upgrade"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/cors"
"github.com/gofiber/fiber/v2/middleware/logger"
"k8s.io/klog/v2"
)
type server struct {
handlers *handlers
port int
app *fiber.App
}
func NewServer(ctx context.Context, port int) *server.Server {
server.API.Port = port
h := handlers.NewHandlers(ctx)
func NewServer(ctx context.Context, port int) (*server, error) {
return &server{handlers: &handlers{mainCtx: ctx}, port: port}, nil
}
func (s *server) Start() error {
app := fiber.New()
s.app = app
app.Use(cors.New())
app.Use(logger.New())
cmd := app.Group("command")
cmd.Post("/install", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostTerminusInit, install.New))))
cmd.Post("/uninstall", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostTerminusUninstall, uninstall.New))))
cmd.Post("/upgrade", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.RequestOlaresUpgrade, upgrade.NewCreateTarget))))
cmd.Delete("/upgrade", s.handlers.RequireSignature(
s.handlers.RunCommand(s.handlers.CancelOlaresUpgrade, upgrade.NewRemoveTarget)))
cmd.Post("/reboot", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostReboot, reboot.New))))
cmd.Post("/shutdown", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostShutdown, shutdown.New))))
cmd.Post("/connect-wifi", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostConnectWifi, connectwifi.New))))
cmd.Post("/change-host", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostChangeHost, changehost.New))))
cmd.Post("/umount-usb", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostUmountUsb, umountusb.New))))
cmd.Post("/umount-usb-incluster", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostUmountUsbInCluster, umountusb.New))))
cmd.Post("/collect-logs", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostCollectLogs, collectlogs.New))))
cmd.Post("/mount-samba", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostMountSambaDriver, mountsmb.New))))
cmd.Post("/umount-samba", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostUmountSmb, umountsmb.New))))
cmd.Post("/umount-samba-incluster", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostUmountSmbInCluster, umountsmb.New))))
cmdv2 := cmd.Group("v2")
cmdv2.Post("/mount-samba", s.handlers.RequireSignature(
s.handlers.WaitServerRunning(
s.handlers.RunCommand(s.handlers.PostMountSambaDriverV2, mountsmb.New))))
system := app.Group("system")
system.Get("/status", s.handlers.RequireSignature(s.handlers.GetTerminusState))
system.Get("/ifs", s.handlers.RequireSignature(s.handlers.GetNetIfs))
system.Get("/hosts-file", s.handlers.RequireSignature(s.handlers.GetHostsfile))
system.Post("/hosts-file", s.handlers.RequireSignature(s.handlers.PostHostsfile))
system.Get("/mounted-usb", s.handlers.RequireSignature(s.handlers.GetMountedUsb))
system.Get("/mounted-hdd", s.handlers.RequireSignature(s.handlers.GetMountedHdd))
system.Get("/mounted-smb", s.handlers.RequireSignature(s.handlers.GetMountedSmb))
system.Get("/mounted-path", s.handlers.RequireSignature(s.handlers.GetMountedPath))
system.Get("/mounted-usb-incluster", s.handlers.RequireSignature(s.handlers.GetMountedUsbInCluster))
system.Get("/mounted-hdd-incluster", s.handlers.RequireSignature(s.handlers.GetMountedHddInCluster))
system.Get("/mounted-smb-incluster", s.handlers.RequireSignature(s.handlers.GetMountedSmbInCluster))
system.Get("/mounted-path-incluster", s.handlers.RequireSignature(s.handlers.GetMountedPathInCluster))
containerd := app.Group("containerd")
containerd.Get("/registries", s.handlers.RequireSignature(s.handlers.ListRegistries))
registry := containerd.Group("registry")
mirrors := registry.Group("mirrors")
mirrors.Get("/", s.handlers.RequireSignature(s.handlers.GetRegistryMirrors))
mirrors.Get("/:registry", s.handlers.RequireSignature(s.handlers.GetRegistryMirror))
mirrors.Put("/:registry", s.handlers.RequireSignature(s.handlers.UpdateRegistryMirror))
mirrors.Delete("/:registry", s.handlers.RequireSignature(s.handlers.DeleteRegistryMirror))
image := containerd.Group("images")
image.Get("/", s.handlers.RequireSignature(s.handlers.ListImages))
image.Delete("/:image", s.handlers.RequireSignature(s.handlers.DeleteImage))
image.Post("/prune", s.handlers.RequireSignature(s.handlers.PruneImages))
return app.Listen(fmt.Sprintf(":%d", s.port))
}
func (s *server) Shutdown() error {
klog.Info("shutdown api server")
if s.app == nil {
return nil
server.API.UpdateAps = func(aplist []ble.AccessPoint) {
h.ApList = aplist
}
return s.app.Shutdown()
}
func (s *server) UpdateAps(aplist []ble.AccessPoint) {
s.handlers.apList = aplist
s := server.API
s.App.Use(cors.New())
s.App.Use(logger.New())
return s
}

View File

@@ -0,0 +1,31 @@
package server
import (
"fmt"
"github.com/beclab/Olares/daemon/internel/ble"
"github.com/gofiber/fiber/v2"
"k8s.io/klog/v2"
)
type Server struct {
Port int
App *fiber.App
UpdateAps func(aplist []ble.AccessPoint)
}
var API *Server = &Server{
App: fiber.New(),
}
func (s *Server) Start() error {
return s.App.Listen(fmt.Sprintf(":%d", s.Port))
}
func (s *Server) Shutdown() error {
klog.Info("shutdown api server")
if s.App == nil {
return nil
}
return s.App.Shutdown()
}

Some files were not shown because too many files have changed in this diff Show More