Compare commits

...

191 Commits

Author SHA1 Message Date
eball
f692d1ea02 ci: bump version to 1.12.1 2025-07-30 21:01:30 +08:00
eball
fe86ef0190 Update release.yaml 2025-07-30 20:48:40 +08:00
eball
ba13d6092b hami: sync hami-core 2.6.0 (#1646) 2025-07-30 20:05:42 +08:00
dkeven
8180024d6d fix(upgrade): split olares version update and upgrade of settings chart (#1647)
* fix(upgrade): split olares version update and upgrade of settings chart

* feat: upgrade l4-proxy image to v0.3.2

* fix: update appservice tag

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-07-30 20:04:46 +08:00
dkeven
c05f82c4bb fix(cli): truncate file before untar (#1644) 2025-07-30 13:52:30 +08:00
Calvin W.
604b2191ce docs: optimize doc structure to fix redundant or out-dated topics (#1643)
* docs: update readme structure

* revert format change

* add personal cloud image for jp

* docs: optimize toc to remove redundant content

* Revert wrong pull
2025-07-30 13:44:01 +08:00
hysyeah
518d2a41ea app-service: fix user sync error (#1639)
* app-service: fix user sync error

* fix: update app-service tag

* Update appservice_deploy.yaml

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-07-30 02:56:23 +08:00
berg
bf292e2f55 feat: update system frontend and user service version (#1642) 2025-07-30 01:15:17 +08:00
eball
896c6d76f9 cli: fix the installation in oic on Windows (#1636) 2025-07-30 01:14:00 +08:00
Calvin W.
ee4655d991 docs: update screenshots for GPU management (#1641) 2025-07-29 21:33:24 +08:00
Calvin W.
261bf0f9e0 docs: update the new upgrade process using LarePass (#1640)
* docs: update the new upgrade process using LarePass

* optimize images
2025-07-29 21:33:19 +08:00
dkeven
48776c32bf fix(upgrade): switch upgrader for 0728 to 0729 (#1638) 2025-07-29 13:51:16 +08:00
dkeven
9bbf270eb3 fix(gpu): revert buggy code change from upstream (#1637) 2025-07-29 13:50:50 +08:00
eball
bab83ed0b3 Update release-daily.yaml 2025-07-29 10:03:37 +08:00
wiy
62faa3e3b7 system-frontend,user-service: remove studio from system-frontend and fix some bugs (#1635)
* system-frontend,user-service: remove studio from system-frontend and fix some bugs

* fix: remove studio server

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-07-29 00:26:54 +08:00
hysyeah
e9346f04c1 sys-event: fix publish user resource usage error (#1634) 2025-07-29 00:25:57 +08:00
dkeven
1df5121a4e feat(upgrade): add upgrade tasks for K3s and GPU plugin (#1633)
* feat: add task to upgrade k3s

* feat: add task to upgrade GPU plugin
2025-07-29 00:25:30 +08:00
dkeven
2ecfc976d7 fix(gpu): update hami version to avoid nil pointer panic (#1632) 2025-07-29 00:24:54 +08:00
Sai
c38e00a825 fix: market service bug in 0.4.13 (#1631)
fix bugs
- after account create, chart repo miss notify
- i18n data in app data
- rebuild app info when chart repo restart
- entrances, options data
2025-07-29 00:24:21 +08:00
Calvin W.
8c801b8392 docs: update user management docs (#1630) 2025-07-29 00:23:44 +08:00
dkeven
eaceeef30b feat: upgrade Kubernetes to v1.33.3 (#1629) 2025-07-29 00:23:11 +08:00
dkeven
339b375a89 fix(upgrade): wait for user to finish activation before upgrading (#1628) 2025-07-29 00:22:33 +08:00
salt
b5b9d19bc3 feat: add producer, consumer watch (#1627)
Co-authored-by: ubuntu <you@example.com>
2025-07-27 01:53:13 +08:00
wiy
0bcb2cd893 system-frontend: fix 1.12 files some bugs (#1626)
* feat(system-frontend): release new version to fix some bugs

* fix(system-frontend): fix some system-frontend bugs
2025-07-27 01:52:44 +08:00
aby913
2b46e87baa fix(backup): nats service check (#1625) 2025-07-26 00:28:43 +08:00
hysyeah
d0351aed9c node_exporter, app-service: exclude virtual disk like qemu;use new chart repo;set values fs_type (#1624)
node_exporter: exclude virtual disk like qemu;use new chart repo;set values fs_type
2025-07-26 00:28:08 +08:00
dkeven
021338b4b7 feat(daemon): make upgrade download progress more smooth (#1623) 2025-07-25 20:49:45 +08:00
eball
d374133dd4 lldap, authelia: sign a 2fa token from lldap after larepass signed (#1622) 2025-07-25 20:48:49 +08:00
dkeven
c330589424 feat(cli): inject rootfs type to global envs (#1620) 2025-07-25 20:47:07 +08:00
dkeven
a690b5a852 fix(ci): specify arch variant in goreleaser override config (#1621) 2025-07-25 17:50:32 +08:00
dkeven
abc6bc01a6 feat(gpu): force out app exclusively bound to GPU (#1619) 2025-07-25 17:49:23 +08:00
eball
a0513a8e6f fix: self-signed certificate with a long expiration (#1614)
* fix: self-signed certificate with a long expiration

* Update auth_backend_deploy.yaml
2025-07-25 00:36:11 +08:00
wiy
b61a3233bb notification-api,user-service,system-frontend: fix notification error & fix system-frontend bugs (#1617) 2025-07-25 00:16:41 +08:00
hysyeah
5b3072dc6f app-service: fix somebug and optimize image download speed (#1616)
app-service: optimize image download speed
2025-07-25 00:16:04 +08:00
dkeven
2cc580a453 feat(daemon): add API to confirm upgrade after download (#1615) 2025-07-25 00:05:18 +08:00
Sai
3e63c3f34c feat:add dynamic chart repository (#1613)
* add dynamic chart repository

* add image info to api install

* add entrance info in app data
2025-07-25 00:04:35 +08:00
dkeven
d80fbfb5e5 fix(cli): make the timer for renewing K8s certs persistent (#1612) 2025-07-25 00:03:29 +08:00
dkeven
cea8f8bd1b refactor(ci): merge archs in olaresd's goreleaser config (#1611) 2025-07-24 18:01:52 +08:00
dkeven
7cce5ec761 fix(daemon): optimize disk space and node health check before upgrade (#1610) 2025-07-24 15:55:43 +08:00
dkeven
b705bb0814 feat(cli): download wizard from different vendor path (#1609) 2025-07-24 15:55:21 +08:00
eball
7fcfb2139b Update deps-manifest.sh 2025-07-24 12:11:04 +08:00
eball
f267639a82 Update build.sh 2025-07-24 11:59:25 +08:00
eball
42a10225cc Update build.sh 2025-07-24 11:42:29 +08:00
eball
39e3d453e2 bfl, authelia: fix internal mode and get real ip (#1608) 2025-07-24 00:53:50 +08:00
hysyeah
96334c89af app-service,user-service: fix webhook,uninstall bug; user-service nats auth error (#1607) 2025-07-24 00:52:51 +08:00
0x7fffff92
eb774e6e06 fix: make the affinity rule strict for tailscale (#1606)
fix: requires tailscale and headscale to run on the same node

Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-07-24 00:52:11 +08:00
aby913
8be967ebf3 fix(backup): wise svc upgrade (#1605) 2025-07-24 00:51:33 +08:00
yyh
8f2a98745a fix(monitoring-server): update monitoring server role management (#1604) 2025-07-24 00:51:08 +08:00
dkeven
e7303b0554 feat: upgrade containerd to v2.1.3 (#1603) 2025-07-24 00:50:35 +08:00
Peng Peng
9aee9453fc notification,user-service,wizard: update notification (#1602)
* feat(notification): update notification to support sign second verification on LarePass

* system-frontend,wizard,user-service: fix some bugs and user-service update notification

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-07-24 00:47:39 +08:00
Calvin W.
c480beb4de docs: update screenshots for install wizards and login (#1600)
* docs: update screenshots for install wizards and login

* optimize images
2025-07-23 19:31:51 +08:00
eball
8998dd48cf tapr: add the svc of postgres for the shared system app (#1599)
tapr: add the svc of the postgres for the shared system app
2025-07-23 19:31:04 +08:00
hysyeah
ed3713bd37 app-service: support v2 install;operate app by crd;user event publish to nats (#1597) 2025-07-23 00:57:30 +08:00
wiy
47bd343c6b vault-server,vault-admin,system-frontend: update system-frontend some bugs (#1596) 2025-07-23 00:56:57 +08:00
aby913
931f2992f4 fix(backup): add check disk space (#1595) 2025-07-23 00:56:27 +08:00
aby913
b3d8a2e718 fix(files): update global external data on mount (#1594) 2025-07-23 00:56:02 +08:00
eball
0e2a5d7c0e bfl: add files api routing to settings (#1593) 2025-07-23 00:55:29 +08:00
dkeven
3035453f8c feat: add vendor type to olares-cli/olaresd (#1592) 2025-07-23 00:55:01 +08:00
eball
267d92607c hami: fix null pointer in hami-core 2025-07-22 23:05:31 +08:00
dkeven
dce43cd081 fix(daemon): do not set download state again after completion (#1591) 2025-07-22 21:33:34 +08:00
salt
3826c64e48 fix: fix inotify race (#1590)
Co-authored-by: ubuntu <you@example.com>
2025-07-22 21:32:55 +08:00
dkeven
e398150e01 fix(daemon): do not clear upgrade state file when upgrading is in progress (#1589) 2025-07-22 21:32:28 +08:00
eball
694c472aad app-service: add workflow label to the namespace of workflow (#1588) 2025-07-22 01:18:18 +08:00
eball
62db7fe18a bfl: remove token auth from ingress (#1587) 2025-07-22 00:24:07 +08:00
wiy
7e1674aa77 system-frontend&files-server: release new version to fix some bugs (#1586)
* feat(system-frontend): update system-frontend new version

* fix(files): add master node info

* feat(search): update search3 version to v0.0.59

---------

Co-authored-by: aby913 <aby913@163.com>
2025-07-22 00:23:30 +08:00
hysyeah
72d804b0c9 authelia, lldap: fix login failed after reset password within one second (#1585) 2025-07-22 00:23:02 +08:00
dkeven
a91b20b7a0 feat(daemon): optimize and add new params to the upgrade feature (#1584) 2025-07-22 00:22:38 +08:00
Peng Peng
fa92825ce9 feat: add intent sub pub on market and user-service (#1583)
* feat: add intent sub pub on market and user-service

* Update system-frontend.yaml
2025-07-20 22:03:07 +08:00
dkeven
0e04f416d7 Revert "feat(ci): retrieve and save uncompressed size of layers in image manifest (#1570) (#1582)
Revert "feat(ci): retrieve and save uncompressed size of layers in image manifest (#1570)"

This reverts commit d4a1a44e39.
2025-07-20 22:02:07 +08:00
Peng Peng
e43055b0f3 feat(docs): Remove knowledge, rsshub and argo workflow information from Readme (#1581)
Update README.md
2025-07-20 22:01:34 +08:00
Peng Peng
f918614bd2 feat: add notification support (#1579)
* feat: add notification support

* Update system-frontend.yaml

* Update check.yaml

* Update check.yaml

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-07-20 00:21:26 +08:00
eball
12f19b7d46 app-service: add runAsInternal option and multi-admin supported in v1 package (#1576)
app-service: add runAsInternal option and v1 package multi-admin supported
2025-07-19 12:14:17 +08:00
berg
5c8f3ea2ff system-frontend: move socket to sharedworker and fix some ui bugs (#1578)
* feat: update system-frontend version

* feat: update version
2025-07-19 09:27:24 +08:00
aby913
9b7635f244 fix(backup): replace node name for External path (#1577) 2025-07-19 09:26:52 +08:00
dkeven
a949e317ac perf(ci): use skopeo to upload image archive to cdn (#1572) 2025-07-19 09:26:24 +08:00
0x7fffff92
f362396514 fix: nftables only for owner (#1571)
* fix: nftables only for owner

* typo

---------

Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-07-19 09:25:55 +08:00
dkeven
d4a1a44e39 feat(ci): retrieve and save uncompressed size of layers in image manifest (#1570) 2025-07-19 09:23:38 +08:00
Peng Peng
95fdffb24f feat(docs): update config to support search (#1573) 2025-07-18 21:52:25 +08:00
wiy
4c72114a4d dashboard&settings&desktop&backup&user-service: fix some bugs and add backup nats message (#1569)
* fix(backup): use nats to push messages

* feat: update user-service version and support backup nas

* dashboard&desktop&settings: fix some bugs

---------

Co-authored-by: aby913 <aby913@163.com>
Co-authored-by: icebergtsn <zyh2433219116@gmail.com>
2025-07-18 00:42:04 +08:00
dkeven
e28371551b feat(gpu): update metrics API for HAMi-WebUI and library for HAMi (#1568) 2025-07-18 00:41:21 +08:00
hysyeah
ef01c331e9 app-service, lldap: fix admin reset password;refresh token expiry (#1567) 2025-07-18 00:40:33 +08:00
dkeven
40b29d12d6 feat(ci): record cdn object file size in manifest (#1566) 2025-07-18 00:39:51 +08:00
dkeven
506bd3bc1d fix(daemon): filter out invalid IPs when getting host IP (#1563)
* fix(daemon): filter out invalid IPs when getting host IP

* fix: node ip check bug

* fix: remove log

* fix: check master node

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-07-18 00:37:38 +08:00
dkeven
9d097f77b1 feat(cli): adjust release line constraint and output order of upgrade path command (#1561)
* fix(cli): consider alpha & beta as release version on main line

* fix(cli): dont allow upgrade commands with incompatible base & cli release line

* feat(cli): support order sort option for upgrade path command
2025-07-18 00:36:57 +08:00
Calvin W.
a71b536a80 Docs: revert search provider to Algolia (#1564)
* docs: revert search mode to Algolia and fix formats

* update repo link

* update style reference page link
2025-07-17 15:22:30 +08:00
Calvin W.
8eb2d86f56 docs: fix use case link on main (#1565) 2025-07-17 15:22:06 +08:00
Calvin W.
ea0404fe2b docs: add readme for docs repo (#1562)
* docs: add readme for docs repo

* fix sentence style heading

* change search provider to local
2025-07-17 14:37:07 +08:00
Peng Peng
af8e3b172c feat: add docs to main (#1559) 2025-07-17 11:58:59 +08:00
wiy
e00018de59 system-frontend&market: release new version (#1558)
* feat: update market backend version

* feat(system-frontend): update system-frontend new version

---------

Co-authored-by: icebergtsn <zyh2433219116@gmail.com>
2025-07-17 01:06:27 +08:00
hysyeah
6bba107fdd app-service: correct entrance status;workflow install (#1557) 2025-07-16 23:45:16 +08:00
dkeven
ffb96bcbfc fix(cli): check emptiness of user and domain before write release file (#1556) 2025-07-16 23:44:34 +08:00
salt
a6e4a73af2 fix: get ctime error (#1555)
Co-authored-by: ubuntu <you@example.com>
2025-07-16 23:43:48 +08:00
dkeven
61d3dedbfd fix(cli): bump helm version to v3.18.4 (#1554) 2025-07-16 19:04:30 +08:00
hysyeah
962e251691 bfl, studio, tapr, authelia: fit new user owner role (#1551)
* bfl, studio, tapr: fit new user owner role

* fix: tailscale index

* fix: l4 proxy version
2025-07-16 01:10:05 +08:00
eball
b37adf2521 daemon: change api cors module and signature validating (#1552) 2025-07-16 00:09:07 +08:00
salt
460603ae69 fix: invalid path clean watch error (#1550)
Update search3_server_deploy.yaml
2025-07-16 00:08:33 +08:00
lovehunter9
1197860c29 fix: files sync paste dir out bug (#1549) 2025-07-16 00:07:39 +08:00
dkeven
417c4b520b fix(cli): make sure hostname resolvable before running olaresd (#1548) 2025-07-16 00:06:47 +08:00
eball
e1fa887e6c bfl: change ingress default domain (#1547) 2025-07-16 00:06:10 +08:00
dkeven
b2e84cfd21 cli(refactor): new structure for upgrade (#1546) 2025-07-16 00:05:09 +08:00
wiy
e8f0054b4f fix(user-service): config redis host error (#1545)
* fix(user-service): config redis host error

* feat(user-service): update user password
fix(wizard): Fixed the issue of sub-account activation failure
feat(vault-server): update get olares name timeout to 60s
2025-07-16 00:04:22 +08:00
eball
cd6c89f724 Update checkjws.go 2025-07-15 22:21:12 +08:00
eball
3d3d85ca3e Update checkjws.go 2025-07-15 22:05:44 +08:00
wiy
058cf31e44 system-frontend&user-service: update user-service & system-frontend new version (#1544)
* feat(user-service): update dataStore use redis

* feat(wise): remove from system-frontend
fix(settings): some bugs
fix(files): some bugs

* knowledge: remove knowledge, rss, argo

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-07-15 00:39:01 +08:00
hysyeah
72a5b2c6a2 app-service, bfl, cli, authelia,kubesphere: support create user from user cr (#1543)
* app-service, bfl, cli, authelia,kubesphere: support create user by cr

* fix: rm kubesphere-monitoring-federated ns
2025-07-14 23:48:53 +08:00
eball
f78890b01b otel: disable telemetry by default (#1542) 2025-07-14 23:48:18 +08:00
eball
13df294653 olaresd: refactor api server (#1541) 2025-07-14 23:47:55 +08:00
0x7fffff92
2af86e161a fix(headscale): Make the Affinity Rule Strict (#1540)
* fix(headscale): Make the Affinity Rule Strict

* fix(headscale): make ci happy

---------

Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-07-14 23:47:25 +08:00
aby913
ee567c270c fix(files): external delete (#1539)
* fix(files): external delete

* login & system-frontend: update login and system-frontend new version

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-07-12 00:23:59 +08:00
hysyeah
4246bcce06 fix: simplify nat permission request (#1538) 2025-07-12 00:23:10 +08:00
eball
fb73d62bd5 bfl: change unmount-api of file-server (#1537) 2025-07-12 00:22:27 +08:00
eball
209f0d15e3 authelia: send notification in user login phase (#1536)
* authelia: send notification in user login phase

* fix: set cookie nil

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-07-12 00:21:48 +08:00
dkeven
78911d44cf feat(gpu): add more metrics in GPU monitor API (#1535) 2025-07-12 00:20:41 +08:00
salt
d964c33c2d feat: Chinese uses both single-character segmentation and word segmen… (#1534)
feat: Chinese uses both single-character segmentation and word segmentation. Word segmentation is used for easier sorting.

Co-authored-by: ubuntu <you@example.com>
2025-07-11 22:00:14 +08:00
salt
2b54795e10 fix: waiting... Both uppercase and lowercase letters can be searched, include special token (#1533)
fix: Both uppercase and lowercase letters can be searched, and special characters can be searched as well.'

Co-authored-by: ubuntu <you@example.com>
2025-07-11 13:20:31 +08:00
aby913
efb4be4fcf fix(files): deletion and other fixes (#1532)
* fix(files): deletion and other fixes

* feat(files & marker): update files and market new version

* feat: update market worker count

* Update bfl_deploy.yaml

---------

Co-authored-by: qq815776412 <815776412@qq.com>
Co-authored-by: icebergtsn <zyh2433219116@gmail.com>
Co-authored-by: eball <liuy102@hotmail.com>
2025-07-11 00:35:46 +08:00
simon
89575096ba feat(knowledge): knowledge & download refactor (#1531)
* knowledge

* knowledge
2025-07-10 21:36:30 +08:00
dkeven
5edba60295 fix(cli): remove state files of olaresd when uninstalling (#1530) 2025-07-10 16:12:23 +08:00
eball
1aecc3495a ci: add a parameter of the code repository (#1529)
* ci: add a parameter of the code repository

* fix: file name bug

* refactor(cli): adjust local release command for vendor repo path

---------

Co-authored-by: dkeven <dkvvven@gmail.com>
2025-07-10 16:11:03 +08:00
salt
2d5c1fc484 feat: hybrid unigram search for title (#1528)
Co-authored-by: ubuntu <you@example.com>
2025-07-09 23:20:44 +08:00
hysyeah
81355f4a1c authelia: send login message to os.users.<olaresid> (#1527) 2025-07-09 23:20:13 +08:00
lovehunter9
2c4e9fb835 feat: seafile add support for avi, wmv, mkv, flv, rmvb (#1526) 2025-07-09 23:19:32 +08:00
dkeven
4947538e68 fix(daemon): apply filters correctly when listing users (#1525) 2025-07-09 23:18:39 +08:00
Peng Peng
21bb10b72b Revert "gpu: refactor gpu scheduler with cpp (#1475)"
This reverts commit ae3e4e6bb9.
2025-07-09 13:26:41 +08:00
wiy
8064c591f2 feat(files): files supports multiple nodes (#1524)
* feat(system-frontend): update files supports multiple nodes

* feat: add files routing gateway

* feat(media-server): support for multiple nodes

* feat(files): update files supports multiple nodes

---------

Co-authored-by: eball <liuy102@hotmail.com>
Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
Co-authored-by: aby913 <aby913@163.com>
2025-07-08 23:11:41 +08:00
Calvin W.
1073575a1d docs: add readmes for Olares components (#1522)
* docs: add readmes for Olares components

* merge with latest upstream
2025-07-08 21:34:05 +08:00
dkeven
4cf977f6df fix(ci): specify repo when checkout code for PR (#1523) 2025-07-08 17:53:46 +08:00
hysyeah
0dda3811c7 bfl, authelia, lldap: change access-token expiry duration, support refresh and revoke user token (#1521)
bfl, authelia, lldap: change access-token expiry duration and support refresh;revoke user token after reset password
2025-07-08 00:03:59 +08:00
hysyeah
2632b45fc2 bfl, app-service, system-frontend/dashboard: remove analytics (#1520)
* bfl, app-service: remove analytics

* fix(system-frontend): remove dashboard analytics

* fix(system-frontend): update system-frontend version

---------

Co-authored-by: yyh <24493052+yongheng2016@users.noreply.github.com>
2025-07-08 00:03:11 +08:00
berg
ae3f3d6a20 market: v1.12 new category and fix some bugs. (#1518)
feat: v1.12 new category and fix some bugs.
2025-07-05 00:55:37 +08:00
eball
4f3b824f48 authelia: update oidc cert (#1516) 2025-07-05 00:54:44 +08:00
hysyeah
9efa6df969 tapr: add default perm for nats subject (#1515)
fix: add default perm for nats subject
2025-07-05 00:54:01 +08:00
dkeven
045dfc11bc perf(ci): ignore more archs when releasing cli (#1514)
* perf(ci): ignore more archs when releasing cli

* Update auth_backend_deploy.yaml

---------

Co-authored-by: eball <liuy102@hotmail.com>
2025-07-04 18:45:36 +08:00
hysyeah
9913d29f81 studio-server: move studio server to os-framework (#1513) 2025-07-04 00:42:39 +08:00
berg
0ccf091aff market, settings: fix the problem of theme settings & settings apps status & market terminusInfo error (#1512)
feat: update market frontend and backend version
2025-07-04 00:41:54 +08:00
dkeven
01f3b27b8c feat(upgrade): update sysconf for specific versions (#1511) 2025-07-04 00:41:12 +08:00
dkeven
475faafec4 fix(cli): clear upgrade-related state files when uninstalling (#1510) 2025-07-03 21:01:07 +08:00
berg
31ab286a4b market, profile: fix display error in avatar selector's image list and clear market data when terminusId changed (#1509)
feat: update market frontend and backend version
2025-07-03 00:51:40 +08:00
eball
c9b4a40a1c olares: refactor installation manifest (#1508)
* olares: refactor installation manifest

* fix: file name typo

* fix: add http accept header

* fix: bug

* fix: bug

* fix: import json
2025-07-03 00:50:09 +08:00
simon
da19d00d08 fix(download): fix download task operation & reduce youtube API requests (#1507)
download
2025-07-02 21:49:49 +08:00
dkeven
49d233a55b fix(cli): also update local reserved ports when modifying sysconf (#1506) 2025-07-02 21:49:23 +08:00
dkeven
300aaa0753 fix(daemon): handle empty pid files when check process running (#1505) 2025-07-02 21:48:56 +08:00
berg
962b220440 market: add local chart upload socket event & update menu and add search function (#1504)
* fix: omit to gen entrance url before active

* feat: update market frontend and backend version

---------

Co-authored-by: hys <hysyeah@gmail.com>
2025-07-01 23:44:31 +08:00
salt
4da25bca36 fix: when need physical path, miss use frontend_resource_uri (#1500)
* fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search without result 2.when generate_monitor_folder_path_list for convert_from_physical_path_to_frontend_resource_uri not propagate error

* fix: search3 fix when need physical path miss use frontend_resource_uri

* fix: use wrong image

---------

Co-authored-by: ubuntu <you@example.com>
2025-07-01 23:32:34 +08:00
dkeven
42eff16695 feat(cli): config endpoint_pod_names in coredns when installing (#1503) 2025-07-01 20:35:42 +08:00
dkeven
450aa19dfc fix(cli): also reserve local ports for l4-proxied service (#1502) 2025-07-01 20:35:20 +08:00
eball
c750f6f85b infisical: create user error (#1501) 2025-07-01 20:33:18 +08:00
berg
bf57da0fa4 market: waiting for the app-service to start & displays the failed status of the installation button. (#1499)
feat: update market version
2025-06-30 23:57:57 +08:00
0x7fffff92
5df379f286 feat(headscale): let headscale run on the master node like l4-bfl-proxy (#1498)
Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-06-30 21:02:26 +08:00
dkeven
cfb54fb974 feat(cli): auto enable GPU when adding new node (#1497) 2025-06-30 21:02:00 +08:00
eball
9515c05bb6 bfl: do not change owner when restart (#1496) 2025-06-30 21:01:25 +08:00
dkeven
bdcd924e50 chore(cli): remove unused DeleteCache arg and module (#1495) 2025-06-30 21:01:10 +08:00
eball
e9eb218348 olaresd: refresh user expiring certs (#1493)
* feat: refresh user expiring certs

* fix: admin user not found
2025-06-30 21:00:32 +08:00
eball
9746e2c110 infisical: crash when user not found (#1492) 2025-06-30 21:00:14 +08:00
berg
27d9715292 market: multi user multi source (#1490)
* multi user & multi source & pre-render and collect image download progress & custom render variants

* support GlobalEnvs

* feat: release system-frontend: v1.3.88

* feat: app-service, studio-server

* feat: update market backend version

---------

Co-authored-by: Sai <kldtks@live.com>
Co-authored-by: hys <hysyeah@gmail.com>
2025-06-28 16:46:44 +08:00
salt
10d6c2a6fa fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search w… (#1491)
fix: 1. fix: like 'why-olares.md', if input 'why', 'olares', search without result 2.when generate_monitor_folder_path_list for convert_from_physical_path_to_frontend_resource_uri not propagate error

Co-authored-by: ubuntu <you@example.com>
2025-06-28 16:46:10 +08:00
eball
57d8a55d8d authelia: add user list api (#1489) 2025-06-27 22:07:27 +08:00
dkeven
b9a227acd7 fix(manifest): update the missed reverse proxy image version (#1488) 2025-06-27 11:27:07 +08:00
wiy
e6115794ce feat(system-frontend): update system-frontend new version to v1.3.86 (#1487) 2025-06-27 11:24:02 +08:00
dkeven
22739c90db fix(manifest): add missing app author label to argo deploy (#1486) 2025-06-27 11:23:29 +08:00
dkeven
6fac46130a perf(gpu): use our fork of dcgm-exporter with lower memory consumption (#1485) 2025-06-27 11:23:07 +08:00
simon
e19e049e7d feat(knowledge): add youtube feed and optimize the file name for aria2 download (#1481)
knowledge v0.12.12
2025-06-26 15:53:40 +08:00
wiy
1d0c20d6ad fix(system-frontend): copy nginx address error (#1484) 2025-06-26 15:16:18 +08:00
dkeven
397590d402 fix(cli): set health host of felix to lo addr explicitly (#1483) 2025-06-26 15:15:53 +08:00
hysyeah
fc1a59b79b ks,cli: remove host_ip label from some metric (#1482)
ks,cli: remove host_ip label from metric
2025-06-26 00:05:10 +08:00
eball
3dea149790 olaresd: network interface api modified and nvstream mdns bug fix (#1480) 2025-06-26 00:04:10 +08:00
0x7fffff92
9d6834faa1 feat(tailscale): let tailscale run on the node where headscale is run… (#1479)
feat(tailscale): let tailscale run on the node where headscale is running

Co-authored-by: 0x7fffff92 <0x7fffff92@example.com>
2025-06-26 00:03:51 +08:00
dkeven
bef61309a3 feat(cli): set explicit image gc policy when installing K8s (#1478) 2025-06-26 00:03:04 +08:00
salt
cf52a59ef7 feat: search3 support multiple node for cache and external, run as daemonset (#1477)
* feat: search3 support multiple node for cache and external, and search3monitor run in daemon set

* fix: fix search3 initialization fail because of not exist table __diesel_schema_migrations

---------

Co-authored-by: ubuntu <you@example.com>
2025-06-26 00:02:36 +08:00
wiy
80023be159 feat(system-frontend): merge system apps main (#1476)
* feat(system-frontend): merge apps into one image

* fix(system-frontend): update image version to v1.3.85

---------

Co-authored-by: yyh <24493052+yongheng2016@users.noreply.github.com>
2025-06-26 00:02:03 +08:00
eball
ae3e4e6bb9 gpu: refactor gpu scheduler with cpp (#1475) 2025-06-24 23:29:13 +08:00
dkeven
8c9e4d532b fix(daemon): upgrade runc dependency to fix vulnerability (#1473) 2025-06-24 21:33:43 +08:00
eball
3c48afb5b5 olares: move gpu package (#1474)
* olares: move gpu package

* fix: hami webui image
2025-06-24 21:32:37 +08:00
dkeven
3d22a01eef fix(cli): do not wait for recreation of pods without owner when changing ip (#1472) 2025-06-23 23:26:41 +08:00
eball
d6263bacca authelia: remove httponly option from set-cookie (#1471) 2025-06-23 23:25:55 +08:00
hysyeah
3b070ea095 node-exporter: add pcie_version,sata_version label for disk metric (#1470)
node-exporter: add pcie_version,sata_version label for node_disk_smartctl_info metric
2025-06-23 23:25:19 +08:00
dkeven
82b715635b feat: build and use hami-webui images using our own repo (#1469) 2025-06-23 23:24:38 +08:00
Peng Peng
1d4494c8d7 feat(user-service, notification, analytics): put prisma library under node_modules in dockers (#1468)
feat: add prisma dependency to the docker
2025-06-23 11:22:31 +08:00
simon
56f5c07229 feat(knowledge): add ebook , pdf download and article extractor (#1467)
knowledge v0.12.11
2025-06-21 02:08:19 +08:00
berg
697ac440c7 wise, studio, desktop, dashboard: update system frontend version to v1.3.82 (#1466)
feat: update system frontend version to v1.3.82
2025-06-21 02:07:58 +08:00
eball
f0edbc08a6 gpu: bump libvgpu.so version (#1465) 2025-06-20 20:31:41 +08:00
eball
001607e840 authelia: add SameSite option to set-cookie (#1464) 2025-06-20 20:31:23 +08:00
dkeven
e8f525daca refactor(daemon): new scheme for upgrade APIs and operations (#1463) 2025-06-20 20:30:46 +08:00
salt
6d6f7705c9 feat: return search3 result with standard resource_urri (#1462)
* fix: fix search3 escape error

* feat: for search return resource_uri with standard mode

---------

Co-authored-by: Ubuntu <ubuntu@localhost.localdomain>
2025-06-20 11:18:01 +08:00
wiy
46b7fa0079 feat(system-frontend): update desktop files search; update dashboard chart components; (#1461) 2025-06-20 00:27:06 +08:00
hysyeah
793a62396b lldap,system-server: pub event async; change secret ns (#1460)
lldap,system-server: pub event async
2025-06-20 00:26:44 +08:00
eball
7cb4975f5b authelia: replace http session with lldap jwt (#1459)
* authelia: replace http session with lldap jwt

* fix: remove check auth

* fix: set default configuration

* fix: revert pg and nats configuration
2025-06-20 00:26:12 +08:00
eball
bfaf647ad1 tapr, cli:add extension vchord to pg and decrease k3s image fs threshold (#1458)
* tapr, cli:add extension vchord to pg and decrease k3s image fs threshold

* fix: image tag
2025-06-19 23:18:56 +08:00
hysyeah
23d3dc58ed lldap,tapr: add totp api (#1456) 2025-06-19 00:20:18 +08:00
yyh
7bf07f36b7 feat(system-frontend): update dashboard, control hub, and settings image (#1455)
* feat(system-frontend): update dashboard, control hub, and settings images to v1.3.80

* feat(ks_server): add environment variables for NODE_IP and TERMINUSD_HOST
2025-06-19 00:19:17 +08:00
eball
7e7117fc3a cli, daemon: persist the user name to the Olares release file (#1454) 2025-06-19 00:18:38 +08:00
1396 changed files with 46445 additions and 7935 deletions

View File

@@ -3,8 +3,12 @@ name: Lint and Test Charts
on:
push:
branches: [ "main", "release-*" ]
paths-ignore:
- 'docs/**'
pull_request_target:
branches: [ "main", "release-*" ]
paths-ignore:
- 'docs/**'
workflow_dispatch:
@@ -55,7 +59,7 @@ jobs:
steps:
- id: generate
run: |
v=1.12.0-$(echo $RANDOM)
v=1.12.1-$(echo $RANDOM$RANDOM)
echo "version=$v" >> "$GITHUB_OUTPUT"
upload-cli:
@@ -65,6 +69,7 @@ jobs:
with:
version: ${{ needs.test-version.outputs.version }}
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
upload-daemon:
needs: test-version
@@ -73,6 +78,7 @@ jobs:
with:
version: ${{ needs.test-version.outputs.version }}
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
push-image:
runs-on: ubuntu-latest
@@ -97,6 +103,12 @@ jobs:
runs-on: [self-hosted, linux, ARM64]
steps:
- name: Install skopeo (Ubuntu)
run: |
sudo apt-get update
sudo apt-get install -y skopeo
- name: 'Checkout source code'
uses: actions/checkout@v3
with:
@@ -132,6 +144,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.test-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -156,6 +169,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.test-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64

View File

@@ -11,27 +11,13 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
coscmd config -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -42,28 +28,12 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
coscmd config -m 10 -p 10 -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64

View File

@@ -11,22 +11,6 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
coscmd config -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
@@ -42,23 +26,6 @@ jobs:
- name: "Checkout source code"
uses: actions/checkout@v3
- name: Install coscmd
run: pip install coscmd
- name: Configure coscmd
env:
TENCENT_SECRET_ID: ${{ secrets.TENCENT_SECRET_ID }}
TENCENT_SECRET_KEY: ${{ secrets.TENCENT_SECRET_KEY }}
COS_BUCKET: ${{ secrets.COS_BUCKET }}
COS_REGION: ${{ secrets.COS_REGION }}
END_POINT: ${{ secrets.END_POINT }}
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
coscmd config -m 10 -p 10 -a $TENCENT_SECRET_ID \
-s $TENCENT_SECRET_KEY \
-b $COS_BUCKET \
-r $COS_REGION
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View File

@@ -8,7 +8,17 @@ on:
required: true
ref:
type: string
repository:
type: string
workflow_dispatch:
inputs:
version:
type: string
required: true
ref:
type: string
repository:
type: string
jobs:
goreleaser:
runs-on: ubuntu-22.04
@@ -18,6 +28,7 @@ jobs:
with:
fetch-depth: 1
ref: ${{ inputs.ref }}
repository: ${{ inputs.repository }}
- name: Add Local Git Tag For GoReleaser
run: git tag ${{ inputs.version }}
@@ -51,6 +62,5 @@ jobs:
AWS_DEFAULT_REGION: "us-east-1"
run: |
cd cli/output && for file in *.tar.gz; do
aws s3 cp "$file" s3://terminus-os-install/$file --acl=public-read
# coscmd upload $file /$file
aws s3 cp "$file" s3://terminus-os-install${{ secrets.REPO_PATH }}${file} --acl=public-read
done

View File

@@ -8,7 +8,17 @@ on:
required: true
ref:
type: string
repository:
type: string
workflow_dispatch:
inputs:
version:
type: string
required: true
ref:
type: string
repository:
type: string
jobs:
goreleaser:
@@ -19,6 +29,7 @@ jobs:
with:
fetch-depth: 1
ref: ${{ inputs.ref }}
repository: ${{ inputs.repository }}
- name: Add Local Git Tag For GoReleaser
run: git tag ${{ inputs.version }}
@@ -54,5 +65,5 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
cd daemon/output && for file in *.tar.gz; do
aws s3 cp "$file" s3://terminus-os-install/$file --acl=public-read
aws s3 cp "$file" s3://terminus-os-install${{ secrets.REPO_PATH }}${file} --acl=public-read
done

View File

@@ -17,7 +17,7 @@ jobs:
steps:
- id: generate
run: |
v=1.12.0-$(date +"%Y%m%d")
v=1.12.1-$(date +"%Y%m%d")
echo "version=$v" >> "$GITHUB_OUTPUT"
release-cli:
@@ -77,6 +77,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.daily-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
@@ -94,6 +95,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ needs.daily-version.outputs.version }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64
@@ -121,13 +123,13 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
md5sum install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz > install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt s3://terminus-os-install/install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz s3://terminus-os-install/install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ needs.daily-version.outputs.version }}.tar.gz --acl=public-read && \
echo "md5sum=$(awk '{print $1}' install-wizard-v${{ needs.daily-version.outputs.version }}.md5sum.txt)" >> "$GITHUB_OUTPUT"
release:
needs: [daily-version, upload-package]
needs: [daily-version, upload-package, release-cli]
runs-on: ubuntu-latest
steps:

View File

@@ -59,8 +59,46 @@ jobs:
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/image-manifest.sh && bash build/upload-images.sh .manifest/images.mf linux/arm64
push-deps:
needs: [release-daemon]
runs-on: ubuntu-latest
steps:
- name: "Checkout source code"
uses: actions/checkout@v3
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ github.event.inputs.tags }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
bash build/deps-manifest.sh && bash build/upload-deps.sh
push-deps-arm64:
needs: [release-daemon]
runs-on: [self-hosted, linux, ARM64]
steps:
- name: "Checkout source code"
uses: actions/checkout@v3
# test
- env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: "us-east-1"
VERSION: ${{ github.event.inputs.tags }}
REPO_PATH: '${{ secrets.REPO_PATH }}'
run: |
export PATH=$PATH:/usr/local/bin:/home/ubuntu/.local/bin
bash build/deps-manifest.sh linux/arm64 && bash build/upload-deps.sh linux/arm64
upload-package:
needs: [push, push-arm64, release-daemon]
needs: [push, push-arm64, push-deps, push-deps-arm64, release-daemon]
runs-on: ubuntu-latest
steps:
@@ -80,12 +118,12 @@ jobs:
AWS_DEFAULT_REGION: 'us-east-1'
run: |
md5sum install-wizard-v${{ github.event.inputs.tags }}.tar.gz > install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt s3://terminus-os-install/install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.tar.gz s3://terminus-os-install/install-wizard-v${{ github.event.inputs.tags }}.tar.gz --acl=public-read
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt --acl=public-read && \
aws s3 cp install-wizard-v${{ github.event.inputs.tags }}.tar.gz s3://terminus-os-install${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.tar.gz --acl=public-read
release:
runs-on: ubuntu-latest
needs: [upload-package]
needs: [upload-package, release-cli]
steps:
- name: 'Checkout source code'
@@ -101,7 +139,7 @@ jobs:
- name: Get checksum
id: vars
run: |
echo "version_md5sum=$(curl -sSfL https://dc3p1870nn3cj.cloudfront.net/install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt|awk '{print $1}')" >> $GITHUB_OUTPUT
echo "version_md5sum=$(curl -sSfL https://dc3p1870nn3cj.cloudfront.net${{ secrets.REPO_PATH }}install-wizard-v${{ github.event.inputs.tags }}.md5sum.txt|awk '{print $1}')" >> $GITHUB_OUTPUT
- name: Update checksum
uses: eball/write-tag-to-version-file@latest

5
.gitignore vendored
View File

@@ -32,3 +32,8 @@ olares-cli-*.tar.gz
cli/output
daemon/output
daemon/bin
docs/.vitepress/dist/
docs/.vitepress/cache/
node_modules
.idea/

View File

@@ -108,20 +108,15 @@ Olares has been tested and verified on the following Linux platforms:
To get started with Olares on your own device, follow the [Getting Started Guide](https://docs.olares.com/manual/get-started/) for step-by-step instructions.
## Project navigation
> [!NOTE]
> We are currently consolidating Olares subproject code into this repository. This process may take a few months. Once finished, you will get a comprehensive view of the entire Olares system here.
This section lists the main directories in the Olares repository:
* **`apps`**: Contains the code for system applications, primarily for `larepass`.
* **`cli`**: Contains the code for `olares-cli`, the command-line interface tool for Olares.
* **`daemon`**: Contains the code for `olaresd`, the system daemon process.
* **`docs`**: Contains documentation for the project.
* **`framework`**: Contains the Olares system services.
* **`infrastructure`**: Contains code related to infrastructure components such as computing, storage, networking, and GPUs.
* **`platform`**: Contains code for cloud-native components like databases and message queues.
* **[`apps`](./apps)**: Contains the code for system applications, primarily for `larepass`.
* **[`cli`](./cli)**: Contains the code for `olares-cli`, the command-line interface tool for Olares.
* **[`daemon`](./daemon)**: Contains the code for `olaresd`, the system daemon process.
* **[`docs`](./docs)**: Contains documentation for the project.
* **[`framework`](./framework)**: Contains the Olares system services.
* **[`infrastructure`](./infrastructure)**: Contains code related to infrastructure components such as computing, storage, networking, and GPUs.
* **[`platform`](./platform)**: Contains code for cloud-native components like databases and message queues.
* **`vendor`**: Contains code from third-party hardware vendors.
## Contributing to Olares

View File

@@ -110,19 +110,15 @@ Olares 已在以下 Linux 平台完成测试与验证:
参考[快速上手指南](https://docs.olares.cn/zh/manual/get-started/)安装并激活 Olares。
## 项目目录
> [!NOTE]
> 我们正将 Olares 子项目的代码移动到当前仓库。此过程可能会持续数月。届时您就可以通过本仓库了解 Olares 系统的全貌。
Olares 代码库中的主要目录如下:
* **`apps`**: 用于存放系统应用,主要是 `larepass` 的代码。
* **`cli`**: 用于存放 `olares-cli`(Olares 的命令行界面工具)的代码。
* **`daemon`**: 用于存放 `olaresd`(系统守护进程)的代码。
* **`docs`**: 用于存放 Olares 项目的文档。
* **`framework`**: 用来存放 Olares 系统服务代码。
* **`infrastructure`**: 用于存放计算、存储、网络、GPU 等基础设施的代码。
* **`platform`**: 用于存放数据库、消息队列等云原生组件的代码。
* **[`apps`](./apps)**: 用于存放系统应用,主要是 `larepass` 的代码。
* **[`cli`](./cli)**: 用于存放 `olares-cli`(Olares 的命令行界面工具)的代码。
* **[`daemon`](./daemon)**: 用于存放 `olaresd`(系统守护进程)的代码。
* **[`docs`](./docs)**: 用于存放 Olares 项目的文档。
* **[`framework`](./framework)**: 用来存放 Olares 系统服务代码。
* **[`infrastructure`](./infrastructure)**: 用于存放计算、存储、网络、GPU 等基础设施的代码。
* **[`platform`](./platform)**: 用于存放数据库、消息队列等云原生组件的代码。
* **`vendor`**: 用于存放来自第三方硬件供应商的代码。
## 社区贡献

View File

@@ -108,20 +108,16 @@ Olaresは以下のLinuxプラットフォームで動作検証を完了してい
自分のデバイスでOlaresを始めるには、[はじめにガイド](https://docs.olares.com/manual/get-started/)に従ってステップバイステップの手順を確認してください。
## プロジェクトナビゲーション
> [!NOTE]
> 現在、Olaresのサブプロジェクトのコードを当リポジトリへ移行する作業を進めています。この作業が完了するまでには数ヶ月を要する見込みです。完了後には、当リポジトリを通じてOlaresシステムの全貌をご覧いただけるようになります。
## プロジェクトナビゲーション
このセクションでは、Olares リポジトリ内の主要なディレクトリをリストアップしています:
* **`apps`**: システムアプリケーションのコードが含まれており、主に `larepass` 用です。
* **`cli`**: Olares のコマンドラインインターフェースツールである `olares-cli` のコードが含まれています。
* **`daemon`**: システムデーモンプロセスである `olaresd` のコードが含まれています。
* **`docs`**: プロジェクトのドキュメントが含まれています。
* **`framework`**: Olares システムサービスが含まれています。
* **`infrastructure`**: コンピューティング、ストレージ、ネットワーキング、GPU などのインフラストラクチャコンポーネントに関連するコードが含まれています。
* **`platform`**: データベースやメッセージキューなどのクラウドネイティブコンポーネントのコードが含まれています。
* **[`apps`](./apps)**: システムアプリケーションのコードが含まれており、主に `larepass` 用です。
* **[`cli`](./cli)**: Olares のコマンドラインインターフェースツールである `olares-cli` のコードが含まれています。
* **[`daemon`](./daemon)**: システムデーモンプロセスである `olaresd` のコードが含まれています。
* **[`docs`](./docs)**: プロジェクトのドキュメントが含まれています。
* **[`framework`](./framework)**: Olares システムサービスが含まれています。
* **[`infrastructure`](./infrastructure)**: コンピューティング、ストレージ、ネットワーキング、GPU などのインフラストラクチャコンポーネントに関連するコードが含まれています。
* **[`platform`](./platform)**: データベースやメッセージキューなどのクラウドネイティブコンポーネントのコードが含まれています。
* **`vendor`**: サードパーティのハードウェアベンダーからのコードが含まれています。
## Olaresへの貢献

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,26 +0,0 @@
apiVersion: v2
name: appstore
description: A Helm chart for Kubernetes
maintainers:
- name: bytetrade
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "appstore.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "appstore.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "appstore.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "appstore.labels" -}}
helm.sh/chart: {{ include "appstore.chart" . }}
{{ include "appstore.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "appstore.selectorLabels" -}}
app.kubernetes.io/name: {{ include "appstore.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "appstore.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "appstore.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,353 +0,0 @@
{{- $market_secret := (lookup "v1" "Secret" .Release.Namespace "market-secrets") -}}
{{- $redis_password := "" -}}
{{ if $market_secret -}}
{{ $redis_password = (index $market_secret "data" "redis-passwords") }}
{{ else -}}
{{ $redis_password = randAlphaNum 16 | b64enc }}
{{- end -}}
{{- $market_backend_nats_secret := (lookup "v1" "Secret" .Release.Namespace "market-backend-nats-secret") -}}
{{- $nats_password := "" -}}
{{ if $market_backend_nats_secret -}}
{{ $nats_password = (index $market_backend_nats_secret "data" "nats_password") }}
{{ else -}}
{{ $nats_password = randAlphaNum 16 | b64enc }}
{{- end -}}
---
apiVersion: v1
kind: Secret
metadata:
name: market-backend-nats-secret
namespace: {{ .Release.Namespace }}
type: Opaque
data:
nats_password: {{ $nats_password }}
---
apiVersion: v1
kind: Secret
metadata:
name: market-secrets
namespace: {{ .Release.Namespace }}
type: Opaque
data:
redis-passwords: {{ $redis_password }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: market-deployment
namespace: {{ .Release.Namespace }}
labels:
app: appstore
applications.app.bytetrade.io/author: bytetrade.io
spec:
replicas: 1
selector:
matchLabels:
app: appstore
template:
metadata:
labels:
app: appstore
io.bytetrade.app: "true"
annotations:
instrumentation.opentelemetry.io/inject-go: "olares-instrumentation"
instrumentation.opentelemetry.io/go-container-names: "appstore-backend"
instrumentation.opentelemetry.io/otel-go-auto-target-exe: "/opt/app/market"
spec:
priorityClassName: "system-cluster-critical"
initContainers:
- args:
- -it
- authelia-backend.os-framework:9091
image: owncloudci/wait-for:latest
imagePullPolicy: IfNotPresent
name: check-auth
- name: terminus-sidecar-init
image: openservicemesh/init:v1.2.3
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
capabilities:
add:
- NET_ADMIN
runAsNonRoot: false
runAsUser: 0
command:
- /bin/sh
- -c
- |
iptables-restore --noflush <<EOF
# sidecar interception rules
*nat
:PROXY_IN_REDIRECT - [0:0]
:PROXY_INBOUND - [0:0]
-A PROXY_IN_REDIRECT -p tcp -j REDIRECT --to-port 15003
-A PROXY_INBOUND -p tcp --dport 15000 -j RETURN
-A PROXY_INBOUND -p tcp -j PROXY_IN_REDIRECT
-A PREROUTING -p tcp -j PROXY_INBOUND
COMMIT
EOF
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
containers:
- name: appstore-backend
image: beclab/market-backend:v0.3.12
imagePullPolicy: IfNotPresent
ports:
- containerPort: 81
env:
- name: OS_SYSTEM_SERVER
value: system-server.user-system-{{ .Values.bfl.username }}
- name: OS_APP_SECRET
value: '{{ .Values.os.appstore.appSecret }}'
- name: OS_APP_KEY
value: {{ .Values.os.appstore.appKey }}
- name: APP_SOTRE_SERVICE_SERVICE_HOST
value: appstore-server-prod.bttcdn.com
- name: MARKET_PROVIDER
value: '{{ .Values.os.appstore.marketProvider }}'
- name: APP_SOTRE_SERVICE_SERVICE_PORT
value: '443'
- name: APP_SERVICE_SERVICE_HOST
value: app-service.os-framework
- name: APP_SERVICE_SERVICE_PORT
value: '6755'
- name: REPO_URL_PORT
value: "82"
- name: REDIS_ADDRESS
value: 'redis-cluster-proxy.user-system-{{ .Values.bfl.username }}:6379'
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: market-secrets
key: redis-passwords
- name: REDIS_DB_NUMBER
value: '0'
- name: REPO_URL_HOST
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NATS_HOST
value: nats.user-system-{{ .Values.bfl.username }}
- name: NATS_PORT
value: '4222'
- name: NATS_USERNAME
value: os-market-backend
- name: NATS_PASSWORD
valueFrom:
secretKeyRef:
name: market-backend-nats-secret
key: nats_password
- name: NATS_SUBJECT_USER_APPLICATION
value: terminus.user.application.{{ .Values.bfl.username}}
volumeMounts:
- name: opt-data
mountPath: /opt/app/data
- name: terminus-envoy-sidecar
image: bytetrade/envoy:v1.25.11
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
runAsUser: 1000
ports:
- name: proxy-admin
containerPort: 15000
- name: proxy-inbound
containerPort: 15003
volumeMounts:
- name: terminus-sidecar-config
readOnly: true
mountPath: /etc/envoy/envoy.yaml
subPath: envoy.yaml
command:
- /usr/local/bin/envoy
- --log-level
- debug
- -c
- /etc/envoy/envoy.yaml
env:
- name: POD_UID
valueFrom:
fieldRef:
fieldPath: metadata.uid
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: terminus-ws-sidecar
image: 'beclab/ws-gateway:v1.0.5'
command:
- /ws-gateway
env:
- name: WS_PORT
value: '81'
- name: WS_URL
value: /app-store/v1/websocket/message
resources: { }
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
volumes:
- name: terminus-sidecar-config
configMap:
name: sidecar-ws-configs
items:
- key: envoy.yaml
path: envoy.yaml
- name: opt-data
hostPath:
path: '{{ .Values.userspace.appData}}/appstore/data'
type: DirectoryOrCreate
- name: app
emptyDir: {}
- name: nginx-confd
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: appstore-service
namespace: {{ .Release.Namespace }}
spec:
selector:
app: appstore
type: ClusterIP
ports:
- protocol: TCP
name: appstore-backend
port: 81
targetPort: 81
---
apiVersion: sys.bytetrade.io/v1alpha1
kind: ApplicationPermission
metadata:
name: appstore
namespace: user-system-{{ .Values.bfl.username }}
spec:
app: appstore
appid: appstore
key: {{ .Values.os.appstore.appKey }}
secret: {{ .Values.os.appstore.appSecret }}
permissions:
- dataType: event
group: message-disptahcer.system-server
ops:
- Create
version: v1
- dataType: app
group: service.bfl
ops:
- UserApps
version: v1
status:
state: active
---
apiVersion: sys.bytetrade.io/v1alpha1
kind: ProviderRegistry
metadata:
name: appstore-backend-provider
namespace: user-system-{{ .Values.bfl.username }}
spec:
dataType: app
deployment: market
description: app store provider
endpoint: appstore-service.{{ .Release.Namespace }}:81
group: service.appstore
kind: provider
namespace: {{ .Release.Namespace }}
opApis:
- name: InstallDevApp
uri: /app-store/v1/applications/provider/installdev
- name: UninstallDevApp
uri: /app-store/v1/applications/provider/uninstalldev
version: v1
status:
state: active
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: market-redis
namespace: {{ .Release.Namespace }}
spec:
app: market
appNamespace: {{ .Release.Namespace }}
middleware: redis
redis:
password:
valueFrom:
secretKeyRef:
key: redis-passwords
name: market-secrets
namespace: market
---
apiVersion: v1
kind: Service
metadata:
name: appstore-svc
namespace: {{ .Release.Namespace }}
spec:
type: ClusterIP
selector:
app: appstore
ports:
- name: "appstore-backend"
protocol: TCP
port: 81
targetPort: 81
- name: "appstore-websocket"
protocol: TCP
port: 40010
targetPort: 40010
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: market-backend-nats
namespace: {{ .Release.Namespace }}
spec:
app: market-backend
appNamespace: os
middleware: nats
nats:
password:
valueFrom:
secretKeyRef:
key: nats_password
name: market-backend-nats-secret
refs:
- appName: user-service
appNamespace: os
subjects:
- name: "application.*"
perm:
- pub
- sub
- appName: user-service
appNamespace: os
subjects:
- name: "market.*"
perm:
- pub
- sub
user: os-market-backend

View File

@@ -1,44 +0,0 @@
bfl:
nodeport: 30883
nodeport_ingress_http: 30083
nodeport_ingress_https: 30082
username: 'test'
url: 'test'
nodeName: test
pvc:
userspace: test
userspace:
userData: test/Home
appData: test/Data
appCache: test
dbdata: test
docs:
nodeport: 30881
desktop:
nodeport: 30180
os:
portfolio:
appKey: '${ks[0]}'
appSecret: test
vault:
appKey: '${ks[0]}'
appSecret: test
desktop:
appKey: '${ks[0]}'
appSecret: test
message:
appKey: '${ks[0]}'
appSecret: test
rss:
appKey: '${ks[0]}'
appSecret: test
search:
appKey: '${ks[0]}'
appSecret: test
search2:
appKey: '${ks[0]}'
appSecret: test
appstore:
marketProvider: ''
kubesphere:
redis_password: ""

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,26 +0,0 @@
apiVersion: v2
name: studio
description: A Terminus app development tool
maintainers:
- name: bytetrade
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "4.9.1"

Binary file not shown.

Before

Width:  |  Height:  |  Size: 749 KiB

View File

@@ -1,294 +0,0 @@
{{- $namespace := printf "%s%s" "user-system-" .Values.bfl.username -}}
{{- $studio_secret := (lookup "v1" "Secret" $namespace "studio-secrets") -}}
{{- $pg_password := "" -}}
{{ if $studio_secret -}}
{{ $pg_password = (index $studio_secret "data" "pg_password") }}
{{ else -}}
{{ $pg_password = randAlphaNum 16 | b64enc }}
{{- end -}}
---
apiVersion: v1
kind: Secret
metadata:
name: studio-secrets
namespace: user-system-{{ .Values.bfl.username }}
type: Opaque
data:
pg_password: {{ $pg_password }}
---
apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
name: studio-pg
namespace: user-system-{{ .Values.bfl.username }}
spec:
app: studio
appNamespace: {{ .Release.Namespace }}
middleware: postgres
postgreSQL:
user: studio_{{ .Values.bfl.username }}
password:
valueFrom:
secretKeyRef:
key: pg_password
name: studio-secrets
databases:
- name: studio
---
apiVersion: v1
kind: Service
metadata:
name: studio-server
namespace: {{ .Release.Namespace }}
spec:
selector:
app: studio-server
ports:
- protocol: TCP
port: 8080
targetPort: 8088
name: http
- protocol: TCP
port: 8083
targetPort: 8083
name: https
---
kind: Service
apiVersion: v1
metadata:
name: chartmuseum-studio
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8888
selector:
app: studio-server
---
apiVersion: v1
kind: ConfigMap
metadata:
name: studio-san-cnf
namespace: {{ .Release.Namespace }}
data:
san.cnf: |
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no
[req_distinguished_name]
countryName = CN
stateOrProvinceName = Beijing
localityName = Beijing
0.organizationName = bytetrade
commonName = studio-server.{{ .Release.Namespace }}.svc
[v3_req]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @bytetrade
[bytetrade]
DNS.1 = studio-server.{{ .Release.Namespace }}.svc
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: studio-server
namespace: {{ .Release.Namespace }}
labels:
app: studio-server
applications.app.bytetrade.io/author: bytetrade.io
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: studio-server
template:
metadata:
labels:
app: studio-server
spec:
serviceAccountName: bytetrade-controller
volumes:
- name: chart
hostPath:
type: DirectoryOrCreate
path: '{{ .Values.userspace.appData}}/studio/Chart'
- name: data
hostPath:
type: DirectoryOrCreate
path: '{{ .Values.userspace.appData }}/studio/Data'
- name: storage-volume
hostPath:
path: '{{ .Values.userspace.appData }}/studio/helm-repo-dev'
type: DirectoryOrCreate
- name: config-san
configMap:
name: studio-san-cnf
items:
- key: san.cnf
path: san.cnf
- name: certs
emptyDir: {}
initContainers:
- name: init-chmod-data
image: busybox:1.28
imagePullPolicy: IfNotPresent
command:
- sh
- '-c'
- |
chown -R 1000:1000 /home/coder
chown -R 65532:65532 /charts
chown -R 65532:65532 /data
securityContext:
runAsUser: 0
resources: { }
volumeMounts:
- name: storage-volume
mountPath: /home/coder
- name: chart
mountPath: /charts
- name: data
mountPath: /data
- name: generate-certs
image: beclab/openssl:v3
imagePullPolicy: IfNotPresent
command: [ "/bin/sh", "-c" ]
args:
- |
openssl genrsa -out /etc/certs/ca.key 2048
openssl req -new -x509 -days 3650 -key /etc/certs/ca.key -out /etc/certs/ca.crt \
-subj "/CN=bytetrade CA/O=bytetrade/C=CN"
openssl req -new -newkey rsa:2048 -nodes \
-keyout /etc/certs/server.key -out /etc/certs/server.csr \
-config /etc/san/san.cnf
openssl x509 -req -days 3650 -in /etc/certs/server.csr \
-CA /etc/certs/ca.crt -CAkey /etc/certs/ca.key \
-CAcreateserial -out /etc/certs/server.crt \
-extensions v3_req -extfile /etc/san/san.cnf
chown -R 65532 /etc/certs/*
volumeMounts:
- name: config-san
mountPath: /etc/san
- name: certs
mountPath: /etc/certs
containers:
- name: studio
image: beclab/studio-server:v0.1.51
imagePullPolicy: IfNotPresent
args:
- server
ports:
- name: port
containerPort: 8088
protocol: TCP
- name: ssl-port
containerPort: 8083
protocol: TCP
volumeMounts:
- name: chart
mountPath: /charts
- name: data
mountPath: /data
- mountPath: /etc/certs
name: certs
lifecycle:
preStop:
exec:
command:
- "/studio"
- "clean"
env:
- name: BASE_DIR
value: /charts
- name: OS_API_KEY
value: {{ .Values.os.studio.appKey }}
- name: OS_API_SECRET
value: {{ .Values.os.studio.appSecret }}
- name: OS_SYSTEM_SERVER
value: system-server.user-system-{{ .Values.bfl.username }}
- name: NAME_SPACE
value: {{ .Release.Namespace }}
- name: OWNER
value: '{{ .Values.bfl.username }}'
- name: DB_HOST
value: citus-master-svc.user-system-{{ .Values.bfl.username }}
- name: DB_USERNAME
value: studio_{{ .Values.bfl.username }}
- name: DB_PASSWORD
value: "{{ $pg_password | b64dec }}"
- name: DB_NAME
value: user_space_{{ .Values.bfl.username }}_studio
- name: DB_PORT
value: "5432"
resources:
requests:
cpu: "50m"
memory: 100Mi
limits:
cpu: "0.5"
memory: 1000Mi
- name: chartmuseum
image: aboveos/helm-chartmuseum:v0.15.0
args:
- '--port=8888'
- '--storage-local-rootdir=/storage'
ports:
- name: http
containerPort: 8888
protocol: TCP
env:
- name: CHART_POST_FORM_FIELD_NAME
value: chart
- name: DISABLE_API
value: 'false'
- name: LOG_JSON
value: 'true'
- name: PROV_POST_FORM_FIELD_NAME
value: prov
- name: STORAGE
value: local
resources:
requests:
cpu: "50m"
memory: 100Mi
limits:
cpu: 1000m
memory: 512Mi
volumeMounts:
- name: storage-volume
mountPath: /storage
livenessProbe:
httpGet:
path: /health
port: http
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: http
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3

View File

@@ -1,42 +0,0 @@
bfl:
nodeport: 30883
nodeport_ingress_http: 30083
nodeport_ingress_https: 30082
username: 'test'
url: 'test'
nodeName: test
pvc:
userspace: test
userspace:
userData: test/Home
appData: test/Data
appCache: test
dbdata: test
docs:
nodeport: 30881
desktop:
nodeport: 30180
os:
portfolio:
appKey: '${ks[0]}'
appSecret: test
vault:
appKey: '${ks[0]}'
appSecret: test
desktop:
appKey: '${ks[0]}'
appSecret: test
message:
appKey: '${ks[0]}'
appSecret: test
rss:
appKey: '${ks[0]}'
appSecret: test
search:
appKey: '${ks[0]}'
appSecret: test
studio:
appKey: '${ks[0]}'
appSecret: test
kubesphere:
redis_password: ""

View File

@@ -29,7 +29,7 @@ spec:
containers:
- name: wizard
image: beclab/wizard:v1.3.57
image: beclab/wizard:v1.3.111
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80

View File

@@ -0,0 +1,20 @@
# Olares Apps
## Overview
This directory contains the code for system applications, primarily for LarePass. The following are the pre-installed system applications that offer tools for managing files, knowledge, passwords, and the system itself.
## System Applications Overview
| Application | Description |
| --- | --- |
| Files | A file management app that manages and synchronizes files across devices and sources, enabling seamless sharing and access. |
| Wise | A local-first and AI-native modern reader that helps to collect, read, and manage information from various platforms. Users can run self-hosted recommendation algorithms to filter and sort online content. |
| Vault | A secure password manager for storing and managing sensitive information across devices. |
| Market | A decentralized and permissionless app store for installing, uninstalling, and updating applications and recommendation algorithms. |
| Desktop | A hub for managing and interacting with installed applications. File and application searching are also supported. |
| Profile | An app to customize the user's profile page. |
| Settings | A system configuration application. |
| Dashboard | An app for monitoring system resource usage. |
| Control Hub | The console for Olares, providing precise and autonomous control over the system and its environment. |
| DevBox | A development tool for building and deploying Olares applications. |

View File

@@ -10,7 +10,7 @@ function command_exists() {
if [[ x"$VERSION" == x"" ]]; then
if [[ "$LOCAL_RELEASE" == "1" ]]; then
ts=$(date +%Y%m%d%H%M%S)
export VERSION="1.12.0-$ts"
export VERSION="1.12.1-$ts"
echo "will build and use a local release of Olares with version: $VERSION"
echo ""
else
@@ -20,7 +20,7 @@ fi
if [[ "x${VERSION}" == "x" || "x${VERSION:3}" == "xVERSION__" ]]; then
echo "error: Olares version is unspecified, please set the VERSION env var and rerun this script."
echo "for example: VERSION=1.12.0-20241124 bash $0"
echo "for example: VERSION=1.12.1-20241124 bash $0"
exit 1
fi

View File

@@ -149,7 +149,7 @@ export VERSION="#__VERSION__"
if [[ "x${VERSION}" == "x" || "x${VERSION:3}" == "xVERSION__" ]]; then
echo "error: Olares version is unspecified, please set the VERSION env var and rerun this script."
echo "for example: VERSION=1.12.0-20241124 bash $0"
echo "for example: VERSION=1.12.1-20241124 bash $0"
exit 1
fi

View File

@@ -1,2 +1,2 @@
upgrade:
minVersion: 1.12.0-1
minVersion: 1.12.1-0

View File

@@ -6,7 +6,7 @@ metadata:
annotations:
iam.kubesphere.io/uninitialized: "true"
helm.sh/resource-policy: keep
bytetrade.io/owner-role: platform-admin
bytetrade.io/owner-role: owner
bytetrade.io/terminus-name: "{{.Values.user.terminus_name}}"
bytetrade.io/launcher-auth-policy: two_factor
bytetrade.io/launcher-access-level: "1"
@@ -23,4 +23,4 @@ spec:
groups:
- lldap_admin
status:
state: Active
state: Created

View File

@@ -76,7 +76,7 @@ Create the name of the service account to use
{{- $caCertEnc = index $prevSecret "data" "ca.crt" }}
{{- else }}
{{- $altNames := list ( printf "%s-webhook.%s" (include "opentelemetry-operator.fullname" .) .Release.Namespace ) ( printf "%s-webhook.%s.svc" (include "opentelemetry-operator.fullname" .) .Release.Namespace ) -}}
{{- $tmpperioddays := 3650 }}
{{- $tmpperioddays := 36500 }}
{{- $ca := genCA "opentelemetry-operator-operator-ca" $tmpperioddays }}
{{- $cert := genSignedCert (include "opentelemetry-operator.fullname" .) nil $altNames $tmpperioddays $ca }}
{{- $certCrtEnc = b64enc $cert.Cert }}

View File

@@ -1,12 +1,14 @@
# chart-testing: ignore-file
{{ $cluster_id := randAlphaNum 16 }}
{{ $version := "#__VERSION__" }}
{{ if .Values.cluster_id }}
{{ $cluster_id = .Values.cluster_id }}
{{ end }}
{{ $cr := (lookup "sys.bytetrade.io/v1alpha1" "terminus.sys.bytetrade.io" "" "terminus") }}
{{ $cr := (lookup "sys.bytetrade.io/v1alpha1" "Terminus" "" "terminus") }}
{{ if $cr }}
{{ $cluster_id = (index $cr "metadata" "labels" "bytetrade.io/cluster-id") }}
{{ $version = (index $cr "spec" "version") }}
{{ end }}
---
@@ -22,7 +24,7 @@ metadata:
bytetrade.io/s3-sk: '{{ .Values.s3_sk }}'
spec:
name: terminus-1
version: #__VERSION__
version: {{ .Values.version | default $version }}
display: Terminus One
releaseServer:
serverType: github

View File

@@ -24,6 +24,7 @@ cp ${BASE_DIR}/.dependencies/components ${BASE_DIR}/.manifest/.
cp ${BASE_DIR}/.dependencies/components ${BASE_DIR}/.manifest/.
pushd ${BASE_DIR}.manifest
bash ${BASE_DIR}/build-manifest.sh ${BASE_DIR}/../.manifest/installation.manifest
python3 ${BASE_DIR}/build-manifest.py ${BASE_DIR}/../.manifest/installation.manifest
popd

187
build/build-manifest.py Normal file
View File

@@ -0,0 +1,187 @@
#!/usr/bin/env python3
import argparse
import hashlib
import os
import requests
import sys
import json
CDN_URL = "https://dc3p1870nn3cj.cloudfront.net"
def get_file_size(objectid, fileid):
    """Return the size in bytes of the CDN object `objectid`, via an HTTP HEAD.

    `fileid` is used only in diagnostic messages. Exits the process on any
    HTTP failure or when the server omits the Content-Length header.
    """
    url = f"{CDN_URL}/{objectid}"
    try:
        head = requests.head(url)
        head.raise_for_status()
    except requests.RequestException as e:
        print(f"Error getting file size for {fileid} from {url}: {e}", file=sys.stderr)
        sys.exit(1)
    size = head.headers.get('Content-Length')
    if not size:
        print(f"Content-Length header missing for {fileid} from {url}", file=sys.stderr)
        sys.exit(1)
    return int(size)
def download_checksum(name):
    """Fetch `<name>.checksum.txt` from the CDN and return its first token (the digest)."""
    url = f"{CDN_URL}/{name}.checksum.txt"
    try:
        resp = requests.get(url)
        resp.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"Error getting checksum for {name} from {url}: {e}", file=sys.stderr)
        sys.exit(1)
    return resp.text.split()[0]
def get_image_manifest(name):
    """Fetch `<name>.manifest.json` from the CDN and return it as parsed JSON."""
    url = f"{CDN_URL}/{name}.manifest.json"
    try:
        resp = requests.get(url)
        resp.raise_for_status()
        # .json() stays inside the try: a malformed body raises a
        # requests JSONDecodeError, which is a RequestException subclass.
        return resp.json()
    except requests.exceptions.RequestException as e:
        print(f"Error getting manifest for {name} from {url}: {e}", file=sys.stderr)
        sys.exit(1)
def main():
    """Build per-architecture installation manifest files.

    Reads the `components` file (comma-separated: filename,path,deps,<unused>,fileid)
    and the `images.mf` image list from the current working directory. For each
    entry it derives the CDN object name (md5 of the logical name), downloads the
    object's checksum and size (and, for images, its OCI manifest), then writes
    the aggregated data as JSON to `<manifest_file>.amd64` and
    `<manifest_file>.arm64`.

    Environment variables:
        VERSION   -- substituted for the `#__VERSION__` placeholder.
        REPO_PATH -- substituted for the `#__REPO_PATH__` placeholder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("manifest_file", help="The manifest file to write to.")
    args = parser.parse_args()
    manifest_file = args.manifest_file
    version = os.environ.get("VERSION", "")
    repo_path = os.environ.get("REPO_PATH", "/")
    manifest_amd64_data = {}
    manifest_arm64_data = {}
    # Process components
    try:
        with open("components", "r") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                # Replace version
                if version:
                    line = line.replace("#__VERSION__", version)
                # Replace repo path
                if repo_path:
                    line = line.replace("#__REPO_PATH__", repo_path)
                fields = line.split(",")
                if len(fields) < 5:
                    print(f"Format error in components file: {line}", file=sys.stderr)
                    sys.exit(1)
                filename, path, deps, _, fileid = fields[:5]
                # BUG FIX: this f-string had no placeholder and always printed
                # a constant string; report the actual component name.
                print(f"Downloading file checksum for {filename}")
                # CDN object names are the md5 of the logical file name;
                # arm64 artifacts live under the "arm64/" prefix.
                name = hashlib.md5(filename.encode()).hexdigest()
                url_amd64 = name
                url_arm64 = f"arm64/{name}"
                checksum_amd64 = download_checksum(url_amd64)
                checksum_arm64 = download_checksum(url_arm64)
                file_size_amd64 = get_file_size(url_amd64, fileid)
                file_size_arm64 = get_file_size(url_arm64, fileid)
                manifest_amd64_data[filename] = {
                    "type": "component",
                    "path": path,
                    "deps": deps,
                    "url_amd64": url_amd64,
                    "checksum_amd64": checksum_amd64,
                    "fileid": fileid,
                    "size": file_size_amd64,
                }
                manifest_arm64_data[filename] = {
                    "type": "component",
                    "path": path,
                    "deps": deps,
                    "url_arm64": url_arm64,
                    "checksum_arm64": checksum_arm64,
                    "fileid": fileid,
                    "size": file_size_arm64,
                }
    except FileNotFoundError:
        print("Error: 'components' file not found.", file=sys.stderr)
        sys.exit(1)
    # Process images
    path = "images"
    for deps_file in ["images.mf"]:
        try:
            with open(deps_file, "r") as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    print(f"Downloading file checksum for {line}")
                    name = hashlib.md5(line.encode()).hexdigest()
                    url_amd64 = f"{name}.tar.gz"
                    url_arm64 = f"arm64/{name}.tar.gz"
                    checksum_amd64 = download_checksum(name)
                    checksum_arm64 = download_checksum(f"arm64/{name}")
                    file_size_amd64 = get_file_size(url_amd64, line)
                    file_size_arm64 = get_file_size(url_arm64, line)
                    # Get the image manifest
                    image_manifest_amd64 = get_image_manifest(name)
                    image_manifest_arm64 = get_image_manifest(f"arm64/{name}")
                    filename = f"{name}.tar.gz"
                    manifest_amd64_data[filename] = {
                        "type": "image",
                        "path": path,
                        "deps": deps_file,
                        "url_amd64": url_amd64,
                        "checksum_amd64": checksum_amd64,
                        "fileid": line,
                        "size": file_size_amd64,
                        "manifest": image_manifest_amd64
                    }
                    manifest_arm64_data[filename] = {
                        "type": "image",
                        "path": path,
                        "deps": deps_file,
                        "url_arm64": url_arm64,
                        "checksum_arm64": checksum_arm64,
                        "fileid": line,
                        "size": file_size_arm64,
                        "manifest": image_manifest_arm64
                    }
        except FileNotFoundError:
            # BUG FIX: the old message said "Warning: ... skipping" but the
            # code has always aborted here; make the message match behavior.
            print(f"Error: '{deps_file}' not found.", file=sys.stderr)
            sys.exit(1)
    # Write the manifest file
    amd64_manifest_file = f"{manifest_file}.amd64"
    with open(amd64_manifest_file, "w") as mf:
        json.dump(manifest_amd64_data, mf, indent=2)
    arm64_manifest_file = f"{manifest_file}.arm64"
    with open(arm64_manifest_file, "w") as mf:
        json.dump(manifest_arm64_data, mf, indent=2)
    # TODO: compress the manifest files


if __name__ == "__main__":
    main()

View File

@@ -46,6 +46,9 @@ while read line; do
done < components
sed -i "s/#__VERSION__/${VERSION}/g" $manifest_file
path="${REPO_PATH:-/}"
sed -i "s|#__REPO_PATH__|${path}|g" $manifest_file
path="images"
for deps in "images.mf"; do
while read line; do

View File

@@ -5,6 +5,22 @@ rm -rf ${BASE_DIR}/../.dist
DIST_PATH="${BASE_DIR}/../.dist/install-wizard"
export VERSION=$1
# vendor replace
if [[ "${REPO_PATH}" != "" && "$REPO_PATH" != "/" ]]; then
path="vendor${REPO_PATH}"
echo "replace vendor path: ${path}"
find ${BASE_DIR}/../$path -type f | while read l;
do
file=$(awk -F "$path" '{print $1$2}' <<< "$l")
if [[ "$file" != ".gitkeep" ]]; then
echo "replace [$file] with [$l]"
cp -f "$l" "$file"
fi
done
fi
DIST_PATH=${DIST_PATH} bash ${BASE_DIR}/package.sh
bash ${BASE_DIR}/image-manifest.sh
@@ -16,6 +32,7 @@ rm -rf ${BASE_DIR}/../.dependencies
set -e
pushd ${BASE_DIR}/../.manifest
bash ${BASE_DIR}/build-manifest.sh ${BASE_DIR}/../.manifest/installation.manifest
python3 ${BASE_DIR}/build-manifest.py ${BASE_DIR}/../.manifest/installation.manifest
popd
pushd $DIST_PATH
@@ -42,7 +59,6 @@ else
VERSION="debug"
fi
$TAR --exclude=wizard/tools --exclude=.git -zcvf ${BASE_DIR}/../install-wizard-${VERSION}.tar.gz .
popd

View File

@@ -20,6 +20,21 @@ function get_key(){
fi
}
# vendor replace
if [[ "${REPO_PATH}" != "" && "$REPO_PATH" != "/" ]]; then
path="vendor${REPO_PATH}"
echo "replace vendor path: ${path}"
find ${BASE_DIR}/../$path -type f | while read l;
do
file=$(awk -F "$path" '{print $1$2}' <<< "$l")
if [[ "$file" != ".gitkeep" ]]; then
echo "replace [$file] with [$l]"
cp -f "$l" "$file"
fi
done
fi
find $BASE_DIR/../ -type f -name Olares.yaml | while read f; do
echo "Processing $f"
declare -a bins
@@ -77,3 +92,5 @@ find $BASE_DIR/../ -type f -name Olares.yaml | while read f; do
done
sed -i "s/#__VERSION__/${VERSION}/g" ${manifest}
path="${REPO_PATH:-/}"
sed -i "s|#__REPO_PATH__|${path}|g" ${manifest}

200
build/get-manifest.py Normal file
View File

@@ -0,0 +1,200 @@
#!/usr/bin/env python3
import requests
import json
import argparse
import re
import sys
import platform
def parse_image_name(image_name):
    """
    Parses a full image name into registry, repository, and reference (tag/digest).
    Handles defaults for Docker Hub.

    Returns a (registry, repository, reference) tuple, e.g.
    "ubuntu" -> ("registry-1.docker.io", "library/ubuntu", "latest").
    """
    if "@" in image_name:
        # Digest-pinned reference, e.g. "ubuntu@sha256:...".
        repo_part, reference = image_name.rsplit("@", 1)
    else:
        # Default to the 'latest' tag when the last path component has no tag.
        # BUG FIX: the previous check (":" anywhere in the full name) mis-parsed
        # untagged images on port-qualified registries, e.g. "localhost:5000/foo"
        # was split into repo "localhost" and tag "5000/foo".
        if ":" not in image_name.rsplit("/", 1)[-1]:
            image_name += ":latest"
        repo_part, reference = image_name.rsplit(":", 1)
    # Determine registry and repository
    if "/" not in repo_part:
        # This is an official Docker Hub image, e.g., "ubuntu"
        registry = "registry-1.docker.io"
        repository = f"library/{repo_part}"
    else:
        parts = repo_part.split("/")
        # If the first part looks like a domain name, it's the registry
        if "." in parts[0] or ":" in parts[0]:
            registry = parts[0]
            repository = "/".join(parts[1:])
        else:
            # A scoped Docker Hub image, e.g., "bitnami/nginx"
            registry = "registry-1.docker.io"
            repository = repo_part
    return registry, repository, reference
def get_auth_token(registry, repository):
    """Obtain a pull-scoped bearer token for `repository` from `registry`.

    Probes the registry's /v2/ endpoint; when it answers 401, follows the
    Www-Authenticate challenge to the auth realm and returns the token.
    Returns None when the registry does not demand authentication.
    Exits the process on any unrecoverable error.
    """
    probe_url = f"https://{registry}/v2/"
    try:
        probe = requests.get(probe_url, timeout=10)
    except requests.exceptions.RequestException as e:
        print(f"Error: Could not connect to registry at {probe_url}. Details: {e}", file=sys.stderr)
        sys.exit(1)
    if probe.status_code != 401:
        # Either public or something is wrong, we can try without a token
        return None
    challenge = probe.headers.get("Www-Authenticate")
    if not challenge:
        print(f"Error: Registry {registry} returned 401 but did not provide Www-Authenticate header.", file=sys.stderr)
        sys.exit(1)
    # Parse the Www-Authenticate header to find realm, service, and scope
    try:
        realm = re.search('realm="([^"]+)"', challenge).group(1)
        service = re.search('service="([^"]+)"', challenge).group(1)
        # Scope for the specific repository is needed
        scope = f"repository:{repository}:pull"
    except AttributeError:
        print(f"Error: Could not parse Www-Authenticate header: {challenge}", file=sys.stderr)
        sys.exit(1)
    # Request the actual token from the auth realm
    try:
        token_resp = requests.get(realm, params={"service": service, "scope": scope}, timeout=10)
        token_resp.raise_for_status()
        return token_resp.json().get("token")
    except requests.exceptions.RequestException as e:
        print(f"Error: Failed to get auth token from {realm}. Details: {e}", file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Failed to decode JSON response from auth server: {token_resp.text}", file=sys.stderr)
        sys.exit(1)
def get_manifest(registry, repository, reference, token):
    """Fetch the image manifest for `repository`@`reference` from `registry`.

    Sends an Accept header covering the common OCI/Docker manifest media
    types. On a 401 without a token, retries once after acquiring one.
    Exits the process on any other failure.
    """
    manifest_url = f"https://{registry}/v2/{repository}/manifests/{reference}"
    headers = {
        # Request multiple manifest types, the registry will return the correct one
        "Accept": "application/vnd.oci.image.index.v1+json, application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json"
    }
    if token:
        headers["Authorization"] = f"Bearer {token}"
    try:
        resp = requests.get(manifest_url, headers=headers, timeout=10)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 401 and not token:
            print("Error: Received 401 Unauthorized. Attempting to get a token...", file=sys.stderr)
            # The initial probe might have passed, but manifest access requires auth.
            # We re-run the token acquisition logic.
            retry_token = get_auth_token(registry, repository)
            if retry_token:
                return get_manifest(registry, repository, reference, retry_token)
        print(f"Error: Failed to fetch manifest from {manifest_url}. Status: {e.response.status_code}", file=sys.stderr)
        print(f"Response: {e.response.text}", file=sys.stderr)
        sys.exit(1)
    except requests.exceptions.RequestException as e:
        print(f"Error: A network error occurred. Details: {e}", file=sys.stderr)
        sys.exit(1)
def main():
    """CLI entry point: resolve an image reference and emit its manifest.

    Multi-platform indexes are resolved to the manifest matching the host's
    linux/<arch> platform. The JSON goes to --output-file when given,
    otherwise to stdout (informational chatter is suppressed in file mode).
    """
    parser = argparse.ArgumentParser(
        description="Fetch an OCI/Docker image manifest from a container registry.",
        epilog="""Examples:
python get_manifest.py ubuntu:22.04
python get_manifest.py quay.io/brancz/kube-rbac-proxy:v0.18.1 -o manifest.json
python get_manifest.py gcr.io/google-containers/pause:3.9""",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument("image_name", help="Full name of the container image (e.g., 'ubuntu:latest' or 'quay.io/prometheus/node-exporter:v1.7.0')")
    parser.add_argument("-o", "--output-file", help="Optional. Path to write the final manifest JSON to. If not provided, prints to stdout.")
    args = parser.parse_args()
    registry, repository, reference = parse_image_name(args.image_name)
    # Suppress informational prints if writing to a file
    verbose_print = print if not args.output_file else lambda *a, **k: None
    verbose_print(f"Registry: {registry}")
    verbose_print(f"Repository: {repository}")
    verbose_print(f"Reference: {reference}", end='\n\n', flush=True)
    token = get_auth_token(registry, repository)
    if not token and not args.output_file:
        print("No authentication token needed or could be retrieved. Proceeding without token...", file=sys.stderr)
    manifest = get_manifest(registry, repository, reference, token)
    media_type = manifest.get("mediaType", "")
    if "manifest.list" in media_type or "image.index" in media_type:
        verbose_print("Detected a multi-platform image index. Finding manifest for current architecture...")
        system_arch = platform.machine()
        target_arch = {"x86_64": "amd64", "aarch64": "arm64"}.get(system_arch, system_arch)
        verbose_print(f"System architecture: {system_arch} -> Target: linux/{target_arch}")
        target_digest = None
        for entry in manifest.get("manifests", []):
            plat = entry.get("platform", {})
            if plat.get("os") == "linux" and plat.get("architecture") == target_arch:
                target_digest = entry.get("digest")
                break
        if target_digest is None:
            print(f"Error: Could not find a manifest for 'linux/{target_arch}' in the index.", file=sys.stderr)
            if not args.output_file:
                print("Available platforms:", file=sys.stderr)
                for entry in manifest.get("manifests", []):
                    print(f" - {entry.get('platform', {}).get('os')}/{entry.get('platform', {}).get('architecture')}", file=sys.stderr)
            sys.exit(1)
        verbose_print(f"Found manifest for linux/{target_arch} with digest: {target_digest}\n")
        resolved = get_manifest(registry, repository, target_digest, token)
    else:
        resolved = manifest
    if resolved:
        if args.output_file:
            try:
                with open(args.output_file, 'w') as f:
                    json.dump(resolved, f, indent=2)
                print(f"Successfully wrote manifest to {args.output_file}")
            except IOError as e:
                print(f"Error: Could not write to file {args.output_file}. Details: {e}", file=sys.stderr)
                sys.exit(1)
        else:
            print(json.dumps(resolved, indent=2))


if __name__ == "__main__":
    main()

View File

@@ -74,6 +74,6 @@ echo "packaging launcher ..."
run_cmd "cp -rf framework/bfl/.olares/config/launcher ${DIST}/wizard/config/"
echo "packaging gpu ..."
run_cmd "cp -rf framework/gpu/.olares/config/gpu ${DIST}/wizard/config/"
run_cmd "cp -rf infrastructure/gpu/.olares/config/gpu ${DIST}/wizard/config/"
echo "packaging completed"

View File

@@ -23,26 +23,28 @@ while read line; do
continue
fi
bash ${BASE_DIR}/download-deps.sh $PLATFORM $line
if [ $? -ne 0 ]; then
exit -1
fi
filename=$(echo "$line"|awk -F"," '{print $1}')
echo "if exists $filename ... "
name=$(echo -n "$filename"|md5sum|awk '{print $1}')
checksum="$name.checksum.txt"
md5sum $name > $checksum
backup_file=$(awk '{print $1}' $checksum)
if [ x"$backup_file" == x"" ]; then
echo "invalid checksum"
exit 1
fi
echo "if exists $filename ... "
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$name > /dev/null
if [ $? -ne 0 ]; then
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$name.tar.gz)
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$name)
if [ $code -eq 403 ]; then
bash ${BASE_DIR}/download-deps.sh $PLATFORM $line
if [ $? -ne 0 ]; then
exit -1
fi
md5sum $name > $checksum
backup_file=$(awk '{print $1}' $checksum)
if [ x"$backup_file" == x"" ]; then
echo "invalid checksum"
exit 1
fi
set -ex
aws s3 cp $name s3://terminus-os-install/$path$name --acl=public-read
aws s3 cp $name s3://terminus-os-install/backup/$path$backup_file --acl=public-read

View File

@@ -10,14 +10,14 @@ cat $1|while read image; do
echo "if exists $image ... "
name=$(echo -n "$image"|md5sum|awk '{print $1}')
checksum="$name.checksum.txt"
manifest="$name.manifest.json"
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$name.tar.gz > /dev/null
if [ $? -ne 0 ]; then
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$name.tar.gz)
if [ $code -eq 403 ]; then
set -ex
docker pull $image
docker save $image -o $name.tar
skopeo copy --insecure-policy docker://$image oci-archive:$name.tar
gzip $name.tar
md5sum $name.tar.gz > $checksum
@@ -50,8 +50,7 @@ cat $1|while read image; do
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$checksum)
if [ $code -eq 403 ]; then
set -ex
docker pull $image
docker save $image -o $name.tar
skopeo copy --insecure-policy docker://$image oci-archive:$name.tar
gzip $name.tar
md5sum $name.tar.gz > $checksum
@@ -68,48 +67,29 @@ cat $1|while read image; do
set +ex
else
if [ $code -ne 200 ]; then
echo "failed to check image"
echo "failed to check image checksum"
exit -1
fi
fi
fi
# upload to tencent cloud cos
# curl -fsSLI https://cdn.joinolares.cn/$path$name.tar.gz > /dev/null
# if [ $? -ne 0 ]; then
# set -e
# docker pull $image
# docker save $image -o $name.tar
# gzip $name.tar
# md5sum $name.tar.gz > $checksum
# coscmd upload ./$name.tar.gz /$path$name.tar.gz
# coscmd upload ./$checksum /$path$checksum
# echo "upload $name to cos completed"
# set +e
# fi
# # re-upload checksum.txt
# curl -fsSLI https://cdn.joinolares.cn/$path$checksum > /dev/null
# if [ $? -ne 0 ]; then
# set -e
# docker pull $image
# docker save $image -o $name.tar
# gzip $name.tar
# md5sum $name.tar.gz > $checksum
# coscmd upload ./$name.tar.gz /$path$name.tar.gz
# coscmd upload ./$checksum /$path$checksum
# echo "upload $name to cos completed"
# set +e
# fi
# upload manifest.json
curl -fsSLI https://dc3p1870nn3cj.cloudfront.net/$path$manifest > /dev/null
if [ $? -ne 0 ]; then
code=$(curl -o /dev/null -fsSLI -w "%{http_code}" https://dc3p1870nn3cj.cloudfront.net/$path$manifest)
if [ $code -eq 403 ]; then
set -ex
BASE_DIR=$(dirname $(realpath -s $0))
python3 $BASE_DIR/get-manifest.py $image -o $manifest
aws s3 cp $manifest s3://terminus-os-install/$path$manifest --acl=public-read
echo "upload $name manifest completed"
set +ex
else
if [ $code -ne 200 ]; then
echo "failed to check image manifest"
exit -1
fi
fi
fi
done

View File

@@ -17,12 +17,24 @@ builds:
ignore:
- goos: darwin
goarch: arm
- goos: darwin
goarch: amd64
- goos: windows
goarch: arm
- goos: windows
goarch: arm64
ldflags:
- -s
- -w
- -X github.com/beclab/Olares/cli/version.VERSION={{ .Version }}
- >-
{{- if index .Env "OLARES_VENDOR_TYPE" }}
-X github.com/beclab/Olares/cli/version.VENDOR={{ .Env.OLARES_VENDOR_TYPE }}
{{- end }}
- >-
{{- if index .Env "OLARES_VENDOR_REPO_PATH" }}
-X github.com/beclab/Olares/cli/version.VENDOR_REPO_PATH={{ .Env.OLARES_VENDOR_REPO_PATH }}
{{- end }}
dist: ./output
archives:
- id: olares-cli

View File

@@ -1 +1,92 @@
# installer
# Olares CLI
This directory contains the code for **olares-cli**, the official command-line interface for administering an **Olares** cluster. It provides a modular, pipeline-based architecture for orchestrating complex system operations. See the full [Olares CLI Documentation](https://docs.olares.com/developer/install/cli-1.12/olares-cli.html) for command reference and tutorials.
Key responsibilities include:
- **Cluster management**: Installing, upgrading, restarting, and maintaining an Olares cluster.
- **Node management**: Adding to or removing nodes from an Olares cluster.
## Execution Model
For most of the commands, `olares-cli` is executed through a four-tier hierarchy:
```
Pipeline ➜ Module ➜ Task ➜ Action
```
### Example: `install-olares` Pipeline
```text
Pipeline: Install Olares
├── ...other modules
└── Module: Bootstrap OS
├── ...other tasks
├── Task: Check Prerequisites
│ └── Action: run-precheck.sh
└── Task: Configure System
└── Action: apply-sysctl
```
## Repository layout
```text
cli/
├── cmd/ # Cobra command definitions
│ ├── main.go # CLI entry point
│ └── ctl/
│ ├── root.go
│ ├── os/ # OS-level maintenance commands
│ ├── node/ # Cluster node operations
│ └── gpu/ # GPU management
└── pkg/
├── core/
│ ├── action/ # Re-usable action primitives
│ ├── module/ # Module abstractions
│ ├── pipeline/ # Pipeline abstractions
│ └── task/ # Task abstractions
└── pipelines/ # Pre-built pipelines
│ ├── ... # actual modules and tasks for various commands and components
```
## Build from source
### Prerequisites
* **Go 1.24+**
* **GoReleaser** (optional, for cross-compiling and packaging)
### Sample commands
```bash
# Clone the repo and enter the CLI folder
cd cli
# 1) Build for the host OS/ARCH
go build -o olares-cli ./cmd/main.go
# 2) Cross-compile for Linux amd64 (from macOS, for example)
GOOS=linux GOARCH=amd64 go build -o olares-cli ./cmd/main.go
# 3) Produce multi-platform artifacts (tar.gz, checksums, etc.)
goreleaser release --snapshot --clean
```
---
## Development workflow
### Add a new command
1. Create the command file in `cmd/ctl/<category>/`.
2. Define a pipeline in `pkg/pipelines/`.
3. Implement modules & tasks inside the relevant `pkg/` sub-packages.
### Test your build
1. Upload the self-built `olares-cli` binary to a machine that's running Olares.
2. Replace the existing `olares-cli` binary on the machine using `sudo cp -f olares-cli /usr/local/bin`.
3. Execute arbitrary commands using `olares-cli`

View File

@@ -10,6 +10,7 @@ type CliDownloadWizardOptions struct {
KubeType string
BaseDir string
DownloadCdnUrl string
UrlOverride string
}
func NewCliDownloadWizardOptions() *CliDownloadWizardOptions {
@@ -21,6 +22,7 @@ func (o *CliDownloadWizardOptions) AddFlags(cmd *cobra.Command) {
cmd.Flags().StringVarP(&o.BaseDir, "base-dir", "b", "", "Set Olares package base dir, defaults to $HOME/"+cc.DefaultBaseDir)
cmd.Flags().StringVar(&o.KubeType, "kube", "k3s", "Set kube type, e.g., k3s or k8s")
cmd.Flags().StringVar(&o.DownloadCdnUrl, "download-cdn-url", "", "Set the CDN accelerated download address in the format https://example.cdn.com. If not set, the default download address will be used")
cmd.Flags().StringVar(&o.UrlOverride, "url-override", "", "Set another URL for wizard download explicitly")
}
type CliDownloadOptions struct {

View File

@@ -49,7 +49,7 @@ func NewCmdRelease() *cobra.Command {
}
if version == "" {
version = fmt.Sprintf("1.12.0-%s", time.Now().Format("20060102150405"))
version = fmt.Sprintf("1.12.1-%s", time.Now().Format("20060102150405"))
fmt.Printf("--version unspecified, using: %s\n", version)
time.Sleep(1 * time.Second)
}

View File

@@ -1,11 +1,18 @@
package os
import (
"log"
"encoding/json"
"fmt"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/cmd/ctl/options"
"github.com/beclab/Olares/cli/pkg/phase"
"github.com/beclab/Olares/cli/pkg/pipelines"
"github.com/beclab/Olares/cli/pkg/upgrade"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"log"
"os"
"slices"
)
type UpgradeOsOptions struct {
@@ -31,6 +38,49 @@ func NewCmdUpgradeOs() *cobra.Command {
}
o.UpgradeOptions.AddFlags(cmd)
cmd.AddCommand(NewCmdUpgradePrecheck())
cmd.AddCommand(NewCmdGetUpgradePath())
return cmd
}
// NewCmdGetUpgradePath creates the "path" subcommand, which prints, as an
// indented JSON array on stdout, the chain of intermediate versions required
// to upgrade from a base version to the latest version known to this release
// of olares-cli. The base version defaults to the running cluster's version.
func NewCmdGetUpgradePath() *cobra.Command {
	var baseVersionStr string
	var latestFirst bool
	cmd := &cobra.Command{
		Use:   "path",
		Short: "Get the upgrade path (required intermediate versions) from base version to the latest upgradable version (as known to this release of olares-cli)",
		RunE: func(cmd *cobra.Command, args []string) error {
			var baseVersion *semver.Version
			var err error
			if baseVersionStr == "" {
				baseVersionStr, err = phase.GetOlaresVersion()
				if err != nil {
					return errors.New("failed to get current Olares version, please specify the base version explicitly")
				}
			}
			baseVersion, err = semver.NewVersion(baseVersionStr)
			if err != nil {
				return fmt.Errorf("invalid base version: %v", err)
			}
			path, err := upgrade.GetUpgradePathFor(baseVersion, nil)
			if err != nil {
				// BUG FIX: report the error on stderr instead of stdout —
				// stdout carries the JSON output and may be piped into
				// other tools; the exit code is preserved.
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			// Versions are returned oldest-first; reverse on request.
			if latestFirst {
				slices.Reverse(path)
			}
			encoder := json.NewEncoder(cmd.OutOrStdout())
			encoder.SetIndent("", " ")
			return encoder.Encode(path)
		},
	}
	cmd.Flags().StringVarP(&baseVersionStr, "base-version", "b", baseVersionStr, "base version to be upgraded, defaults to the current Olares version if inside Olares cluster")
	cmd.Flags().BoolVar(&latestFirst, "latest-first", true, "sort versions to put recent ones in the front")
	return cmd
}

View File

@@ -10,12 +10,22 @@ import (
)
func NewDefaultCommand() *cobra.Command {
var showVendor bool
cmds := &cobra.Command{
Use: "olares-cli",
Short: "Olares Installer",
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
Version: version.VERSION,
Run: func(cmd *cobra.Command, args []string) {
if showVendor {
println(version.VENDOR)
} else {
cmd.Usage()
}
return
},
}
cmds.Flags().BoolVar(&showVendor, "vendor", false, "show the vendor type of olares-cli")
cmds.AddCommand(osinfo.NewCmdInfo())
cmds.AddCommand(os.NewOSCommands()...)

View File

@@ -22,6 +22,7 @@ require (
github.com/containerd/containerd v1.7.27
github.com/decentralized-identity/web5-go v0.25.0
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
github.com/distribution/reference v0.6.0
github.com/dominodatalab/os-release v0.0.0-20190522011736-bcdb4a3e3c2f
github.com/go-playground/validator/v10 v10.22.0
github.com/google/uuid v1.6.0
@@ -46,16 +47,16 @@ require (
github.com/stretchr/testify v1.10.0
github.com/syndtr/goleveldb v1.0.0
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.37.0
golang.org/x/term v0.31.0
golang.org/x/text v0.24.0
golang.org/x/crypto v0.39.0
golang.org/x/term v0.32.0
golang.org/x/text v0.26.0
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.18.1
k8s.io/api v0.33.0
k8s.io/apimachinery v0.33.0
k8s.io/cli-runtime v0.33.0
k8s.io/client-go v0.33.0
k8s.io/kubectl v0.33.0
helm.sh/helm/v3 v3.18.4
k8s.io/api v0.33.2
k8s.io/apimachinery v0.33.2
k8s.io/cli-runtime v0.33.2
k8s.io/client-go v0.33.2
k8s.io/kubectl v0.33.2
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.21.0
sigs.k8s.io/kustomize/kyaml v0.19.0
@@ -93,7 +94,6 @@ require (
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
@@ -191,9 +191,9 @@ require (
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.28.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/time v0.9.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
@@ -202,13 +202,13 @@ require (
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/apiextensions-apiserver v0.33.0 // indirect
k8s.io/apiserver v0.33.0 // indirect
k8s.io/component-base v0.33.0 // indirect
k8s.io/component-helpers v0.33.0 // indirect
k8s.io/apiextensions-apiserver v0.33.2 // indirect
k8s.io/apiserver v0.33.2 // indirect
k8s.io/component-base v0.33.2 // indirect
k8s.io/component-helpers v0.33.2 // indirect
k8s.io/cri-api v0.27.1 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
oras.land/oras-go/v2 v2.5.0 // indirect
oras.land/oras-go/v2 v2.6.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/kustomize/api v0.19.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect

View File

@@ -538,8 +538,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -549,8 +549,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -564,8 +564,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
@@ -575,8 +575,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -602,14 +602,14 @@ golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -622,8 +622,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -678,38 +678,38 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
helm.sh/helm/v3 v3.18.1 h1:qLhXmtqXOHQb0Xv9HJolOLlah8RWbgyzt50xrtTWAlg=
helm.sh/helm/v3 v3.18.1/go.mod h1:43QHS1W97RcoFJRk36ZBhHdTfykqBlJdsWp3yhzdq8w=
helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ=
helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs=
k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc=
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
k8s.io/cli-runtime v0.33.0 h1:Lbl/pq/1o8BaIuyn+aVLdEPHVN665tBAXUePs8wjX7c=
k8s.io/cli-runtime v0.33.0/go.mod h1:QcA+r43HeUM9jXFJx7A+yiTPfCooau/iCcP1wQh4NFw=
k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
k8s.io/component-helpers v0.33.0 h1:0AdW0A0mIgljLgtG0hJDdJl52PPqTrtMgOgtm/9i/Ys=
k8s.io/component-helpers v0.33.0/go.mod h1:9SRiXfLldPw9lEEuSsapMtvT8j/h1JyFFapbtybwKvU=
k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY=
k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs=
k8s.io/apiextensions-apiserver v0.33.2 h1:6gnkIbngnaUflR3XwE1mCefN3YS8yTD631JXQhsU6M8=
k8s.io/apiextensions-apiserver v0.33.2/go.mod h1:IvVanieYsEHJImTKXGP6XCOjTwv2LUMos0YWc9O+QP8=
k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY=
k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/apiserver v0.33.2 h1:KGTRbxn2wJagJowo29kKBp4TchpO1DRO3g+dB/KOJN4=
k8s.io/apiserver v0.33.2/go.mod h1:9qday04wEAMLPWWo9AwqCZSiIn3OYSZacDyu/AcoM/M=
k8s.io/cli-runtime v0.33.2 h1:koNYQKSDdq5AExa/RDudXMhhtFasEg48KLS2KSAU74Y=
k8s.io/cli-runtime v0.33.2/go.mod h1:gnhsAWpovqf1Zj5YRRBBU7PFsRc6NkEkwYNQE+mXL88=
k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E=
k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo=
k8s.io/component-base v0.33.2 h1:sCCsn9s/dG3ZrQTX/Us0/Sx2R0G5kwa0wbZFYoVp/+0=
k8s.io/component-base v0.33.2/go.mod h1:/41uw9wKzuelhN+u+/C59ixxf4tYQKW7p32ddkYNe2k=
k8s.io/component-helpers v0.33.2 h1:AjCtYzst11NV8ensxV/2LEEXRwctqS7Bs44bje9Qcnw=
k8s.io/component-helpers v0.33.2/go.mod h1:PsPpiCk74n8pGWp1d6kjK/iSKBTyQfIacv02BNkMenU=
k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q=
k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kubectl v0.33.0 h1:HiRb1yqibBSCqic4pRZP+viiOBAnIdwYDpzUFejs07g=
k8s.io/kubectl v0.33.0/go.mod h1:gAlGBuS1Jq1fYZ9AjGWbI/5Vk3M/VW2DK4g10Fpyn/0=
k8s.io/kubectl v0.33.2 h1:7XKZ6DYCklu5MZQzJe+CkCjoGZwD1wWl7t/FxzhMz7Y=
k8s.io/kubectl v0.33.2/go.mod h1:8rC67FB8tVTYraovAGNi/idWIK90z2CHFNMmGJZJ3KI=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=

View File

@@ -60,7 +60,7 @@ echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767,46800-50000' >> /etc/sysctl.conf
echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
@@ -84,7 +84,7 @@ sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /et
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-iptables ?= ?(0|1)@net.bridge.bridge-nf-call-iptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767,46800-50000@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.max_map_count ?= ?([0-9]{1,})@vm.max_map_count = 262144@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.inotify.max_user_instances ?= ?([0-9]{1,})@fs.inotify.max_user_instances = 524288@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.pid_max ?= ?([0-9]{1,})@kernel.pid_max = 65535@g" /etc/sysctl.conf

View File

@@ -40,6 +40,7 @@ Description=Timer to renew K8S control plane certificates
[Timer]
OnCalendar=Mon *-*-* 03:00:00
Unit=k8s-certs-renew.service
Persistent=true
[Install]
WantedBy=multi-user.target
`)))

View File

@@ -23,8 +23,8 @@ import (
)
const (
DefaultK8sVersion = "v1.32.2"
DefaultK3sVersion = "v1.32.2-k3s"
DefaultK8sVersion = "v1.33.3"
DefaultK3sVersion = "v1.33.3-k3s"
DefaultKubernetesVersion = ""
DefaultKubeSphereVersion = "v3.3.0"
DefaultTokenMaxAge = 31536000
@@ -265,7 +265,7 @@ const (
CacheAppServicePod = "app_service_pod_name"
CacheAppValues = "app_built_in_values"
CacheCountPodsUsingHostIP = "count_pods_using_host_ip"
CacheCountPodsWaitForRecreation = "count_pods_wait_for_recreation"
CacheUpgradeUsers = "upgrade_users"
CacheUpgradeAdminUser = "upgrade_admin_user"

View File

@@ -73,7 +73,6 @@ type Argument struct {
ImagesDir string `json:"images_dir"`
Namespace string `json:"namespace"`
DeleteCRI bool `json:"delete_cri"`
DeleteCache bool `json:"delete_cache"`
Role string `json:"role"`
Type string `json:"type"`
Kubetype string `json:"kube_type"`
@@ -257,13 +256,14 @@ type Frp struct {
}
func NewArgument() *Argument {
si := connector.GetSystemInfo()
arg := &Argument{
KsEnable: true,
KsVersion: DefaultKubeSphereVersion,
InstallPackages: false,
SKipPushImages: false,
ContainerManager: Containerd,
SystemInfo: connector.GetSystemInfo(),
SystemInfo: si,
Storage: &Storage{
StorageType: ManagedMinIO,
},
@@ -287,6 +287,7 @@ func NewArgument() *Argument {
arg.IsCloudInstance, _ = strconv.ParseBool(os.Getenv(ENV_TERMINUS_IS_CLOUD_VERSION))
arg.PublicNetworkInfo.PubliclyAccessible, _ = strconv.ParseBool(os.Getenv(ENV_PUBLICLY_ACCESSIBLE))
arg.IsOlaresInContainer = os.Getenv("CONTAINER_MODE") == "oic"
si.IsOIC = arg.IsOlaresInContainer
if err := arg.LoadReleaseInfo(); err != nil {
fmt.Printf("error loading release info: %v", err)
@@ -322,10 +323,26 @@ func (a *Argument) SaveReleaseInfo() error {
if a.OlaresVersion == "" {
return errors.New("invalid: empty olares version")
}
releaseInfoMap := map[string]string{
ENV_OLARES_BASE_DIR: a.BaseDir,
ENV_OLARES_VERSION: a.OlaresVersion,
}
if a.User != nil && a.User.UserName != "" && a.User.DomainName != "" {
releaseInfoMap["OLARES_NAME"] = fmt.Sprintf("%s@%s", a.User.UserName, a.User.DomainName)
} else {
if util.IsExist(OlaresReleaseFile) {
// if the user is not set, try to load the user name from the release file
envs, err := godotenv.Read(OlaresReleaseFile)
if err == nil {
if userName, ok := envs["OLARES_NAME"]; ok {
releaseInfoMap["OLARES_NAME"] = userName
}
}
}
}
if !util.IsExist(filepath.Dir(OlaresReleaseFile)) {
if err := os.MkdirAll(filepath.Dir(OlaresReleaseFile), 0755); err != nil {
return fmt.Errorf("failed to create directory %s: %v", filepath.Dir(OlaresReleaseFile), err)
@@ -395,10 +412,6 @@ func (a *Argument) SetRegistryMirrors(registryMirrors string) {
a.RegistryMirrors = registryMirrors
}
func (a *Argument) SetDeleteCache(deleteCache bool) {
a.DeleteCache = deleteCache
}
func (a *Argument) SetDeleteCRI(deleteCRI bool) {
a.DeleteCRI = deleteCRI
}

View File

@@ -1,17 +1,16 @@
package common
const (
NamespaceDefault = "default"
NamespaceKubeNodeLease = "kube-node-lease"
NamespaceKubePublic = "kube-public"
NamespaceKubeSystem = "kube-system"
NamespaceKubekeySystem = "kubekey-system"
NamespaceKubesphereControlsSystem = "kubesphere-controls-system"
NamespaceKubesphereMonitoringFederated = "kubesphere-monitoring-federated"
NamespaceKubesphereMonitoringSystem = "kubesphere-monitoring-system"
NamespaceKubesphereSystem = "kubesphere-system"
NamespaceOsFramework = "os-framework"
NamespaceOsPlatform = "os-platform"
NamespaceDefault = "default"
NamespaceKubeNodeLease = "kube-node-lease"
NamespaceKubePublic = "kube-public"
NamespaceKubeSystem = "kube-system"
NamespaceKubekeySystem = "kubekey-system"
NamespaceKubesphereControlsSystem = "kubesphere-controls-system"
NamespaceKubesphereMonitoringSystem = "kubesphere-monitoring-system"
NamespaceKubesphereSystem = "kubesphere-system"
NamespaceOsFramework = "os-framework"
NamespaceOsPlatform = "os-platform"
ChartNameRedis = "redis"
ChartNameSnapshotController = "snapshot-controller"

View File

@@ -404,3 +404,14 @@ func (t *KillContainerdProcess) Execute(runtime connector.Runtime) error {
return nil
}
// RestartContainerd is a KubeAction that restarts the containerd service
// on the target host via systemd.
type RestartContainerd struct {
common.KubeAction
}
// Execute runs `systemctl restart containerd` with sudo and wraps any
// failure with context and a stack trace; it returns nil on success.
func (t *RestartContainerd) Execute(runtime connector.Runtime) error {
// NOTE(review): the two bool args to SudoCmd presumably control output
// printing and error logging — confirm against the Runner API.
if _, err := runtime.GetRunner().SudoCmd("systemctl restart containerd", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to restart containerd")
}
return nil
}

View File

@@ -29,151 +29,27 @@ root = {{ .DataRoot }}
{{ else }}
root = "/var/lib/containerd"
{{- end }}
state = "/run/containerd"
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[ttrpc]
address = ""
uid = 0
gid = 0
[timeouts]
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "10010"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
sandbox_image = "{{ .SandBoxImage }}"
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
max_conf_num = 1
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "{{ .FsType }}"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
runtime_type = 'io.containerd.runc.v2'
sandboxer = 'podsandbox'
snapshotter = "{{ .FsType }}"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
{{- if .Mirrors }}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
@@ -201,84 +77,6 @@ state = "/run/containerd"
{{- end}}
{{- end}}
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = "{{ .ZfsRootPath }}"
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
`)))

View File

@@ -114,6 +114,7 @@ type SystemInfo struct {
LocalIp string `json:"local_ip"`
NatGateway string `json:"nat_gateway"`
PkgManager string `json:"pkg_manager"`
IsOIC bool `json:"is_oic,omitempty"`
}
func (s *SystemInfo) IsSupport() error {
@@ -217,7 +218,7 @@ func (s *SystemInfo) IsPveOrPveLxc() bool {
}
func (s *SystemInfo) IsWsl() bool {
return s.HostInfo.OsPlatform == common.WSL
return s.HostInfo.OsPlatform == common.WSL && !s.IsOIC
}
func (s *SystemInfo) IsRaspbian() bool {

View File

@@ -344,7 +344,7 @@ func Untar(src, dst string) error {
}
}
file, err := os.OpenFile(dstPath, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode))
file, err := os.OpenFile(dstPath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(hdr.Mode))
if err != nil {
return err
}

View File

@@ -92,6 +92,12 @@ func (i *InstallTerminusdBinaryModule) Init() {
i.Name = "InstallOlaresdBinaryModule"
i.Desc = "Install olaresd"
updateHost := &task.LocalTask{
Name: "UpdateHosts",
Action: new(terminus.UpdateKubeKeyHosts),
Prepare: new(HostnameNotResolvable),
}
install := &task.RemoteTask{
Name: "InstallOlaresdBinary",
Desc: "Install olaresd using binary",
@@ -134,6 +140,7 @@ func (i *InstallTerminusdBinaryModule) Init() {
}
i.Tasks = []task.Interface{
updateHost,
install,
generateEnv,
generateService,

View File

@@ -0,0 +1,21 @@
package daemon
import (
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/core/connector"
"net"
)
// HostnameNotResolvable is a KubePrepare gate that reports whether the
// host's own hostname fails to resolve to its local IP address.
type HostnameNotResolvable struct {
common.KubePrepare
}
// PreCheck resolves the runtime host's hostname via DNS/hosts lookup and
// returns false (skip the guarded task) if any resolved IPv4 address equals
// the host's local IP; otherwise it returns true (run the guarded task).
// A lookup error is deliberately ignored: no resolved IPs means the
// hostname is treated as not resolvable, so the check returns true.
func (p *HostnameNotResolvable) PreCheck(runtime connector.Runtime) (bool, error) {
ips, _ := net.LookupIP(runtime.GetSystemInfo().GetHostname())
for _, ip := range ips {
// Only IPv4 addresses are compared; To4() is nil for pure IPv6.
if ip.To4() != nil && ip.To4().String() == runtime.GetSystemInfo().GetLocalIp() {
return false, nil
}
}
return true, nil
}

View File

@@ -133,8 +133,11 @@ type DisableTerminusdService struct {
}
func (s *DisableTerminusdService) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("systemctl disable --now olaresd", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "disable olaresd failed")
stdout, _ := runtime.GetRunner().SudoCmd("systemctl is-active olaresd", false, false)
if stdout == "active" {
if _, err := runtime.GetRunner().SudoCmd("systemctl disable --now olaresd", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "disable olaresd failed")
}
}
return nil
}
@@ -144,10 +147,18 @@ type UninstallTerminusd struct {
}
func (r *UninstallTerminusd) Execute(runtime connector.Runtime) error {
var olaresdFiles []string
svcpath := filepath.Join("/etc/systemd/system", templates.TerminusdService.Name())
svcenvpath := filepath.Join("/etc/systemd/system", templates.TerminusdEnv.Name())
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("rm -rf %s && rm -rf %s && rm -rf /usr/local/bin/olaresd", svcpath, svcenvpath), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "remove olaresd failed")
binPath := "/usr/local/bin/olaresd"
olaresdFiles = append(olaresdFiles, svcpath, svcenvpath, binPath)
for _, pidFile := range []string{"installing.pid", "changingip.pid"} {
olaresdFiles = append(olaresdFiles, filepath.Join(runtime.GetBaseDir(), pidFile))
}
for _, f := range olaresdFiles {
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("rm -rf %s", f), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "remove olaresd failed")
}
}
return nil
}

View File

@@ -21,9 +21,11 @@ import (
_ "compress/bzip2"
"context"
"fmt"
cliversion "github.com/beclab/Olares/cli/version"
"io"
"math"
"net/http"
"net/url"
"os"
"os/exec"
"path"
@@ -330,7 +332,7 @@ func NewKubeBinary(name, arch, osType, osVersion, osPlatformFamily, version, pre
case installwizard:
component.Type = WIZARD
component.FileName = fmt.Sprintf("install-wizard-v%s.tar.gz", version)
component.Url = fmt.Sprintf("%s/install-wizard-v%s.tar.gz", component.getDownloadMirrors(downloadMirrors), version)
component.Url, _ = url.JoinPath(component.getDownloadMirrors(downloadMirrors), cliversion.VENDOR_REPO_PATH, fmt.Sprintf("install-wizard-v%s.tar.gz", version))
component.CheckSum = false
component.BaseDir = filepath.Join(prePath, fmt.Sprintf("v%s", version))
case cudakeyring: // + gpu

View File

@@ -1,6 +1,7 @@
package gpu
import (
"github.com/beclab/Olares/cli/pkg/container"
"time"
"github.com/beclab/Olares/cli/pkg/common"
@@ -174,7 +175,7 @@ func (m *RestartContainerdModule) Init() {
Prepare: &prepare.PrepareCollection{
new(ContainerdInstalled),
},
Action: new(RestartContainerd),
Action: new(container.RestartContainerd),
Parallel: false,
Retry: 1,
}
@@ -263,30 +264,25 @@ type NodeLabelingModule struct {
func (l *NodeLabelingModule) Init() {
l.Name = "NodeLabeling"
updateNode := &task.RemoteTask{
Name: "UpdateNode",
Hosts: l.Runtime.GetHostsByRole(common.Master),
updateNode := &task.LocalTask{
Name: "UpdateNode",
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(CudaInstalled),
new(K8sNodeInstalled),
},
Action: new(UpdateNodeLabels),
Parallel: false,
Retry: 1,
Action: new(UpdateNodeLabels),
Retry: 1,
}
restartPlugin := &task.RemoteTask{
Name: "RestartPlugin",
Hosts: l.Runtime.GetHostsByRole(common.Master),
restartPlugin := &task.LocalTask{
Name: "RestartPlugin",
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(CudaInstalled),
new(K8sNodeInstalled),
},
Action: new(RestartPlugin),
Parallel: false,
Retry: 1,
Action: new(RestartPlugin),
Retry: 1,
}
l.Tasks = []task.Interface{

View File

@@ -290,24 +290,13 @@ type ConfigureContainerdRuntime struct {
}
func (t *ConfigureContainerdRuntime) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("nvidia-ctk runtime configure --runtime=containerd --set-as-default --config-source=command", false, true); err != nil {
if _, err := runtime.GetRunner().SudoCmd("nvidia-ctk runtime configure --runtime=containerd --set-as-default --config-source=file", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to nvidia-ctk runtime configure")
}
return nil
}
type RestartContainerd struct {
common.KubeAction
}
func (t *RestartContainerd) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("systemctl restart containerd", false, true); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to restart containerd")
}
return nil
}
type InstallPlugin struct {
common.KubeAction
}

View File

@@ -2,20 +2,19 @@ package images
import (
"fmt"
"github.com/distribution/reference"
"os"
"path/filepath"
"strings"
"time"
"github.com/containerd/containerd/pkg/cri/labels"
"github.com/containerd/containerd/reference/docker"
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/core/cache"
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/manifest"
"github.com/beclab/Olares/cli/pkg/utils"
"github.com/containerd/containerd/pkg/cri/labels"
)
const MAX_IMPORT_RETRY int = 5
@@ -120,8 +119,13 @@ func (t *LoadImages) Execute(runtime connector.Runtime) (reserr error) {
case "crio":
loadCmd = "ctr" // not implement
case "containerd":
parsedRef, err := reference.ParseNormalizedNamed(imageRepoTag)
if err != nil {
logger.Warnf("parse image name %s error: %v, skip importing", imageRepoTag, err)
continue
}
if HasSuffixI(imgFileName, ".tar.gz", ".tgz") {
loadCmd = fmt.Sprintf("gunzip -c %s | ctr -n k8s.io images import %s -", imageFileName, loadParm)
loadCmd = fmt.Sprintf("gunzip -c %s | ctr -n k8s.io images import --index-name %s %s -", imageFileName, parsedRef, loadParm)
} else {
loadCmd = fmt.Sprintf("ctr -n k8s.io images import %s %s", imageFileName, loadParm)
}
@@ -157,12 +161,15 @@ func (a *PinImages) Execute(runtime connector.Runtime) error {
return nil
}
for _, ref := range manifests {
parsedRef, err := docker.ParseNormalizedNamed(ref)
parsedRef, err := reference.ParseNormalizedNamed(ref)
if err != nil {
logger.Warnf("parse image name %s error: %v, skip pinning", ref, err)
continue
}
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("ctr -n k8s.io i label %s %s=%s", parsedRef.String(), labels.PinnedImageLabelKey, labels.PinnedImageLabelValue), false, false); err != nil {
if strings.Contains(err.Error(), "DEPRECATION") {
continue
}
// tolerate cases where some images are not found
// e.g., like in the cloud environment and some images are not in the ami
logger.Warnf("pin image %s error: %v", parsedRef.String(), err)

View File

@@ -195,11 +195,13 @@ func (g *GenerateK3sService) Execute(runtime connector.Runtime) error {
defaultKubeletArs := map[string]string{
"kube-reserved": "cpu=200m,memory=250Mi,ephemeral-storage=1Gi",
"system-reserved": "cpu=200m,memory=250Mi,ephemeral-storage=1Gi",
"eviction-hard": "memory.available<5%,nodefs.available<10%",
"eviction-hard": "memory.available<5%,nodefs.available<10%,imagefs.available<10%",
"config": "/etc/rancher/k3s/kubelet.config",
"containerd": container.DefaultContainerdCRISocket,
"cgroup-driver": "systemd",
"runtime-request-timeout": "5m",
"image-gc-high-threshold": "91",
"image-gc-low-threshold": "90",
}
defaultKubeProxyArgs := map[string]string{
"proxy-mode": "ipvs",

View File

@@ -42,5 +42,6 @@ K3S_KUBECONFIG_MODE=644
K3S_TOKEN={{ .Token }}
{{ end }}
GODEBUG=netdns=go
CATTLE_NEW_SIGNED_CERT_EXPIRATION_DAYS=36500
`)))

View File

@@ -307,6 +307,8 @@ func GetKubeletConfiguration(runtime connector.Runtime, kubeConf *common.KubeCon
"evictionPressureTransitionPeriod": "30s",
"featureGates": FeatureGatesDefaultConfiguration,
"runtimeRequestTimeout": "5m",
"imageGCHighThresholdPercent": 91,
"imageGCLowThresholdPercent": 90,
}
if securityEnhancement {

View File

@@ -47,24 +47,6 @@ func (m *DeleteKubeSphereCachesModule) Init() {
}
}
type DeleteCacheModule struct {
common.KubeModule
}
func (m *DeleteCacheModule) Init() {
m.Name = "DeleteCache"
deleteCache := &task.LocalTask{
Name: "DeleteCache",
Prepare: new(ShouldDeleteCache),
Action: new(DeleteCache),
}
m.Tasks = []task.Interface{
deleteCache,
}
}
type DeployModule struct {
common.KubeModule
Skip bool

File diff suppressed because one or more lines are too long

View File

@@ -4,8 +4,6 @@
image:
# Overrides the image tag whose default is the chart appVersion.
ks_controller_manager_repo: kubesphere/ks-controller-manager
ks_controller_manager_tag: "v3.3.0"
ks_apiserver_repo: beclab/ks-apiserver
ks_apiserver_tag: "v3.3.0-ext-3"

View File

@@ -32,7 +32,7 @@ spec:
- command:
- ks-apiserver
- --logtostderr=true
image: beclab/ks-apiserver:0.0.20
image: beclab/ks-apiserver:0.0.21
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: ks-apiserver
ports:

View File

@@ -1,121 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ks-controller-manager
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-controller-manager
spec:
strategy:
rollingUpdate:
maxSurge: 0
type: RollingUpdate
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
template:
metadata:
labels:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- command:
- controller-manager
- --logtostderr=true
- --leader-elect=false
image: beclab/ks-controller-manager:0.0.20
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: ks-controller-manager
ports:
- containerPort: 8080
protocol: TCP
resources:
{{- toYaml .Values.controller.resources | nindent 12 }}
volumeMounts:
- mountPath: /etc/kubesphere/
name: kubesphere-config
- mountPath: /etc/localtime
name: host-time
readOnly: true
{{- if .Values.controller.extraVolumeMounts }}
{{- toYaml .Values.controller.extraVolumeMounts | nindent 8 }}
{{- end }}
env:
{{- if .Values.env }}
{{- toYaml .Values.env | nindent 8 }}
{{- end }}
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
terminationGracePeriodSeconds: 30
volumes:
- name: kubesphere-config
configMap:
name: kubesphere-config
defaultMode: 420
- hostPath:
path: /etc/localtime
type: ""
name: host-time
{{- if .Values.controller.extraVolumes }}
{{ toYaml .Values.controller.extraVolumes | nindent 6 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- ks-controller-manager
namespaces:
- kubesphere-system
{{- with .Values.nodeAffinity }}
nodeAffinity:
{{ toYaml . | indent 10 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ks-controller-manager
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-controller-manager
spec:
ports:
- port: 443
protocol: TCP
targetPort: 8443
selector:
app: ks-controller-manager
tier: backend
# version: {{ .Chart.AppVersion }}
sessionAffinity: None
type: ClusterIP

View File

@@ -4,8 +4,6 @@
image:
# Overrides the image tag whose default is the chart appVersion.
ks_controller_manager_repo: kubesphere/ks-controller-manager
ks_controller_manager_tag: "v3.3.0"
ks_apiserver_repo: beclab/ks-apiserver
ks_apiserver_tag: "v3.3.0-ext-3"

View File

@@ -748,12 +748,12 @@ spec:
sum (node_cpu_seconds_total{job="node-exporter", mode=~"user|nice|system|iowait|irq|softirq"}) by (cpu, instance, job, namespace, pod)
record: node_cpu_used_seconds_total
- expr: |
max(kube_pod_info{job="kube-state-metrics"} * on(node) group_left(role) kube_node_role{job="kube-state-metrics", role="master"} or on(pod, namespace) kube_pod_info{job="kube-state-metrics"}) by (node, namespace, host_ip, role, pod)
max(kube_pod_info{job="kube-state-metrics"} * on(node) group_left(role) kube_node_role{job="kube-state-metrics", role="master"} or on(pod, namespace) kube_pod_info{job="kube-state-metrics"}) by (node, namespace, role, pod)
record: 'node_namespace_pod:kube_pod_info:'
- expr: |
count by (node, host_ip, role) (sum by (node, cpu, host_ip, role) (
count by (node, role) (sum by (node, cpu, role) (
node_cpu_seconds_total{job="node-exporter"}
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
))
record: node:node_num_cpu:sum
@@ -761,27 +761,27 @@ spec:
avg(irate(node_cpu_used_seconds_total{job="node-exporter"}[5m]))
record: :node_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_used_seconds_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"user"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_user_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"system"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_system_cpu_utilisation:avg1m
- expr: |
avg by (node, host_ip, role) (
avg by (node, role) (
irate(node_cpu_seconds_total{job="node-exporter",mode=~"iowait"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:)
record: node:node_iowait_cpu_utilisation:avg1m
- expr: |
@@ -806,9 +806,9 @@ spec:
label_replace(node_memory_Cached_bytes, "node", "$1", "instance", "(.*)")
record: node:node_memory_Cached_bytes
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(node_memory_Slab_bytes{job="node-exporter"} + node_memory_KernelStack_bytes{job="node-exporter"} + node_memory_PageTables_bytes{job="node-exporter"}+ node_memory_HardwareCorrupted_bytes{job="node-exporter"}+node_memory_Bounce_bytes{job="node-exporter"}-node_memory_SReclaimable_bytes{job="node-exporter"})
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_system_reserved
@@ -825,16 +825,16 @@ spec:
sum(node_memory_MemTotal_bytes{job="node-exporter"})
record: ':node_memory_utilisation:'
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"} + node_memory_SReclaimable_bytes{job="node-exporter"})
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_bytes_available:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
node_memory_MemTotal_bytes{job="node-exporter"}
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_memory_bytes_total:sum
@@ -842,30 +842,30 @@ spec:
1 - (node:node_memory_bytes_available:sum / node:node_memory_bytes_total:sum)
record: 'node:node_memory_utilisation:'
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_reads_completed_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_iops_reads:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_writes_completed_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_iops_writes:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_read_bytes_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_throughput_bytes_read:sum
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_disk_written_bytes_total{job="node-exporter"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:data_volume_throughput_bytes_written:sum
@@ -874,74 +874,74 @@ spec:
sum(irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m]))
record: :node_net_utilisation:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
(irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[5m]) +
irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m]))
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_utilisation:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_bytes_transmitted:sum_irate
- expr: |
sum by (node, host_ip, role) (
sum by (node, role) (
irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[5m])
* on (namespace, pod) group_left(node, host_ip, role)
* on (namespace, pod) group_left(node, role)
node_namespace_pod:kube_pod_info:
)
record: node:node_net_bytes_received:sum_irate
- expr: |
sum by(node, host_ip, role) (sum(max(node_filesystem_files{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by(node, role) (sum(max(node_filesystem_files{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: 'node:node_inodes_total:'
- expr: |
sum by(node, host_ip, role) (sum(max(node_filesystem_files_free{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by(node, role) (sum(max(node_filesystem_files_free{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, pod, namespace)) by (pod, namespace) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: 'node:node_inodes_free:'
- expr: |
sum by (node, host_ip, role) (node_load1{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load1{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load1:ratio
- expr: |
sum by (node, host_ip, role) (node_load5{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load5{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load5:ratio
- expr: |
sum by (node, host_ip, role) (node_load15{job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
sum by (node, role) (node_load15{job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) / node:node_num_cpu:sum
record: node:load15:ratio
- expr: |
sum by (node, host_ip, role) ((kube_pod_status_scheduled{job="kube-state-metrics", condition="true"} > 0) * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:)
sum by (node, role) ((kube_pod_status_scheduled{job="kube-state-metrics", condition="true"} > 0) * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:)
record: node:pod_count:sum
- expr: |
(sum(kube_node_status_capacity{resource="pods", job="kube-state-metrics"}) by (node) * on(node) group_left(host_ip, role) max by(node, host_ip, role) (node_namespace_pod:kube_pod_info:{node!="",host_ip!=""}))
(sum(kube_node_status_capacity{resource="pods", job="kube-state-metrics"}) by (node) * on(node) group_left(role) max by(node, role) (node_namespace_pod:kube_pod_info:{node!=""}))
record: node:pod_capacity:sum
- expr: |
node:pod_running:count / node:pod_capacity:sum
record: node:pod_utilization:ratio
- expr: |
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Succeeded"} > 0)) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Succeeded"} > 0)) by (node, role)
record: node:pod_running:count
- expr: |
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Running"} > 0)) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info: unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Failed|Pending|Unknown|Running"} > 0)) by (node, role)
record: node:pod_succeeded:count
- expr: |
count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0) by (node, host_ip, role)
count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0) by (node, role)
record: node:pod_abnormal:count
- expr: |
(count by(namespace, cluster) (kube_pod_info{job="kube-state-metrics"} unless on(pod, namespace, cluster) (kube_pod_status_phase{job="kube-state-metrics",phase="Succeeded"} > 0) unless on(pod, namespace, cluster) ((kube_pod_status_ready{condition="true",job="kube-state-metrics"} > 0) and on(pod, namespace, cluster) (kube_pod_status_phase{job="kube-state-metrics",phase="Running"} > 0)) unless on(pod, namespace, cluster) kube_pod_container_status_waiting_reason{job="kube-state-metrics",reason="ContainerCreating"} > 0) or on(namespace, cluster) (group by(namespace, cluster) (kube_pod_info{job="kube-state-metrics"}) * 0)) * on(namespace, cluster) group_left(user) (kube_namespace_labels{job="kube-state-metrics"}) > 0
record: user:pod_abnormal:count
- expr: |
node:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, host_ip, role)
node:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, role)
record: node:pod_abnormal:ratio
- expr: |
user:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!="",host_ip!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, host_ip, role)
user:pod_abnormal:count / count(node_namespace_pod:kube_pod_info:{node!=""} unless on (pod, namespace) kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) by (node, role)
record: user:pod_abnormal:ratio
- expr: |
sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role)
sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role)
record: 'node:disk_space_available:'
- expr: |
1- sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role) / sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, host_ip, role) node_namespace_pod:kube_pod_info:) by (device, node, host_ip, role)) by (node, host_ip, role)
1- sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role) / sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node, role) node_namespace_pod:kube_pod_info:) by (device, node, role)) by (node, role)
record: node:disk_space_utilization:ratio
- expr: |
(1 - (node:node_inodes_free: / node:node_inodes_total:))

View File

@@ -42,7 +42,7 @@ spec:
- --collector.netdev.address-info
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
image: beclab/node-exporter:0.0.2
image: beclab/node-exporter:0.0.4
name: node-exporter
securityContext:
privileged: true

View File

@@ -58,12 +58,12 @@ var kscorecrds = []map[string]string{
"resource": "default-http-backend",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "secrets",
"resource": "ks-controller-manager-webhook-cert",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "secrets",
// "resource": "ks-controller-manager-webhook-cert",
// "release": "ks-core",
//},
{
"ns": "kubesphere-system",
"kind": "serviceaccounts",
@@ -100,24 +100,24 @@ var kscorecrds = []map[string]string{
"resource": "ks-apiserver",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "services",
"resource": "ks-controller-manager",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "services",
// "resource": "ks-controller-manager",
// "release": "ks-core",
//},
{
"ns": "kubesphere-system",
"kind": "deployments",
"resource": "ks-apiserver",
"release": "ks-core",
},
{
"ns": "kubesphere-system",
"kind": "deployments",
"resource": "ks-controller-manager",
"release": "ks-core",
},
//{
// "ns": "kubesphere-system",
// "kind": "deployments",
// "resource": "ks-controller-manager",
// "release": "ks-core",
//},
//{
// "ns": "kubesphere-system",
// "kind": "validatingwebhookconfigurations",

View File

@@ -65,7 +65,7 @@ func (t *InitNamespace) Execute(runtime connector.Runtime) error {
kubectlpath = path.Join(common.BinDir, common.CommandKubectl)
}
for _, ns := range []string{common.NamespaceKubesphereControlsSystem, common.NamespaceKubesphereMonitoringFederated} {
for _, ns := range []string{common.NamespaceKubesphereControlsSystem} {
if stdout, err := runtime.GetRunner().Cmd(fmt.Sprintf("%s create ns %s", kubectlpath, ns), false, true); err != nil {
if !strings.Contains(stdout, "already exists") {
logger.Errorf("create ns %s failed: %v", ns, err)
@@ -98,8 +98,6 @@ func (t *InitNamespace) Execute(runtime connector.Runtime) error {
common.NamespaceKubeSystem,
common.NamespaceKubekeySystem,
common.NamespaceKubesphereControlsSystem,
common.NamespaceKubesphereMonitoringFederated,
common.NamespaceKubesphereMonitoringSystem,
common.NamespaceKubesphereSystem,
}

View File

@@ -23,17 +23,6 @@ import (
versionutil "k8s.io/apimachinery/pkg/util/version"
)
type ShouldDeleteCache struct {
common.KubePrepare
}
func (p *ShouldDeleteCache) PreCheck(runtime connector.Runtime) (bool, error) {
if p.KubeConf.Arg.DeleteCache {
return true, nil
}
return false, nil
}
type VersionBelowV3 struct {
common.KubePrepare
}

View File

@@ -52,19 +52,6 @@ func (d *DeleteKubeSphereCaches) Execute(runtime connector.Runtime) error {
return nil
}
type DeleteCache struct {
common.KubeAction
}
func (t *DeleteCache) Execute(runtime connector.Runtime) error {
// var cacheDir = path.Join(runtime.GetBaseDir(), cc.ImagesDir)
// if err := util.RemoveDir(cacheDir); err != nil {
// return err
// }
// logger.Debugf("delete caches success")
return nil
}
type AddInstallerConfig struct {
common.KubeAction
}
@@ -368,7 +355,7 @@ func (c *Check) Execute(runtime connector.Runtime) error {
return fmt.Errorf("kubectl not found")
}
var labels = []string{"app=ks-apiserver", "app=ks-controller-manager"}
var labels = []string{"app=ks-apiserver"}
for _, label := range labels {
var cmd = fmt.Sprintf("%s get pod -n %s -l '%s' -o jsonpath='{.items[0].status.phase}'", kubectlpath, common.NamespaceKubesphereSystem, label)

View File

@@ -6,6 +6,7 @@ import (
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/core/module"
"github.com/beclab/Olares/cli/pkg/core/pipeline"
"github.com/beclab/Olares/cli/pkg/gpu"
"github.com/beclab/Olares/cli/pkg/k3s"
"github.com/beclab/Olares/cli/pkg/kubernetes"
"github.com/beclab/Olares/cli/pkg/manifest"
@@ -75,6 +76,7 @@ func (m *AddNodeModule) Init() {
&k3s.JoinNodesModule{},
}
}
m.underlyingModules = append(m.underlyingModules, &gpu.NodeLabelingModule{})
for _, underlyingModule := range m.underlyingModules {
underlyingModule.Default(m.Runtime, m.PipelineCache, m.ModuleCache)
underlyingModule.AutoAssert()

View File

@@ -105,6 +105,7 @@ func (p *phaseBuilder) phaseInstall() *phaseBuilder {
&certs.UninstallCertsFilesModule{},
&storage.DeleteUserDataModule{},
&terminus.DeleteWizardFilesModule{},
&terminus.DeleteUpgradeFilesModule{},
&storage.RemoveJuiceFSModule{},
&storage.DeletePhaseFlagModule{
PhaseFile: common.TerminusStateFileInstalled,
@@ -132,33 +133,13 @@ func (p *phaseBuilder) phasePrepare() *phaseBuilder {
PhaseFile: common.TerminusStateFilePrepared,
BaseDir: p.runtime.GetBaseDir(),
},
&daemon.UninstallTerminusdModule{},
&terminus.RemoveReleaseFileModule{},
)
}
return p
}
func (p *phaseBuilder) phaseDownload() *phaseBuilder {
terminusdAction := &daemon.CheckTerminusdService{}
err := terminusdAction.Execute()
if p.convert() >= PhaseDownload {
if err == nil {
p.modules = append(p.modules, &daemon.UninstallTerminusdModule{})
}
p.modules = append(p.modules,
&kubesphere.DeleteCacheModule{},
)
if p.runtime.Arg.DeleteCache {
p.modules = append(p.modules, &storage.DeleteCacheModule{
BaseDir: p.runtime.GetBaseDir(),
})
}
}
return p
}
func (p *phaseBuilder) phaseMacos() {
p.modules = []module.Module{
&precheck.GreetingsModule{},
@@ -168,9 +149,6 @@ func (p *phaseBuilder) phaseMacos() {
}
if p.convert() >= PhaseDownload {
p.modules = append(p.modules, &kubesphere.DeleteKubeSphereCachesModule{})
if p.runtime.Arg.DeleteCache {
p.modules = append(p.modules, &kubesphere.DeleteCacheModule{})
}
}
}
@@ -189,8 +167,7 @@ func UninstallTerminus(phase string, runtime *common.KubeRuntime) pipeline.Pipel
builder.
phaseInstall().
phaseStorage().
phasePrepare().
phaseDownload()
phasePrepare()
}
return pipeline.Pipeline{

View File

@@ -8,11 +8,11 @@ import (
"github.com/beclab/Olares/cli/pkg/terminus"
)
func NewDownloadWizard(runtime *common.KubeRuntime) *pipeline.Pipeline {
func NewDownloadWizard(runtime *common.KubeRuntime, urlOverride string) *pipeline.Pipeline {
m := []module.Module{
&precheck.GreetingsModule{},
&terminus.InstallWizardDownloadModule{Version: runtime.Arg.OlaresVersion, DownloadCdnUrl: runtime.Arg.DownloadCdnUrl},
&terminus.InstallWizardDownloadModule{Version: runtime.Arg.OlaresVersion, DownloadCdnUrl: runtime.Arg.DownloadCdnUrl, UrlOverride: urlOverride},
}
return &pipeline.Pipeline{

View File

@@ -26,7 +26,7 @@ func DownloadInstallationWizard(opts *options.CliDownloadWizardOptions) error {
return fmt.Errorf("--download-cdn-url invalid")
}
p := download.NewDownloadWizard(runtime)
p := download.NewDownloadWizard(runtime, opts.UrlOverride)
if err := p.Start(); err != nil {
logger.Errorf("download wizard failed %v", err)
return err

View File

@@ -2,11 +2,10 @@ package pipelines
import (
"fmt"
"os"
"path"
"github.com/beclab/Olares/cli/pkg/upgrade"
"github.com/beclab/Olares/cli/pkg/utils"
"os"
"path"
"github.com/beclab/Olares/cli/cmd/ctl/options"
"github.com/beclab/Olares/cli/pkg/common"
@@ -40,9 +39,23 @@ func UpgradeOlaresPipeline(opts *options.UpgradeOptions) error {
return fmt.Errorf("error parsing target Olares version: %v", err)
}
if !targetVersion.GreaterThan(currentVersion) {
fmt.Printf("current version is: %s, no need to upgrade to %s\n", currentVersion.String(), opts.Version)
os.Exit(0)
upgradePath, err := upgrade.GetUpgradePathFor(currentVersion, targetVersion)
if err != nil {
return err
}
if len(upgradePath) > 1 {
fmt.Printf("unable to upgrade from %s to %s directly,\n", currentVersion, targetVersion)
if len(upgradePath) == 2 {
fmt.Printf("please upgrade to %s first!\n", upgradePath[0])
} else {
line := "please upgrade sequentially to:"
for _, u := range upgradePath[:len(upgradePath)-1] {
line += fmt.Sprintf(" %s", u)
}
line += " first!"
fmt.Println(line)
}
os.Exit(1)
}
arg := common.NewArgument()
@@ -59,9 +72,8 @@ func UpgradeOlaresPipeline(opts *options.UpgradeOptions) error {
manifest := path.Join(runtime.GetInstallerDir(), "installation.manifest")
runtime.Arg.SetManifest(manifest)
upgradeModule := &upgrade.UpgradeModule{
CurrentVersion: currentVersion,
TargetVersion: targetVersion,
upgradeModule := &upgrade.Module{
TargetVersion: targetVersion,
}
p := &pipeline.Pipeline{

View File

@@ -65,6 +65,7 @@ data:
health
ready
kubernetes {{ .DNSDomain }} in-addr.arpa ip6.arpa {
endpoint_pod_names
pods insecure
fallthrough in-addr.arpa ip6.arpa
}

View File

@@ -5993,6 +5993,8 @@ spec:
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Never"
- name: FELIX_HEALTHHOST
value: 127.0.0.1
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:

View File

@@ -117,7 +117,7 @@ func (m *Manager) packageLauncher() error {
func (m *Manager) packageGPU() error {
fmt.Println("packaging gpu ...")
return util.CopyDirectory(
filepath.Join(m.olaresRepoRoot, "framework/gpu/.olares/config/gpu"),
filepath.Join(m.olaresRepoRoot, "infrastructure/gpu/.olares/config/gpu"),
filepath.Join(m.distPath, "wizard/config/gpu"),
)
}

View File

@@ -11,6 +11,7 @@ import (
type Builder struct {
olaresRepoRoot string
vendorRepoPath string
distPath string
version string
manifestManager *manifest.Manager
@@ -19,8 +20,13 @@ type Builder struct {
func NewBuilder(olaresRepoRoot, version, cdnURL string, ignoreMissingImages bool) *Builder {
distPath := filepath.Join(olaresRepoRoot, ".dist/install-wizard")
vendorRepoPath := os.Getenv("OLARES_VENDOR_REPO_PATH")
if vendorRepoPath == "" {
vendorRepoPath = "/"
}
return &Builder{
olaresRepoRoot: olaresRepoRoot,
vendorRepoPath: vendorRepoPath,
distPath: distPath,
version: version,
manifestManager: manifest.NewManager(olaresRepoRoot, distPath, cdnURL, ignoreMissingImages),
@@ -68,6 +74,9 @@ func (b *Builder) archive() (string, error) {
if err := util.ReplaceInFile(file, "#__VERSION__", b.version); err != nil {
return "", err
}
if err := util.ReplaceInFile(file, "#__REPO_PATH__", b.vendorRepoPath); err != nil {
return "", err
}
}
tarFile := filepath.Join(b.olaresRepoRoot, fmt.Sprintf("install-wizard-%s.tar.gz", versionStr))

View File

@@ -269,3 +269,14 @@ func getManagedMinIOAccessFlags(localIp string) (string, error) {
return fmt.Sprintf(" --storage minio --bucket http://%s:9000/%s --access-key %s --secret-key %s",
localIp, cc.OlaresDir, MinioRootUser, minioPassword), nil
}
func GetRootFSType() string {
if util.IsExist(JuiceFsServiceFile) {
return "jfs"
}
return "fs"
}
// init records the detected root filesystem type into the shared
// global-envs map so it is propagated into helm chart values.
// NOTE(review): relies on common.TerminusGlobalEnvs being a non-nil
// map at package-init time — confirm its declaration site.
func init() {
	common.TerminusGlobalEnvs["OLARES_FS_TYPE"] = GetRootFSType()
}

View File

@@ -214,29 +214,6 @@ func (m *DeletePhaseFlagModule) Init() {
}
}
// DeleteCacheModule deletes cached data directories under BaseDir on
// every master host.
type DeleteCacheModule struct {
	common.KubeModule
	BaseDir string
}

// Init wires up the single remote DeleteCaches task targeting the
// master hosts; it runs serially with one retry.
func (m *DeleteCacheModule) Init() {
	m.Name = "DeleteCaches"
	m.Tasks = []task.Interface{
		&task.RemoteTask{
			Name:     "DeleteCaches",
			Hosts:    m.Runtime.GetHostsByRole(common.Master),
			Action:   &DeleteCaches{BaseDir: m.BaseDir},
			Parallel: false,
			Retry:    1,
		},
	}
}
type DeleteUserDataModule struct {
common.KubeModule
}

View File

@@ -325,38 +325,6 @@ func (t *DeletePhaseFlagFile) Execute(runtime connector.Runtime) error {
return nil
}
// DeleteCaches removes every first-level subdirectory of BaseDir,
// clearing cached installation data while keeping BaseDir itself.
type DeleteCaches struct {
	common.KubeAction
	BaseDir string
}

// Execute collects the immediate child directories of BaseDir and
// removes each of them. Cleanup stays best-effort: removal and walk
// failures are logged rather than aborting the task.
// Fixes vs. original: the error returned by filepath.WalkDir was
// silently discarded, and the nil-slice check before len() was
// redundant (staticcheck S1009).
func (t *DeleteCaches) Execute(runtime connector.Runtime) error {
	var cachesDirs []string
	err := filepath.WalkDir(t.BaseDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			// surface unreadable entries to the outer error instead of
			// dereferencing a possibly-nil DirEntry below
			return err
		}
		if path != t.BaseDir && d.IsDir() {
			cachesDirs = append(cachesDirs, path)
			// no need to descend: removing the top-level dir is enough
			return filepath.SkipDir
		}
		return nil
	})
	if err != nil {
		logger.Errorf("walk %s failed %v", t.BaseDir, err)
	}
	for _, cachesDir := range cachesDirs {
		if util.IsExist(cachesDir) {
			if err := util.RemoveDir(cachesDir); err != nil {
				logger.Errorf("remove %s failed %v", cachesDir, err)
			}
		}
	}
	return nil
}
// DeleteTerminusUserData is the action that deletes Terminus user data.
// NOTE(review): its Execute implementation is not visible in this chunk.
type DeleteTerminusUserData struct {
	common.KubeAction
}

View File

@@ -96,7 +96,7 @@ func (u *PrepareAppValues) Execute(runtime connector.Runtime) error {
if err != nil {
return err
}
fsType := getRootFSType()
fsType := storage.GetRootFSType()
gpuType := getGpuType(u.KubeConf.Arg.GPU.Enable)
appValues := getAppSecrets(getAppPatches())

View File

@@ -33,6 +33,7 @@ type InstallWizardDownloadModule struct {
common.KubeModule
Version string
DownloadCdnUrl string
UrlOverride string
}
func (m *InstallWizardDownloadModule) Init() {
@@ -42,6 +43,7 @@ func (m *InstallWizardDownloadModule) Init() {
Action: &Download{
Version: m.Version,
DownloadCdnUrl: m.DownloadCdnUrl,
UrlOverride: m.UrlOverride,
},
Retry: 1,
}
@@ -199,6 +201,23 @@ func (m *InstalledModule) Init() {
}
}
// DeleteUpgradeFilesModule cleans leftover upgrade artifacts from the
// base directory via a single local task.
type DeleteUpgradeFilesModule struct {
	common.KubeModule
}

// Init registers the DeleteUpgradeFiles local task.
func (d *DeleteUpgradeFilesModule) Init() {
	d.Name = "DeleteUpgradeFiles"
	d.Tasks = []task.Interface{
		&task.LocalTask{
			Name:   "DeleteUpgradeFiles",
			Action: &DeleteUpgradeFiles{},
		},
	}
}
// DeleteWizardFilesModule removes installer wizard files.
// NOTE(review): its Init implementation is not visible in this chunk.
type DeleteWizardFilesModule struct {
	common.KubeModule
}

View File

@@ -70,7 +70,7 @@ func (t *InstallOsSystem) Execute(runtime connector.Runtime) error {
},
"gpu": getGpuType(t.KubeConf.Arg.GPU.Enable),
"s3_bucket": t.KubeConf.Arg.Storage.StorageBucket,
"fs_type": getRootFSType(),
"fs_type": storage.GetRootFSType(),
common.HelmValuesKeyTerminusGlobalEnvs: common.TerminusGlobalEnvs,
common.HelmValuesKeyOlaresRootFSPath: storage.OlaresRootDir,
}
@@ -86,6 +86,12 @@ func (t *InstallOsSystem) Execute(runtime connector.Runtime) error {
// TODO: wait for the platform to be ready
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsFramework)
if err != nil {
return err
}
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
var frameworkPath = path.Join(runtime.GetInstallerDir(), "wizard", "config", "os-framework")
if err := utils.UpgradeCharts(ctx, actionConfig, settings, common.ChartNameOSFramework, frameworkPath, "", common.NamespaceOsFramework, vals, false); err != nil {
return err
@@ -290,13 +296,6 @@ func cloudValue(cloudInstance bool) string {
return ""
}
func getRootFSType() string {
if util.IsExist(storage.JuiceFsServiceFile) {
return "jfs"
}
return "fs"
}
func getRedisPassword(client clientset.Client, runtime connector.Runtime) (string, error) {
secret, err := client.Kubernetes().CoreV1().Secrets(common.NamespaceKubesphereSystem).Get(context.Background(), "redis-secret", metav1.GetOptions{})
if err != nil {

View File

@@ -5,10 +5,12 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"github.com/beclab/Olares/cli/version"
"io/fs"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
@@ -91,23 +93,24 @@ func (t *CheckKeyPodsRunning) Execute(runtime connector.Runtime) error {
logger.Debugf("skipping pod %s that's not on node %s", pod.Name, t.Node)
continue
}
if strings.HasPrefix(pod.Namespace, "user-space") ||
strings.HasPrefix(pod.Namespace, "user-system") ||
pod.Namespace == "os-platform" ||
pod.Namespace == "os-framework" {
if pod.Status.Phase != corev1.PodRunning {
return fmt.Errorf("pod %s/%s is not running", pod.Namespace, pod.Name)
if !strings.HasPrefix(pod.Namespace, "user-") && !strings.HasPrefix(pod.Namespace, "os-") {
continue
}
if pod.Status.Phase != corev1.PodRunning {
return fmt.Errorf("pod %s/%s is not running", pod.Namespace, pod.Name)
}
if len(pod.Status.ContainerStatuses) != len(pod.Spec.Containers) {
return fmt.Errorf("pod %s/%s has not started all containers yet", pod.Namespace, pod.Name)
}
for _, cStatus := range pod.Status.ContainerStatuses {
if cStatus.State.Terminated != nil && cStatus.State.Terminated.ExitCode != 0 {
return fmt.Errorf("container %s in pod %s/%s is terminated", cStatus.Name, pod.Namespace, pod.Name)
}
if len(pod.Status.ContainerStatuses) == 0 {
return fmt.Errorf("pod %s/%s has no container statuses yet", pod.Namespace, pod.Name)
if cStatus.State.Running == nil {
return fmt.Errorf("container %s in pod %s/%s is not running", cStatus.Name, pod.Namespace, pod.Name)
}
for _, cStatus := range pod.Status.ContainerStatuses {
if cStatus.State.Terminated != nil && cStatus.State.Terminated.ExitCode != 0 {
return fmt.Errorf("container %s in pod %s/%s is terminated", cStatus.Name, pod.Namespace, pod.Name)
}
if cStatus.State.Running == nil {
return fmt.Errorf("container %s in pod %s/%s is not running", cStatus.Name, pod.Namespace, pod.Name)
}
if !cStatus.Ready {
return fmt.Errorf("container %s in pod %s/%s is not ready", cStatus.Name, pod.Namespace, pod.Name)
}
}
}
@@ -154,19 +157,14 @@ type Download struct {
Version string
BaseDir string
DownloadCdnUrl string
UrlOverride string
}
func (t *Download) Execute(runtime connector.Runtime) error {
if t.KubeConf.Arg.OlaresVersion == "" {
if t.UrlOverride == "" && t.KubeConf.Arg.OlaresVersion == "" {
return errors.New("unknown version to download")
}
var fetchMd5 = fmt.Sprintf("curl -sSfL %s/install-wizard-v%s.md5sum.txt |awk '{print $1}'", t.DownloadCdnUrl, t.Version)
md5sum, err := runtime.GetRunner().Cmd(fetchMd5, false, false)
if err != nil {
return errors.New("get md5sum failed")
}
var osArch = runtime.GetSystemInfo().GetOsArch()
var osType = runtime.GetSystemInfo().GetOsType()
var osVersion = runtime.GetSystemInfo().GetOsVersion()
@@ -174,8 +172,21 @@ func (t *Download) Execute(runtime connector.Runtime) error {
var baseDir = runtime.GetBaseDir()
var prePath = path.Join(baseDir, "versions")
var wizard = files.NewKubeBinary("install-wizard", osArch, osType, osVersion, osPlatformFamily, t.Version, prePath, t.DownloadCdnUrl)
wizard.CheckMd5Sum = true
wizard.Md5sum = md5sum
if t.UrlOverride == "" {
md5URL, _ := url.JoinPath(t.DownloadCdnUrl, version.VENDOR_REPO_PATH, fmt.Sprintf("install-wizard-v%s.md5sum.txt", t.Version))
var fetchMd5 = fmt.Sprintf("curl -sSfL %s |awk '{print $1}'", md5URL)
md5sum, err := runtime.GetRunner().Cmd(fetchMd5, false, false)
if err != nil {
return errors.New("get md5sum failed")
}
wizard.CheckMd5Sum = true
wizard.Md5sum = md5sum
} else {
wizard.CheckMd5Sum = false
wizard.Url = t.UrlOverride
util.RemoveFile(wizard.Path())
}
if err := wizard.CreateBaseDir(); err != nil {
return errors.Wrapf(errors.WithStack(err), "create file %s base dir failed", wizard.FileName)
@@ -296,6 +307,30 @@ func (t *InstallFinished) Execute(runtime connector.Runtime) error {
return nil
}
// DeleteUpgradeFiles removes leftover "upgrade.*" entries from the
// runtime base directory after an upgrade completes.
type DeleteUpgradeFiles struct {
	common.KubeAction
}

// Execute scans the base directory and deletes, best-effort, every
// entry whose name starts with "upgrade."; deletion failures are
// logged as warnings and never fail the task.
func (d *DeleteUpgradeFiles) Execute(runtime connector.Runtime) error {
	baseDir := runtime.GetBaseDir()
	entries, err := os.ReadDir(baseDir)
	if err != nil {
		return errors.Wrapf(err, "failed to read directory %s", baseDir)
	}
	for _, entry := range entries {
		name := entry.Name()
		if !strings.HasPrefix(name, "upgrade.") {
			continue
		}
		target := path.Join(baseDir, name)
		if rmErr := os.RemoveAll(target); rmErr != nil && !os.IsNotExist(rmErr) {
			logger.Warnf("failed to delete %s: %v", target, rmErr)
		}
	}
	return nil
}
// DeleteWizardFiles is the action that removes installer wizard files.
// NOTE(review): its Execute implementation is not visible in this chunk.
type DeleteWizardFiles struct {
	common.KubeAction
}
@@ -453,14 +488,21 @@ func (a *DeletePodsUsingHostIP) Execute(runtime connector.Runtime) error {
if err != nil {
return errors.Wrap(err, "failed to get pods using host IP")
}
a.PipelineCache.Set(common.CacheCountPodsUsingHostIP, len(targetPods))
var waitRecreationPodsCount int
for _, pod := range targetPods {
logger.Infof("restarting pod %s/%s that's using host IP", pod.Namespace, pod.Name)
err = kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
if err != nil && !kerrors.IsNotFound(err) {
return errors.Wrap(err, "failed to delete pod")
}
// pods not created by any owner resource
// may not be recreated immediately and should not be waited
if len(pod.OwnerReferences) > 0 {
waitRecreationPodsCount++
}
}
a.PipelineCache.Set(common.CacheCountPodsWaitForRecreation, waitRecreationPodsCount)
// try our best to wait for the pods to be actually deleted
// to avoid the next module getting the pods with a still running phase
@@ -479,7 +521,7 @@ type WaitForPodsUsingHostIPRecreate struct {
}
func (a *WaitForPodsUsingHostIPRecreate) Execute(runtime connector.Runtime) error {
count, ok := a.PipelineCache.GetMustInt(common.CacheCountPodsUsingHostIP)
count, ok := a.PipelineCache.GetMustInt(common.CacheCountPodsWaitForRecreation)
if !ok {
return errors.New("failed to get the count of pods using host IP")
}

View File

@@ -0,0 +1,94 @@
package upgrade
import (
"fmt"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/logger"
"github.com/beclab/Olares/cli/pkg/core/task"
"os"
"strings"
)
// upgrader_1_12_0_20250702 handles the breaking upgrade to version
// 1.12.0-20250702, which additionally requires updating the reserved
// local port range in sysctl before the regular upgrade steps.
type upgrader_1_12_0_20250702 struct {
	upgraderBase
}

// Version returns the target version this upgrader is bound to.
func (u upgrader_1_12_0_20250702) Version() *semver.Version {
	return semver.MustParse("1.12.0-20250702")
}

// PrepareForUpgrade prepends the sysctl reserved-ports update to the
// base preparation tasks.
func (u upgrader_1_12_0_20250702) PrepareForUpgrade() []task.Interface {
	tasks := []task.Interface{
		&task.LocalTask{
			Name:   "UpdateSysctlReservedPorts",
			Action: new(updateSysctlReservedPorts),
		},
	}
	return append(tasks, u.upgraderBase.PrepareForUpgrade()...)
}
// updateSysctlReservedPorts ensures /etc/sysctl.conf sets
// net.ipv4.ip_local_reserved_ports to the value required by Olares,
// rewriting or appending the key as needed and reloading sysctl when
// a change was made.
type updateSysctlReservedPorts struct {
	common.KubeAction
}

// Execute reads sysctl.conf line by line, replaces any existing
// assignment of the reserved-ports key whose value differs, appends
// the key when absent, and runs `sysctl -p` if the file was modified.
// Fix vs. original: the original matched lines with
// strings.HasPrefix, which would also rewrite a different key that
// merely shares this prefix (and lines without an "="); the key is
// now matched exactly against the left-hand side of the assignment.
func (u *updateSysctlReservedPorts) Execute(runtime connector.Runtime) error {
	const sysctlFile = "/etc/sysctl.conf"
	const reservedPortsKey = "net.ipv4.ip_local_reserved_ports"
	const expectedValue = "30000-32767,46800-50000"

	content, err := os.ReadFile(sysctlFile)
	if err != nil {
		return fmt.Errorf("failed to read sysctl.conf: %v", err)
	}

	lines := strings.Split(string(content), "\n")
	var foundKey bool
	var needUpdate bool
	updatedLines := make([]string, 0, len(lines)+1)
	for _, line := range lines {
		trimmedLine := strings.TrimSpace(line)
		parts := strings.SplitN(trimmedLine, "=", 2)
		// only a "key=value" line whose key is exactly ours is rewritten;
		// comments and unrelated keys pass through untouched
		if len(parts) != 2 || strings.TrimSpace(parts[0]) != reservedPortsKey {
			updatedLines = append(updatedLines, line)
			continue
		}
		foundKey = true
		currentValue := strings.TrimSpace(parts[1])
		if currentValue == expectedValue {
			updatedLines = append(updatedLines, line)
			continue
		}
		logger.Infof("updating %s from %s to %s", reservedPortsKey, currentValue, expectedValue)
		updatedLines = append(updatedLines, fmt.Sprintf("%s=%s", reservedPortsKey, expectedValue))
		needUpdate = true
	}
	if !foundKey {
		logger.Infof("key %s not found in sysctl.conf, adding it", reservedPortsKey)
		updatedLines = append(updatedLines, fmt.Sprintf("%s=%s", reservedPortsKey, expectedValue))
		needUpdate = true
	}

	if !needUpdate {
		logger.Debugf("%s already has the expected value: %s", reservedPortsKey, expectedValue)
		return nil
	}
	if err := os.WriteFile(sysctlFile, []byte(strings.Join(updatedLines, "\n")), 0644); err != nil {
		return fmt.Errorf("failed to write updated sysctl.conf: %v", err)
	}
	if _, err := runtime.GetRunner().SudoCmd("sysctl -p", false, false); err != nil {
		return fmt.Errorf("failed to reload sysctl: %v", err)
	}
	logger.Infof("updated and reloaded sysctl configuration")
	return nil
}

View File

@@ -0,0 +1,50 @@
package upgrade
import (
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/container"
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/task"
"github.com/beclab/Olares/cli/pkg/manifest"
)
// upgrader_1_12_0_20250723 handles the breaking upgrade to version
// 1.12.0-20250723, which ships a newer containerd that must be synced
// to the host and restarted before the regular upgrade steps.
type upgrader_1_12_0_20250723 struct {
	upgraderBase
}

// Version returns the target version this upgrader is bound to.
func (u upgrader_1_12_0_20250723) Version() *semver.Version {
	return semver.MustParse("1.12.0-20250723")
}

// PrepareForUpgrade prepends the containerd upgrade and restart tasks
// to the base preparation tasks.
func (u upgrader_1_12_0_20250723) PrepareForUpgrade() []task.Interface {
	tasks := []task.Interface{
		&task.LocalTask{
			Name:   "UpgradeContainerd",
			Action: new(upgradeContainerd),
		},
		&task.LocalTask{
			Name:   "RestartContainerd",
			Action: new(container.RestartContainerd),
		},
	}
	return append(tasks, u.upgraderBase.PrepareForUpgrade()...)
}

// upgradeContainerd syncs the containerd binary from the installation
// manifest onto the host by delegating to container.SyncContainerd.
type upgradeContainerd struct {
	common.KubeAction
}

// Execute reads the manifest referenced by the pipeline arguments and
// runs a SyncContainerd action rooted at the runtime base directory.
func (u *upgradeContainerd) Execute(runtime connector.Runtime) error {
	m, err := manifest.ReadAll(u.KubeConf.Arg.Manifest)
	if err != nil {
		return err
	}
	sync := &container.SyncContainerd{
		ManifestAction: manifest.ManifestAction{
			Manifest: m,
			BaseDir:  runtime.GetBaseDir(),
		},
	}
	return sync.Execute(runtime)
}

View File

@@ -0,0 +1,112 @@
package upgrade
import (
"fmt"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/bootstrap/precheck"
"github.com/beclab/Olares/cli/pkg/common"
"github.com/beclab/Olares/cli/pkg/core/connector"
"github.com/beclab/Olares/cli/pkg/core/task"
"github.com/beclab/Olares/cli/pkg/core/util"
"github.com/beclab/Olares/cli/pkg/gpu"
k3stemplates "github.com/beclab/Olares/cli/pkg/k3s/templates"
"github.com/beclab/Olares/cli/pkg/manifest"
"github.com/beclab/Olares/cli/pkg/terminus"
"github.com/pkg/errors"
"os"
"path/filepath"
"strings"
)
// upgrader_1_12_0_20250730 handles the breaking upgrade to version
// 1.12.0-20250730: when k3s is the installed kubernetes distro it
// upgrades the k3s binary, extends the signed-certificate expiration
// via the k3s service env and restarts k3s; it also upgrades the GPU
// plugin during the system component phase.
type upgrader_1_12_0_20250730 struct {
	upgraderBase
}

// Version returns the target version this upgrader is bound to.
func (u upgrader_1_12_0_20250730) Version() *semver.Version {
	return semver.MustParse("1.12.0-20250730")
}

// PrepareForUpgrade prepends the k3s-specific preparation tasks when a
// k3s systemd unit exists on the host, then falls through to the base
// preparation tasks.
func (u upgrader_1_12_0_20250730) PrepareForUpgrade() []task.Interface {
	var preTasks []task.Interface
	if util.IsExist(filepath.Join("/etc/systemd/system/", k3stemplates.K3sService.Name())) {
		preTasks = append(preTasks,
			&task.LocalTask{
				Name:   "UpgradeK3sBinary",
				Action: new(upgradeK3sBinary),
			},
			&task.LocalTask{
				Name:   "UpdateK3sServiceEnv",
				Action: new(injectK3sCertExpireTime),
			},
			&task.LocalTask{
				Name: "RestartK3sService",
				Action: &terminus.SystemctlCommand{
					UnitNames:           []string{common.K3s},
					Command:             "restart",
					DaemonReloadPreExec: true,
				},
			},
			&task.LocalTask{
				Name:   "WaitForKubeAPIServerUp",
				Action: new(precheck.GetKubernetesNodesStatus),
				Retry:  10,
				Delay:  10,
			})
	}
	return append(preTasks, u.upgraderBase.PrepareForUpgrade()...)
}

// UpgradeSystemComponents prepends the GPU plugin upgrade to the base
// system component upgrade tasks.
func (u upgrader_1_12_0_20250730) UpgradeSystemComponents() []task.Interface {
	preTasks := []task.Interface{
		&task.LocalTask{
			Name:   "UpgradeGPUPlugin",
			Action: new(gpu.InstallPlugin),
		},
	}
	return append(preTasks, u.upgraderBase.UpgradeSystemComponents()...)
}

// upgradeK3sBinary replaces the installed k3s binary with the one from
// the installation manifest and marks it executable.
type upgradeK3sBinary struct {
	common.KubeAction
}

// Execute looks up the k3s binary in the manifest, copies it over the
// installed binary with sudo, and restores the executable bit.
func (u *upgradeK3sBinary) Execute(runtime connector.Runtime) error {
	m, err := manifest.ReadAll(u.KubeConf.Arg.Manifest)
	if err != nil {
		return err
	}
	binary, err := m.Get(common.K3s)
	if err != nil {
		return fmt.Errorf("get k3s binary info failed: %v", err)
	}
	path := binary.FilePath(runtime.GetBaseDir())
	dst := filepath.Join(common.BinDir, common.K3s)
	// replacing the binary does not interrupt the running k3s server
	if err := runtime.GetRunner().SudoScp(path, dst); err != nil {
		// fix: dropped the no-arg fmt.Sprintf wrapper (go vet S1039)
		return errors.Wrap(errors.WithStack(err), "upgrade k3s binary failed")
	}
	if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("chmod +x %s", dst), false, false); err != nil {
		return err
	}
	return nil
}

// injectK3sCertExpireTime idempotently appends
// CATTLE_NEW_SIGNED_CERT_EXPIRATION_DAYS=36500 to the k3s service env
// file so that certificates newly signed by k3s are valid for ~100
// years.
type injectK3sCertExpireTime struct {
	common.KubeAction
}

// Execute reads the env file (a missing file is treated as empty and
// will be created), returns early if the variable is already present,
// and otherwise appends the assignment.
func (u *injectK3sCertExpireTime) Execute(runtime connector.Runtime) error {
	expireTimeEnv := "CATTLE_NEW_SIGNED_CERT_EXPIRATION_DAYS"
	envFile := filepath.Join("/etc/systemd/system/", k3stemplates.K3sServiceEnv.Name())
	content, err := os.ReadFile(envFile)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to read k3s service env file: %v", err)
	}
	// already injected by a previous run; keep the task reentrant
	if strings.Contains(string(content), expireTimeEnv) {
		return nil
	}
	newContent := string(content) + fmt.Sprintf("\n%s=36500\n", expireTimeEnv)
	return os.WriteFile(envFile, []byte(newContent), 0644)
}

View File

@@ -3,6 +3,8 @@ package upgrade
import (
"context"
"fmt"
"github.com/beclab/Olares/cli/pkg/core/task"
"github.com/beclab/Olares/cli/pkg/terminus"
"os"
"path"
"time"
@@ -19,11 +21,110 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
)
type PrepareUserInfoForUpgrade struct {
// upgraderBase is the general-purpose upgrader implementation
// for upgrading across versions without any breaking changes.
// Other implementations of breakingUpgrader,
// targeted for versions with breaking changes,
// should use this as a base for injecting and/or rewriting specific tasks as needed
type upgraderBase struct{}

// PrepareForUpgrade returns the default preparation stage: collecting
// user info (prepareUserInfoForUpgrade), retried up to 5 times.
func (u upgraderBase) PrepareForUpgrade() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "PrepareUserInfoForUpgrade",
			Action: new(prepareUserInfoForUpgrade),
			Retry:  5,
		},
	}
}

// ClearAppChartValues returns the task that runs terminus.ClearAppValues.
func (u upgraderBase) ClearAppChartValues() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "ClearAppChartValues",
			Action: new(terminus.ClearAppValues),
		},
	}
}

// ClearBFLChartValues returns the task that runs terminus.ClearBFLValues.
func (u upgraderBase) ClearBFLChartValues() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "ClearBFLChartValues",
			Action: new(terminus.ClearBFLValues),
		},
	}
}

// UpdateChartsInAppService returns the task that runs
// terminus.CopyAppServiceHelmFiles, retried up to 5 times.
func (u upgraderBase) UpdateChartsInAppService() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "UpdateChartsInAppService",
			Action: new(terminus.CopyAppServiceHelmFiles),
			Retry:  5,
		},
	}
}

// UpgradeUserComponents returns the task that runs
// upgradeUserComponents, retried up to 5 times with a 15s delay
// between attempts.
func (u upgraderBase) UpgradeUserComponents() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "UpgradeUserComponents",
			Action: new(upgradeUserComponents),
			Retry:  5,
			Delay:  15 * time.Second,
		},
	}
}

// UpdateReleaseFile returns the task that runs terminus.WriteReleaseFile.
func (u upgraderBase) UpdateReleaseFile() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "UpdateReleaseFile",
			Action: new(terminus.WriteReleaseFile),
		},
	}
}

// UpgradeSystemComponents returns the task that runs
// upgradeSystemComponents, retried up to 10 times with a 15s delay.
func (u upgraderBase) UpgradeSystemComponents() []task.Interface {
	// this task updates the version in the CR
	// so put this at last to make the whole pipeline
	// reentrant
	return []task.Interface{
		&task.LocalTask{
			Name:   "UpgradeSystemComponents",
			Action: new(upgradeSystemComponents),
			Retry:  10,
			Delay:  15 * time.Second,
		},
	}
}

// UpdateOlaresVersion returns the task that runs updateOlaresVersion.
func (u upgraderBase) UpdateOlaresVersion() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "UpdateOlaresVersion",
			Action: new(updateOlaresVersion),
		},
	}
}

// PostUpgrade returns the final verification task
// (terminus.CheckKeyPodsRunning), retried up to 60 times with a 15s
// delay, to wait until key pods are up and running again.
func (u upgraderBase) PostUpgrade() []task.Interface {
	return []task.Interface{
		&task.LocalTask{
			Name:   "EnsurePodsUpAndRunningAgain",
			Action: new(terminus.CheckKeyPodsRunning),
			Delay:  15 * time.Second,
			Retry:  60,
		},
	}
}
// prepareUserInfoForUpgrade is the action that gathers user info needed
// by the upgrade pipeline.
// NOTE(review): its Execute implementation is only partially visible in
// this chunk.
type prepareUserInfoForUpgrade struct {
	common.KubeAction
}
func (p *PrepareUserInfoForUpgrade) Execute(runtime connector.Runtime) error {
func (p *prepareUserInfoForUpgrade) Execute(runtime connector.Runtime) error {
config, err := ctrl.GetConfig()
if err != nil {
return fmt.Errorf("failed to get rest config: %s", err)
@@ -62,7 +163,7 @@ func (p *PrepareUserInfoForUpgrade) Execute(runtime connector.Runtime) error {
return fmt.Errorf("failed to get user-space-%x: %v", user.Name, err)
}
usersToUpgrade = append(usersToUpgrade, user)
if role, ok := user.Annotations["bytetrade.io/owner-role"]; ok && role == "platform-admin" {
if role, ok := user.Annotations["bytetrade.io/owner-role"]; ok && role == "owner" {
adminUser = user.Name
}
}
@@ -78,11 +179,11 @@ func (p *PrepareUserInfoForUpgrade) Execute(runtime connector.Runtime) error {
return nil
}
type UpgradeUserComponents struct {
type upgradeUserComponents struct {
common.KubeAction
}
func (u *UpgradeUserComponents) Execute(runtime connector.Runtime) error {
func (u *upgradeUserComponents) Execute(runtime connector.Runtime) error {
config, err := ctrl.GetConfig()
if err != nil {
return fmt.Errorf("failed to get rest config: %s", err)
@@ -153,7 +254,7 @@ func (u *UpgradeUserComponents) Execute(runtime connector.Runtime) error {
}
var wizardNeedUpgrade bool
if wizardStatus, ok := user.Annotations["bytetrade.io/wizard-status"]; ok && wizardStatus == "completed" {
if wizardStatus, ok := user.Annotations["bytetrade.io/wizard-status"]; !ok || wizardStatus != "completed" {
wizardNeedUpgrade = true
}
@@ -177,11 +278,11 @@ func (u *UpgradeUserComponents) Execute(runtime connector.Runtime) error {
return nil
}
type UpgradeSystemComponents struct {
type upgradeSystemComponents struct {
common.KubeAction
}
func (u *UpgradeSystemComponents) Execute(runtime connector.Runtime) error {
func (u *upgradeSystemComponents) Execute(runtime connector.Runtime) error {
config, err := ctrl.GetConfig()
if err != nil {
return fmt.Errorf("failed to get rest config: %s", err)
@@ -197,7 +298,7 @@ func (u *UpgradeSystemComponents) Execute(runtime connector.Runtime) error {
return err
}
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsPlatform)
actionConfig, settings, err = utils.InitConfig(config, common.NamespaceOsFramework)
if err != nil {
return err
}
@@ -221,3 +322,27 @@ func (u *UpgradeSystemComponents) Execute(runtime connector.Runtime) error {
}
return nil
}
// updateOlaresVersion records the new Olares version by upgrading the
// "settings" helm chart in the default namespace with the target
// version value.
type updateOlaresVersion struct {
	common.KubeAction
}

// Execute builds a helm action config for the default namespace and
// upgrades the settings chart (located under the installer wizard
// config dir) with {"version": <target>}, bounded by a 3-minute
// timeout.
func (u *updateOlaresVersion) Execute(runtime connector.Runtime) error {
	restCfg, err := ctrl.GetConfig()
	if err != nil {
		return fmt.Errorf("failed to get rest config: %s", err)
	}
	actionConfig, settings, err := utils.InitConfig(restCfg, common.NamespaceDefault)
	if err != nil {
		return err
	}
	ctx, cancelSettings := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancelSettings()
	chartPath := path.Join(runtime.GetInstallerDir(), "wizard", "config", "settings")
	vals := map[string]interface{}{"version": u.KubeConf.Arg.OlaresVersion}
	return utils.UpgradeCharts(ctx, actionConfig, settings, common.ChartNameSettings, chartPath, "", common.NamespaceDefault, vals, true)
}

View File

@@ -0,0 +1,23 @@
package upgrade
import (
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader describes the groups of tasks that make up an Olares
// upgrade pipeline; each method returns the tasks for one stage.
type upgrader interface {
	// PrepareForUpgrade returns tasks run before any component is upgraded.
	PrepareForUpgrade() []task.Interface
	// ClearAppChartValues returns tasks that clear cached app chart values.
	ClearAppChartValues() []task.Interface
	// ClearBFLChartValues returns tasks that clear cached BFL chart values.
	ClearBFLChartValues() []task.Interface
	// UpdateChartsInAppService returns tasks that refresh the helm charts
	// held by app-service.
	UpdateChartsInAppService() []task.Interface
	// UpgradeUserComponents returns tasks that upgrade per-user components.
	UpgradeUserComponents() []task.Interface
	// UpdateReleaseFile returns tasks that rewrite the release file.
	UpdateReleaseFile() []task.Interface
	// UpgradeSystemComponents returns tasks that upgrade system components.
	UpgradeSystemComponents() []task.Interface
	// UpdateOlaresVersion returns tasks that record the new version.
	UpdateOlaresVersion() []task.Interface
	// PostUpgrade returns tasks that verify the system after the upgrade.
	PostUpgrade() []task.Interface
}

// breakingUpgrader is an upgrader bound to a specific version that
// introduces breaking changes and therefore needs extra migration
// steps on top of the base pipeline.
type breakingUpgrader interface {
	upgrader
	Version() *semver.Version
}

Some files were not shown because too many files have changed in this diff Show More