Compare commits

...

22 Commits

Author SHA1 Message Date
dkeven
9f79567c5e fix(gpu): handle scheduler inconsistency and device stuck in unhealthy 2026-01-05 16:39:56 +08:00
eball
be7f3b3c3f daemon: update serial filtering logic to use suffix matching (#2367) 2026-01-04 20:44:41 +08:00
hysyeah
99c6d3860d app-service: app upgrade set tailscale acl (#2362)
* fix: failed release upgrade

* fix: helm upgrade do not use atomic param and allow upgrade failed release

* fix: app upgrade set tailscale acl (#2357)

* fix: increase wait timeout for namespace delete

* fix: update app-service image tag to 0.4.73
2025-12-31 23:58:57 +08:00
berg
9f56cf0f05 login, system frontend: update qrcode size (#2361)
feat: update login version and system frontend version
2025-12-31 23:58:11 +08:00
Yajing
76c8e93822 docs: fix misplaced braces in studio tutorial (#2358) 2025-12-31 21:41:30 +08:00
yajing wang
d38d0d0e1d docs: fix misplaced braces in studio tutorial 2025-12-31 20:59:24 +08:00
hysyeah
65b32c7c41 kubeblocks-addon: fix kubeblocks-addon rabbitmq image pull policy (#2356)
fix: kubeblocks-addon rabbitmq image pull policy
2025-12-31 15:10:26 +08:00
wiy
f6f14e8d9a olares app: update settings create sub-accounts to block domain (#2355) 2025-12-31 15:09:33 +08:00
eball
f8653692b1 daemon: update DID gate URL handling in JWS validation and resolution (#2354) 2025-12-31 13:07:22 +08:00
eball
5264df60cc cli: update ResolveOlaresName and CheckJWS to accept gateUrl parameter (#2352) 2025-12-31 00:11:35 +08:00
berg
1a200ed17c system frontend: update market topic ids (#2351)
feat: update system frontend version
2025-12-30 21:17:53 +08:00
eball
48fdaa5481 daemon: enhance USB monitoring with serial filtering support (#2349)
* daemon: enhance USB monitoring with serial filtering support

* daemon: add check for USB devices with serial before mounting

* daemon: implement FilterBySerial function for USB device filtering
2025-12-30 21:17:15 +08:00
eball
570fe070c9 k3s: update eviction thresholds and image GC settings (#2348)
k3s: update eviction thresholds and image GC settings for improved resource management
2025-12-30 21:16:54 +08:00
lovehunter9
6b18bbd94d fix: files change usb watcher to retry and change sync reconnection to callback (#2342)
* fix: files change usb watcher to retry and change sync reconnection to callback

* fix: create folder and rsync chown to 1000
2025-12-30 21:15:34 +08:00
Yajing
c6836f9859 docs: update nav to reflect the latest changes (#2343) 2025-12-30 17:41:39 +08:00
yajing wang
288869d91d docs: update nav to reflect the latest changes 2025-12-29 20:55:06 +08:00
hysyeah
8ea8a0857e app-service: add helm upgrade timeout (#2339)
* fix: failed release upgrade

* fix: update appservice image tag to 0.4.71

* fix: helm upgrade do not use atomic param and allow upgrade failed release
2025-12-27 14:05:22 +08:00
eball
87674cc5d9 opa: update image validation to exclude alpine and mariadb images (#2337) 2025-12-27 14:04:31 +08:00
berg
11f556e9af system frontend, market backend: verify the update time when the app status is changed. (#2336)
feat: update system frontend version
2025-12-27 14:04:14 +08:00
simon
d2d3195fea download-server: modify ytdlp support domain (#2335)
download
2025-12-27 14:03:45 +08:00
hysyeah
ad3b138284 app-service: fix exposeport upgrade (#2334)
* fix: exposeport upgrade (#2333)

* update appservice tag to 0.4.70
2025-12-26 19:41:14 +08:00
eball
ff609db1aa tapr: change kvrocks to run as root by default (#2332)
* tapr: upgrade pod template and image for PGCluster reconciliation (#2213)

* tapr: upgrade pod template and image for PGCluster reconciliation

* fix(ci): specify working directory in github action for tapr (#2215)

---------

Co-authored-by: dkeven <82354774+dkeven@users.noreply.github.com>

* tapr: upgrade pod template and image for PGCluster reconciliation

* fix(kvrocks): update init container image and pull policy configuration (#2331)

* tapr: change kvrocks running as root by default

---------

Co-authored-by: dkeven <82354774+dkeven@users.noreply.github.com>
2025-12-26 19:40:48 +08:00
44 changed files with 348 additions and 195 deletions

View File

@@ -317,7 +317,7 @@ spec:
chown -R 1000:1000 /uploadstemp && \
chown -R 1000:1000 /appdata
- name: olares-app-init
image: beclab/system-frontend:v1.6.32
image: beclab/system-frontend:v1.6.38
imagePullPolicy: IfNotPresent
command:
- /bin/sh

View File

@@ -195,13 +195,13 @@ func (g *GenerateK3sService) Execute(runtime connector.Runtime) error {
defaultKubeletArs := map[string]string{
"kube-reserved": "cpu=200m,memory=250Mi,ephemeral-storage=1Gi",
"system-reserved": "cpu=200m,memory=250Mi,ephemeral-storage=1Gi",
"eviction-hard": "memory.available<5%,nodefs.available<10%,imagefs.available<10%",
"eviction-hard": "memory.available<5%,nodefs.available<5%,imagefs.available<5%",
"config": "/etc/rancher/k3s/kubelet.config",
"containerd": container.DefaultContainerdCRISocket,
"cgroup-driver": "systemd",
"runtime-request-timeout": "5m",
"image-gc-high-threshold": "91",
"image-gc-low-threshold": "90",
"image-gc-high-threshold": "96",
"image-gc-low-threshold": "95",
"housekeeping_interval": "5s",
}
defaultKubeProxyArgs := map[string]string{

View File

@@ -296,8 +296,10 @@ func GetKubeletConfiguration(runtime connector.Runtime, kubeConf *common.KubeCon
"memory": "250Mi",
},
"evictionHard": map[string]string{
"memory.available": "5%",
"pid.available": "10%",
"memory.available": "5%",
"pid.available": "10%",
"nodefs.available": "5%",
"imagefs.available": "5%",
},
"evictionSoft": map[string]string{
"memory.available": "10%",
@@ -309,8 +311,8 @@ func GetKubeletConfiguration(runtime connector.Runtime, kubeConf *common.KubeCon
"evictionPressureTransitionPeriod": "30s",
"featureGates": FeatureGatesDefaultConfiguration,
"runtimeRequestTimeout": "5m",
"imageGCHighThresholdPercent": 91,
"imageGCLowThresholdPercent": 90,
"imageGCHighThresholdPercent": 96,
"imageGCLowThresholdPercent": 95,
}
if securityEnhancement {

View File

@@ -19,7 +19,6 @@ import (
)
var (
DIDGateURL = "https://did-gate-v3.bttcdn.com/1.0/name/"
DIDGateTimeout = 10 * time.Second
DIDCachePath = "/var/lib/olares"
)
@@ -90,7 +89,7 @@ type CheckJWSResult struct {
}
// resolveDID resolves a DID either from cache or from the DID gate
func ResolveOlaresName(olares_id string) (*didcore.ResolutionResult, error) {
func ResolveOlaresName(gateUrl, olares_id string) (*didcore.ResolutionResult, error) {
name := strings.Replace(olares_id, "@", ".", -1)
// Try to get from cache first
cached, err := getDB().Get([]byte(name), nil)
@@ -105,7 +104,7 @@ func ResolveOlaresName(olares_id string) (*didcore.ResolutionResult, error) {
client := &http.Client{
Timeout: DIDGateTimeout,
}
resp, err := client.Get(DIDGateURL + name)
resp, err := client.Get(gateUrl + name)
if err != nil {
return nil, fmt.Errorf("failed to fetch DID from gate: %w", err)
}
@@ -135,7 +134,7 @@ func ResolveOlaresName(olares_id string) (*didcore.ResolutionResult, error) {
}
// CheckJWS verifies a JWS and returns the terminus name, body and kid
func CheckJWS(jws string, duration int64) (*CheckJWSResult, error) {
func CheckJWS(gateUrl, jws string, duration int64) (*CheckJWSResult, error) {
var kid string
var name string
var timestamp int64
@@ -198,7 +197,7 @@ func CheckJWS(jws string, duration int64) (*CheckJWSResult, error) {
}
// Resolve DID
resolutionResult, err := ResolveOlaresName(name)
resolutionResult, err := ResolveOlaresName(gateUrl, name)
if err != nil {
return nil, fmt.Errorf("failed to resolve DID: %w", err)
}

View File

@@ -18,7 +18,7 @@ require (
bytetrade.io/web3os/bfl v0.0.0-00010101000000-000000000000
github.com/Masterminds/semver/v3 v3.4.0
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
github.com/beclab/Olares/cli v0.0.0-20251219153848-63d422037cf9
github.com/beclab/Olares/cli v0.0.0-20251230161135-5264df60cc33
github.com/beclab/Olares/framework/app-service v0.0.0-20251225061130-909b7656fd70
github.com/containerd/containerd v1.7.29
github.com/distribution/distribution/v3 v3.0.0

View File

@@ -24,8 +24,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beclab/Olares/cli v0.0.0-20251219153848-63d422037cf9 h1:YNHfPra2FqsKJ5mAxSWNVIK6VyWygRyZiNwfPqiFxlg=
github.com/beclab/Olares/cli v0.0.0-20251219153848-63d422037cf9/go.mod h1:cYPcuju2yRSp9BQjIN/CC495dDOOvVoL42r/gvFlutk=
github.com/beclab/Olares/cli v0.0.0-20251230161135-5264df60cc33 h1:WYuUPOT/p26aCDJGJEDai1v7YM6QHiaFDusBVynnbBY=
github.com/beclab/Olares/cli v0.0.0-20251230161135-5264df60cc33/go.mod h1:ixhzBK5XIovsRB5djk44TChsOK4wum2q4y/hZxJKlNw=
github.com/beclab/Olares/framework/app-service v0.0.0-20251225061130-909b7656fd70 h1:U3z6m0hokD1gzl788BrUdxCbDyAjdOBBXA8ilYgn6VQ=
github.com/beclab/Olares/framework/app-service v0.0.0-20251225061130-909b7656fd70/go.mod h1:D9wl7y3obLqXMqfubMROMgdxWAwInnKNrFC//d0nyIA=
github.com/beclab/bfl v0.3.36 h1:PgeSPGc+XoONiwFsKq9xX8rqcL4kVM1G/ut0lYYj/js=

View File

@@ -2,8 +2,10 @@ package handlers
import (
"net/http"
"net/url"
"github.com/beclab/Olares/cli/pkg/web5/jws"
"github.com/beclab/Olares/daemon/pkg/commands"
"github.com/gofiber/fiber/v2"
"k8s.io/klog/v2"
)
@@ -14,8 +16,14 @@ func (h *Handlers) ResolveOlaresName(c *fiber.Ctx) error {
klog.Error("olaresName parameter is missing")
return h.ErrJSON(c, fiber.StatusBadRequest, "olaresName parameter is required")
}
klog.Infof("Received olaresName: %s", olaresName)
result, err := jws.ResolveOlaresName(olaresName)
didServiceURL, err := getDidGateURL()
if err != nil {
return h.ErrJSON(c, fiber.StatusInternalServerError, "Failed to get DID gate URL")
}
result, err := jws.ResolveOlaresName(didServiceURL, olaresName)
if err != nil {
klog.Errorf("Failed to resolve DID for %s: %v", olaresName, err)
return h.ErrJSON(c, fiber.StatusInternalServerError, "Failed to resolve DID")
@@ -46,7 +54,11 @@ func (h *Handlers) CheckJWS(c *fiber.Ctx) error {
body.Duration = int64(3 * 60 * 1000) // 3 minutes in milliseconds
}
result, err := jws.CheckJWS(body.JWS, body.Duration)
didServiceURL, err := getDidGateURL()
if err != nil {
return h.ErrJSON(c, fiber.StatusInternalServerError, "Failed to get DID gate URL")
}
result, err := jws.CheckJWS(didServiceURL, body.JWS, body.Duration)
if err != nil {
klog.Errorf("Failed to check JWS: %v", err)
return h.ErrJSON(c, fiber.StatusBadRequest, "Invalid JWS")
@@ -54,3 +66,12 @@ func (h *Handlers) CheckJWS(c *fiber.Ctx) error {
return h.OkJSON(c, "success", result)
}
func getDidGateURL() (string, error) {
didServiceURL, err := url.JoinPath(commands.OLARES_REMOTE_SERVICE, "/did/1.0/name/")
if err != nil {
klog.Errorf("failed to parse DID gate service URL: %v, Olares remote service: %s", err, commands.OLARES_REMOTE_SERVICE)
return "", err
}
return didServiceURL, nil
}

View File

@@ -20,6 +20,12 @@ func NewUsbWatcher() *usbWatcher {
return w
}
var UsbSerialKey = struct{}{}
func WithSerial(ctx context.Context, serial string) context.Context {
return context.WithValue(ctx, UsbSerialKey, serial)
}
func (w *usbWatcher) Watch(ctx context.Context) {
retry := 1
devs, err := utils.DetectdUsbDevices(ctx)
@@ -55,6 +61,16 @@ func (w *usbWatcher) Watch(ctx context.Context) {
return
}
serial := ctx.Value(UsbSerialKey).(string)
if serial != "" {
klog.Info("mount usb device with serial, ", serial)
devs = utils.FilterArray(devs, utils.FilterBySerial(serial))
if len(devs) == 0 {
klog.Info("no usb device found with serial, ", serial)
return
}
}
mountedPath, err := utils.MountUsbDevice(ctx, commands.MOUNT_BASE_DIR, devs)
if err != nil {
klog.Error("mount usb error, ", err)
@@ -80,13 +96,13 @@ func (w *umountWatcher) Watch(ctx context.Context) {
}
func NewUsbMonitor(ctx context.Context) error {
return utils.MonitorUsbDevice(ctx, func(action string) error {
return utils.MonitorUsbDevice(ctx, func(action, serial string) error {
switch action {
case "add":
delay := time.NewTimer(2 * time.Second)
go func() {
<-delay.C
NewUsbWatcher().Watch(ctx)
NewUsbWatcher().Watch(WithSerial(ctx, serial))
}()
case "remove":
NewUmountWatcher().Watch(ctx)

View File

@@ -119,7 +119,7 @@ func DetectdHddDevices(ctx context.Context) (usbDevs []storageDevice, err error)
return detectdStorageDevices(ctx, "ata")
}
func MonitorUsbDevice(ctx context.Context, cb func(action string) error) error {
func MonitorUsbDevice(ctx context.Context, cb func(action, serial string) error) error {
filter := &usbmon.ActionFilter{Action: usbmon.ActionAll}
devs, err := usbmon.ListenFiltered(ctx, filter)
if err != nil {
@@ -137,8 +137,8 @@ func MonitorUsbDevice(ctx context.Context, cb func(action string) error) error {
fmt.Println("Path: " + dev.Path())
fmt.Println("Vendor: " + dev.Vendor())
if cb != nil {
err = cb(dev.Action())
if cb != nil && dev.Serial() != "" {
err = cb(dev.Action(), dev.Serial())
if err != nil {
klog.Error("usb action callback error, ", err, ", ", dev.Action())
}
@@ -197,6 +197,12 @@ func MountedHddPath(ctx context.Context) ([]string, error) {
return getMountedPath(hdds)
}
func FilterBySerial(serial string) func(dev storageDevice) bool {
return func(dev storageDevice) bool {
return strings.HasSuffix(serial, dev.IDSerial) || strings.HasSuffix(serial, dev.IDSerialShort)
}
}
func MountUsbDevice(ctx context.Context, mountBaseDir string, dev []storageDevice) (mountedPath []string, err error) {
mounter := mountutils.New("")
mountedList, err := mounter.List()

View File

@@ -19,7 +19,7 @@ func DetectdHddDevices(ctx context.Context) (usbDevs []storageDevice, err error)
return
}
func MonitorUsbDevice(ctx context.Context, cb func(action string) error) error {
func MonitorUsbDevice(ctx context.Context, cb func(action, id string) error) error {
klog.Warning("not implement")
return nil
}
@@ -72,3 +72,9 @@ func MountedPath(ctx context.Context) ([]mountedPath, error) {
klog.Warning("not implement")
return nil, nil
}
func FilterBySerial(serial string) func(dev storageDevice) bool {
return func(dev storageDevice) bool {
return dev.IDSerial == serial || dev.IDSerialShort == serial
}
}

View File

@@ -18,15 +18,14 @@ func ValidateJWS(token string) (bool, string, error) {
klog.Errorf("failed to parse DID gate service URL: %v, Olares remote service: %s", err, commands.OLARES_REMOTE_SERVICE)
return false, "", err
}
jws.DIDGateURL = didServiceURL
// Validate the JWS token with a 20-minute expiration time
checkJWS, err := jws.CheckJWS(token, 20*60*1000)
checkJWS, err := jws.CheckJWS(didServiceURL, token, 20*60*1000)
if err != nil {
if strings.HasPrefix(err.Error(), "timestamp") {
err = fmt.Errorf("%v, server time: %s", err, time.Now().UTC().Format(time.RFC3339))
}
klog.Errorf("failed to check JWS: %v, on %s", err, jws.DIDGateURL)
klog.Errorf("failed to check JWS: %v, on %s", err, didServiceURL)
return false, "", err
}

11
daemon/pkg/utils/utils.go Normal file
View File

@@ -0,0 +1,11 @@
package utils
func FilterArray[T any](items []T, fn func(T) bool) []T {
var filtered []T
for _, item := range items {
if fn(item) {
filtered = append(filtered, item)
}
}
return filtered
}

View File

@@ -6,7 +6,7 @@ const side = {
text: "What is Olares",
link: "/manual/overview",
items: [
{ text: "Compare Olares and NAS", link: "/manual/olares-vs-nas" },
// { text: "Compare Olares and NAS", link: "/manual/olares-vs-nas" },
{ text: "Help and support", link: "/manual/help/request-technical-support"}
// collapsed: true,
// items: [
@@ -468,10 +468,10 @@ const side = {
text: "Steam",
link: "/use-cases/stream-game",
},
{
text: "Redroid",
link: "/use-cases/host-cloud-android",
},
// {
// text: "Redroid",
// link: "/use-cases/host-cloud-android",
// },
{
text: "Windows",
link: "/use-cases/windows",

View File

@@ -8,7 +8,7 @@ const side = {
items: [
// { text: "应用场景", link: "/zh/manual/why-olares" },
//{ text: "功能对比", link: "/zh/manual/feature-overview" },
{ text: "比较 Olares 和 NAS", link: "/zh/manual/olares-vs-nas" },
// { text: "比较 Olares 和 NAS", link: "/zh/manual/olares-vs-nas" },
{text: "帮助与支持", link: "/zh/manual/help/request-technical-support",}
// collapsed: true,
// items: [
@@ -425,7 +425,7 @@ const side = {
],
"/zh/use-cases/": [
{
text: "Tutorials & use cases",
text: "应用示例",
link: "/zh/use-cases/",
items: [
{
@@ -471,10 +471,10 @@ const side = {
text: "Steam",
link: "/zh/use-cases/stream-game",
},
{
text: "Redroid",
link: "/zh/use-cases/host-cloud-android",
},
// {
// text: "Redroid",
// link: "/zh/use-cases/host-cloud-android",
// },
],
},
],

View File

@@ -92,13 +92,13 @@ This example demonstrates creating a basic web page manually.
```
5. Create a file named `index.js` in `/root/` with the following content:
```js
// Ensure the port matches what you defined
const express = require('express');
const app = express();
app.use(express.static('public/'));
app.listen(8080), function() {
console.log('Server is running on port 8080');
};
// Ensure the port matches what you defined
const express = require('express');
const app = express();
app.use(express.static('public/'));
app.listen(8080, function() {
console.log('Server is running on port 8080');
});
```
6. Create a `public` directory in `/root/` and add an `index.html` file:
```html
@@ -204,15 +204,15 @@ Once deployed, go to **Services** > **Ports**. You can see your new port listed
const express = require('express');
const app = express();
app.use(express.static('public/'));
app.listen(8080), function() {
console.log('Server is running on port 8080');
};
app.listen(8080, function() {
console.log('Server is running on port 8080');
});
// Add the following
const app_new = express();
app_new.use(express.static('new/'));
app_new.listen(8081), function() {
console.log('Server is running on port 8081');
};
app_new.listen(8081, function() {
console.log('Server is running on port 8081');
});
```
2. Create a `new` directory in `/root/` and add an `index.html` file:
```html

View File

@@ -16,7 +16,6 @@ From running AI models to building seamless workflows across your self-hosted se
{ title: 'Ollama', link: './ollama.html', tags: ['AI']},
{ title: 'Jellyfin', link: './stream-media.html', tags: ['Entertainment']},
{ title: 'Steam', link: './stream-game.html', tags: ['Entertainment']},
{ title: 'Redroid', link: './host-cloud-android.html', tags: ['Virtual Machine']},
{ title: 'Windows', link: './windows.html', tags: ['Virtual Machine']},
{ title: 'DeerFlow', link: './host-cloud-android.html', tags: ['AI']},
{ title: 'ACE-Step', link: './ace-step.html', tags: ['AI']},

View File

@@ -93,12 +93,12 @@ Olares Studio 允许你启动预配置的开发容器来编写和调试代码(
5. 在 `/root/` 中创建文件 `index.js`,内容如下:
```js
// 确保端口与定义的一致
const express = require('express');
const app = express();
app.use(express.static('public/'));
app.listen(8080), function() {
console.log('Server is running on port 8080');
};
const express = require('express');
const app = express();
app.use(express.static('public/'));
app.listen(8080, function() {
console.log('Server is running on port 8080');
});
```
6. 在 `/root/` 中创建 `public` 目录并添加 `index.html` 文件:
```html
@@ -204,15 +204,15 @@ Olares Studio 允许你启动预配置的开发容器来编写和调试代码(
const express = require('express');
const app = express();
app.use(express.static('public/'));
app.listen(8080), function() {
console.log('Server is running on port 8080');
};
app.listen(8080, function() {
console.log('Server is running on port 8080');
});
// 添加以下内容
const app_new = express();
app_new.use(express.static('new/'));
app_new.listen(8081), function() {
console.log('Server is running on port 8081');
};
app_new.listen(8081, function() {
console.log('Server is running on port 8081');
});
```
2. 在 `/root/` 中创建 `new` 目录并添加 `index.html` 文件:
```html

View File

@@ -2,9 +2,9 @@
description: 了解 Olares 在 AI 流程、创意工具和自托管应用中的实际用例,充分释放 Olares 的潜力。
---
# 使用场景与教程
# 应用示例
本节聚焦于**真实场景**,帮助你充分挖掘 Olares 的强大功能。 从运行 AI 模型到在自托管服务间构建无缝工作流,这些动手教程将指导你完成具体任务,并利用已安装的应用实现完整、实用的解决方案。
本节聚焦于真实场景,帮助你充分挖掘 Olares 的强大功能。 从运行 AI 模型到在自托管服务间构建无缝工作流,这些动手教程将指导你完成具体任务,并利用已安装的应用实现完整、实用的解决方案。
<FilterableList :items="[
{ title: 'Stable Diffusion', link: './stable-diffusion.html', tags: ['AI'] },
@@ -15,5 +15,4 @@ description: 了解 Olares 在 AI 流程、创意工具和自托管应用中的
{ title: 'Ollama', link: './ollama.html', tags: ['AI'] },
{ title: 'Jellyfin', link: './stream-media.html', tags: ['娱乐'] },
{ title: 'Steam', link: './stream-game.html', tags: ['娱乐'] },
{ title: 'Redroid', link: './host-cloud-android.html', tags: ['虚拟机'] },
]" />

View File

@@ -170,7 +170,7 @@ spec:
priorityClassName: "system-cluster-critical"
containers:
- name: app-service
image: beclab/app-service:0.4.69
image: beclab/app-service:0.4.73
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0

View File

@@ -146,7 +146,7 @@ func LoadStatefulApp(ctx context.Context, appmgr *ApplicationManagerController,
case appv1alpha1.ApplyingEnvCanceling:
return appstate.NewApplyingEnvCancelingApp(appmgr, &am)
case appv1alpha1.Uninstalling:
return appstate.NewUninstallingApp(appmgr, &am, 15*time.Minute)
return appstate.NewUninstallingApp(appmgr, &am, 30*time.Minute)
case appv1alpha1.StopFailed:
return appstate.NewSuspendFailedApp(appmgr, &am)
case appv1alpha1.UninstallFailed:

View File

@@ -146,6 +146,16 @@ func (h *upgradeHandlerHelper) setAndEncodingAppCofnig(prevCfg *appcfg.Applicati
}
}
}
prevPortsMap := apputils.BuildPrevPortsMap(prevCfg)
// Set expose ports for upgrade, preserving existing ports with same name
err := apputils.SetExposePorts(context.TODO(), h.appConfig, prevPortsMap)
if err != nil {
klog.Errorf("set expose ports failed %v", err)
return "", err
}
encoding, err := json.Marshal(h.appConfig)
if err != nil {
klog.Errorf("Failed to marshal app config err=%v", err)

View File

@@ -2,7 +2,6 @@ package appinstaller
import (
"encoding/json"
"fmt"
"time"
appv1alpha1 "github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
@@ -12,7 +11,6 @@ import (
"github.com/beclab/Olares/framework/app-service/pkg/helm"
"github.com/beclab/Olares/framework/app-service/pkg/users/userspace"
apputils "github.com/beclab/Olares/framework/app-service/pkg/utils/app"
helmrelease "helm.sh/helm/v3/pkg/release"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -23,15 +21,7 @@ import (
// Upgrade do a upgrade operation for release.
func (h *HelmOps) Upgrade() error {
status, err := h.status()
if err != nil {
klog.Errorf("get release status failed %v", err)
return err
}
if status.Info.Status == helmrelease.StatusDeployed {
return h.upgrade()
}
return fmt.Errorf("cannot upgrade release %s/%s, current state is %s", h.app.Namespace, h.app.AppName, status.Info.Status)
return h.upgrade()
}
func (h *HelmOps) upgrade() error {

View File

@@ -13,6 +13,7 @@ import (
"github.com/beclab/Olares/framework/app-service/pkg/appinstaller/versioned"
"github.com/beclab/Olares/framework/app-service/pkg/constants"
"github.com/beclab/Olares/framework/app-service/pkg/errcode"
apputils "github.com/beclab/Olares/framework/app-service/pkg/utils/app"
"github.com/pkg/errors"
"k8s.io/klog/v2"
@@ -59,12 +60,25 @@ func (p *InstallingApp) Exec(ctx context.Context) (StatefulInProgressApp, error)
klog.Errorf("get kube config failed %v", err)
return nil, err
}
err = setExposePorts(ctx, appCfg)
err = apputils.SetExposePorts(ctx, appCfg, nil)
if err != nil {
klog.Errorf("set expose ports failed %v", err)
return nil, err
}
updatedConfig, err := json.Marshal(appCfg)
if err != nil {
klog.Errorf("marshal appConfig failed %v", err)
return nil, err
}
managerCopy := p.manager.DeepCopy()
managerCopy.Spec.Config = string(updatedConfig)
err = p.client.Patch(ctx, managerCopy, client.MergeFrom(p.manager))
if err != nil {
klog.Errorf("update ApplicationManager config failed %v", err)
return nil, err
}
opCtx, cancel := context.WithCancel(context.Background())
ops, err := versioned.NewHelmOps(opCtx, kubeConfig, appCfg, token,

View File

@@ -20,6 +20,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
@@ -138,6 +139,13 @@ func (p *baseStatefulApp) forceDeleteApp(ctx context.Context) error {
return err
}
}
// Wait for namespace to be fully deleted before updating status
if err = p.waitForNamespaceDeleted(ctx); err != nil {
klog.Errorf("wait for namespace %s deleted failed %v", p.manager.Spec.AppNamespace, err)
return err
}
err = p.updateStatus(ctx, p.manager, appsv1.Uninstalled, nil, appsv1.Uninstalled.String(), "")
if err != nil {
klog.Errorf("update app manager %s to state %s failed", p.manager.Name, appsv1.Uninstalled)
@@ -146,6 +154,32 @@ func (p *baseStatefulApp) forceDeleteApp(ctx context.Context) error {
return nil
}
// waitForNamespaceDeleted waits for the namespace to be completely deleted
func (p *baseStatefulApp) waitForNamespaceDeleted(ctx context.Context) error {
namespace := p.manager.Spec.AppNamespace
if apputils.IsProtectedNamespace(namespace) {
return nil
}
klog.Infof("waiting for namespace %s to be fully deleted", namespace)
err := utilwait.PollImmediate(time.Second, 30*time.Minute, func() (done bool, err error) {
var ns corev1.Namespace
err = p.client.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
if err != nil && !apierrors.IsNotFound(err) {
klog.Errorf("failed to get namespace %s: %v", namespace, err)
return false, err
}
if apierrors.IsNotFound(err) {
klog.Infof("namespace %s has been fully deleted", namespace)
return true, nil
}
klog.Infof("namespace %s still exists, waiting...", namespace)
return false, nil
})
return err
}
type OperationApp interface {
StatefulApp
IsTimeout() bool

View File

@@ -100,7 +100,7 @@ func (p *UninstallingApp) waitForDeleteNamespace(ctx context.Context) error {
if apputils.IsProtectedNamespace(p.manager.Spec.AppNamespace) {
return nil
}
err := utilwait.PollImmediate(time.Second, 15*time.Minute, func() (done bool, err error) {
err := utilwait.PollImmediate(time.Second, 30*time.Minute, func() (done bool, err error) {
klog.Infof("waiting for namespace %s to be deleted", p.manager.Spec.AppNamespace)
nsName := p.manager.Spec.AppNamespace
var ns corev1.Namespace

View File

@@ -169,6 +169,15 @@ func (p *UpgradingApp) exec(ctx context.Context) error {
klog.Errorf("get app config failed %v", err)
return err
}
var cfg *appcfg.ApplicationConfig
err = json.Unmarshal([]byte(p.manager.Spec.Config), &cfg)
if err != nil {
klog.Errorf("unmarshal to appConfig failed %v", err)
return err
}
appConfig.Ports = cfg.Ports
appConfig.TailScale = cfg.TailScale
} else {
_, err = apputils.GetIndexAndDownloadChart(ctx, &apputils.ConfigOptions{
App: p.manager.Spec.AppName,

View File

@@ -6,9 +6,6 @@ import (
appv1alpha1 "github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/appcfg"
"github.com/beclab/Olares/framework/app-service/pkg/utils"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -17,7 +14,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -25,96 +21,6 @@ import (
const suspendAnnotation = "bytetrade.io/suspend-by"
const suspendCauseAnnotation = "bytetrade.io/suspend-cause"
type portKey struct {
port int32
protocol string
}
func setExposePorts(ctx context.Context, appConfig *appcfg.ApplicationConfig) error {
existPorts := make(map[portKey]struct{})
client, err := utils.GetClient()
if err != nil {
return err
}
apps, err := client.AppV1alpha1().Applications().List(ctx, metav1.ListOptions{})
if err != nil {
return err
}
for _, app := range apps.Items {
for _, p := range app.Spec.Ports {
protos := []string{p.Protocol}
if p.Protocol == "" {
protos = []string{"tcp", "udp"}
}
for _, proto := range protos {
key := portKey{
port: p.ExposePort,
protocol: proto,
}
existPorts[key] = struct{}{}
}
}
}
klog.Infof("existPorts: %v", existPorts)
for i := range appConfig.Ports {
port := &appConfig.Ports[i]
if port.ExposePort == 0 {
var exposePort int32
protos := []string{port.Protocol}
if port.Protocol == "" {
protos = []string{"tcp", "udp"}
}
for i := 0; i < 5; i++ {
exposePort, err = genPort(protos)
if err != nil {
continue
}
for _, proto := range protos {
key := portKey{port: exposePort, protocol: proto}
if _, ok := existPorts[key]; !ok && err == nil {
break
}
}
}
for _, proto := range protos {
key := portKey{port: exposePort, protocol: proto}
if _, ok := existPorts[key]; ok || err != nil {
return fmt.Errorf("%d port is not available", key.port)
}
existPorts[key] = struct{}{}
port.ExposePort = exposePort
}
}
}
// add exposePort to tailscale acls
for i := range appConfig.Ports {
if appConfig.Ports[i].AddToTailscaleAcl {
appConfig.TailScale.ACLs = append(appConfig.TailScale.ACLs, appv1alpha1.ACL{
Action: "accept",
Src: []string{"*"},
Proto: appConfig.Ports[i].Protocol,
Dst: []string{fmt.Sprintf("*:%d", appConfig.Ports[i].ExposePort)},
})
}
}
klog.Infof("appConfig.TailScale: %v", appConfig.TailScale)
return nil
}
func genPort(protos []string) (int32, error) {
exposePort := int32(rand.IntnRange(46800, 50000))
for _, proto := range protos {
if !utils.IsPortAvailable(proto, int(exposePort)) {
return 0, fmt.Errorf("failed to allocate an available port after 5 attempts")
}
}
return exposePort, nil
}
func suspendOrResumeApp(ctx context.Context, cli client.Client, am *appv1alpha1.ApplicationManager, replicas int32) error {
suspend := func(list client.ObjectList) error {
namespace := am.Spec.AppNamespace

View File

@@ -87,7 +87,8 @@ func UpgradeCharts(ctx context.Context, actionConfig *action.Configuration, sett
client.Namespace = namespace
client.Timeout = 300 * time.Second
client.Recreate = false
client.Atomic = true
// Do not use Atomic, this could cause helm wait all resource ready.
//client.Atomic = true
if reuseValue {
client.ReuseValues = true
}

View File

@@ -35,6 +35,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
@@ -1094,3 +1095,118 @@ func GetRawAppName(AppName, rawAppName string) string {
return rawAppName
}
type portKey struct {
port int32
protocol string
}
func genPort(protos []string) (int32, error) {
exposePort := int32(rand.IntnRange(46800, 50000))
for _, proto := range protos {
if !utils.IsPortAvailable(proto, int(exposePort)) {
return 0, fmt.Errorf("failed to allocate an available port after 5 attempts")
}
}
return exposePort, nil
}
// SetExposePorts sets expose ports for app config.
//
// It first collects every (port, protocol) pair already claimed by existing
// Applications, then for each port in appConfig:
//   - on upgrade, preserves the previous ExposePort looked up by name in
//     prevPortsMap;
//   - otherwise, if ExposePort is unset, allocates a random free port
//     (retrying up to 5 times), recording the claim for both protocols when
//     the port's protocol is empty.
//
// Finally it appends a tailscale ACL entry for every port flagged with
// AddToTailscaleAcl. Returns an error if the Application list cannot be
// fetched or no free port is found within 5 attempts.
func SetExposePorts(ctx context.Context, appConfig *appcfg.ApplicationConfig, prevPortsMap map[string]int32) error {
	existPorts := make(map[portKey]struct{})
	client, err := utils.GetClient()
	if err != nil {
		return err
	}
	apps, err := client.AppV1alpha1().Applications().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, app := range apps.Items {
		for _, p := range app.Spec.Ports {
			protos := []string{p.Protocol}
			if p.Protocol == "" {
				// An empty protocol claims the port for both tcp and udp.
				protos = []string{"tcp", "udp"}
			}
			for _, proto := range protos {
				existPorts[portKey{port: p.ExposePort, protocol: proto}] = struct{}{}
			}
		}
	}
	klog.Infof("existPorts: %v", existPorts)

	// isFree reports whether port is unclaimed for every protocol in protos.
	isFree := func(port int32, protos []string) bool {
		for _, proto := range protos {
			if _, ok := existPorts[portKey{port: port, protocol: proto}]; ok {
				return false
			}
		}
		return true
	}

	for i := range appConfig.Ports {
		port := &appConfig.Ports[i]
		// For upgrade: if port with same name exists in prevPortsMap, preserve its ExposePort.
		if prevPortsMap != nil && port.Name != "" {
			if prevExposePort, exists := prevPortsMap[port.Name]; exists && prevExposePort != 0 {
				klog.Infof("preserving ExposePort %d for port %s from previous config", prevExposePort, port.Name)
				port.ExposePort = prevExposePort
				continue
			}
		}
		if port.ExposePort != 0 {
			// Already assigned (e.g. explicitly configured); leave untouched.
			continue
		}
		protos := []string{port.Protocol}
		if port.Protocol == "" {
			protos = []string{"tcp", "udp"}
		}
		var exposePort int32
		allocated := false
		// Retry up to 5 times: genPort can fail when the random candidate is
		// busy on the host, and a host-free candidate may still collide with
		// a port already claimed by another Application.
		for attempt := 0; attempt < 5; attempt++ {
			candidate, genErr := genPort(protos)
			if genErr != nil {
				continue
			}
			if isFree(candidate, protos) {
				exposePort = candidate
				allocated = true
				break
			}
		}
		if !allocated {
			return fmt.Errorf("failed to allocate an available port after 5 attempts")
		}
		// Record the claim so later ports in this config cannot reuse it.
		for _, proto := range protos {
			existPorts[portKey{port: exposePort, protocol: proto}] = struct{}{}
		}
		port.ExposePort = exposePort
	}
	// add exposePort to tailscale acls
	for i := range appConfig.Ports {
		if appConfig.Ports[i].AddToTailscaleAcl {
			appConfig.TailScale.ACLs = append(appConfig.TailScale.ACLs, v1alpha1.ACL{
				Action: "accept",
				Src:    []string{"*"},
				Proto:  appConfig.Ports[i].Protocol,
				Dst:    []string{fmt.Sprintf("*:%d", appConfig.Ports[i].ExposePort)},
			})
		}
	}
	klog.Infof("appConfig.TailScale: %v", appConfig.TailScale)
	return nil
}
// BuildPrevPortsMap builds a map of port name -> expose port from previous config.
// It returns nil when prevConfig is nil; ports with an empty name or a zero
// expose port are skipped.
func BuildPrevPortsMap(prevConfig *appcfg.ApplicationConfig) map[string]int32 {
	if prevConfig == nil {
		return nil
	}
	ports := make(map[string]int32, len(prevConfig.Ports))
	for _, port := range prevConfig.Ports {
		if port.Name == "" || port.ExposePort == 0 {
			continue
		}
		ports[port.Name] = port.ExposePort
	}
	return ports
}

View File

@@ -29,7 +29,7 @@ spec:
name: check-auth
containers:
- name: auth-front
image: beclab/login:v1.6.30
image: beclab/login:v1.6.38
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80

View File

@@ -180,7 +180,7 @@ spec:
memory: 300Mi
- name: download-server
image: "beclab/download-server:v0.1.16"
image: "beclab/download-server:v0.1.17"
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0

View File

@@ -210,7 +210,7 @@ spec:
command:
- /samba_share
- name: files
image: beclab/files-server:v0.2.142
image: beclab/files-server:v0.2.144
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: true

View File

@@ -140,7 +140,7 @@ spec:
name: check-chart-repo
containers:
- name: appstore-backend
image: beclab/market-backend:v0.6.13
image: beclab/market-backend:v0.6.15
imagePullPolicy: IfNotPresent
ports:
- containerPort: 81

View File

@@ -248,7 +248,7 @@ spec:
containers:
- name: seafile-server
image: beclab/pg_seafile_server:v0.0.16
image: beclab/pg_seafile_server:v0.0.17
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8082

View File

@@ -4,7 +4,7 @@ nameOverride: ""
fullnameOverride: ""
namespaceOverride: ""
imagePullSecrets: []
version: "v2.6.6"
version: "v2.6.7"
# Nvidia GPU Parameters
resourceName: "nvidia.com/gpu"

View File

@@ -3,7 +3,7 @@ target: prebuilt
output:
containers:
-
name: beclab/hami:v2.6.6
name: beclab/hami:v2.6.7
-
name: beclab/hami-webui-fe-oss:v1.0.8
-

View File

@@ -7,4 +7,4 @@ output:
-
name: beclab/apecloud-kubeblocks:1.0.1
-
name: beclab/kubeblock-addon-charts:v1.0.1-ext
name: beclab/kubeblock-addon-charts:v1.0.1-ext2

View File

@@ -11,7 +11,7 @@ spec:
or cluster of machines.
helm:
chartLocationURL: file:///minio-1.0.1.tgz
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext2
chartsPathInImage: /charts
installValues: {}
valuesMapping:
@@ -44,7 +44,7 @@ spec:
and scaling.
helm:
chartLocationURL: file:///mongodb-1.0.1.tgz
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext2
installable:
autoInstall: true
type: Helm
@@ -68,7 +68,7 @@ spec:
speed and relevance on production-scale workloads.
helm:
chartLocationURL: file:///elasticsearch-1.0.1.tgz
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext2
installable:
autoInstall: true
type: Helm
@@ -90,7 +90,7 @@ spec:
description: RabbitMQ is a reliable and mature messaging and streaming broker.
helm:
chartLocationURL: file:///rabbitmq-1.0.1.tgz
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext2
installable:
autoInstall: true
type: Helm
@@ -113,7 +113,7 @@ spec:
system that is widely used for web and application servers
helm:
chartLocationURL: file:///mariadb-1.0.1.tgz
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext2
installable:
autoInstall: true
type: Helm
@@ -136,7 +136,7 @@ spec:
system (RDBMS)
helm:
chartLocationURL: file:///mysql-1.0.1.tgz
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext2
installable:
autoInstall: true
type: Helm

View File

@@ -251,6 +251,8 @@ data:
not startswith(image, "ghcr.io/coder/coder:v2.19.0")
not startswith(image, "apecloud/")
not startswith(image, "kldtks/")
not startswith(image, "alpine:")
not startswith(image, "mariadb:")
}
is_root_user(ctx) if {

View File

@@ -3,7 +3,7 @@ target: prebuilt
output:
containers:
-
name: beclab/kvrocks:0.1.1
name: beclab/kvrocks:0.1.2

View File

@@ -57,7 +57,7 @@ spec:
path: '{{ $dbbackup_rootpath }}/pg_backup'
containers:
- name: operator-api
image: beclab/middleware-operator:0.2.29
image: beclab/middleware-operator:0.2.30
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9080

View File

@@ -64,7 +64,7 @@ spec:
kvrocks:
owner: system
backupStorage: '{{ $redix_backuppath }}/kvrocks_backup'
image: beclab/kvrocks:0.1.1
image: beclab/kvrocks:0.1.2
imagePullPolicy: IfNotPresent
password:
valueFrom:

View File

@@ -64,6 +64,19 @@ func GetKVRocksDefineByUser(ctx context.Context, client *kubernetes.Clientset,
sts.Namespace = namespace
sts.Name = kvrocksDef.Name
for i, c := range sts.Spec.Template.Spec.InitContainers {
if c.Name == "init-kvrocks-cfg" {
ptrC := &sts.Spec.Template.Spec.InitContainers[i]
if kvrocksDef.Spec.KVRocks.Image != "" {
ptrC.Image = kvrocksDef.Spec.KVRocks.Image
}
if kvrocksDef.Spec.KVRocks.ImagePullPolicy != "" {
ptrC.ImagePullPolicy = kvrocksDef.Spec.KVRocks.ImagePullPolicy
}
}
}
for i, c := range sts.Spec.Template.Spec.Containers {
if c.Name == "kvrocks" {
ptrC := &sts.Spec.Template.Spec.Containers[i]

View File

@@ -12,7 +12,7 @@ import (
const (
DefaultKVRocksName = "kvrocks"
DefaultKVRocksImage = "beclab/kvrocks:0.1.1"
DefaultKVRocksImage = "beclab/kvrocks:0.1.2"
KVRocksVolumeName = "kvrdata"
KVRocksBackupVolumeName = "kvrbackup"
KVRocksBackupDir = "/backup"