Compare commits

...

31 Commits

Author SHA1 Message Date
hysyeah
d3e7d3e64f fix: app uninstall delete data (#2479) 2026-02-03 20:17:04 +08:00
hysyeah
c627218184 appservice: fix add spec ports (#2477)
fix: add spec ports
2026-02-03 20:15:26 +08:00
hysyeah
6286fee6f7 feat: add icon filed to nats event (#2476) 2026-02-03 20:14:53 +08:00
eball
4cd4476885 cherry-pick: fix(cli): set node port range in minikube to allow smb service (#2460) 2026-02-03 13:43:52 +08:00
eball
b5f09d8e27 cherry-pick: desktop, settings, files, vault: fix multiple known issues (#2467) 2026-02-03 13:20:48 +08:00
eball
b8b22fe210 cherry-pick: authelia: add user regulation for TOTP authentication attempts (#2466) 2026-02-03 13:19:57 +08:00
eball
8b240521b9 cherry-pick: settings, user service: update wallpaper style (#2463) 2026-02-03 13:18:37 +08:00
eball
f2f3645b47 cherry-pick: bfl: enhance user login background handling with style support (#2464) 2026-02-03 13:18:12 +08:00
eball
60f0b123e8 cherry-pick: settings: add settings new version and update provider api (#2456) 2026-02-03 13:17:40 +08:00
eball
ad5ee4d9a7 cherry-pick: cli: upgrade l4-bfl-proxy to v0.3.10 (#2442) 2026-02-03 13:16:31 +08:00
eball
e3477d262b cherry-pick: l4: skip invalid expose port (#2441) 2026-02-03 13:15:46 +08:00
eball
12aacd9d56 cherry-pick: daemon: change pcap open timeout to 1 millisecond to prevent close hang (#2439) 2026-02-03 13:14:42 +08:00
eball
3224d8a81e cherry-pick: feat(olares-app): update version to v1.8.2 (#2433) 2026-02-03 13:13:48 +08:00
eball
8ab1c1997c cherry-pick: bfl: myapps api add rawAppName (#2432) 2026-02-03 13:13:05 +08:00
eball
41e8c188ee cherry-pick: feat(cli): collect nginx logs stored temporarily in some containers (#2429) 2026-02-03 13:12:16 +08:00
eball
e3885df940 cherry-pick: daemon: modify mDNS registration method (#2427) 2026-01-19 23:29:59 +08:00
eball
fa9662afbf cherry-pick: tapr: add max retry for delete action (#2426) 2026-01-19 20:32:02 +08:00
eball
d88f248e28 cherry-pick: hami: revert hami-core latest update (#2424) 2026-01-19 19:38:44 +08:00
eball
e5072023e4 cherry-pick: feat(olares-app): update olares-app version to v1.7.7 (#2423) 2026-01-19 14:09:13 +08:00
eball
0b155a8ea4 cherry-pick: feat(olares-app): update new version to v1.7.6 (#2422) 2026-01-19 14:08:49 +08:00
eball
529208055f cherry-pick: feat: optimize highlight segment order (#2420) 2026-01-16 15:43:05 +08:00
eball
2c3ce26b79 cherry-pick: fix: fix meaningless word highlight (#2418) 2026-01-15 19:34:57 +08:00
eball
4d9104c9f0 cherry-pick: settings: update search origin (#2417) 2026-01-15 19:34:30 +08:00
eball
75c76b80fb cherry-pick: kubeblocks: skip check pod spec,status image (#2414) 2026-01-15 19:33:58 +08:00
eball
4d4ae2c673 cherry-pick: olares-app, login: update version to v1.7.4 (#2413) 2026-01-15 19:33:30 +08:00
eball
644adacfab cherry-pick: fix: fix english highight missing (#2412) 2026-01-15 19:32:58 +08:00
eball
6c383b5a45 cherry-pick: daemon: handle missing auth token for WebSocket connections (#2411) 2026-01-15 19:32:27 +08:00
eball
db49d10b65 cherry-pick: feat(gpu): update gpu plugin version to v2.6.8 (#2410) 2026-01-15 19:31:59 +08:00
eball
1cd47a5abe cherry-pick: user-service: update mtranserverv2 (#2408) 2026-01-14 14:40:38 +08:00
eball
a7e455239e cherry-pick: fix: files check disk space for upload link and copy (#2407) 2026-01-14 14:40:11 +08:00
eball
5df6f3e5c6 cherry-pick: feat(cli): sync kubeconfig for the original user invoking sudo (#2406) 2026-01-14 14:39:42 +08:00
52 changed files with 526 additions and 237 deletions

View File

@@ -54,6 +54,7 @@ rules:
- "/system/configuration/encoding"
- "/api/search/get_directory/"
- "/api/search/sync_search/"
- "/api/share/smb_share_user/"
verbs: ["*"]
---

View File

@@ -317,7 +317,7 @@ spec:
chown -R 1000:1000 /uploadstemp && \
chown -R 1000:1000 /appdata
- name: olares-app-init
image: beclab/system-frontend:v1.7.1
image: beclab/system-frontend:v1.8.5
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -439,7 +439,7 @@ spec:
- name: NATS_SUBJECT_VAULT
value: os.vault.{{ .Values.bfl.username}}
- name: user-service
image: beclab/user-service:v0.0.81
image: beclab/user-service:v0.0.85
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000

View File

@@ -12,4 +12,5 @@ rules:
- "/task/*"
- "/search/*"
- "/monitorsetting/*"
- "/file/*"
verbs: ["*"]

View File

@@ -3,6 +3,7 @@ package os
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
"log"
@@ -13,6 +14,9 @@ import (
"strings"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
ctrl "sigs.k8s.io/controller-runtime"
@@ -276,7 +280,7 @@ func collectSystemdLogs(tw *tar.Writer, options *LogCollectOptions) error {
}
func collectDmesgLogs(tw *tar.Writer, options *LogCollectOptions) error {
cmd := exec.Command("dmesg")
cmd := exec.Command("dmesg -T")
output, err := cmd.Output()
if err != nil {
return err
@@ -399,6 +403,126 @@ func collectKubernetesLogs(tw *tar.Writer, options *LogCollectOptions) error {
}
}
if err := collectNginxLogsFromLabeledPods(tw); err != nil {
if !options.IgnoreKubeErrors {
return fmt.Errorf("failed to collect nginx logs from labeled pods: %v", err)
}
}
return nil
}
// collectNginxLogsFromLabeledPods copies nginx access/error logs out of pods
// selected by well-known labels (l4-bfl-proxy pods and the "ingress" container
// of bfl pods) and appends them to the given tar archive under
// nginx/<namespace>/<pod>/<basename>.
//
// It shells out to `kubectl cp` for the actual file transfer (kubectl already
// implements copy via tar-over-exec); if kubectl is not on PATH the collection
// is skipped with a warning rather than treated as an error.
func collectNginxLogsFromLabeledPods(tw *tar.Writer) error {
	if _, err := util.GetCommand("kubectl"); err != nil {
		fmt.Printf("warning: kubectl not found, skipping collecting nginx logs from labeled pods\n")
		return nil
	}
	cfg, err := ctrl.GetConfig()
	if err != nil {
		return fmt.Errorf("failed to get kubeconfig: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return fmt.Errorf("failed to create kube client: %v", err)
	}
	type selectorSpec struct {
		LabelSelector string
		ContainerName string
	}
	// An empty ContainerName lets kubectl pick the pod's default container.
	selectors := []selectorSpec{
		{LabelSelector: "app=l4-bfl-proxy", ContainerName: ""},
		{LabelSelector: "tier=bfl", ContainerName: "ingress"},
	}
	type targetPod struct {
		Namespace     string
		Name          string
		ContainerName string
	}
	var targets []targetPod
	// Bound the pod listing so a wedged API server cannot hang log collection.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	for _, sel := range selectors {
		podList, err := clientset.CoreV1().Pods(corev1.NamespaceAll).List(ctx, metav1.ListOptions{LabelSelector: sel.LabelSelector})
		if err != nil {
			return fmt.Errorf("failed to list pods by label %q: %v", sel.LabelSelector, err)
		}
		for _, pod := range podList.Items {
			targets = append(targets, targetPod{
				Namespace:     pod.Namespace,
				Name:          pod.Name,
				ContainerName: sel.ContainerName,
			})
		}
	}
	if len(targets) == 0 {
		return nil
	}
	// simplest approach: use kubectl cp (it already implements copy via tar over exec)
	tempDir, err := os.MkdirTemp("", "olares-nginx-logs-*")
	if err != nil {
		return fmt.Errorf("failed to create temp directory for nginx logs: %v", err)
	}
	defer os.RemoveAll(tempDir)
	files := []string{"/var/log/nginx/access.log", "/var/log/nginx/error.log"}
	for _, target := range targets {
		for _, remotePath := range files {
			base := filepath.Base(remotePath)
			archivePath := filepath.Join("nginx", target.Namespace, target.Name, base)
			dest := filepath.Join(tempDir, fmt.Sprintf("%s__%s__%s", target.Namespace, target.Name, base))
			err := kubectlCopyFile(target.Namespace, target.Name, target.ContainerName, remotePath, dest)
			if err != nil {
				return fmt.Errorf("failed to kubectl cp %s/%s:%s: %v", target.Namespace, target.Name, remotePath, err)
			}
			// Stream via a helper so each copied log's descriptor is closed
			// as soon as it is archived, instead of deferring all closes to
			// function return (which leaks descriptors across the loop).
			if err := addLocalFileToTar(tw, dest, archivePath); err != nil {
				return err
			}
		}
	}
	return nil
}

// addLocalFileToTar appends the regular file at src to tw under archivePath.
// The source file is closed before the function returns, making it safe to
// call repeatedly from a loop.
func addLocalFileToTar(tw *tar.Writer, src, archivePath string) error {
	fi, err := os.Stat(src)
	if err != nil {
		return fmt.Errorf("failed to stat copied nginx log %s: %v", src, err)
	}
	f, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open copied nginx log %s: %v", src, err)
	}
	defer f.Close()
	header := &tar.Header{
		Name:    archivePath,
		Mode:    0644,
		Size:    fi.Size(),
		ModTime: time.Now(),
	}
	if err := tw.WriteHeader(header); err != nil {
		return fmt.Errorf("failed to write header for %s: %v", archivePath, err)
	}
	// CopyN pins the copy to the size recorded in the header so the archive
	// stays consistent even if the file grows after Stat.
	if _, err := io.CopyN(tw, f, header.Size); err != nil {
		return fmt.Errorf("failed to write data for %s: %v", archivePath, err)
	}
	return nil
}
// kubectlCopyFile fetches a single file from a pod to a local path using
// `kubectl -n <ns> cp [-c <container>] <pod>:<remotePath> <destPath>`.
// An empty container name lets kubectl choose the pod's default container.
// On failure the combined command output is folded into the returned error.
func kubectlCopyFile(namespace, pod, container, remotePath, destPath string) error {
	source := fmt.Sprintf("%s:%s", pod, remotePath)
	kubectlArgs := make([]string, 0, 7)
	kubectlArgs = append(kubectlArgs, "-n", namespace, "cp")
	if container != "" {
		kubectlArgs = append(kubectlArgs, "-c", container)
	}
	kubectlArgs = append(kubectlArgs, source, destPath)
	out, err := exec.Command("kubectl", kubectlArgs...).CombinedOutput()
	if err == nil {
		return nil
	}
	return fmt.Errorf("kubectl %s failed: %v, output: %s", strings.Join(kubectlArgs, " "), err, strings.TrimSpace(string(out)))
}

View File

@@ -36,6 +36,7 @@ import (
"github.com/beclab/Olares/cli/pkg/k3s/templates"
"github.com/beclab/Olares/cli/pkg/manifest"
"github.com/beclab/Olares/cli/pkg/registry"
"github.com/beclab/Olares/cli/pkg/storage"
)
type InstallContainerModule struct {
@@ -470,6 +471,18 @@ func (j *JoinNodesModule) Init() {
Parallel: true,
}
createSharedLibDirForWorker := &task.RemoteTask{
Name: "CreateSharedLibDir(k3s)",
Desc: "Create shared lib directory on worker",
Hosts: j.Runtime.GetHostsByRole(common.Worker),
Prepare: &prepare.PrepareCollection{
&kubernetes.NodeInCluster{Not: true},
new(common.OnlyWorker),
},
Action: new(storage.CreateSharedLibDir),
Parallel: true,
}
enableK3s := &task.RemoteTask{
Name: "EnableK3sService",
Desc: "Enable k3s service",
@@ -536,6 +549,7 @@ func (j *JoinNodesModule) Init() {
k3sService,
k3sEnv,
k3sRegistryConfig,
createSharedLibDirForWorker,
enableK3s,
copyKubeConfigForMaster,
syncKubeConfigToWorker,

View File

@@ -397,53 +397,23 @@ type CopyK3sKubeConfig struct {
}
func (c *CopyK3sKubeConfig) Execute(runtime connector.Runtime) error {
createConfigDirCmd := "mkdir -p /root/.kube && mkdir -p $HOME/.kube"
getKubeConfigCmd := "cp -f /etc/rancher/k3s/k3s.yaml /root/.kube/config"
chmodKubeConfigCmd := "chmod 0600 /root/.kube/config"
targetHome, targetUID, targetGID, err := utils.ResolveSudoUserHomeAndIDs(runtime)
if err != nil {
return err
}
cmd := strings.Join([]string{createConfigDirCmd, getKubeConfigCmd, chmodKubeConfigCmd}, " && ")
if _, err := runtime.GetRunner().SudoCmd(cmd, false, false); err != nil {
cmds := []string{
"mkdir -p /root/.kube",
"cp -f /etc/rancher/k3s/k3s.yaml /root/.kube/config",
"chmod 0600 /root/.kube/config",
fmt.Sprintf("mkdir -p %s", filepath.Join(targetHome, ".kube")),
fmt.Sprintf("cp -f /etc/rancher/k3s/k3s.yaml %s", filepath.Join(targetHome, ".kube", "config")),
fmt.Sprintf("chmod 0600 %s", filepath.Join(targetHome, ".kube", "config")),
fmt.Sprintf("chown -R %s:%s %s", targetUID, targetGID, filepath.Join(targetHome, ".kube")),
}
if _, err := runtime.GetRunner().SudoCmd(strings.Join(cmds, " && "), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "copy k3s kube config failed")
}
userMkdir := "mkdir -p $HOME/.kube"
if _, err := runtime.GetRunner().Cmd(userMkdir, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "user mkdir $HOME/.kube failed")
}
userCopyKubeConfig := "cp -f /etc/rancher/k3s/k3s.yaml $HOME/.kube/config"
if _, err := runtime.GetRunner().SudoCmd(userCopyKubeConfig, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "user copy /etc/rancher/k3s/k3s.yaml to $HOME/.kube/config failed")
}
if _, err := runtime.GetRunner().SudoCmd("chmod 0600 $HOME/.kube/config", false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "chmod k3s $HOME/.kube/config 0600 failed")
}
// userId, err := runtime.GetRunner().Cmd("echo $(id -u)", false, false)
// if err != nil {
// return errors.Wrap(errors.WithStack(err), "get user id failed")
// }
// userGroupId, err := runtime.GetRunner().Cmd("echo $(id -g)", false, false)
// if err != nil {
// return errors.Wrap(errors.WithStack(err), "get user group id failed")
// }
userId, err := runtime.GetRunner().Cmd("echo $SUDO_UID", false, false)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get user id failed")
}
userGroupId, err := runtime.GetRunner().Cmd("echo $SUDO_GID", false, false)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get user group id failed")
}
chownKubeConfig := fmt.Sprintf("chown -R %s:%s $HOME/.kube", userId, userGroupId)
if _, err := runtime.GetRunner().SudoCmd(chownKubeConfig, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "chown user kube config failed")
}
return nil
}
@@ -493,59 +463,29 @@ func (s *SyncKubeConfigToWorker) Execute(runtime connector.Runtime) error {
if v, ok := s.PipelineCache.Get(common.ClusterStatus); ok {
cluster := v.(*K3sStatus)
createConfigDirCmd := "mkdir -p /root/.kube"
if _, err := runtime.GetRunner().SudoCmd(createConfigDirCmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "create .kube dir failed")
}
oldServer := "server: https://127.0.0.1:6443"
newServer := fmt.Sprintf("server: https://%s:%d",
s.KubeConf.Cluster.ControlPlaneEndpoint.Domain,
s.KubeConf.Cluster.ControlPlaneEndpoint.Port)
newKubeConfig := strings.Replace(cluster.KubeConfig, oldServer, newServer, -1)
syncKubeConfigForRootCmd := fmt.Sprintf("echo '%s' > %s", newKubeConfig, "/root/.kube/config")
if _, err := runtime.GetRunner().SudoCmd(syncKubeConfigForRootCmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "sync kube config for root failed")
}
if _, err := runtime.GetRunner().SudoCmd("chmod 0600 /root/.kube/config", false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "chmod k3s $HOME/.kube/config failed")
}
userConfigDirCmd := "mkdir -p $HOME/.kube"
if _, err := runtime.GetRunner().Cmd(userConfigDirCmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "user mkdir $HOME/.kube failed")
}
syncKubeConfigForUserCmd := fmt.Sprintf("echo '%s' > %s", newKubeConfig, "$HOME/.kube/config")
if _, err := runtime.GetRunner().Cmd(syncKubeConfigForUserCmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "sync kube config for normal user failed")
}
// userId, err := runtime.GetRunner().Cmd("echo $(id -u)", false, false)
// if err != nil {
// return errors.Wrap(errors.WithStack(err), "get user id failed")
// }
// userGroupId, err := runtime.GetRunner().Cmd("echo $(id -g)", false, false)
// if err != nil {
// return errors.Wrap(errors.WithStack(err), "get user group id failed")
// }
userId, err := runtime.GetRunner().Cmd("echo $SUDO_UID", false, false)
targetHome, targetUID, targetGID, err := utils.ResolveSudoUserHomeAndIDs(runtime)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get user id failed")
return err
}
targetKubeConfigPath := filepath.Join(targetHome, ".kube", "config")
userGroupId, err := runtime.GetRunner().Cmd("echo $SUDO_GID", false, false)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get user group id failed")
cmds := []string{
"mkdir -p /root/.kube",
fmt.Sprintf("echo '%s' > %s", newKubeConfig, "/root/.kube/config"),
"chmod 0600 /root/.kube/config",
fmt.Sprintf("mkdir -p %s", filepath.Join(targetHome, ".kube")),
fmt.Sprintf("echo '%s' > %s", newKubeConfig, targetKubeConfigPath),
fmt.Sprintf("chmod 0600 %s", targetKubeConfigPath),
fmt.Sprintf("chown -R %s:%s %s", targetUID, targetGID, filepath.Join(targetHome, ".kube")),
}
chownKubeConfig := fmt.Sprintf("chown -R %s:%s -R $HOME/.kube", userId, userGroupId)
if _, err := runtime.GetRunner().SudoCmd(chownKubeConfig, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "chown user kube config failed")
if _, err := runtime.GetRunner().SudoCmd(strings.Join(cmds, " && "), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "sync kube config failed")
}
}
return nil

View File

@@ -23,6 +23,7 @@ import (
"github.com/beclab/Olares/cli/pkg/core/prepare"
"github.com/beclab/Olares/cli/pkg/core/task"
"github.com/beclab/Olares/cli/pkg/manifest"
"github.com/beclab/Olares/cli/pkg/storage"
)
type StatusModule struct {
@@ -243,6 +244,18 @@ func (j *JoinNodesModule) Init() {
Retry: 5,
}
createSharedLibDirForWorker := &task.RemoteTask{
Name: "CreateSharedLibDir(k8s)",
Desc: "Create shared lib directory on worker",
Hosts: j.Runtime.GetHostsByRole(common.Worker),
Prepare: &prepare.PrepareCollection{
&NodeInCluster{Not: true},
new(common.OnlyWorker),
},
Action: new(storage.CreateSharedLibDir),
Parallel: true,
}
joinWorkerNode := &task.RemoteTask{
Name: "JoinWorkerNode(k8s)",
Desc: "Join worker node",
@@ -323,6 +336,7 @@ func (j *JoinNodesModule) Init() {
j.Tasks = []task.Interface{
generateKubeadmConfig,
joinMasterNode,
createSharedLibDirForWorker,
joinWorkerNode,
copyKubeConfig,
removeMasterTaint,

View File

@@ -417,51 +417,23 @@ type CopyKubeConfigForControlPlane struct {
}
func (c *CopyKubeConfigForControlPlane) Execute(runtime connector.Runtime) error {
createConfigDirCmd := "mkdir -p /root/.kube"
getKubeConfigCmd := "cp -f /etc/kubernetes/admin.conf /root/.kube/config"
cmd := strings.Join([]string{createConfigDirCmd, getKubeConfigCmd}, " && ")
if _, err := runtime.GetRunner().SudoCmd(cmd, false, false); err != nil {
targetHome, targetUID, targetGID, err := utils.ResolveSudoUserHomeAndIDs(runtime)
if err != nil {
return err
}
cmds := []string{
"mkdir -p /root/.kube",
"cp -f /etc/kubernetes/admin.conf /root/.kube/config",
"chmod 0600 /root/.kube/config",
fmt.Sprintf("mkdir -p %s", filepath.Join(targetHome, ".kube")),
fmt.Sprintf("cp -f /etc/kubernetes/admin.conf %s", filepath.Join(targetHome, ".kube", "config")),
fmt.Sprintf("chmod 0600 %s", filepath.Join(targetHome, ".kube", "config")),
fmt.Sprintf("chown -R %s:%s %s", targetUID, targetGID, filepath.Join(targetHome, ".kube")),
}
if _, err := runtime.GetRunner().SudoCmd(strings.Join(cmds, " && "), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "copy kube config failed")
}
userMkdir := "mkdir -p $HOME/.kube"
if _, err := runtime.GetRunner().Cmd(userMkdir, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "user mkdir $HOME/.kube failed")
}
userCopyKubeConfig := "cp -f /etc/kubernetes/admin.conf $HOME/.kube/config"
if _, err := runtime.GetRunner().SudoCmd(userCopyKubeConfig, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "user copy /etc/kubernetes/admin.conf to $HOME/.kube/config failed")
}
if _, err := runtime.GetRunner().SudoCmd("chmod 0600 $HOME/.kube/config", false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "chmod $HOME/.kube/config failed")
}
// userId, err := runtime.GetRunner().Cmd("echo $(id -u)", false, false)
// if err != nil {
// return errors.Wrap(errors.WithStack(err), "get user id failed")
// }
// userGroupId, err := runtime.GetRunner().Cmd("echo $(id -g)", false, false)
// if err != nil {
// return errors.Wrap(errors.WithStack(err), "get user group id failed")
// }
userId, err := runtime.GetRunner().Cmd("echo $SUDO_UID", false, false)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get user id failed")
}
userGroupId, err := runtime.GetRunner().Cmd("echo $SUDO_GID", false, false)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get user group id failed")
}
chownKubeConfig := fmt.Sprintf("chown -R %s:%s $HOME/.kube", userId, userGroupId)
if _, err := runtime.GetRunner().SudoCmd(chownKubeConfig, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "chown user kube config failed")
}
return nil
}
@@ -521,53 +493,23 @@ func (s *SyncKubeConfigToWorker) Execute(runtime connector.Runtime) error {
if v, ok := s.PipelineCache.Get(common.ClusterStatus); ok {
cluster := v.(*KubernetesStatus)
createConfigDirCmd := "mkdir -p /root/.kube"
if _, err := runtime.GetRunner().SudoCmd(createConfigDirCmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "create .kube dir failed")
}
syncKubeConfigForRootCmd := fmt.Sprintf("echo '%s' > %s", cluster.KubeConfig, "/root/.kube/config")
if _, err := runtime.GetRunner().SudoCmd(syncKubeConfigForRootCmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "sync kube config for root failed")
}
if _, err := runtime.GetRunner().SudoCmd("chmod 0600 /root/.kube/config", false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "chmod $HOME/.kube/config failed")
}
userConfigDirCmd := "mkdir -p $HOME/.kube"
if _, err := runtime.GetRunner().Cmd(userConfigDirCmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "user mkdir $HOME/.kube failed")
}
syncKubeConfigForUserCmd := fmt.Sprintf("echo '%s' > %s", cluster.KubeConfig, "$HOME/.kube/config")
if _, err := runtime.GetRunner().Cmd(syncKubeConfigForUserCmd, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "sync kube config for normal user failed")
}
// userId, err := runtime.GetRunner().Cmd("echo $(id -u)", false, false)
// if err != nil {
// return errors.Wrap(errors.WithStack(err), "get user id failed")
// }
// userGroupId, err := runtime.GetRunner().Cmd("echo $(id -g)", false, false)
// if err != nil {
// return errors.Wrap(errors.WithStack(err), "get user group id failed")
// }
userId, err := runtime.GetRunner().Cmd("echo $SUDO_UID", false, false)
targetHome, targetUID, targetGID, err := utils.ResolveSudoUserHomeAndIDs(runtime)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get user id failed")
return err
}
targetKubeConfigPath := filepath.Join(targetHome, ".kube", "config")
userGroupId, err := runtime.GetRunner().Cmd("echo $SUDO_GID", false, false)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get user group id failed")
cmds := []string{
"mkdir -p /root/.kube",
fmt.Sprintf("echo '%s' > %s", cluster.KubeConfig, "/root/.kube/config"),
"chmod 0600 /root/.kube/config",
fmt.Sprintf("mkdir -p %s", filepath.Join(targetHome, ".kube")),
fmt.Sprintf("echo '%s' > %s", cluster.KubeConfig, targetKubeConfigPath),
fmt.Sprintf("chmod 0600 %s", targetKubeConfigPath),
fmt.Sprintf("chown -R %s:%s %s", targetUID, targetGID, filepath.Join(targetHome, ".kube")),
}
chownKubeConfig := fmt.Sprintf("chown -R %s:%s -R $HOME/.kube", userId, userGroupId)
if _, err := runtime.GetRunner().SudoCmd(chownKubeConfig, false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "chown user kube config failed")
if _, err := runtime.GetRunner().SudoCmd(strings.Join(cmds, " && "), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "sync kube config failed")
}
}
return nil

View File

@@ -52,7 +52,7 @@ func (t *CreateMiniKubeCluster) Execute(runtime connector.Runtime) error {
}
}
logger.Infof("creating minikube cluster %s ...", t.KubeConf.Arg.MinikubeProfile)
cmd = fmt.Sprintf("%s start -p '%s' --kubernetes-version=v1.33.3 --container-runtime=containerd --network-plugin=cni --cni=calico --cpus='4' --memory='8g' --ports=30180:30180,443:443,80:80", minikube, t.KubeConf.Arg.MinikubeProfile)
cmd = fmt.Sprintf("%s start -p '%s' --extra-config=apiserver.service-node-port-range=445-32767 --kubernetes-version=v1.33.3 --container-runtime=containerd --network-plugin=cni --cni=calico --cpus='4' --memory='8g' --ports=30180:30180,443:443,80:80", minikube, t.KubeConf.Arg.MinikubeProfile)
if _, err := runtime.GetRunner().Cmd(cmd, false, true); err != nil {
return errors.Wrap(err, "failed to create minikube cluster")
}

View File

@@ -396,3 +396,17 @@ func (t *DeleteTerminusData) Execute(runtime connector.Runtime) error {
return nil
}
// CreateSharedLibDir is a KubeAction that ensures the Olares shared lib
// directory exists on the node and is owned by uid/gid 1000 (the olares user).
type CreateSharedLibDir struct {
	common.KubeAction
}

// Execute creates OlaresSharedLibDir with the expected ownership.
// Darwin hosts are skipped — the shared lib directory is Linux-only.
func (t *CreateSharedLibDir) Execute(runtime connector.Runtime) error {
	if runtime.GetSystemInfo().IsDarwin() {
		return nil
	}
	dir := OlaresSharedLibDir
	cmd := "mkdir -p " + dir + " && chown 1000:1000 " + dir
	_, err := runtime.GetRunner().SudoCmd(cmd, false, false)
	if err != nil {
		return errors.Wrap(errors.WithStack(err), "failed to create shared lib dir")
	}
	return nil
}

View File

@@ -38,12 +38,6 @@ type InstallOsSystem struct {
}
func (t *InstallOsSystem) Execute(runtime connector.Runtime) error {
if !runtime.GetSystemInfo().IsDarwin() {
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("mkdir -p %s && chown 1000:1000 %s", storage.OlaresSharedLibDir, storage.OlaresSharedLibDir), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), "failed to create shared lib dir")
}
}
config, err := ctrl.GetConfig()
if err != nil {
return err
@@ -367,6 +361,11 @@ func (m *InstallOsSystemModule) Init() {
Action: &CreateUserEnvConfigMap{},
}
createSharedLibDir := &task.LocalTask{
Name: "CreateSharedLibDir",
Action: &storage.CreateSharedLibDir{},
}
installOsSystem := &task.LocalTask{
Name: "InstallOsSystem",
Action: &InstallOsSystem{},
@@ -399,6 +398,7 @@ func (m *InstallOsSystemModule) Init() {
m.Tasks = []task.Interface{
applySystemEnv,
createUserEnvConfigMap,
createSharedLibDir,
installOsSystem,
createBackupConfigMap,
checkSystemService,

View File

@@ -0,0 +1,32 @@
package upgrade
import (
"time"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader_1_12_5_20260122 is a daily breaking upgrader that prepends an
// l4-bfl-proxy upgrade (to v0.3.10) ahead of the base system-component tasks.
type upgrader_1_12_5_20260122 struct {
	breakingUpgraderBase
}

// Version reports the release this upgrader targets.
// NOTE(review): the type name says 1_12_5 but the parsed version is
// "1.12.3-20260122" — confirm which of the two is intended; a mismatch here
// would register the upgrader under the wrong release.
func (u upgrader_1_12_5_20260122) Version() *semver.Version {
	return semver.MustParse("1.12.3-20260122")
}

// UpgradeSystemComponents runs the l4-bfl-proxy upgrade first (with retries),
// then the inherited base component upgrade tasks.
func (u upgrader_1_12_5_20260122) UpgradeSystemComponents() []task.Interface {
	pre := []task.Interface{
		&task.LocalTask{
			Name:   "UpgradeL4BFLProxy",
			Action: &upgradeL4BFLProxy{Tag: "v0.3.10"},
			Retry:  3,
			Delay:  5 * time.Second,
		},
	}
	// upgraderBase is reached through the embedded breakingUpgraderBase.
	return append(pre, u.upgraderBase.UpgradeSystemComponents()...)
}

func init() {
	registerDailyUpgrader(upgrader_1_12_5_20260122{})
}

View File

@@ -321,3 +321,54 @@ func GetBufIOReaderOfTerminalInput() (*bufio.Reader, error) {
}
return bufio.NewReader(tty), nil
}
// ResolveSudoUserHomeAndIDs resolves the home directory, uid, and gid for the user
// who invoked sudo. If not running under sudo, it falls back to the current user.
// This is useful for commands that need to operate on the invoking user's home
// directory rather than /root when running with sudo.
//
// Resolution order:
//  1. uid/gid from $SUDO_UID / $SUDO_GID (empty when not under sudo);
//  2. if uid is empty, fall back to `id -u` / `id -g` of the current user
//     (assumes SUDO_UID and SUDO_GID are set together, as sudo does —
//     if only SUDO_GID were missing, gid would stay empty; TODO confirm);
//  3. home from `getent passwd <uid>`, then a direct /etc/passwd scan,
//     then $HOME as a last resort.
//
// All lookups run through the runtime's remote runner, so they reflect the
// target host, not the local machine. An error is returned only when a
// required command fails or no home directory can be determined.
func ResolveSudoUserHomeAndIDs(runtime connector.Runtime) (home, uid, gid string, err error) {
	// ${SUDO_UID:-} expands to "" instead of failing when the var is unset.
	uid, err = runtime.GetRunner().Cmd("echo ${SUDO_UID:-}", false, false)
	if err != nil {
		return "", "", "", errors.Wrap(errors.WithStack(err), "get SUDO_UID failed")
	}
	gid, err = runtime.GetRunner().Cmd("echo ${SUDO_GID:-}", false, false)
	if err != nil {
		return "", "", "", errors.Wrap(errors.WithStack(err), "get SUDO_GID failed")
	}
	uid = strings.TrimSpace(uid)
	gid = strings.TrimSpace(gid)
	if uid == "" {
		// Not invoked via sudo: use the effective user's ids.
		uid, err = runtime.GetRunner().Cmd("id -u", false, false)
		if err != nil {
			return "", "", "", errors.Wrap(errors.WithStack(err), "get current uid failed")
		}
		gid, err = runtime.GetRunner().Cmd("id -g", false, false)
		if err != nil {
			return "", "", "", errors.Wrap(errors.WithStack(err), "get current gid failed")
		}
		uid = strings.TrimSpace(uid)
		gid = strings.TrimSpace(gid)
	}
	// Preferred: getent consults NSS (covers LDAP etc., not just /etc/passwd).
	// A getent failure is non-fatal — fall through to the next source.
	home, err = runtime.GetRunner().Cmd(fmt.Sprintf(`getent passwd %s | awk -F: 'NR==1{print $6; exit}'`, uid), false, false)
	if err != nil {
		home = ""
	}
	home = strings.TrimSpace(home)
	if home == "" {
		// Fallback: read /etc/passwd directly (hosts without getent).
		home, _ = runtime.GetRunner().Cmd(fmt.Sprintf(`awk -F: -v uid=%s '$3==uid {print $6; exit}' /etc/passwd 2>/dev/null`, uid), false, false)
		home = strings.TrimSpace(home)
	}
	if home == "" {
		// Last resort: whatever $HOME the runner's shell reports.
		home, err = runtime.GetRunner().Cmd("echo $HOME", false, false)
		if err != nil {
			return "", "", "", errors.Wrap(errors.WithStack(err), "get HOME failed")
		}
		home = strings.TrimSpace(home)
	}
	if home == "" {
		return "", "", "", errors.New("resolve user home failed")
	}
	return home, uid, gid, nil
}

View File

@@ -23,7 +23,7 @@ require (
github.com/containerd/containerd v1.7.29
github.com/distribution/distribution/v3 v3.0.0
github.com/dustin/go-humanize v1.0.1
github.com/eball/zeroconf v0.2.4
github.com/eball/zeroconf v0.2.5
github.com/godbus/dbus/v5 v5.1.0
github.com/gofiber/fiber/v2 v2.52.9
github.com/google/gopacket v1.1.19

View File

@@ -87,8 +87,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eball/echo/v4 v4.13.4-patch h1:5w83KQrEqrxhc1BO0BpRBHssC37vFrWualUM27Rt2sg=
github.com/eball/echo/v4 v4.13.4-patch/go.mod h1:ORgy8LWTq8knpwgaz538rAJMri7WgpoAD6H3zYccn84=
github.com/eball/zeroconf v0.2.4 h1:S5nUHLu2zhpA8YuR/Ue/vXPiY6ynPECkpDXjYV+Ckj4=
github.com/eball/zeroconf v0.2.4/go.mod h1:eIbIjGYo9sSMaKWLcveHEPRWdyblz7q9ih2R1HnNw5M=
github.com/eball/zeroconf v0.2.5 h1:RNINVvj8kbm/r4YoqYu/jWD57l5NJmvRUCfbjlIsbJg=
github.com/eball/zeroconf v0.2.5/go.mod h1:eIbIjGYo9sSMaKWLcveHEPRWdyblz7q9ih2R1HnNw5M=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=

View File

@@ -477,7 +477,7 @@ func (d *DSRProxy) regonfigure() error {
klog.Infof("Calico interface: %s", d.calicoInterface.Name)
var err error
d.pcapHandle, err = pcap.OpenLive(d.vipInterface.Name, 65536, false, pcap.BlockForever)
d.pcapHandle, err = pcap.OpenLive(d.vipInterface.Name, 65536, false, time.Millisecond)
if err != nil {
klog.Error("pcap openlive failed:", err)
return err

View File

@@ -84,8 +84,15 @@ func (p *proxyServer) Start() error {
clientIp = h
}
}
if c.IsWebSocket() {
ctx = context.WithValue(ctx, WSKey, true)
swp := c.Request().Header.Get("Sec-WebSocket-Protocol")
authToken := c.Request().Header.Get("X-Authorization")
if len(authToken) == 0 && len(swp) > 0 {
// handle missing auth token for websocket
c.Request().Header.Set("X-Authorization", swp)
}
}
r := c.Request().WithContext(ctx)
if clientIp != "" {
@@ -243,7 +250,7 @@ func (p *proxyServer) customDialContext(d *net.Dialer) func(ctx context.Context,
}
if isWs {
klog.Info("WebSocket connection detected, using upgraded dialer")
klog.Info("WebSocket connection detected, using upgraded dialer, ", addr)
return tlsDial(ctx, d, func(ctx context.Context, network, addr string) (net.Conn, error) {
return proxyDial(ctx, d, network, newAddr)
}, network, addr, &tls.Config{InsecureSkipVerify: true})

View File

@@ -99,7 +99,7 @@ func (s *server) Restart() error {
instanceName = hostname
}
s.server, err = zeroconf.Register(instanceName, s.serviceName, "local.", hostname, s.port, []string{""}, []net.Interface{*iface})
s.server, err = zeroconf.RegisterAll(instanceName, s.serviceName, "local.", hostname, s.port, []string{""}, []net.Interface{*iface}, false, false, false)
if err != nil {
klog.Error("create mdns server error, ", err)
return err

View File

@@ -193,6 +193,7 @@ func (r *ApplicationManagerController) publishStateChangeEvent(am *appv1alpha1.A
RawAppName: am.Spec.RawAppName,
Type: am.Spec.Type.String(),
Title: apputils.AppTitle(am.Spec.Config),
Icon: apputils.AppIcon(am.Spec.Config),
Reason: am.Status.Reason,
Message: am.Status.Message,
})

View File

@@ -252,6 +252,7 @@ func (r *EntranceStatusManagerController) updateEntranceStatus(ctx context.Conte
RawAppName: appCopy.Spec.RawAppName,
Type: am.Spec.Type.String(),
Title: app.AppTitle(am.Spec.Config),
Icon: app.AppIcon(am.Spec.Config),
SharedEntrances: appCopy.Spec.SharedEntrances,
})
}

View File

@@ -13,6 +13,7 @@ const (
AppMarketSourceKey = constants.AppMarketSourceKey
AppInstallSourceKey = "bytetrade.io/install-source"
AppUninstallAllKey = "bytetrade.io/uninstall-all"
AppDeleteDataKey = "bytetrade.io/delete-data"
AppStopAllKey = "bytetrade.io/stop-all"
AppResumeAllKey = "bytetrade.io/resume-all"
AppImagesKey = "bytetrade.io/images"
@@ -144,7 +145,8 @@ type Image struct {
// UninstallRequest represents a request to uninstall an application.
type UninstallRequest struct {
All bool `json:"all"`
All bool `json:"all"`
DeleteData bool `json:"deleteData"`
}
// StopRequest represents a request to stop an application.

View File

@@ -426,6 +426,11 @@ func (h *Handler) apps(req *restful.Request, resp *restful.Response) {
api.HandleError(resp, req, err)
return
}
for i := range appconfig.Entrances {
if appconfig.Entrances[i].AuthLevel == "" {
appconfig.Entrances[i].AuthLevel = "private"
}
}
now := metav1.Now()
name, _ := apputils.FmtAppMgrName(am.Spec.AppName, owner, appconfig.Namespace)
app := &v1alpha1.Application{
@@ -443,6 +448,7 @@ func (h *Handler) apps(req *restful.Request, resp *restful.Response) {
Owner: owner,
Entrances: appconfig.Entrances,
SharedEntrances: appconfig.SharedEntrances,
Ports: appconfig.Ports,
Icon: appconfig.Icon,
Settings: map[string]string{
"title": am.Annotations[constants.ApplicationTitleLabel],
@@ -477,6 +483,8 @@ func (h *Handler) apps(req *restful.Request, resp *restful.Response) {
}
if v, ok := appsMap[a.Name]; ok {
v.Spec.Settings = a.Spec.Settings
v.Spec.Entrances = a.Spec.Entrances
v.Spec.Ports = a.Spec.Ports
}
}
}
@@ -738,6 +746,11 @@ func (h *Handler) allUsersApps(req *restful.Request, resp *restful.Response) {
api.HandleError(resp, req, err)
return
}
for i := range appconfig.Entrances {
if appconfig.Entrances[i].AuthLevel == "" {
appconfig.Entrances[i].AuthLevel = "private"
}
}
now := metav1.Now()
app := v1alpha1.Application{
@@ -754,6 +767,7 @@ func (h *Handler) allUsersApps(req *restful.Request, resp *restful.Response) {
Namespace: am.Spec.AppNamespace,
Owner: am.Spec.AppOwner,
Entrances: appconfig.Entrances,
Ports: appconfig.Ports,
SharedEntrances: appconfig.SharedEntrances,
Icon: appconfig.Icon,
Settings: map[string]string{
@@ -788,6 +802,8 @@ func (h *Handler) allUsersApps(req *restful.Request, resp *restful.Response) {
}
if v, ok := appsMap[a.Name]; ok {
v.Spec.Settings = a.Spec.Settings
v.Spec.Entrances = a.Spec.Entrances
v.Spec.Ports = a.Spec.Ports
}
}

View File

@@ -77,6 +77,7 @@ func (h *Handler) uninstall(req *restful.Request, resp *restful.Response) {
}
am.Annotations[api.AppTokenKey] = token
am.Annotations[api.AppUninstallAllKey] = fmt.Sprintf("%t", request.All)
am.Annotations[api.AppDeleteDataKey] = fmt.Sprintf("%t", request.DeleteData)
err = h.ctrlClient.Update(req.Request.Context(), &am)
if err != nil {
api.HandleError(resp, req, err)

View File

@@ -187,6 +187,11 @@ func (h *Handler) listBackend(req *restful.Request, resp *restful.Response) {
api.HandleError(resp, req, err)
return
}
for i := range appconfig.Entrances {
if appconfig.Entrances[i].AuthLevel == "" {
appconfig.Entrances[i].AuthLevel = "private"
}
}
appconfig.SharedEntrances, err = appconfig.GenSharedEntranceURL(req.Request.Context())
if err != nil {
@@ -214,6 +219,7 @@ func (h *Handler) listBackend(req *restful.Request, resp *restful.Response) {
Namespace: am.Spec.AppNamespace,
Owner: am.Spec.AppOwner,
Entrances: appconfig.Entrances,
Ports: appconfig.Ports,
SharedEntrances: appconfig.SharedEntrances,
Icon: appconfig.Icon,
Settings: map[string]string{
@@ -274,6 +280,8 @@ func (h *Handler) listBackend(req *restful.Request, resp *restful.Response) {
}
if v, ok := appsMap[a.Name]; ok {
v.Spec.Settings = a.Spec.Settings
v.Spec.Entrances = a.Spec.Entrances
v.Spec.Ports = a.Spec.Ports
}
}
}

View File

@@ -29,8 +29,14 @@ func (h *HelmOps) UninstallAll() error {
if err != nil {
return err
}
appName := fmt.Sprintf("%s-%s", h.app.Namespace, h.app.AppName)
appmgr, err := h.client.AppClient.AppV1alpha1().ApplicationManagers().Get(h.ctx, appName, metav1.GetOptions{})
if err != nil {
return err
}
deleteData := appmgr.Annotations["bytetrade.io/delete-data"] == "true"
appCacheDirs, err := apputils.TryToGetAppdataDirFromDeployment(h.ctx, h.app.Namespace, h.app.AppName, h.app.OwnerName)
appCacheDirs, appDataDirs, err := apputils.TryToGetAppdataDirFromDeployment(h.ctx, h.app.Namespace, h.app.AppName, h.app.OwnerName, deleteData)
if err != nil {
klog.Warningf("get app %s cache dir failed %v", h.app.AppName, err)
}
@@ -48,6 +54,13 @@ func (h *HelmOps) UninstallAll() error {
klog.Errorf("Failed to clear app cache dirs %v err=%v", appCacheDirs, err)
return err
}
if deleteData {
h.ClearData(client, appDataDirs)
if err != nil {
klog.Errorf("Failed to clear app data dirs %v err=%v", appDataDirs, err)
return err
}
}
err = h.DeleteNamespace(client, h.app.Namespace)
if err != nil {
@@ -117,7 +130,7 @@ func (h *HelmOps) ClearCache(client kubernetes.Interface, appCacheDirs []string)
formattedAppCacheDirs := apputils.FormatCacheDirs(appCacheDirs)
for _, n := range nodes.Items {
URL := fmt.Sprintf(constants.AppDataDirURL, n.Name)
URL := fmt.Sprintf(constants.AppCacheDirURL, n.Name)
c.SetHeader("X-Terminus-Node", n.Name)
c.SetHeader("X-Bfl-User", h.app.OwnerName)
res, e := c.R().SetBody(map[string]interface{}{
@@ -137,6 +150,32 @@ func (h *HelmOps) ClearCache(client kubernetes.Interface, appCacheDirs []string)
return nil
}
func (h *HelmOps) ClearData(client kubernetes.Interface, appDataDirs []string) error {
if len(appDataDirs) > 0 {
klog.Infof("clear app data dirs: %v", appDataDirs)
c := resty.New().SetTimeout(2 * time.Second).
SetAuthToken(h.token)
formattedAppDataDirs := apputils.FormatCacheDirs(appDataDirs)
URL := constants.AppDataDirURL
c.SetHeader("X-Bfl-User", h.app.OwnerName)
res, e := c.R().SetBody(map[string]interface{}{
"dirents": formattedAppDataDirs,
}).Delete(URL)
if e != nil {
klog.Errorf("Failed to delete data dir err=%v", e)
return nil
}
if res.StatusCode() != http.StatusOK {
klog.Infof("delete app data failed with: %v", res.String())
}
}
return nil
}
func (h *HelmOps) ClearMiddlewareRequests(middlewareNamespace string) {
// delete middleware requests crd
for _, mt := range middlewareTypes {

View File

@@ -91,7 +91,8 @@ const (
DependencyTypeSystem = "system"
DependencyTypeApp = "application"
AppDataDirURL = "http://files-service.os-framework/api/resources/cache/%s/"
AppCacheDirURL = "http://files-service.os-framework/api/resources/cache/%s/"
AppDataDirURL = "http://files-service.os-framework/api/resources/drive/Data/"
UserSpaceDirKey = "userspace_hostpath"
UserAppDataDirKey = "appcache_hostpath"

View File

@@ -160,6 +160,7 @@ func PublishAppEventToQueue(p utils.EventParams) {
return p.RawAppName
}(),
Title: p.Title,
Icon: p.Icon,
Reason: p.Reason,
Message: p.Message,
SharedEntrances: p.SharedEntrances,

View File

@@ -233,6 +233,7 @@ func (imc *ImageManagerClient) updateProgress(ctx context.Context, am *appv1alph
RawAppName: am.Spec.RawAppName,
Type: am.Spec.Type.String(),
Title: apputils.AppTitle(am.Spec.Config),
Icon: apputils.AppIcon(am.Spec.Config),
})
}
klog.Infof("app %s download progress.... %v", am.Spec.AppName, progressStr)

View File

@@ -21,7 +21,6 @@ import (
"github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/appcfg"
"github.com/beclab/Olares/framework/app-service/pkg/constants"
"github.com/beclab/Olares/framework/app-service/pkg/generated/clientset/versioned"
"github.com/beclab/Olares/framework/app-service/pkg/users/userspace"
"github.com/beclab/Olares/framework/app-service/pkg/utils"
"github.com/beclab/Olares/framework/app-service/pkg/utils/files"
@@ -553,7 +552,7 @@ func parseDestination(dest string) (string, string, error) {
return alias, tokens[len(tokens)-1], nil
}
func TryToGetAppdataDirFromDeployment(ctx context.Context, namespace, name, owner string) (appdirs []string, err error) {
func TryToGetAppdataDirFromDeployment(ctx context.Context, namespace, name, owner string, appData bool) (appCacheDirs []string, appDataDirs []string, err error) {
userspaceNs := utils.UserspaceName(owner)
config, err := ctrl.GetConfig()
if err != nil {
@@ -567,7 +566,6 @@ func TryToGetAppdataDirFromDeployment(ctx context.Context, namespace, name, owne
if err != nil {
return
}
appName := fmt.Sprintf("%s-%s", namespace, name)
appCachePath := sts.GetAnnotations()["appcache_hostpath"]
if len(appCachePath) == 0 {
err = errors.New("empty appcache_hostpath")
@@ -576,20 +574,23 @@ func TryToGetAppdataDirFromDeployment(ctx context.Context, namespace, name, owne
if !strings.HasSuffix(appCachePath, "/") {
appCachePath += "/"
}
dClient, err := versioned.NewForConfig(config)
if err != nil {
userspacePath := sts.GetAnnotations()["userspace_hostpath"]
if len(userspacePath) == 0 {
err = errors.New("empty userspace_hostpath annotation")
return
}
appCRD, err := dClient.AppV1alpha1().Applications().Get(ctx, appName, metav1.GetOptions{})
if err != nil {
return
appDataPath := filepath.Join(userspacePath, "Data")
if !strings.HasSuffix(appDataPath, "/") {
appDataPath += "/"
}
deploymentName := appCRD.Spec.DeploymentName
deploymentName := name
deployment, err := clientset.AppsV1().Deployments(namespace).
Get(context.Background(), deploymentName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return tryToGetAppdataDirFromSts(ctx, namespace, deploymentName, appCachePath)
return tryToGetAppdataDirFromSts(ctx, namespace, deploymentName, appCachePath, appDataPath)
}
return
}
@@ -601,15 +602,31 @@ func TryToGetAppdataDirFromDeployment(ctx context.Context, namespace, name, owne
if appDirSet.Has(appDir) {
continue
}
appdirs = append(appdirs, appDir)
appCacheDirs = append(appCacheDirs, appDir)
appDirSet.Insert(appDir)
}
}
}
return appdirs, nil
if appData {
appDirSet := sets.NewString()
for _, v := range deployment.Spec.Template.Spec.Volumes {
if v.HostPath != nil && strings.HasPrefix(v.HostPath.Path, appDataPath) && len(v.HostPath.Path) > len(appDataPath) {
appDir := GetFirstSubDir(v.HostPath.Path, appDataPath)
if appDir != "" {
if appDirSet.Has(appDir) {
continue
}
appDataDirs = append(appDataDirs, appDir)
appDirSet.Insert(appDir)
}
}
}
}
return appCacheDirs, appDataDirs, nil
}
func tryToGetAppdataDirFromSts(ctx context.Context, namespace, stsName, baseDir string) (appdirs []string, err error) {
func tryToGetAppdataDirFromSts(ctx context.Context, namespace, stsName, appCacheDir, appDataDir string) (appCacheDirs []string, appDataDirs []string, err error) {
config, err := ctrl.GetConfig()
if err != nil {
return
@@ -626,18 +643,32 @@ func tryToGetAppdataDirFromSts(ctx context.Context, namespace, stsName, baseDir
}
appDirSet := sets.NewString()
for _, v := range sts.Spec.Template.Spec.Volumes {
if v.HostPath != nil && strings.HasPrefix(v.HostPath.Path, baseDir) && len(v.HostPath.Path) > len(baseDir) {
appDir := GetFirstSubDir(v.HostPath.Path, baseDir)
if v.HostPath != nil && strings.HasPrefix(v.HostPath.Path, appCacheDir) && len(v.HostPath.Path) > len(appCacheDir) {
appDir := GetFirstSubDir(v.HostPath.Path, appCacheDir)
if appDir != "" {
if appDirSet.Has(appDir) {
continue
}
appdirs = append(appdirs, appDir)
appCacheDirs = append(appCacheDirs, appDir)
appDirSet.Insert(appDir)
}
}
}
return appdirs, nil
appDirSet = sets.NewString()
for _, v := range sts.Spec.Template.Spec.Volumes {
if v.HostPath != nil && strings.HasPrefix(v.HostPath.Path, appDataDir) && len(v.HostPath.Path) > len(appDataDir) {
appDir := GetFirstSubDir(v.HostPath.Path, appDataDir)
if appDir != "" {
if appDirSet.Has(appDir) {
continue
}
appDataDirs = append(appDataDirs, appDir)
appDirSet.Insert(appDir)
}
}
}
return appCacheDirs, appDataDirs, nil
}
func GetFirstSubDir(fullPath, basePath string) string {
@@ -1080,13 +1111,28 @@ func IsClonedApp(appName, rawAppName string) bool {
}
func AppTitle(config string) string {
var cfg appcfg.ApplicationConfig
err := json.Unmarshal([]byte(config), &cfg)
if err != nil {
cfg := unmarshalApplicationConfig(config)
if cfg == nil {
return ""
}
return cfg.Title
}
func AppIcon(config string) string {
cfg := unmarshalApplicationConfig(config)
if cfg == nil {
return ""
}
return cfg.Icon
}
func unmarshalApplicationConfig(config string) *appcfg.ApplicationConfig {
var cfg appcfg.ApplicationConfig
err := json.Unmarshal([]byte(config), &cfg)
if err != nil {
return nil
}
return &cfg
}
func GetRawAppName(AppName, rawAppName string) string {
if rawAppName == "" {

View File

@@ -25,6 +25,7 @@ type Event struct {
User string `json:"user"`
EntranceStatuses []v1alpha1.EntranceStatus `json:"entranceStatuses,omitempty"`
Title string `json:"title,omitempty"`
Icon string `json:"icon,omitempty"`
Reason string `json:"reason,omitempty"`
Message string `json:"message,omitempty"`
SharedEntrances []v1alpha1.Entrance `json:"sharedEntrances,omitempty"`
@@ -45,6 +46,7 @@ type EventParams struct {
Reason string
Message string
SharedEntrances []v1alpha1.Entrance
Icon string
}
func PublishEvent(nc *nats.Conn, subject string, data interface{}) error {

View File

@@ -431,7 +431,7 @@ spec:
privileged: true
containers:
- name: authelia
image: beclab/auth:0.2.44
image: beclab/auth:0.2.46
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9091

View File

@@ -29,7 +29,7 @@ spec:
name: check-auth
containers:
- name: auth-front
image: beclab/login:v1.6.38
image: beclab/login:v1.8.5
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80

View File

@@ -266,7 +266,7 @@ spec:
containers:
- name: api
image: beclab/bfl:v0.4.38
image: beclab/bfl:v0.4.40
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 1000
@@ -304,7 +304,7 @@ spec:
- name: BACKUP_SERVER
value: backup-server.os-framework:8082
- name: L4_PROXY_IMAGE_VERSION
value: v0.3.9
value: v0.3.10
- name: L4_PROXY_SERVICE_ACCOUNT
value: os-network-internal
- name: L4_PROXY_NAMESPACE

View File

@@ -281,7 +281,7 @@ func (h *Handler) handleTerminusInfo(req *restful.Request, resp *restful.Respons
tInfo.TailScaleEnable = denyAll == 1
}
tInfo.LoginBackground = userOp.GetLoginBackground(user)
tInfo.LoginBackground, tInfo.Style = userOp.GetLoginBackground(user)
tInfo.Avatar = userOp.GetAvatar(user)
tInfo.UserDID = userOp.GetUserDID(user)
@@ -347,7 +347,7 @@ func (h *Handler) handleOlaresInfo(req *restful.Request, resp *restful.Response)
tInfo.TailScaleEnable = denyAll == 1
}
tInfo.LoginBackground = userOp.GetLoginBackground(user)
tInfo.LoginBackground, tInfo.Style = userOp.GetLoginBackground(user)
tInfo.Avatar = userOp.GetAvatar(user)
tInfo.UserDID = userOp.GetUserDID(user)

View File

@@ -40,6 +40,7 @@ type TerminusInfo struct {
UserDID string `json:"did"`
ReverseProxy string `json:"reverseProxy"`
Terminusd string `json:"terminusd"`
Style string `json:"style"`
}
type OlaresInfo struct {
@@ -53,6 +54,7 @@ type OlaresInfo struct {
ID string `json:"id"`
UserDID string `json:"did"`
Olaresd string `json:"olaresd"`
Style string `json:"style"`
}
type MyAppsParam struct {

View File

@@ -127,13 +127,18 @@ func (o *UserOperator) GetReverseProxyType() (string, error) {
return o.GetUserAnnotation(user, constants.UserAnnotationReverseProxyType), nil
}
func (o *UserOperator) GetLoginBackground(user *iamV1alpha2.User) string {
func (o *UserOperator) GetLoginBackground(user *iamV1alpha2.User) (string, string) {
b := o.GetUserAnnotation(user, constants.UserLoginBackground)
s := o.GetUserAnnotation(user, constants.UserLoginBackgroundStyle)
if b == "" {
return "/bg/0.jpg"
b = "/bg/0.jpg"
}
return b
if s == "" {
s = "fill"
}
return b, s
}
func (o *UserOperator) GetAvatar(user *iamV1alpha2.User) string {

View File

@@ -612,6 +612,7 @@ func (h *Handler) handleUpdateLocale(req *restful.Request, resp *restful.Respons
func (h *Handler) handlerUpdateUserLoginBackground(req *restful.Request, resp *restful.Response) {
var background struct {
Background string `json:"background"`
Style string `json:"style"`
}
err := req.ReadEntity(&background)
@@ -636,6 +637,7 @@ func (h *Handler) handlerUpdateUserLoginBackground(req *restful.Request, resp *r
err = userOp.UpdateUser(user, []func(*iamV1alpha2.User){
func(u *iamV1alpha2.User) {
u.Annotations[constants.UserLoginBackground] = background.Background
u.Annotations[constants.UserLoginBackgroundStyle] = background.Style
},
})

View File

@@ -240,6 +240,7 @@ func (c *Client) getAppListFromData(apps []map[string]interface{}) ([]*AppInfo,
res = append(res, &AppInfo{
ID: genAppID(appSpec),
Name: stringOrEmpty(appSpec["name"]),
RawAppName: stringOrEmpty(appSpec["rawAppName"]),
Namespace: stringOrEmpty(appSpec["namespace"]),
DeploymentName: stringOrEmpty(appSpec["deployment"]),
Owner: stringOrEmpty(appSpec["owner"]),

View File

@@ -11,6 +11,7 @@ import (
type AppInfo struct {
ID string `json:"id"`
Name string `json:"name"`
RawAppName string `json:"rawAppName"`
Namespace string `json:"namespace"`
DeploymentName string `json:"deployment"`
Owner string `json:"owner"`

View File

@@ -175,7 +175,8 @@ var (
UserAvatar = fmt.Sprintf("%s/avatar", AnnotationGroup)
UserLoginBackground = fmt.Sprintf("%s/login-background", AnnotationGroup)
UserLoginBackground = fmt.Sprintf("%s/login-background", AnnotationGroup)
UserLoginBackgroundStyle = fmt.Sprintf("%s/login-background-style", AnnotationGroup)
)
var (

View File

@@ -210,7 +210,7 @@ spec:
command:
- /samba_share
- name: files
image: beclab/files-server:v0.2.146
image: beclab/files-server:v0.2.148
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: true
@@ -314,8 +314,8 @@ spec:
value: os.users
- name: NATS_SUBJECT_SYSTEM_GROUPS
value: os.groups
- name: RESERVED_SPACE
value: '1000'
- name: RESERVED_SPACE_PERCENT
value: '1.00'
- name: OLARES_VERSION
value: '1.12'
- name: FILE_CACHE_DIR

View File

@@ -3,5 +3,5 @@ target: prebuilt
output:
containers:
-
name: beclab/l4-bfl-proxy:v0.3.9
name: beclab/l4-bfl-proxy:v0.3.10
# must have blank new line

View File

@@ -906,6 +906,9 @@ func (s *Server) generateStreamServers() ([]StreamServer, error) {
if bflHost == "" {
return nil, fmt.Errorf("can not find bfl service for user=%s", app.Spec.Owner)
}
if p.ExposePort < 1 || p.ExposePort > 65535 {
continue
}
server := StreamServer{
Protocol: p.Protocol,
Port: p.ExposePort,

View File

@@ -240,7 +240,7 @@ spec:
value: os_framework_search3
containers:
- name: search3
image: beclab/search3:v0.1.2
image: beclab/search3:v0.1.5
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
@@ -301,7 +301,7 @@ spec:
priorityClassName: "system-cluster-critical"
containers:
- name: search3monitor
image: beclab/search3monitor:v0.1.2
image: beclab/search3monitor:v0.1.5
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8081

View File

@@ -64,7 +64,7 @@ spec:
operator: Exists
containers:
- name: search3-validation
image: beclab/search3validation:v0.1.2
image: beclab/search3validation:v0.1.5
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8443

View File

@@ -4,7 +4,7 @@ nameOverride: ""
fullnameOverride: ""
namespaceOverride: ""
imagePullSecrets: []
version: "v2.6.7"
version: "v2.6.9"
# Nvidia GPU Parameters
resourceName: "nvidia.com/gpu"

View File

@@ -3,7 +3,7 @@ target: prebuilt
output:
containers:
-
name: beclab/hami:v2.6.7
name: beclab/hami:v2.6.9
-
name: beclab/hami-webui-fe-oss:v1.0.8
-

View File

@@ -5,6 +5,6 @@ output:
-
name: beclab/apecloud-kubeblocks-tools:1.0.1
-
name: beclab/apecloud-kubeblocks:1.0.1
name: beclab/kubeblocks:1.0.1-ext1
-
name: beclab/kubeblock-addon-charts:v1.0.1-ext2

View File

@@ -98,7 +98,7 @@ spec:
capabilities:
drop:
- ALL
image: beclab/apecloud-kubeblocks:1.0.1
image: beclab/kubeblocks:1.0.1-ext1
imagePullPolicy: IfNotPresent
ports:
- name: webhook-server

View File

@@ -57,7 +57,7 @@ spec:
path: '{{ $dbbackup_rootpath }}/pg_backup'
containers:
- name: operator-api
image: beclab/middleware-operator:0.2.30
image: beclab/middleware-operator:0.2.31
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9080

View File

@@ -198,6 +198,9 @@ func (c *controller) processNextWorkItem() bool {
return nil
}
deleteRetryCount := 0
const maxDeleteRetries = 10
// Run the syncHandler, passing it the namespace/name string of the
// Foo resource to be synced.
for e := c.syncHandler(eobj); e != nil; e = c.syncHandler(eobj) {
@@ -208,6 +211,12 @@ func (c *controller) processNextWorkItem() bool {
return fmt.Errorf("error syncing '%v': %s, requeuing", eobj, e.Error())
}
deleteRetryCount++
if deleteRetryCount >= maxDeleteRetries {
klog.Errorf("error syncing '%v': %s, reached max delete retries, skipping", eobj, e.Error())
break
}
// cause delete action cannot be requeued at the end,
klog.Errorf("error syncing '%v': %s, retry after 1 second", eobj, e.Error())
time.Sleep(time.Second)
@@ -216,6 +225,12 @@ func (c *controller) processNextWorkItem() bool {
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
if deleteRetryCount >= maxDeleteRetries {
klog.Warningf("Skipped syncing '%v' after %d delete retries", eobj, deleteRetryCount)
return nil
}
klog.Infof("Successfully synced '%v'", eobj)
return nil
}(obj)

View File

@@ -37,8 +37,8 @@ func RequireHeader() func(c *fiber.Ctx) error {
klog.Infof("ws-client conn: %s, accessPublic: %v, token: %s, user: %s , header: %+v", connId, accessPublic, token, userName, headers)
var secWebsocketProtocol, ok = headers[constants.WsHeaderSecWebsocketProtocol]
if ok {
c.Set(constants.WsHeaderSecWebsocketProtocol, secWebsocketProtocol)
if ok && len(secWebsocketProtocol) > 0 {
c.Set(constants.WsHeaderSecWebsocketProtocol, secWebsocketProtocol[0])
}
c.Locals(constants.WsLocalAccessPublic, accessPublic)