Compare commits

...

12 Commits

Author SHA1 Message Date
dkeven
5489eb1ffa fix(appservice): avoid race condition between upgrade & applyenv (#2593) 2026-03-02 16:10:23 +08:00
eball
6bc863a39f Merge branch 'main' into module-appservice
* main:
  docs: add troubleshooting guide for missing apps in Market (#2574)
  feat(olares-app): update olares-app version to v1.9.6 (#2573)
  docs: update installation method of drivers on windows (#2566)
  docs: update the initialization steps for OpenClaw tutorial (#2567)
  docs: add troubleshooting guide for memory not released after stopping apps (#2565)
  docs: updates for releasing resources and uninstalling shared apps (#2568)
  feat(olares-app): update olares-app version to v1.9.5 (#2563)
  docs: update method for installing drivers on windows (#2564)
  docs: fix space nav display, extract use-case/developer sidebars, add note in space docs (#2562)
  docs: update custom domain tutorial screenshots and align copy with latest UI (#2559)
  cli: upgrade l4-bfl-proxy to v0.3.11 (#2557)
  l4-bfl-proxy: skip nginx reload if configuration has not changed (#2556)
  docs: update content related to reference app (#2530)
  Use the Robot font to match the theme of the rest of the applications
  bfl: add sync urls to master node (#2540)
  appservice: stop app if it is hami cause unschedule no wait (#2533)
2026-02-27 21:40:10 +08:00
eball
369a150fc1 chore: update app-service image tag to 0.5.4 2026-02-27 21:38:03 +08:00
eball
e5e2d34eec feat: support injecting gpu memory and container selection (#2580)
* refactor: unify GPU resource handling and remove hardcoded values

* fix: handle CPU type selection in GPU resource management

* feat: enhance GPU resource management with memory limits and chip type handling

* feat: update GPU resource patching to support selective container injection

* feat: adjust GPU memory format in deployment patching for compatibility

* fix: revert unchanged file

* Revert "fix: revert unchanged file"

This reverts commit 5f48862758.

* fix: revert unchanged file
2026-02-27 21:21:30 +08:00
hys
7048b58211 update appservice image tag to 0.5.3 2026-02-12 21:01:36 +08:00
hysyeah
40e8c87159 fix: stop app if it is hami cause unschedule no wait (#2531)
* fix: stop app if it is hami cause unschedule

* ignore param from req if size=0
2026-02-12 21:01:36 +08:00
hys
3c7440dfd3 fix: set amd apu/gpu limit key to amd.com/gpu 2026-02-12 21:01:36 +08:00
hys
360d30ce37 fix: add spec ports 2026-02-12 21:00:07 +08:00
hys
b9dc5db688 fix: check k8s request before into installing state 2026-02-12 21:00:07 +08:00
hys
1aad7ac347 fix: v2 app stop 2026-02-12 21:00:07 +08:00
hys
84e4cfdf75 fix: helm upgrade do not use atomic param and allow upgrade failed release 2026-02-12 21:00:07 +08:00
hys
7738eea929 fix: failed release upgrade 2026-02-12 21:00:07 +08:00
8 changed files with 105 additions and 31 deletions

View File

@@ -170,7 +170,7 @@ spec:
priorityClassName: "system-cluster-critical"
containers:
- name: app-service
image: beclab/app-service:0.5.3
image: beclab/app-service:0.5.4
imagePullPolicy: IfNotPresent
ports:
- containerPort: 6755

View File

@@ -342,6 +342,19 @@ func (h *Handler) appUpgrade(req *restful.Request, resp *restful.Response) {
return
}
// hold env batch lease during upgrade kickoff
// to avoid AppEnv controller racing and switching app manager op/state to ApplyEnv in this window
userNamespace := utils.UserspaceName(owner)
releaseLease, err := h.acquireUserEnvBatchLease(req.Request.Context(), userNamespace)
if err != nil {
klog.Errorf("Failed to acquire user env batch lease err=%v", err)
api.HandleError(resp, req, err)
return
}
if releaseLease != nil {
defer releaseLease()
}
err = helper.applyAppEnv(req.Request.Context())
if err != nil {
klog.Errorf("Failed to apply app env err=%v", err)

View File

@@ -32,11 +32,13 @@ import (
admissionv1 "k8s.io/api/admission/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
@@ -303,8 +305,23 @@ func (h *Handler) gpuLimitMutate(ctx context.Context, req *admissionv1.Admission
if gpuRequired == nil {
return resp
}
if annotations[applicationGpuInjectKey] != "true" {
var injectContainer []string
injectAll := false
if injectValue, ok := annotations[applicationGpuInjectKey]; !ok || injectValue == "false" || injectValue == "" {
return resp
} else {
if injectValue != "true" {
injectToken := strings.Split(injectValue, ",")
for _, token := range injectToken {
c := strings.TrimSpace(token)
if c != "" {
injectContainer = append(injectContainer, c)
}
}
} else {
injectAll = true
}
}
GPUType := appcfg.GetSelectedGpuTypeValue()
@@ -321,27 +338,43 @@ func (h *Handler) gpuLimitMutate(ctx context.Context, req *admissionv1.Admission
},
}
patchBytes, err := webhook.CreatePatchForDeployment(tpl, h.getGPUResourceTypeKey(GPUType), envs)
gpuRequiredValue := gpuRequired.Value() / 1024 / 1024 // HAMi gpu memory format
hamiFormatGpuRequired := resource.NewQuantity(gpuRequiredValue, resource.DecimalSI)
patchBytes, err := webhook.CreatePatchForDeployment(
tpl,
injectAll,
injectContainer,
h.getGPUResourceTypeKey(GPUType),
ptr.To(hamiFormatGpuRequired.String()),
envs,
)
if err != nil {
klog.Errorf("create patch error %v", err)
return h.sidecarWebhook.AdmissionError(req.UID, err)
}
klog.Info("patchBytes:", string(patchBytes))
h.sidecarWebhook.PatchAdmissionResponse(resp, patchBytes)
if len(patchBytes) > 0 {
h.sidecarWebhook.PatchAdmissionResponse(resp, patchBytes)
}
return resp
}
// FIXME: should not hardcode
func (h *Handler) getGPUResourceTypeKey(gpuType string) string {
switch gpuType {
case utils.NvidiaCardType:
return constants.NvidiaGPU
case utils.GB10ChipType:
return constants.NvidiaGB10GPU
return constants.NvidiaGPU
case utils.AmdApuCardType:
return constants.AMDGPU
case utils.AmdGpuCardType:
return constants.AMDGPU
case utils.StrixHaloChipType:
return constants.AMDGPU
case utils.CPUType:
klog.Info("CPU type is selected, no GPU resource will be injected")
return ""
default:
return ""
}

View File

@@ -340,8 +340,8 @@ func GetClusterResource(kubeConfig *rest.Config, token string) (*prometheus.Clus
arches.Insert(n.Labels["kubernetes.io/arch"])
if quantity, ok := n.Status.Capacity[constants.NvidiaGPU]; ok {
total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.NvidiaGB10GPU]; ok {
total += quantity.AsApproximateFloat64()
// } else if quantity, ok = n.Status.Capacity[constants.NvidiaGB10GPU]; ok {
// total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.AMDAPU]; ok {
total += quantity.AsApproximateFloat64()
}

View File

@@ -84,10 +84,11 @@ const (
EnvGPUType = "GPU_TYPE"
// gpu resource keys
NvidiaGPU = "nvidia.com/gpu"
NvidiaGB10GPU = "nvidia.com/gb10"
AMDAPU = "amd.com/apu"
AMDGPU = "amd.com/gpu"
NvidiaGPU = "nvidia.com/gpu"
NvidiaGPUMem = "nvidia.com/gpumem"
// NvidiaGB10GPU = "nvidia.com/gb10"
AMDAPU = "amd.com/apu"
AMDGPU = "amd.com/gpu"
AuthorizationLevelOfPublic = "public"
AuthorizationLevelOfPrivate = "private"

View File

@@ -236,7 +236,8 @@ func CheckAppRequirement(token string, appConfig *appcfg.ApplicationConfig, op v
}
// only support nvidia gpu management by HAMi for now
if appConfig.Requirement.GPU != nil && appConfig.GetSelectedGpuTypeValue() == utils.NvidiaCardType {
if appConfig.Requirement.GPU != nil &&
(appConfig.GetSelectedGpuTypeValue() == utils.NvidiaCardType || appConfig.GetSelectedGpuTypeValue() == utils.GB10ChipType) {
if !appConfig.Requirement.GPU.IsZero() && metrics.GPU.Total <= 0 {
return constants.GPU, constants.SystemGPUNotAvailable, fmt.Errorf(constants.SystemGPUNotAvailableMessage, op)
@@ -400,9 +401,9 @@ func GetClusterResource(token string) (*prometheus.ClusterMetrics, []string, err
arches.Insert(n.Labels["kubernetes.io/arch"])
if quantity, ok := n.Status.Capacity[constants.NvidiaGPU]; ok {
total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.NvidiaGB10GPU]; ok {
total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.AMDAPU]; ok {
// } else if quantity, ok = n.Status.Capacity[constants.NvidiaGB10GPU]; ok {
// total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.AMDGPU]; ok {
total += quantity.AsApproximateFloat64()
}
}

View File

@@ -5,8 +5,10 @@ const (
)
const (
NvidiaCardType = "nvidia" // handling by HAMi
AmdGpuCardType = "amd-gpu" //
AmdApuCardType = "amd-apu" // AMD APU with integrated GPU , AI Max 395 etc.
GB10ChipType = "nvidia-gb10" // NVIDIA GB10 Superchip & unified system memory
CPUType = "cpu" // force to use CPU, no GPU
NvidiaCardType = "nvidia" // handling by HAMi
AmdGpuCardType = "amd-gpu" //
AmdApuCardType = "amd-apu" // AMD APU with integrated GPU , AI Max 395 etc.
GB10ChipType = "nvidia-gb10" // NVIDIA GB10 Superchip & unified system memory
StrixHaloChipType = "strix-halo" // AMD Strix Halo GPU & unified system memory
)

View File

@@ -543,21 +543,23 @@ type EnvKeyValue struct {
}
// CreatePatchForDeployment add gpu env for deployment and returns patch bytes.
func CreatePatchForDeployment(tpl *corev1.PodTemplateSpec, typeKey string, envKeyValues []EnvKeyValue) ([]byte, error) {
patches, err := addResourceLimits(tpl, typeKey, envKeyValues)
func CreatePatchForDeployment(tpl *corev1.PodTemplateSpec, injectAll bool, injectContainer []string, gpuTypeKey string, gpumem *string, envKeyValues []EnvKeyValue) ([]byte, error) {
patches, err := addGpuResourceLimits(tpl, injectAll, injectContainer, gpuTypeKey, gpumem)
if err != nil {
return []byte{}, err
}
patches = append(patches, addEnvToPatch(tpl, envKeyValues)...)
return json.Marshal(patches)
}
func addResourceLimits(tpl *corev1.PodTemplateSpec, typeKey string, envKeyValues []EnvKeyValue) (patch []patchOp, err error) {
func addGpuResourceLimits(tpl *corev1.PodTemplateSpec, injectAll bool, injectContainer []string, typeKey string, gpumem *string) (patch []patchOp, err error) {
if typeKey == "" {
klog.Warning("No gpu type selected, skip adding resource limits")
return patch, nil
}
if typeKey == constants.NvidiaGPU || typeKey == constants.NvidiaGB10GPU {
// add runtime class for nvidia gpu, HAMi runtime class is "nvidia"
if typeKey == constants.NvidiaGPU {
if tpl.Spec.RuntimeClassName != nil {
patch = append(patch, patchOp{
Op: constants.PatchOpReplace,
@@ -575,21 +577,31 @@ func addResourceLimits(tpl *corev1.PodTemplateSpec, typeKey string, envKeyValues
for i := range tpl.Spec.Containers {
container := tpl.Spec.Containers[i]
if !injectAll && !funk.Contains(injectContainer, container.Name) {
continue
}
if len(container.Resources.Limits) == 0 {
limitsValues := map[string]interface{}{
typeKey: "1",
}
if gpumem != nil && *gpumem != "" && typeKey == constants.NvidiaGPU {
limitsValues[constants.NvidiaGPUMem] = *gpumem
}
patch = append(patch, patchOp{
Op: constants.PatchOpAdd,
Path: fmt.Sprintf(resourcePath, i),
Value: map[string]interface{}{
typeKey: "1",
},
Op: constants.PatchOpAdd,
Path: fmt.Sprintf(resourcePath, i),
Value: limitsValues,
})
} else {
t := make(map[string]map[string]string)
t["limits"] = map[string]string{}
for k, v := range container.Resources.Limits {
if k.String() == constants.NvidiaGPU ||
k.String() == constants.NvidiaGB10GPU ||
k.String() == constants.NvidiaGPUMem ||
k.String() == constants.AMDAPU {
// unset all previous gpu limits
continue
@@ -597,12 +609,24 @@ func addResourceLimits(tpl *corev1.PodTemplateSpec, typeKey string, envKeyValues
t["limits"][k.String()] = v.String()
}
t["limits"][typeKey] = "1"
if gpumem != nil && *gpumem != "" && typeKey == constants.NvidiaGPU {
t["limits"][constants.NvidiaGPUMem] = *gpumem
}
patch = append(patch, patchOp{
Op: constants.PatchOpReplace,
Path: fmt.Sprintf(resourcePath, i),
Value: t["limits"],
})
}
}
return patch, nil
}
func addEnvToPatch(tpl *corev1.PodTemplateSpec, envKeyValues []EnvKeyValue) (patch []patchOp) {
for i := range tpl.Spec.Containers {
container := tpl.Spec.Containers[i]
envNames := make([]string, 0)
if len(container.Env) == 0 {
value := make([]map[string]string, 0)
@@ -643,7 +667,7 @@ func addResourceLimits(tpl *corev1.PodTemplateSpec, typeKey string, envKeyValues
}
return patch, nil
return patch
}
func genPatchesForEnv(op string, containerIdx, envIdx int, name, value string) (patch []patchOp) {