Compare commits

...

11 Commits

Author SHA1 Message Date
lovehunter9
f823625c8e feat: files-server delete all for search3 2025-06-06 17:19:34 +08:00
lovehunter9
1a06378e50 feat: files-server batch_delete 2025-06-06 14:18:35 +08:00
aby913
50f6b127ac backup-server: improve message (#1405) 2025-06-06 00:29:11 +08:00
hysyeah
df23dc64e3 app-service,bfl: fix upgrade failed bug,add appid to pod label;fix call analytics-server (#1404)
* app-service,bfl: fix upgrade failed bug,add appid to pod label;fix call analytics-server

* fix(user-service): add nats env

---------

Co-authored-by: qq815776412 <815776412@qq.com>
2025-06-06 00:28:40 +08:00
lovehunter9
f704cf1846 fix: files-server bug when listing external if any smb folder is stated as host is down (#1403) 2025-06-06 00:27:40 +08:00
simon
66d0eccb2f feat(knowledge): websocket update (#1402)
websocket
2025-06-06 00:27:09 +08:00
aby913
a226fd99b8 refactor: CLI code refactor (#1401)
* refactor: remove unused account files

* refactor: remove unused socat task

* refactor: remove unused flex conntrack task

* refactor: remove unused cri download binaries module

* refactor: remove hook demo

* refactor: remove unused repositoryOnline, repository modules

* refactor: remove unused os rollback

* refactor: remove unused clear node os module

* refactor: remove unused backup dir

* refactor: remove unused local repo manager

* refactor: remove unused cluster pre check module and tasks

* refactor: remove unused cri migrate module

* refactor: remove unused k3s uninstall module and tasks

* refactor: remove unused k8s node delete module

* refactor: remove unused phase startup

* refactor: remove unused storage minio operator module

* refactor: remove unused ks modules

* refactor: remove unused ks plugins cache, redis tasks

* refactor: remove unused ks plugins snapshot controller module

* refactor: remove unused ks plugins monitor notification module

* refactor: remove unused plugins kata and nfd

* refactor: remove unused scripts

* refactor: remove unused filesystem module

* refactor: remove unused certs modules

* refactor: remove unused bootstrap confirm modules

* refactor: remove unused images tasks

* refactor: remove unused k8s prepares

* refactor: remove unused installer module

* refactor: remove unused registry modules
2025-06-06 00:26:37 +08:00
huaiyuan
60b823d9db desktop: update version to v1.3.70 (#1400)
fix(desktop): update version to v1.3.70
2025-06-06 00:24:33 +08:00
wiy
7b9be6cce7 feat(vault-server&user-service): update user server & vault-server support websocket (#1408)
feat(vault-server&settings&user-service): update user server & vault-server support websocket
2025-06-06 00:23:52 +08:00
eball
b99fc51cc2 gpu: fix gpu scheduler bugs (#1407) 2025-06-06 00:19:38 +08:00
salt
cdf70c5c58 fix: fix resources conflict for search3monitor (#1406)
Co-authored-by: Ubuntu <ubuntu@localhost.localdomain>
2025-06-05 22:59:00 +08:00
89 changed files with 74 additions and 9703 deletions

View File

@@ -376,7 +376,7 @@ spec:
- mountPath: /www
name: www-dir
- name: settings-init
image: beclab/settings:v1.3.69
image: beclab/settings:v1.3.71
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -471,7 +471,7 @@ spec:
- mountPath: /www
name: www-dir
- name: edge-desktop-init
image: beclab/desktop:v0.2.59
image: beclab/desktop:v1.3.70
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -626,7 +626,7 @@ spec:
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
- name: user-service
image: beclab/user-service:v0.0.8
image: beclab/user-service:v0.0.10
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
@@ -669,6 +669,19 @@ spec:
name: user-service-secrets
- name: DATABASE_URL
value: postgres://user_service_{{ .Values.bfl.username }}:$(DATABASE_PASSWORD)@citus-master-svc.user-system-{{ .Values.bfl.username }}/user_space_{{ .Values.bfl.username }}_user_service?sslmode=disable
- name: NATS_HOST
value: nats.user-system-{{ .Values.bfl.username }}
- name: NATS_PORT
value: '4222'
- name: NATS_USERNAME_USERSERVICE
value: user-service-{{ .Values.bfl.username }}
- name: NATS_PASSWORD_USERSESRVICE
valueFrom:
secretKeyRef:
key: nats_password
name: user-service-nats-secret
- name: NATS_SUBJECT_USER_APPS
value: terminus.user.*.{{ .Values.bfl.username}}
- name: drive-server
image: beclab/drive:v0.0.72
imagePullPolicy: IfNotPresent
@@ -3401,7 +3414,7 @@ spec:
- appName: files-server
sub: allow
pub: allow
name: "files.*"
name: "*."
permission:
pub: allow
sub: allow
@@ -3452,6 +3465,9 @@ spec:
- appName: knowledge
sub: allow
pub: allow
- appName: download
sub: allow
pub: allow
name: "knowledge.*"
permission:
sub: allow

View File

@@ -18,9 +18,7 @@ require (
github.com/Masterminds/semver/v3 v3.3.0
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/containerd/containerd v1.7.27
github.com/containers/image/v5 v5.32.2
github.com/dominodatalab/os-release v0.0.0-20190522011736-bcdb4a3e3c2f
github.com/estesp/manifest-tool/v2 v2.1.6
github.com/go-playground/validator/v10 v10.22.0
github.com/jmoiron/sqlx v1.4.0
github.com/joho/godotenv v1.5.1
@@ -29,7 +27,6 @@ require (
github.com/mattn/go-sqlite3 v1.14.22
github.com/mitchellh/mapstructure v1.5.0
github.com/modood/table v0.0.0-20220527013332-8d47e76dad33
github.com/opencontainers/image-spec v1.1.1
github.com/pelletier/go-toml v1.9.5
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.6
@@ -70,6 +67,7 @@ require (
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/bshuster-repo/logrus-logstash-hook v1.0.2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
@@ -84,7 +82,6 @@ require (
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
@@ -150,6 +147,7 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect

2253
cli/go.sum

File diff suppressed because it is too large. [Load Diff]

View File

@@ -1 +0,0 @@
package account

View File

@@ -1 +0,0 @@
package account

View File

@@ -1,74 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package binaries
import (
"fmt"
"os/exec"
kubekeyapiv1alpha2 "bytetrade.io/web3os/installer/apis/kubekey/v1alpha2"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/cache"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/files"
"github.com/pkg/errors"
)
// CriDownloadHTTP defines the kubernetes' binaries that need to be downloaded in advance and downloads them.
func CriDownloadHTTP(kubeConf *common.KubeConf, path, arch, osType, osVersion, osPlatformFamily string, pipelineCache *cache.Cache) error {
binaries := []*files.KubeBinary{}
switch kubeConf.Arg.Type {
case common.Docker:
docker := files.NewKubeBinary("docker", arch, osType, osVersion, osPlatformFamily, kubekeyapiv1alpha2.DefaultDockerVersion, path, "")
binaries = append(binaries, docker)
case common.Containerd:
containerd := files.NewKubeBinary("containerd", arch, osType, osVersion, osPlatformFamily, kubekeyapiv1alpha2.DefaultContainerdVersion, path, "")
runc := files.NewKubeBinary("runc", arch, osType, osVersion, osPlatformFamily, kubekeyapiv1alpha2.DefaultRuncVersion, path, "")
crictl := files.NewKubeBinary("crictl", arch, osType, osVersion, osPlatformFamily, kubekeyapiv1alpha2.DefaultCrictlVersion, path, "")
binaries = append(binaries, containerd, runc, crictl)
default:
}
binariesMap := make(map[string]*files.KubeBinary)
for _, binary := range binaries {
if err := binary.CreateBaseDir(); err != nil {
return errors.Wrapf(errors.WithStack(err), "create file %s base dir failed", binary.FileName)
}
logger.Infof("%s downloading %s %s %s ...", common.LocalHost, arch, binary.ID, binary.Version)
binariesMap[binary.ID] = binary
if util.IsExist(binary.Path()) {
// download it again if it's incorrect
if err := binary.SHA256Check(); err != nil {
p := binary.Path()
_ = exec.Command("/bin/sh", "-c", fmt.Sprintf("rm -f %s", p)).Run()
} else {
logger.Infof("%s %s is existed", common.LocalHost, binary.ID)
continue
}
}
if err := binary.Download(); err != nil {
return fmt.Errorf("Failed to download %s binary: %s error: %w ", binary.ID, binary.Url, err)
}
}
pipelineCache.Set(common.KubeBinaries+"-"+arch, binariesMap)
return nil
}

View File

@@ -1,55 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package binaries
import (
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/task"
)
type CriBinariesModule struct {
common.KubeModule
}
func (i *CriBinariesModule) Init() {
i.Name = "CriBinariesModule"
i.Desc = "Download Cri package"
switch i.KubeConf.Arg.Type {
case common.Docker:
i.Tasks = CriBinaries(i)
case common.Containerd:
i.Tasks = CriBinaries(i)
default:
}
}
func CriBinaries(p *CriBinariesModule) []task.Interface {
download := &task.LocalTask{
Name: "DownloadCriPackage",
Desc: "Download Cri package",
Action: new(CriDownload),
}
p.Tasks = []task.Interface{
download,
}
return p.Tasks
}
// TODO: install helm

View File

@@ -6,45 +6,6 @@ import (
"github.com/pkg/errors"
)
func GetSocat(basePath string, manifestMap manifest.InstallationManifest) (string, string, error) {
socat, err := manifestMap.Get("socat")
if err != nil {
return "", "", err
}
path := socat.FilePath(basePath)
if !util.IsExist(path) {
return "", "", errors.Errorf("socat not found in %s", path)
}
return basePath, socat.Filename, nil
}
func GetFlex(basePath string, manifestMap manifest.InstallationManifest) (string, string, error) {
flex, err := manifestMap.Get("flex")
if err != nil {
return "", "", err
}
path := flex.FilePath(basePath)
if !util.IsExist(path) {
return "", "", errors.Errorf("flex not found in %s", path)
}
return basePath, flex.Filename, nil
}
func GetConntrack(basePath string, manifestMap manifest.InstallationManifest) (string, string, error) {
conntrack, err := manifestMap.Get("conntrack")
if err != nil {
return "", "", err
}
path := conntrack.FilePath(basePath)
if !util.IsExist(path) {
return "", "", errors.Errorf("conntrack not found in %s", path)
}
return basePath, conntrack.Filename, nil
}
func GetUbutun24AppArmor(basePath string, manifestMap manifest.InstallationManifest) (string, error) {
apparmor, err := manifestMap.Get("apparmor")
if err != nil {

View File

@@ -23,7 +23,6 @@ import (
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/manifest"
"github.com/pkg/errors"
)
type InstallAppArmorTask struct {
@@ -44,36 +43,3 @@ func (t *InstallAppArmorTask) Execute(runtime connector.Runtime) error {
return nil
}
type CriDownload struct {
common.KubeAction
manifest.ManifestAction
}
func (d *CriDownload) Execute(runtime connector.Runtime) error {
cfg := d.KubeConf.Cluster
archMap := make(map[string]bool)
for _, host := range cfg.Hosts {
switch host.Arch {
case "amd64":
archMap["amd64"] = true
case "arm64":
archMap["arm64"] = true
default:
return errors.New(fmt.Sprintf("Unsupported architecture: %s", host.Arch))
}
}
var systemInfo = runtime.GetSystemInfo()
var osType = systemInfo.GetOsType()
var osPlatformFamily = systemInfo.GetOsPlatformFamily()
var osVersion = systemInfo.GetOsVersion()
for arch := range archMap {
if err := CriDownloadHTTP(d.KubeConf, runtime.GetWorkDir(), arch, osType, osVersion, osPlatformFamily, d.PipelineCache); err != nil {
return err
}
}
return nil
}
// TODO: install helm

View File

@@ -18,7 +18,6 @@ package confirm
import (
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/module"
"bytetrade.io/web3os/installer/pkg/core/task"
)
@@ -45,105 +44,3 @@ func (i *InstallConfirmModule) Init() {
display,
}
}
type DeleteClusterConfirmModule struct {
common.KubeModule
}
func (d *DeleteClusterConfirmModule) Init() {
d.Name = "DeleteClusterConfirmModule"
d.Desc = "Display delete confirmation form"
display := &task.LocalTask{
Name: "ConfirmForm",
Desc: "Display confirmation form",
Action: &DeleteConfirm{Content: "cluster"},
}
d.Tasks = []task.Interface{
display,
}
}
type DeleteNodeConfirmModule struct {
common.KubeModule
}
func (d *DeleteNodeConfirmModule) Init() {
d.Name = "DeleteNodeConfirmModule"
d.Desc = "Display delete node confirmation form"
display := &task.LocalTask{
Name: "ConfirmForm",
Desc: "Display confirmation form",
Action: &DeleteConfirm{Content: "node"},
}
d.Tasks = []task.Interface{
display,
}
}
type UpgradeConfirmModule struct {
common.KubeModule
Skip bool
}
func (u *UpgradeConfirmModule) IsSkip() bool {
return u.Skip
}
func (u *UpgradeConfirmModule) Init() {
u.Name = "UpgradeConfirmModule"
u.Desc = "Display upgrade confirmation form"
display := &task.LocalTask{
Name: "ConfirmForm",
Desc: "Display confirmation form",
Action: new(UpgradeConfirm),
}
u.Tasks = []task.Interface{
display,
}
}
type CheckFileExistModule struct {
module.BaseTaskModule
FileName string
}
func (c *CheckFileExistModule) Init() {
c.Name = "CheckFileExist"
c.Desc = "Check file if is existed"
check := &task.LocalTask{
Name: "CheckExist",
Desc: "Check output file if existed",
Action: &CheckFile{FileName: c.FileName},
}
c.Tasks = []task.Interface{
check,
}
}
type MigrateCriConfirmModule struct {
common.KubeModule
}
func (d *MigrateCriConfirmModule) Init() {
d.Name = "MigrateCriConfirmModule"
d.Desc = "Display Migrate Cri form"
display := &task.LocalTask{
Name: "ConfirmForm",
Desc: "Display confirmation form",
Action: &MigrateCri{},
}
d.Tasks = []task.Interface{
display,
}
}

View File

@@ -20,7 +20,6 @@ import (
"bufio"
"fmt"
"os"
"regexp"
"strings"
"github.com/mitchellh/mapstructure"
@@ -29,10 +28,8 @@ import (
versionutil "k8s.io/apimachinery/pkg/util/version"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/action"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/util"
)
// PreCheckResults defines the items to be checked.
@@ -145,226 +142,3 @@ func (i *InstallationConfirm) Execute(runtime connector.Runtime) error {
}
return nil
}
type DeleteConfirm struct {
common.KubeAction
Content string
}
func (d *DeleteConfirm) Execute(runtime connector.Runtime) error {
reader := bufio.NewReader(os.Stdin)
confirmOK := false
for !confirmOK {
fmt.Printf("Are you sure to delete this %s? [yes/no]: ", d.Content)
input, err := reader.ReadString('\n')
if err != nil {
return err
}
input = strings.ToLower(strings.TrimSpace(input))
switch strings.ToLower(input) {
case "yes", "y":
confirmOK = true
case "no", "n":
os.Exit(0)
default:
continue
}
}
return nil
}
type UpgradeConfirm struct {
common.KubeAction
}
func (u *UpgradeConfirm) Execute(runtime connector.Runtime) error {
pre := make([]map[string]string, len(runtime.GetAllHosts()), len(runtime.GetAllHosts()))
for i, host := range runtime.GetAllHosts() {
if v, ok := host.GetCache().Get(common.NodePreCheck); ok {
pre[i] = v.(map[string]string)
} else {
return errors.New("get node check result failed by host cache")
}
}
results := make([]PreCheckResults, len(pre), len(pre))
for i := range pre {
var result PreCheckResults
_ = mapstructure.Decode(pre[i], &result)
results[i] = result
}
table.OutputA(results)
fmt.Println()
warningFlag := false
cmp, err := versionutil.MustParseSemantic(u.KubeConf.Cluster.Kubernetes.Version).Compare("v1.19.0")
if err != nil {
logger.Fatalf("Failed to compare kubernetes version: %v", err)
}
if cmp == 0 || cmp == 1 {
for _, result := range results {
if len(result.Docker) != 0 {
dockerVersion, err := RefineDockerVersion(result.Docker)
if err != nil {
logger.Fatalf("Failed to get docker version: %v", err)
}
cmp, err := versionutil.MustParseSemantic(dockerVersion).Compare("20.10.0")
if err != nil {
logger.Fatalf("Failed to compare docker version: %v", err)
}
warningFlag = warningFlag || (cmp == -1)
}
}
if warningFlag {
fmt.Println(`
Warning:
An old Docker version may cause the failure of upgrade. It is recommended that you upgrade Docker to 20.10+ beforehand.
Issue: https://github.com/kubernetes/kubernetes/issues/101056`)
fmt.Print("\n")
}
}
nodeStats, ok := u.PipelineCache.GetMustString(common.ClusterNodeStatus)
if !ok {
return errors.New("get cluster nodes status failed by pipeline cache")
}
fmt.Println("Cluster nodes status:")
fmt.Println(nodeStats + "\n")
fmt.Println("Upgrade Confirmation:")
currentK8sVersion, ok := u.PipelineCache.GetMustString(common.K8sVersion)
if !ok {
return errors.New("get current Kubernetes version failed by pipeline cache")
}
fmt.Printf("kubernetes version: %s to %s\n", currentK8sVersion, u.KubeConf.Cluster.Kubernetes.Version)
if u.KubeConf.Cluster.KubeSphere.Enabled {
currentKsVersion, ok := u.PipelineCache.GetMustString(common.KubeSphereVersion)
if !ok {
return errors.New("get current KubeSphere version failed by pipeline cache")
}
fmt.Printf("kubesphere version: %s to %s\n", currentKsVersion, u.KubeConf.Cluster.KubeSphere.Version)
}
fmt.Println()
if k8sVersion, err := versionutil.ParseGeneric(u.KubeConf.Cluster.Kubernetes.Version); err == nil {
if cri, ok := u.PipelineCache.GetMustString(common.ClusterNodeCRIRuntimes); ok {
k8sV124 := versionutil.MustParseSemantic("v1.24.0")
if k8sVersion.AtLeast(k8sV124) && versionutil.MustParseSemantic(currentK8sVersion).LessThan(k8sV124) && strings.Contains(cri, "docker") {
fmt.Println("[Notice]")
fmt.Println("Pre-upgrade check failed. The container runtime of the current cluster is Docker.")
fmt.Println("Kubernetes v1.24 and later no longer support dockershim and Docker.")
fmt.Println("Make sure you have completed the migration from Docker to other container runtimes that are compatible with the Kubernetes CRI.")
fmt.Println("For more information, see:")
fmt.Println("https://kubernetes.io/docs/setup/production-environment/container-runtimes/#container-runtimes")
fmt.Println("https://kubernetes.io/blog/2022/02/17/dockershim-faq/")
fmt.Println("")
}
}
}
reader := bufio.NewReader(os.Stdin)
confirmOK := false
for !confirmOK {
fmt.Printf("Continue upgrading cluster? [yes/no]: ")
input, err := reader.ReadString('\n')
if err != nil {
return err
}
input = strings.ToLower(strings.TrimSpace(input))
switch input {
case "yes", "y":
confirmOK = true
case "no", "n":
os.Exit(0)
default:
continue
}
}
return nil
}
func RefineDockerVersion(version string) (string, error) {
var newVersionComponents []string
versionMatchRE := regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`)
parts := versionMatchRE.FindStringSubmatch(version)
if parts == nil {
return "", fmt.Errorf("could not parse %q as version", version)
}
numbers, _ := parts[1], parts[2]
components := strings.Split(numbers, ".")
for index, c := range components {
newVersion := strings.TrimPrefix(c, "0")
if index == len(components)-1 && newVersion == "" {
newVersion = "0"
}
newVersionComponents = append(newVersionComponents, newVersion)
}
return strings.Join(newVersionComponents, "."), nil
}
type CheckFile struct {
action.BaseAction
FileName string
}
func (c *CheckFile) Execute(runtime connector.Runtime) error {
if util.IsExist(c.FileName) {
reader := bufio.NewReader(os.Stdin)
stop := false
for {
if stop {
break
}
fmt.Printf("%s already exists. Are you sure you want to overwrite this file? [yes/no]: ", c.FileName)
input, _ := reader.ReadString('\n')
input = strings.ToLower(strings.TrimSpace(input))
if input != "" {
switch input {
case "yes", "y":
stop = true
case "no", "n":
os.Exit(0)
}
}
}
}
return nil
}
type MigrateCri struct {
common.KubeAction
}
func (d *MigrateCri) Execute(runtime connector.Runtime) error {
reader := bufio.NewReader(os.Stdin)
confirmOK := false
for !confirmOK {
fmt.Printf("Are you sure to Migrate Cri? [yes/no]: ")
input, err := reader.ReadString('\n')
if err != nil {
return err
}
input = strings.ToLower(strings.TrimSpace(input))
switch strings.ToLower(input) {
case "yes", "y":
confirmOK = true
case "no", "n":
os.Exit(0)
default:
continue
}
}
return nil
}

View File

@@ -1,49 +0,0 @@
package hello
import (
"context"
"fmt"
"time"
"bytetrade.io/web3os/installer/pkg/core/ending"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/module"
"bytetrade.io/web3os/installer/pkg/core/util"
)
type HelloHook struct {
Module module.Module
Result *ending.ModuleResult
}
func (h *HelloHook) Init(module module.Module, result *ending.ModuleResult) {
fmt.Println("---hello hook / init---")
h.Module = module
h.Result = result
h.Result.StartTime = time.Now()
}
func (h *HelloHook) Try() error {
fmt.Println("---hello hook / try---", h.Result.StartTime.String())
_, _, err := util.Exec(context.Background(), "echo 'hello, world!!!!!'", true, false)
if err != nil {
h.Result.ErrResult(err)
return err
}
return nil
}
func (h *HelloHook) Catch(err error) error {
fmt.Println("---hello hook / Cache---", err)
time.Sleep(5 * time.Second)
return nil
}
func (h *HelloHook) Finally() {
fmt.Println("---hello hook / Finally---")
h.Result.EndTime = time.Now()
sayHello := h.Result.Status.String()
logger.Infof(">>>> %s %s", sayHello, h.Result.EndTime.String())
}

View File

@@ -1,18 +0,0 @@
package hello
import (
"bytetrade.io/web3os/installer/pkg/core/module"
)
type HelloModule struct {
module.BaseTaskModule
}
func (h *HelloModule) Init() {
h.Name = "HelloModule"
h.Desc = "Say Hello"
h.PostHook = []module.PostHookInterface{
&HelloHook{},
}
}

View File

@@ -17,9 +17,10 @@
package os
import (
"bytetrade.io/web3os/installer/pkg/kubernetes"
"path/filepath"
"bytetrade.io/web3os/installer/pkg/kubernetes"
"bytetrade.io/web3os/installer/pkg/bootstrap/os/templates"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/action"
@@ -195,47 +196,6 @@ func (c *ConfigureOSModule) Init() {
}
}
type ClearNodeOSModule struct {
common.KubeModule
}
func (c *ClearNodeOSModule) Init() {
c.Name = "ClearNodeOSModule"
resetNetworkConfig := &task.RemoteTask{
Name: "ResetNetworkConfig",
Desc: "Reset os network config",
Hosts: c.Runtime.GetHostsByRole(common.Worker),
Prepare: new(DeleteNode),
Action: new(ResetNetworkConfig),
Parallel: true,
}
removeFiles := &task.RemoteTask{
Name: "RemoveFiles",
Desc: "Remove node files",
Hosts: c.Runtime.GetHostsByRole(common.Worker),
Prepare: new(DeleteNode),
Action: new(RemoveNodeFiles),
Parallel: true,
}
daemonReload := &task.RemoteTask{
Name: "DaemonReload",
Desc: "Systemd daemon reload",
Hosts: c.Runtime.GetHostsByRole(common.Worker),
Prepare: new(DeleteNode),
Action: new(DaemonReload),
Parallel: true,
}
c.Tasks = []task.Interface{
resetNetworkConfig,
removeFiles,
daemonReload,
}
}
type ClearOSEnvironmentModule struct {
common.KubeModule
}
@@ -285,157 +245,3 @@ func (c *ClearOSEnvironmentModule) Init() {
daemonReload,
}
}
type RepositoryOnlineModule struct {
common.KubeModule
Skip bool
}
func (r *RepositoryOnlineModule) IsSkip() bool {
return r.Skip
}
func (r *RepositoryOnlineModule) Init() {
r.Name = "RepositoryOnlineModule"
getOSData := &task.RemoteTask{
Name: "GetOSData",
Desc: "Get OS release",
Hosts: r.Runtime.GetAllHosts(),
Action: new(GetOSData),
Parallel: true,
}
newRepo := &task.RemoteTask{
Name: "NewRepoClient",
Desc: "New repository client",
Hosts: r.Runtime.GetAllHosts(),
Action: new(NewRepoClient),
Parallel: true,
Retry: 1,
}
install := &task.RemoteTask{
Name: "InstallPackage",
Desc: "Install packages",
Hosts: r.Runtime.GetAllHosts(),
Action: new(InstallPackage),
Parallel: true,
Retry: 1,
}
r.Tasks = []task.Interface{
getOSData,
newRepo,
install,
}
}
type RepositoryModule struct {
common.KubeModule
Skip bool
}
func (r *RepositoryModule) IsSkip() bool {
return r.Skip
}
func (r *RepositoryModule) Init() {
r.Name = "RepositoryModule"
r.Desc = "Install local repository"
getOSData := &task.RemoteTask{
Name: "GetOSData",
Desc: "Get OS release",
Hosts: r.Runtime.GetAllHosts(),
Action: new(GetOSData),
Parallel: true,
}
sync := &task.RemoteTask{
Name: "SyncRepositoryISOFile",
Desc: "Sync repository iso file to all nodes",
Hosts: r.Runtime.GetAllHosts(),
Action: new(SyncRepositoryFile),
Parallel: true,
Retry: 2,
}
mount := &task.RemoteTask{
Name: "MountISO",
Desc: "Mount iso file",
Hosts: r.Runtime.GetAllHosts(),
Action: new(MountISO),
Parallel: true,
Retry: 1,
}
newRepo := &task.RemoteTask{
Name: "NewRepoClient",
Desc: "New repository client",
Hosts: r.Runtime.GetAllHosts(),
Action: new(NewRepoClient),
Parallel: true,
Retry: 1,
Rollback: new(RollbackUmount),
}
backup := &task.RemoteTask{
Name: "BackupOriginalRepository",
Desc: "Backup original repository",
Hosts: r.Runtime.GetAllHosts(),
Action: new(BackupOriginalRepository),
Parallel: true,
Retry: 1,
Rollback: new(RecoverBackupSuccessNode),
}
add := &task.RemoteTask{
Name: "AddLocalRepository",
Desc: "Add local repository",
Hosts: r.Runtime.GetAllHosts(),
Action: new(AddLocalRepository),
Parallel: true,
Retry: 1,
Rollback: new(RecoverRepository),
}
install := &task.RemoteTask{
Name: "InstallPackage",
Desc: "Install packages",
Hosts: r.Runtime.GetAllHosts(),
Action: new(InstallPackage),
Parallel: true,
Retry: 1,
Rollback: new(RecoverRepository),
}
reset := &task.RemoteTask{
Name: "ResetRepository",
Desc: "Reset repository to the original repository",
Hosts: r.Runtime.GetAllHosts(),
Action: new(ResetRepository),
Parallel: true,
Retry: 1,
}
umount := &task.RemoteTask{
Name: "UmountISO",
Desc: "Umount ISO file",
Hosts: r.Runtime.GetAllHosts(),
Action: new(UmountISO),
Parallel: true,
}
r.Tasks = []task.Interface{
getOSData,
sync,
mount,
newRepo,
backup,
add,
install,
reset,
umount,
}
}

View File

@@ -68,24 +68,6 @@ func (e *EtcdTypeIsKubeKey) PreCheck(_ connector.Runtime) (bool, error) {
return false, nil
}
type DeleteNode struct {
common.KubePrepare
}
func (d *DeleteNode) PreCheck(runtime connector.Runtime) (bool, error) {
nodeName, ok := d.PipelineCache.Get("dstNode")
if !ok {
return true, nil
}
host := runtime.RemoteHost()
if host.GetName() == nodeName {
return true, nil
}
return false, nil
}
type IsPveLxc struct {
common.KubePrepare
}

View File

@@ -1,44 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package repository
import (
"fmt"
"strings"
"bytetrade.io/web3os/installer/pkg/core/connector"
)
type Interface interface {
Backup(runtime connector.Runtime) error
IsAlreadyBackUp() bool
Add(runtime connector.Runtime, path string) error
Update(runtime connector.Runtime) error
Install(runtime connector.Runtime, pkg ...string) error
Reset(runtime connector.Runtime) error
}
func New(os string) (Interface, error) {
switch strings.ToLower(os) {
case "ubuntu", "debian":
return NewDeb(), nil
case "centos", "rhel":
return NewRPM(), nil
default:
return nil, fmt.Errorf("unsupported operation system %s", os)
}
}

View File

@@ -1,106 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package repository
import (
"fmt"
"strings"
"bytetrade.io/web3os/installer/pkg/core/connector"
)
type Debian struct {
backup bool
}
func NewDeb() Interface {
return &Debian{}
}
func (d *Debian) Backup(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("mv /etc/apt/sources.list /etc/apt/sources.list.kubekey.bak", false, false); err != nil {
return err
}
if _, err := runtime.GetRunner().SudoCmd("mv /etc/apt/sources.list.d /etc/apt/sources.list.d.kubekey.bak", false, false); err != nil {
return err
}
if _, err := runtime.GetRunner().SudoCmd("mkdir -p /etc/apt/sources.list.d", false, false); err != nil {
return err
}
d.backup = true
return nil
}
func (d *Debian) IsAlreadyBackUp() bool {
return d.backup
}
func (d *Debian) Add(runtime connector.Runtime, path string) error {
if !d.IsAlreadyBackUp() {
return fmt.Errorf("linux repository must be backuped before")
}
if _, err := runtime.GetRunner().SudoCmd("rm -rf /etc/apt/sources.list.d/*", false, false); err != nil {
return err
}
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("echo 'deb [trusted=yes] file://%s /' > /etc/apt/sources.list.d/kubekey.list", path),
true, false); err != nil {
return err
}
return nil
}
func (d *Debian) Update(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().Cmd("sudo apt-get update", true, false); err != nil {
return err
}
return nil
}
func (d *Debian) Install(runtime connector.Runtime, pkg ...string) error {
defaultPkg := []string{"socat", "conntrack", "ipset", "ebtables", "chrony", "ipvsadm"}
if len(pkg) == 0 {
pkg = defaultPkg
} else {
pkg = append(pkg, defaultPkg...)
}
str := strings.Join(pkg, " ")
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("apt install -y %s", str), true, false); err != nil {
return err
}
return nil
}
func (d *Debian) Reset(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("rm -rf /etc/apt/sources.list.d", false, false); err != nil {
return err
}
if _, err := runtime.GetRunner().SudoCmd("mv /etc/apt/sources.list.kubekey.bak /etc/apt/sources.list", false, false); err != nil {
return err
}
if _, err := runtime.GetRunner().SudoCmd("mv /etc/apt/sources.list.d.kubekey.bak /etc/apt/sources.list.d", false, false); err != nil {
return err
}
return nil
}

View File

@@ -1,110 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package repository
import (
"fmt"
"strings"
"bytetrade.io/web3os/installer/pkg/core/connector"
)
// RedhatPackageManager manages yum repositories on RHEL/CentOS-family hosts.
type RedhatPackageManager struct {
// backup records whether the original /etc/yum.repos.d has been saved.
backup bool
}
// NewRPM returns the yum-based implementation of the repository Interface.
func NewRPM() Interface {
return &RedhatPackageManager{}
}
// Backup moves /etc/yum.repos.d aside (to *.kubekey.bak) and recreates an
// empty repos directory, so a local repository can later be installed as the
// only source. Sets r.backup on success so Add is allowed to proceed.
func (r *RedhatPackageManager) Backup(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("mv /etc/yum.repos.d /etc/yum.repos.d.kubekey.bak", false, false); err != nil {
return err
}
if _, err := runtime.GetRunner().SudoCmd("mkdir -p /etc/yum.repos.d", false, false); err != nil {
return err
}
r.backup = true
return nil
}
// IsAlreadyBackUp reports whether Backup has already saved the original yum
// repos on this host; Add refuses to run until this is true.
func (r *RedhatPackageManager) IsAlreadyBackUp() bool {
return r.backup
}
// Add installs a single local yum repo definition pointing at path.
// It requires Backup to have run first and clears any existing repo files so
// the local repository is the only package source.
func (r *RedhatPackageManager) Add(runtime connector.Runtime, path string) error {
if !r.IsAlreadyBackUp() {
return fmt.Errorf("linux repository must be backuped before")
}
if _, err := runtime.GetRunner().SudoCmd("rm -rf /etc/yum.repos.d/*", false, false); err != nil {
return err
}
// NOTE(review): gpgcheck=1 with the CentOS-7 key will reject packages not
// signed by that key — confirm the local ISO's packages are signed, or that
// unsigned-package failures are acceptable here.
content := fmt.Sprintf(`cat << EOF > /etc/yum.repos.d/CentOS-local.repo
[base-local]
name=CentOS7.6-local
baseurl=file://%s
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF
`, path)
if _, err := runtime.GetRunner().SudoCmd(content, false, false); err != nil {
return err
}
return nil
}
// Update purges yum's cached metadata and rebuilds the cache so the newly
// added local repository is picked up.
func (r *RedhatPackageManager) Update(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("yum clean all && yum makecache", true, false); err != nil {
return err
}
return nil
}
// Install installs the requested packages plus a fixed set of cluster
// prerequisites via yum. Caller-supplied packages come first on the command
// line, followed by the defaults; with no arguments only the defaults remain.
func (r *RedhatPackageManager) Install(runtime connector.Runtime, pkg ...string) error {
	prerequisites := []string{"openssl", "socat", "conntrack", "ipset", "ebtables", "chrony", "ipvsadm"}
	// Appending to an empty caller list yields exactly the prerequisite set,
	// so both cases collapse into a single expression.
	all := append(pkg, prerequisites...)
	installCmd := fmt.Sprintf("yum install -y %s", strings.Join(all, " "))
	if _, err := runtime.GetRunner().SudoCmd(installCmd, true, false); err != nil {
		return err
	}
	return nil
}
// Reset restores the yum configuration saved by Backup: the kubekey-managed
// repos dir is removed and the .kubekey.bak copy is moved back into place.
func (r *RedhatPackageManager) Reset(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("rm -rf /etc/yum.repos.d", false, false); err != nil {
return err
}
if _, err := runtime.GetRunner().SudoCmd("mv /etc/yum.repos.d.kubekey.bak /etc/yum.repos.d", false, false); err != nil {
return err
}
return nil
}

View File

@@ -1,89 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package os
import (
"fmt"
"path/filepath"
"bytetrade.io/web3os/installer/pkg/bootstrap/os/repository"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/ending"
"github.com/pkg/errors"
)
// RollbackUmount is the rollback step that unmounts the temporary ISO mount
// used to serve the local package repository.
type RollbackUmount struct {
common.KubeRollback
}
// Execute unmounts <TmpDir>/iso, wrapping any umount failure with context.
func (r *RollbackUmount) Execute(runtime connector.Runtime, result *ending.ActionResult) error {
mountPath := filepath.Join(common.TmpDir, "iso")
umountCmd := fmt.Sprintf("umount %s", mountPath)
if _, err := runtime.GetRunner().SudoCmd(umountCmd, false, false); err != nil {
return errors.Wrapf(errors.WithStack(err), "umount %s failed", mountPath)
}
return nil
}
// RecoverBackupSuccessNode is the rollback step for nodes where the action
// succeeded: it restores the original OS repository config (only on success)
// and always unmounts the temporary ISO mount.
type RecoverBackupSuccessNode struct {
common.KubeRollback
}
// Execute resets the cached repository manager when the action ended in
// SUCCESS, then unmounts <TmpDir>/iso.
func (r *RecoverBackupSuccessNode) Execute(runtime connector.Runtime, result *ending.ActionResult) error {
if result.Status == ending.SUCCESS {
host := runtime.RemoteHost()
// The repository manager was stored in the host cache by an earlier task.
repo, ok := host.GetCache().Get("repo")
if !ok {
return errors.New("get repo failed by host cache")
}
re := repo.(repository.Interface)
if err := re.Reset(runtime); err != nil {
return errors.Wrapf(errors.WithStack(err), "reset repository failed")
}
}
mountPath := filepath.Join(common.TmpDir, "iso")
umountCmd := fmt.Sprintf("umount %s", mountPath)
if _, err := runtime.GetRunner().SudoCmd(umountCmd, false, false); err != nil {
return errors.Wrapf(errors.WithStack(err), "umount %s failed", mountPath)
}
return nil
}
// RecoverRepository is the unconditional rollback step: it best-effort resets
// the OS repository config and unmounts the temporary ISO mount.
type RecoverRepository struct {
common.KubeRollback
}
// Execute resets the cached repository manager (errors deliberately ignored —
// rollback proceeds regardless) and then unmounts <TmpDir>/iso.
func (r *RecoverRepository) Execute(runtime connector.Runtime, result *ending.ActionResult) error {
host := runtime.RemoteHost()
repo, ok := host.GetCache().Get("repo")
if !ok {
return errors.New("get repo failed by host cache")
}
re := repo.(repository.Interface)
// Best-effort: a failed reset must not block the umount below.
_ = re.Reset(runtime)
mountPath := filepath.Join(common.TmpDir, "iso")
umountCmd := fmt.Sprintf("umount %s", mountPath)
if _, err := runtime.GetRunner().SudoCmd(umountCmd, false, false); err != nil {
return errors.Wrapf(errors.WithStack(err), "umount %s failed", mountPath)
}
return nil
}

View File

@@ -18,7 +18,6 @@ package os
import (
"fmt"
"io/fs"
"io/ioutil"
"os"
"path"
@@ -29,7 +28,6 @@ import (
osrelease "github.com/dominodatalab/os-release"
"github.com/pkg/errors"
"bytetrade.io/web3os/installer/pkg/bootstrap/os/repository"
"bytetrade.io/web3os/installer/pkg/bootstrap/os/templates"
"bytetrade.io/web3os/installer/pkg/common"
cc "bytetrade.io/web3os/installer/pkg/core/common"
@@ -441,39 +439,6 @@ func (s *UninstallETCD) Execute(runtime connector.Runtime) error {
return nil
}
// RemoveNodeFiles best-effort deletes per-node Kubernetes, CNI, and kubekey
// state from the remote host during teardown.
type RemoveNodeFiles struct {
	common.KubeAction
}

// Execute removes each target path with "rm -rf". Individual failures are
// deliberately ignored so cleanup continues across the whole list.
func (r *RemoveNodeFiles) Execute(runtime connector.Runtime) error {
	targets := []string{
		"/etc/kubernetes",
		"/etc/systemd/system/etcd.service",
		"/var/log/calico",
		"/etc/cni",
		"/var/log/pods/",
		"/var/lib/cni",
		"/var/lib/calico",
		"/var/lib/kubelet",
		"/run/calico",
		"/run/flannel",
		"/etc/flannel",
		"/etc/systemd/system/kubelet.service",
		"/etc/systemd/system/kubelet.service.d",
		"/usr/local/bin/kubelet",
		"/usr/local/bin/kubeadm",
		"/usr/bin/kubelet",
		"/tmp/kubekey",
		"/etc/kubekey",
		"/var/openebs",
	}
	for _, target := range targets {
		// Errors are discarded on purpose: this is best-effort cleanup.
		_, _ = runtime.GetRunner().SudoCmd(fmt.Sprintf("rm -rf %s", target), false, false)
	}
	return nil
}
type RemoveClusterFiles struct {
common.KubeAction
}
@@ -487,106 +452,6 @@ func (r *RemoveClusterFiles) Execute(runtime connector.Runtime) error {
return nil
}
// BackupDirBase holds the resolved backup directory shared by the backup,
// restore, and clear actions below.
type BackupDirBase struct {
	// BackupDir is normalized by InitPath to an existing directory under the
	// runtime work dir.
	BackupDir string
}

// InitPath cleans BackupDir, forces it to live under the runtime work dir,
// and creates the directory. Returns an error for an empty path or a failed
// directory creation.
func (b *BackupDirBase) InitPath(runtime connector.Runtime) error {
	b.BackupDir = path.Clean(b.BackupDir)
	if b.BackupDir == "." {
		return errors.New("backup dir is empty")
	}
	// Bug fix: the containment check used strings.HasSuffix, which never
	// matches a path *inside* the work dir, so already-valid paths were
	// re-prefixed (workdir/workdir/...). HasPrefix is the correct test for
	// "is under workdir".
	if !strings.HasPrefix(b.BackupDir, runtime.GetWorkDir()) {
		logger.Warnf("backup dir does not in workdir %s, prepending the path prefix for safety", runtime.GetWorkDir())
		b.BackupDir = path.Join(runtime.GetWorkDir(), b.BackupDir)
	}
	if err := util.CreateDir(b.BackupDir); err != nil {
		return errors.Wrapf(err, "failed to create backup dir %s", b.BackupDir)
	}
	return nil
}
// BackupFilesToDir copies a list of files into the backup directory,
// mirroring each file's absolute path layout under BackupDir so it can be
// restored to its original location later.
type BackupFilesToDir struct {
common.KubeAction
*BackupDirBase
// Files are the paths to back up; empty or missing entries are skipped.
Files []string
}
// Execute resolves the backup dir, then copies each existing file into it.
func (a *BackupFilesToDir) Execute(runtime connector.Runtime) error {
if err := a.InitPath(runtime); err != nil {
return err
}
for _, file := range a.Files {
if file == "" {
continue
}
// Missing files are logged and skipped rather than failing the backup.
if !util.IsExist(file) {
logger.Warnf("backup target file does not exist: %s", file)
continue
}
if !filepath.IsAbs(file) {
var err error
file, err = filepath.Abs(file)
if err != nil {
return errors.Wrapf(err, "failed to get absolute path of %s", file)
}
}
// Recreate the file's parent directory structure under BackupDir.
if err := util.CreateDir(path.Join(a.BackupDir, path.Dir(file))); err != nil {
return errors.Wrapf(err, "failed to create backup dir %s for file %s", path.Dir(file), path.Base(file))
}
logger.Debugf("copying file %s to backup dir %s", file, a.BackupDir)
if err := util.CopyFile(file, path.Join(a.BackupDir, file)); err != nil {
return errors.Wrapf(err, "failed to copy file %s to backup dir", file)
}
}
return nil
}
// ClearBackUpDir deletes the entire backup directory (after resolving it the
// same way the other backup actions do).
type ClearBackUpDir struct {
common.KubeAction
*BackupDirBase
}
// Execute resolves the backup dir, then removes it recursively.
func (a *ClearBackUpDir) Execute(runtime connector.Runtime) error {
if err := a.InitPath(runtime); err != nil {
return err
}
if err := util.RemoveDir(a.BackupDir); err != nil {
return errors.Wrapf(err, "failed to remove backup dir %s", a.BackupDir)
}
return nil
}
// RestoreBackedUpFiles walks the backup directory and copies every file back
// to its original location (the path relative to BackupDir).
type RestoreBackedUpFiles struct {
common.KubeAction
*BackupDirBase
}
// Execute resolves the backup dir, then restores each regular file in it.
func (a *RestoreBackedUpFiles) Execute(runtime connector.Runtime) error {
if err := a.InitPath(runtime); err != nil {
return err
}
return filepath.WalkDir(a.BackupDir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
return nil
}
// NOTE(review): originalPath is *relative* (filepath.Rel strips the leading
// separator), so the restore target resolves against the process working
// directory, not the filesystem root the files were backed up from —
// confirm the caller guarantees an appropriate cwd.
originalPath, err := filepath.Rel(a.BackupDir, path)
if err != nil {
return errors.Wrapf(err, "failed to get original path of backed up file %s", path)
}
if err := util.CreateDir(filepath.Dir(originalPath)); err != nil {
return errors.Wrapf(err, "failed to create original dir of backed up file %s", originalPath)
}
if err := util.CopyFile(path, originalPath); err != nil {
return errors.Wrapf(err, "failed to restore backed up file %s", originalPath)
}
return nil
})
}
type DaemonReload struct {
common.KubeAction
}
@@ -619,194 +484,6 @@ func (g *GetOSData) Execute(runtime connector.Runtime) error {
return nil
}
// SyncRepositoryFile copies the per-OS repository ISO from the local work dir
// to the remote host's tmp dir, and records the ISO filename in the host
// cache for the mount step.
type SyncRepositoryFile struct {
common.KubeAction
}
// Execute resets the remote tmp dir, derives the ISO name from the cached OS
// release (id-version-arch.iso), and scp's it to the host.
func (s *SyncRepositoryFile) Execute(runtime connector.Runtime) error {
if err := utils.ResetTmpDir(runtime); err != nil {
return errors.Wrap(err, "reset tmp dir failed")
}
host := runtime.RemoteHost()
release, ok := host.GetCache().Get(Release)
if !ok {
return errors.New("get os release failed by root cache")
}
r := release.(*osrelease.Data)
fileName := fmt.Sprintf("%s-%s-%s.iso", r.ID, r.VersionID, host.GetArch())
src := filepath.Join(runtime.GetWorkDir(), "repository", host.GetArch(), r.ID, r.VersionID, fileName)
dst := filepath.Join(common.TmpDir, fileName)
if err := runtime.GetRunner().Scp(src, dst); err != nil {
return errors.Wrapf(errors.WithStack(err), "scp %s to %s failed", src, dst)
}
// Later tasks (MountISO) read the ISO filename from this cache key.
host.GetCache().Set("iso", fileName)
return nil
}
// MountISO loop-mounts the previously synced repository ISO at <TmpDir>/iso
// on the remote host.
type MountISO struct {
common.KubeAction
}
// Execute creates the mount point and mounts the ISO (filename taken from the
// "iso" host-cache key set by SyncRepositoryFile).
func (m *MountISO) Execute(runtime connector.Runtime) error {
mountPath := filepath.Join(common.TmpDir, "iso")
if err := runtime.GetRunner().MkDir(mountPath); err != nil {
return errors.Wrapf(errors.WithStack(err), "create mount dir failed")
}
host := runtime.RemoteHost()
isoFile, _ := host.GetCache().GetMustString("iso")
path := filepath.Join(common.TmpDir, isoFile)
// NOTE(review): this escalates via an inline "sudo" through Cmd rather than
// SudoCmd as elsewhere in the file — confirm this is intentional.
mountCmd := fmt.Sprintf("sudo mount -t iso9660 -o loop %s %s", path, mountPath)
if _, err := runtime.GetRunner().Cmd(mountCmd, false, false); err != nil {
return errors.Wrapf(errors.WithStack(err), "mount %s at %s failed", path, mountPath)
}
return nil
}
// NewRepoClient constructs the package-repository manager for the remote host
// and stores it in the host cache under "repo" for later tasks.
type NewRepoClient struct {
common.KubeAction
}
// Execute first tries to pick a manager from the cached OS release ID; if the
// ID is unrecognized it falls back to probing for apt and yum binaries.
// Exactly one of apt/yum must be detected for the fallback to succeed.
func (n *NewRepoClient) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
release, ok := host.GetCache().Get(Release)
if !ok {
return errors.New("get os release failed by host cache")
}
r := release.(*osrelease.Data)
repo, err := repository.New(r.ID)
if err != nil {
// Unknown distro ID: probe for the package managers directly.
checkDeb, debErr := runtime.GetRunner().SudoCmd("which apt", false, false)
if debErr == nil && strings.Contains(checkDeb, "bin") {
repo = repository.NewDeb()
}
checkRPM, rpmErr := runtime.GetRunner().SudoCmd("which yum", false, false)
if rpmErr == nil && strings.Contains(checkRPM, "bin") {
repo = repository.NewRPM()
}
// Neither found: surface the original lookup error. Both found: ambiguous.
if debErr != nil && rpmErr != nil {
return errors.Wrap(errors.WithStack(err), "new repository manager failed")
} else if debErr == nil && rpmErr == nil {
return errors.New("can't detect the main package repository, only one of apt or yum is supported")
}
}
host.GetCache().Set("repo", repo)
return nil
}
// BackupOriginalRepository saves the host's existing package-repository
// configuration via the cached repository manager, so it can be restored by
// ResetRepository or the rollback actions.
type BackupOriginalRepository struct {
common.KubeAction
}
// Execute fetches the "repo" manager from the host cache and runs its Backup.
func (b *BackupOriginalRepository) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
r, ok := host.GetCache().Get("repo")
if !ok {
return errors.New("get repo failed by host cache")
}
repo := r.(repository.Interface)
if err := repo.Backup(runtime); err != nil {
return errors.Wrap(errors.WithStack(err), "backup repository failed")
}
return nil
}
// AddLocalRepository registers the mounted ISO (<TmpDir>/iso) as the host's
// sole package repository and refreshes the package index.
type AddLocalRepository struct {
common.KubeAction
}
// Execute runs the cached repository manager's Add (which requires a prior
// Backup) followed by Update.
func (a *AddLocalRepository) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
r, ok := host.GetCache().Get("repo")
if !ok {
return errors.New("get repo failed by host cache")
}
repo := r.(repository.Interface)
if installErr := repo.Add(runtime, filepath.Join(common.TmpDir, "iso")); installErr != nil {
return errors.Wrap(errors.WithStack(installErr), "add local repository failed")
}
if installErr := repo.Update(runtime); installErr != nil {
return errors.Wrap(errors.WithStack(installErr), "update local repository failed")
}
return nil
}
// InstallPackage installs the OS packages configured for the cluster using
// the cached repository manager (deb list for Debian, rpm list for RedHat).
type InstallPackage struct {
common.KubeAction
}
// Execute selects the package list matching the manager's concrete type and
// delegates to its Install (which also adds the default prerequisites).
func (i *InstallPackage) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
repo, ok := host.GetCache().Get("repo")
if !ok {
return errors.New("get repo failed by host cache")
}
r := repo.(repository.Interface)
var pkg []string
if _, ok := r.(*repository.Debian); ok {
pkg = i.KubeConf.Cluster.System.Debs
} else if _, ok := r.(*repository.RedhatPackageManager); ok {
pkg = i.KubeConf.Cluster.System.Rpms
}
if installErr := r.Install(runtime, pkg...); installErr != nil {
return errors.Wrap(errors.WithStack(installErr), "install repository package failed")
}
return nil
}
// ResetRepository restores the host's original package-repository
// configuration after the local-ISO installation is done.
type ResetRepository struct {
common.KubeAction
}
// Execute runs the cached repository manager's Reset; if the reset fails, the
// deferred cleanup best-effort unmounts the ISO so it is not left mounted.
func (r *ResetRepository) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
repo, ok := host.GetCache().Get("repo")
if !ok {
return errors.New("get repo failed by host cache")
}
re := repo.(repository.Interface)
var resetErr error
defer func() {
// Only unmount here on failure; the success path unmounts via UmountISO.
if resetErr != nil {
mountPath := filepath.Join(common.TmpDir, "iso")
umountCmd := fmt.Sprintf("umount %s", mountPath)
_, _ = runtime.GetRunner().SudoCmd(umountCmd, false, false)
}
}()
if resetErr = re.Reset(runtime); resetErr != nil {
return errors.Wrap(errors.WithStack(resetErr), "reset repository failed")
}
return nil
}
// UmountISO unmounts the repository ISO from <TmpDir>/iso on the remote host.
type UmountISO struct {
common.KubeAction
}
// Execute runs "umount" on the ISO mount point and wraps any failure.
func (u *UmountISO) Execute(runtime connector.Runtime) error {
mountPath := filepath.Join(common.TmpDir, "iso")
umountCmd := fmt.Sprintf("umount %s", mountPath)
if _, err := runtime.GetRunner().SudoCmd(umountCmd, false, false); err != nil {
return errors.Wrapf(errors.WithStack(err), "umount %s failed", mountPath)
}
return nil
}
type NodeConfigureNtpServer struct {
common.KubeAction
}

View File

@@ -2,19 +2,15 @@ package patch
import (
"fmt"
"path"
"strings"
"bytetrade.io/web3os/installer/pkg/utils"
"github.com/pkg/errors"
kubekeyapiv1alpha2 "bytetrade.io/web3os/installer/apis/kubekey/v1alpha2"
"bytetrade.io/web3os/installer/pkg/binaries"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/manifest"
)
type EnableSSHTask struct {
@@ -134,91 +130,6 @@ func (t *PatchTask) Execute(runtime connector.Runtime) error {
return nil
}
// SocatTask downloads the socat source tarball (per the manifest), builds it
// from source, and installs it to /usr.
type SocatTask struct {
common.KubeAction
manifest.ManifestAction
}
// Execute fetches, extracts, configures, and "make install"s socat, then
// returns the process to the runtime base dir.
func (t *SocatTask) Execute(runtime connector.Runtime) error {
filePath, fileName, err := binaries.GetSocat(t.BaseDir, t.Manifest)
if err != nil {
logger.Errorf("failed to download socat: %v", err)
return err
}
f := path.Join(filePath, fileName)
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("tar xzvf %s -C %s", f, filePath), false, false); err != nil {
logger.Errorf("failed to extract %s %v", f, err)
return err
}
tp := path.Join(filePath, fmt.Sprintf("socat-%s", kubekeyapiv1alpha2.DefaultSocatVersion))
// NOTE(review): if ChangeDir(tp) fails the build is silently skipped and the
// task still succeeds — confirm this is the intended best-effort behavior.
if err := util.ChangeDir(tp); err == nil {
if _, err := runtime.GetRunner().SudoCmd("./configure --prefix=/usr && make -j4 && make install && strip socat", false, false); err != nil {
logger.Errorf("failed to install socat %v", err)
return err
}
}
if err := util.ChangeDir(runtime.GetBaseDir()); err != nil {
logger.Errorf("failed to change dir %v", err)
return err
}
return nil
}
// ConntrackTask downloads and builds conntrack-tools from source, along with
// its flex build dependency, installing both to /usr.
type ConntrackTask struct {
common.KubeAction
manifest.ManifestAction
}
// Execute fetches the flex and conntrack tarballs, extracts both, builds flex
// first (build dependency), then conntrack-tools, and finally returns the
// process to the runtime base dir.
func (t *ConntrackTask) Execute(runtime connector.Runtime) error {
flexFilePath, flexFileName, err := binaries.GetFlex(t.BaseDir, t.Manifest)
if err != nil {
logger.Errorf("failed to download flex: %v", err)
return err
}
filePath, fileName, err := binaries.GetConntrack(t.BaseDir, t.Manifest)
if err != nil {
logger.Errorf("failed to download conntrack: %v", err)
return err
}
fl := path.Join(flexFilePath, flexFileName)
f := path.Join(filePath, fileName)
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("tar xzvf %s -C %s", fl, filePath), false, true); err != nil {
logger.Errorf("failed to extract %s %v", flexFilePath, err)
return err
}
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("tar xzvf %s -C %s", f, filePath), false, true); err != nil {
logger.Errorf("failed to extract %s %v", f, err)
return err
}
// install
// NOTE(review): as in SocatTask, a failed ChangeDir silently skips the build
// step for that component — confirm best-effort is intended.
fp := path.Join(flexFilePath, fmt.Sprintf("flex-%s", kubekeyapiv1alpha2.DefaultFlexVersion))
if err := util.ChangeDir(fp); err == nil {
if _, err := runtime.GetRunner().SudoCmd("autoreconf -i && ./configure --prefix=/usr && make -j4 && make install", false, true); err != nil {
logger.Errorf("failed to install flex %v", err)
return err
}
}
tp := path.Join(filePath, fmt.Sprintf("conntrack-tools-conntrack-tools-%s", kubekeyapiv1alpha2.DefaultConntrackVersion))
if err := util.ChangeDir(tp); err == nil {
if _, err := runtime.GetRunner().SudoCmd("autoreconf -i && ./configure --prefix=/usr && make -j4 && make install", false, true); err != nil {
logger.Errorf("failed to install conntrack %v", err)
return err
}
}
if err := util.ChangeDir(runtime.GetBaseDir()); err != nil {
logger.Errorf("failed to change dir %v", err)
return err
}
return nil
}
type CorrectHostname struct {
common.KubeAction
}

View File

@@ -21,7 +21,6 @@ import (
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/module"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/manifest"
)
@@ -66,10 +65,6 @@ func (m *GetStorageKeyModule) Init() {
}
}
type GetKubeVersionModule struct {
module.BaseTaskModule
}
type RunPrechecksModule struct {
common.KubeModule
manifest.ManifestModule
@@ -156,87 +151,3 @@ func (n *NodePreCheckModule) Init() {
preCheck,
}
}
// ClusterPreCheckModule wires up the pre-upgrade checks that run against an
// existing cluster: kubeconfig retrieval, per-node and desired Kubernetes
// version checks, KubeSphere version/dependency checks, and node status.
type ClusterPreCheckModule struct {
common.KubeModule
}
// Init builds the module's task list; single-master tasks are gated with
// OnlyFirstMaster, per-node tasks run on all K8s-role hosts.
func (c *ClusterPreCheckModule) Init() {
c.Name = "ClusterPreCheckModule"
c.Desc = "Do pre-check on cluster"
getKubeConfig := &task.RemoteTask{
Name: "GetKubeConfig",
Desc: "Get KubeConfig file",
Hosts: c.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(GetKubeConfig),
Parallel: true,
}
getAllNodesK8sVersion := &task.RemoteTask{
Name: "GetAllNodesK8sVersion",
Desc: "Get all nodes Kubernetes version",
Hosts: c.Runtime.GetHostsByRole(common.K8s),
Action: new(GetAllNodesK8sVersion),
Parallel: true,
}
calculateMinK8sVersion := &task.RemoteTask{
Name: "CalculateMinK8sVersion",
Desc: "Calculate min Kubernetes version",
Hosts: c.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(CalculateMinK8sVersion),
Parallel: true,
}
checkDesiredK8sVersion := &task.RemoteTask{
Name: "CheckDesiredK8sVersion",
Desc: "Check desired Kubernetes version",
Hosts: c.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(CheckDesiredK8sVersion),
Parallel: true,
}
ksVersionCheck := &task.RemoteTask{
Name: "KsVersionCheck",
Desc: "Check KubeSphere version",
Hosts: c.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(KsVersionCheck),
Parallel: true,
}
// DependencyCheck additionally requires KubeSphere to be present.
dependencyCheck := &task.RemoteTask{
Name: "DependencyCheck",
Desc: "Check dependency matrix for KubeSphere and Kubernetes",
Hosts: c.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(KubeSphereExist),
},
Action: new(DependencyCheck),
Parallel: true,
}
getKubernetesNodesStatus := &task.RemoteTask{
Name: "GetKubernetesNodesStatus",
Desc: "Get kubernetes nodes status",
Hosts: c.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(GetKubernetesNodesStatus),
Parallel: true,
}
// Order matters: later checks read pipeline-cache values set by earlier ones.
c.Tasks = []task.Interface{
getKubeConfig,
getAllNodesK8sVersion,
calculateMinK8sVersion,
checkDesiredK8sVersion,
ksVersionCheck,
dependencyCheck,
getKubernetesNodesStatus,
}
}

View File

@@ -1,38 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package precheck
import (
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"github.com/pkg/errors"
)
// KubeSphereExist gates tasks on whether a KubeSphere installation was
// detected earlier in the pipeline (recorded as a non-empty version string).
type KubeSphereExist struct {
	common.KubePrepare
}

// PreCheck returns true when the cached KubeSphere version is non-empty, and
// an error when the pipeline cache has no entry at all.
func (k *KubeSphereExist) PreCheck(runtime connector.Runtime) (bool, error) {
	currentKsVersion, ok := k.PipelineCache.GetMustString(common.KubeSphereVersion)
	if !ok {
		return false, errors.New("get current KubeSphere version failed by pipeline cache")
	}
	return currentKsVersion != "", nil
}

View File

@@ -21,16 +21,16 @@ import (
"bytes"
"context"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/Masterminds/semver/v3"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"net"
"os"
"regexp"
ctrl "sigs.k8s.io/controller-runtime"
"strings"
"time"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/action"
@@ -38,10 +38,7 @@ import (
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/utils"
"bytetrade.io/web3os/installer/pkg/version/kubernetes"
"bytetrade.io/web3os/installer/pkg/version/kubesphere"
"github.com/pkg/errors"
versionutil "k8s.io/apimachinery/pkg/util/version"
kclient "k8s.io/client-go/kubernetes"
)
@@ -357,196 +354,6 @@ func (n *NodePreCheck) Execute(runtime connector.Runtime) error {
return nil
}
// GetKubeConfig ensures a usable kubeconfig exists at $HOME/.kube/config,
// copying /etc/kubernetes/admin.conf into place (and chowning it to the
// invoking sudo user) when necessary.
type GetKubeConfig struct {
	common.KubeAction
}

// Execute returns nil when a kubeconfig already exists or was successfully
// installed from admin.conf; otherwise it returns "kube config not found".
func (g *GetKubeConfig) Execute(runtime connector.Runtime) error {
	// NOTE(review): util.IsExist receives the literal string "$HOME/..." —
	// shell variables are not expanded here; confirm against util.IsExist.
	var kubeConfigPath = "$HOME/.kube/config"
	if util.IsExist(kubeConfigPath) {
		return nil
	}
	if util.IsExist("/etc/kubernetes/admin.conf") {
		if _, err := runtime.GetRunner().Cmd("mkdir -p $HOME/.kube", false, false); err != nil {
			return err
		}
		if _, err := runtime.GetRunner().SudoCmd("cp /etc/kubernetes/admin.conf $HOME/.kube/config", false, false); err != nil {
			return err
		}
		// Use the invoking (pre-sudo) user's uid/gid so the copied config is
		// owned by the real user, not root.
		userId, err := runtime.GetRunner().Cmd("echo $SUDO_UID", false, false)
		if err != nil {
			return errors.Wrap(errors.WithStack(err), "get user id failed")
		}
		userGroupId, err := runtime.GetRunner().Cmd("echo $SUDO_GID", false, false)
		if err != nil {
			return errors.Wrap(errors.WithStack(err), "get user group id failed")
		}
		chownKubeConfig := fmt.Sprintf("chown -R %s:%s $HOME/.kube", userId, userGroupId)
		if _, err := runtime.GetRunner().SudoCmd(chownKubeConfig, false, false); err != nil {
			return errors.Wrap(errors.WithStack(err), "chown user kube config failed")
		}
		// Bug fix: the original fell through to the "kube config not found"
		// error even after a successful copy and chown.
		return nil
	}
	return errors.New("kube config not found")
}
// GetAllNodesK8sVersion records each node's Kubernetes version in the host
// cache: the kubelet version for workers, overridden by the kube-apiserver
// image tag on masters.
type GetAllNodesK8sVersion struct {
	common.KubeAction
}

// Execute parses "kubelet --version" output and, on masters, the apiserver
// manifest's image tag, storing the result under common.NodeK8sVersion.
func (g *GetAllNodesK8sVersion) Execute(runtime connector.Runtime) error {
	kubeletVersionInfo, err := runtime.GetRunner().SudoCmd("/usr/local/bin/kubelet --version", false, false)
	if err != nil {
		return errors.Wrap(err, "get current kubelet version failed")
	}
	// Robustness fix: indexing [1] unconditionally panicked when the output
	// did not contain a space (e.g. an error banner from the binary).
	fields := strings.Split(kubeletVersionInfo, " ")
	if len(fields) < 2 {
		return errors.Errorf("unexpected kubelet version output: %q", kubeletVersionInfo)
	}
	nodeK8sVersion := fields[1]
	host := runtime.RemoteHost()
	if host.IsRole(common.Master) {
		// The apiserver image tag (after the last ':') is the authoritative
		// version on master nodes.
		apiserverVersion, err := runtime.GetRunner().SudoCmd(
			"cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep 'image:' | rev | cut -d ':' -f1 | rev",
			false, false)
		if err != nil {
			return errors.Wrap(err, "get current kube-apiserver version failed")
		}
		nodeK8sVersion = apiserverVersion
	}
	host.GetCache().Set(common.NodeK8sVersion, nodeK8sVersion)
	return nil
}
// CalculateMinK8sVersion computes the lowest Kubernetes version across all
// K8s-role hosts (read from each host's cache) and stores it in the pipeline
// cache under common.K8sVersion as "v<semver>".
type CalculateMinK8sVersion struct {
	common.KubeAction
}

// Execute parses every node's cached version and records the minimum.
func (g *CalculateMinK8sVersion) Execute(runtime connector.Runtime) error {
	hosts := runtime.GetHostsByRole(common.K8s)
	versionList := make([]*versionutil.Version, 0, len(hosts))
	for _, host := range hosts {
		version, ok := host.GetCache().GetMustString(common.NodeK8sVersion)
		if !ok {
			return errors.Errorf("get node %s Kubernetes version failed by host cache", host.GetName())
		}
		versionObj, err := versionutil.ParseSemantic(version)
		if err != nil {
			return errors.Wrap(err, "parse node version failed")
		}
		versionList = append(versionList, versionObj)
	}
	// Robustness fix: the original indexed versionList[0] unconditionally,
	// which panicked when there were no K8s-role hosts.
	if len(versionList) == 0 {
		return errors.New("no Kubernetes node versions found to calculate the minimum")
	}
	minVersion := versionList[0]
	for _, version := range versionList {
		if !minVersion.LessThan(version) {
			minVersion = version
		}
	}
	g.PipelineCache.Set(common.K8sVersion, fmt.Sprintf("v%s", minVersion))
	return nil
}
// CheckDesiredK8sVersion validates that the configured target Kubernetes
// version is supported for upgrade and records it in the pipeline cache under
// common.DesiredK8sVersion.
type CheckDesiredK8sVersion struct {
	common.KubeAction
}

func (k *CheckDesiredK8sVersion) Execute(_ connector.Runtime) error {
	if ok := kubernetes.VersionSupport(k.KubeConf.Cluster.Kubernetes.Version); !ok {
		// Idiom: errors.Errorf replaces errors.New(fmt.Sprintf(...)).
		return errors.Errorf("does not support upgrade to Kubernetes %s",
			k.KubeConf.Cluster.Kubernetes.Version)
	}
	k.PipelineCache.Set(common.DesiredK8sVersion, k.KubeConf.Cluster.Kubernetes.Version)
	return nil
}
// KsVersionCheck discovers the installed KubeSphere version and stores it in
// the pipeline cache (empty string when KubeSphere is absent and not enabled).
type KsVersionCheck struct {
common.KubeAction
}
// Execute reads the ks-console deployment's version label; a lookup failure
// is fatal only when KubeSphere is enabled in the cluster config. The v3.1.0
// label is refined via the ks-installer ClusterConfiguration label when
// available.
func (k *KsVersionCheck) Execute(runtime connector.Runtime) error {
ksVersionStr, err := runtime.GetRunner().SudoCmd(
"/usr/local/bin/kubectl get deploy -n kubesphere-system ks-console -o jsonpath='{.metadata.labels.version}'",
false, false)
if err != nil {
if k.KubeConf.Cluster.KubeSphere.Enabled {
return errors.Wrap(err, "get kubeSphere version failed")
} else {
// KubeSphere not enabled: treat "not installed" as an empty version.
ksVersionStr = ""
}
}
ccKsVersionStr, ccErr := runtime.GetRunner().SudoCmd(
"/usr/local/bin/kubectl get ClusterConfiguration ks-installer -n kubesphere-system -o jsonpath='{.metadata.labels.version}'",
false, false)
if ccErr == nil && ksVersionStr == "v3.1.0" {
ksVersionStr = ccKsVersionStr
}
k.PipelineCache.Set(common.KubeSphereVersion, ksVersionStr)
return nil
}
// DependencyCheck validates the KubeSphere/Kubernetes compatibility matrix:
// that the desired KubeSphere version exists, that upgrading from the current
// version is supported, and that it can run on the target Kubernetes version.
type DependencyCheck struct {
common.KubeAction
}
// Execute reads the current KubeSphere version from the pipeline cache and
// checks it against the configured desired versions via the version map.
func (d *DependencyCheck) Execute(_ connector.Runtime) error {
currentKsVersion, ok := d.PipelineCache.GetMustString(common.KubeSphereVersion)
if !ok {
return errors.New("get current KubeSphere version failed by pipeline cache")
}
desiredVersion := d.KubeConf.Cluster.KubeSphere.Version
if d.KubeConf.Cluster.KubeSphere.Enabled {
// Normalize the desired version: released, dev, or a bare "vX.Y.Z"
// extracted from the string.
var version string
if latest, ok := kubesphere.LatestRelease(desiredVersion); ok {
version = latest.Version
} else if ks, ok := kubesphere.DevRelease(desiredVersion); ok {
version = ks.Version
} else {
r := regexp.MustCompile("v(\\d+\\.)?(\\d+\\.)?(\\*|\\d+)")
version = r.FindString(desiredVersion)
}
ksInstaller, ok := kubesphere.VersionMap[version]
if !ok {
return errors.New(fmt.Sprintf("Unsupported version: %s", desiredVersion))
}
// Only validate the upgrade path when the version actually changes.
if currentKsVersion != desiredVersion {
if ok := ksInstaller.UpgradeSupport(currentKsVersion); !ok {
return errors.New(fmt.Sprintf("Unsupported upgrade plan: %s to %s", currentKsVersion, desiredVersion))
}
}
if ok := ksInstaller.K8sSupport(d.KubeConf.Cluster.Kubernetes.Version); !ok {
return errors.New(fmt.Sprintf("KubeSphere %s does not support running on Kubernetes %s",
version, d.KubeConf.Cluster.Kubernetes.Version))
}
} else {
// KubeSphere disabled: still ensure the installed version tolerates the
// target Kubernetes version.
ksInstaller, ok := kubesphere.VersionMap[currentKsVersion]
if !ok {
return errors.New(fmt.Sprintf("Unsupported version: %s", currentKsVersion))
}
if ok := ksInstaller.K8sSupport(d.KubeConf.Cluster.Kubernetes.Version); !ok {
return errors.New(fmt.Sprintf("KubeSphere %s does not support running on Kubernetes %s",
currentKsVersion, d.KubeConf.Cluster.Kubernetes.Version))
}
}
return nil
}
type GetKubernetesNodesStatus struct {
common.KubeAction
}

View File

@@ -16,126 +16,6 @@
package registry
import (
"crypto/x509"
"fmt"
"net"
"path/filepath"
"strings"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/utils/certs"
"github.com/pkg/errors"
"k8s.io/client-go/util/cert"
certutil "k8s.io/client-go/util/cert"
netutils "k8s.io/utils/net"
)
const (
RegistryCertificateBaseName = "dockerhub.kubekey.local"
LocalCertsDir = "localCertsDir"
CertsFileList = "certsFileList"
)
// KubekeyCertRegistryCA returns the definition of the self-signed root CA
// used to provision identities for the local image registry.
func KubekeyCertRegistryCA() *certs.KubekeyCert {
return &certs.KubekeyCert{
Name: "registry-ca",
LongName: "self-signed CA to provision identities for registry",
BaseName: "ca",
Config: certs.CertConfig{
Config: certutil.Config{
CommonName: "registry-ca",
},
},
}
}
// KubekeyCertRegistryServer returns the definition of the registry server
// certificate, signed by the registry CA, valid for both server and client
// auth with the given SANs.
func KubekeyCertRegistryServer(altNames *certutil.AltNames) *certs.KubekeeyCert {
return &certs.KubekeyCert{
Name: "registry-server",
LongName: "certificate for registry server",
BaseName: RegistryCertificateBaseName,
CAName: "registry-ca",
Config: certs.CertConfig{
Config: certutil.Config{
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
AltNames: *altNames,
CommonName: RegistryCertificateBaseName,
},
},
}
}
// FetchCerts downloads any existing registry certificates (*.pem) from the
// remote host's /etc/ssl/registry/ssl into the local work dir.
type FetchCerts struct {
common.KubeAction
}
// Execute lists remote .pem files and fetches each one. A failed listing is
// deliberately treated as "no certs present" and returns nil (best-effort).
func (f *FetchCerts) Execute(runtime connector.Runtime) error {
src := "/etc/ssl/registry/ssl"
dst := fmt.Sprintf("%s/pki/registry", runtime.GetWorkDir())
certs, err := runtime.GetRunner().SudoCmd("ls /etc/ssl/registry/ssl/ | grep .pem", false, false)
if err != nil {
// Best-effort: no remote certs (or no dir) is not an error here.
return nil
}
certsList := strings.Split(certs, "\r\n")
if len(certsList) > 0 {
for _, cert := range certsList {
if err := runtime.GetRunner().Fetch(filepath.Join(dst, cert), filepath.Join(src, cert), false, true); err != nil {
return errors.Wrap(err, fmt.Sprintf("Fetch %s failed", filepath.Join(src, cert)))
}
}
}
return nil
}
// GenerateCerts creates the registry CA and server certificate locally under
// <workdir>/pki/registry, and publishes the output dir and expected file list
// via the module cache for the sync tasks.
type GenerateCerts struct {
common.KubeAction
}
// Execute builds the SAN list from the first registry host, generates the CA
// first and then the CA-signed server cert, and records the results.
func (g *GenerateCerts) Execute(runtime connector.Runtime) error {
pkiPath := fmt.Sprintf("%s/pki/registry", runtime.GetWorkDir())
var altName cert.AltNames
dnsList := []string{"localhost", RegistryCertificateBaseName, runtime.GetHostsByRole(common.Registry)[0].GetName()}
ipList := []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback, netutils.ParseIPSloppy(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress())}
altName.DNSNames = dnsList
altName.IPs = ipList
files := []string{"ca.pem", "ca-key.pem", fmt.Sprintf("%s.pem", RegistryCertificateBaseName), fmt.Sprintf("%s-key.pem", RegistryCertificateBaseName)}
// CA
certsList := []*certs.KubekeyCert{KubekeyCertRegistryCA()}
// Certs
certsList = append(certsList, KubekeyCertRegistryServer(&altName))
// Generate the CA before any CA-signed cert; lastCACert carries the signer.
var lastCACert *certs.KubekeyCert
for _, c := range certsList {
if c.CAName == "" {
err := certs.GenerateCA(c, pkiPath, g.KubeConf)
if err != nil {
return err
}
lastCACert = c
} else {
err := certs.GenerateCerts(c, lastCACert, pkiPath, g.KubeConf)
if err != nil {
return err
}
}
}
g.ModuleCache.Set(LocalCertsDir, pkiPath)
g.ModuleCache.Set(CertsFileList, files)
return nil
}

View File

@@ -1,295 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"fmt"
"path/filepath"
"bytetrade.io/web3os/installer/pkg/bootstrap/registry/templates"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/container"
docker_template "bytetrade.io/web3os/installer/pkg/container/templates"
"bytetrade.io/web3os/installer/pkg/core/action"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/core/util"
)
// RegistryCertsModule prepares TLS certificates for the local image
// registry: fetch any existing certs from the registry node, generate
// missing ones locally, then distribute them to the registry node and to
// every node in the cluster.
type RegistryCertsModule struct {
    common.KubeModule
    Skip bool
}

// IsSkip reports whether the whole module should be skipped.
func (p *RegistryCertsModule) IsSkip() bool {
    return p.Skip
}

func (i *RegistryCertsModule) Init() {
    // NOTE(review): the module is named "InitRegistryModule" although the
    // type is RegistryCertsModule — possibly a copy-paste leftover; left
    // unchanged because the name may appear in logs/automation.
    i.Name = "InitRegistryModule"
    i.Desc = "Init a local registry"

    // Pull pre-existing certs from the first registry node (if any).
    fetchCerts := &task.RemoteTask{
        Name:     "FetchRegistryCerts",
        Desc:     "Fetch registry certs",
        Hosts:    i.Runtime.GetHostsByRole(common.Registry),
        Prepare:  new(FirstRegistryNode),
        Action:   new(FetchCerts),
        Parallel: false,
    }

    // Generate CA + server cert locally in the work directory.
    generateCerts := &task.LocalTask{
        Name:   "GenerateRegistryCerts",
        Desc:   "Generate registry Certs",
        Action: new(GenerateCerts),
    }

    // Push the generated files to the registry node(s).
    syncCertsFile := &task.RemoteTask{
        Name:     "SyncCertsFile",
        Desc:     "Synchronize certs file",
        Hosts:    i.Runtime.GetHostsByRole(common.Registry),
        Action:   new(SyncCertsFile),
        Parallel: true,
        Retry:    1,
    }

    // Distribute the trust material to every node in the cluster.
    syncCertsToAllNodes := &task.RemoteTask{
        Name:     "SyncCertsFileToAllNodes",
        Desc:     "Synchronize certs file to all nodes",
        Hosts:    i.Runtime.GetAllHosts(),
        Action:   new(SyncCertsToAllNodes),
        Parallel: true,
        Retry:    1,
    }

    i.Tasks = []task.Interface{
        fetchCerts,
        generateCerts,
        syncCertsFile,
        syncCertsToAllNodes,
    }
}
// InstallRegistryModule installs a local image registry. Depending on the
// configured registry type it installs either Harbor or a plain registry.
type InstallRegistryModule struct {
    common.KubeModule
}

// Init selects the task list for the configured registry type: Harbor gets
// the full docker + docker-compose + Harbor stack, anything else gets the
// plain registry binary.
func (i *InstallRegistryModule) Init() {
    i.Name = "InstallRegistryModule"
    i.Desc = "Install local registry"

    if i.KubeConf.Cluster.Registry.Type == common.Harbor {
        i.Tasks = InstallHarbor(i)
    } else {
        i.Tasks = InstallRegistry(i)
    }
}
// InstallRegistry builds the task list that installs, configures and starts
// a plain "registry" (Docker Distribution) server on the registry-role
// hosts: binary install, systemd unit, config file, service start.
func InstallRegistry(i *InstallRegistryModule) []task.Interface {
    installRegistryBinary := &task.RemoteTask{
        Name:     "InstallRegistryBinary",
        Desc:     "Install local registry",
        Hosts:    i.Runtime.GetHostsByRole(common.Registry),
        Action:   new(InstallRegistryBinary),
        Parallel: true,
        Retry:    1,
    }

    generateRegistryService := &task.RemoteTask{
        Name:  "GenerateRegistryService",
        Desc:  "Generate registry service",
        Hosts: i.Runtime.GetHostsByRole(common.Registry),
        Action: &action.Template{
            Name:     "GenerateRegistryService",
            Template: templates.RegistryServiceTempl,
            Dst:      "/etc/systemd/system/registry.service",
        },
        Parallel: true,
        Retry:    1,
    }

    generateRegistryConfig := &task.RemoteTask{
        Name:  "GenerateRegistryConfig",
        Desc:  "Generate registry config",
        Hosts: i.Runtime.GetHostsByRole(common.Registry),
        Action: &action.Template{
            Name:     "GenerateRegistryConfig",
            Template: templates.RegistryConfigTempl,
            Dst:      "/etc/kubekey/registry/config.yaml",
            Data: util.Data{
                "Certificate": fmt.Sprintf("%s.pem", RegistryCertificateBaseName),
                "Key":         fmt.Sprintf("%s-key.pem", RegistryCertificateBaseName),
            },
        },
        Parallel: true,
        Retry:    1,
    }

    // Fixed local variable typo: was "startRgistryService".
    startRegistryService := &task.RemoteTask{
        Name:     "StartRegistryService",
        Desc:     "Start registry service",
        Hosts:    i.Runtime.GetHostsByRole(common.Registry),
        Action:   new(StartRegistryService),
        Parallel: true,
        Retry:    1,
    }

    return []task.Interface{
        installRegistryBinary,
        generateRegistryService,
        generateRegistryConfig,
        startRegistryService,
    }
}
// InstallHarbor builds the task list that installs Harbor on the
// registry-role hosts: docker service/config (only when docker is absent),
// docker-compose, the Harbor package, its systemd unit and harbor.yml, and
// finally the install/start step. Several docker-related tasks are kept
// commented out, presumably handled elsewhere — confirm before re-enabling.
func InstallHarbor(i *InstallRegistryModule) []task.Interface {
    // Install docker
    // syncBinaries := &task.RemoteTask{
    // Name: "SyncDockerBinaries",
    // Desc: "Sync docker binaries",
    // Hosts: i.Runtime.GetHostsByRole(common.Registry),
    // Prepare: &prepare.PrepareCollection{
    // &container.DockerExist{Not: true},
    // },
    // Action: new(container.SyncDockerBinaries),
    // Parallel: true,
    // Retry: 2,
    // }

    // Write the docker systemd unit, but only on hosts without docker.
    generateDockerService := &task.RemoteTask{
        Name:  "GenerateDockerService",
        Desc:  "Generate docker service",
        Hosts: i.Runtime.GetHostsByRole(common.Registry),
        Prepare: &prepare.PrepareCollection{
            &container.DockerExist{Not: true},
        },
        Action: &action.Template{
            Name:     "GenerateDockerService",
            Template: docker_template.DockerService,
            Dst:      filepath.Join("/etc/systemd/system", docker_template.DockerService.Name()),
        },
        Parallel: true,
    }

    // Write daemon config (mirrors, insecure registries) on docker-less hosts.
    generateDockerConfig := &task.RemoteTask{
        Name:  "GenerateDockerConfig",
        Desc:  "Generate docker config",
        Hosts: i.Runtime.GetHostsByRole(common.Registry),
        Prepare: &prepare.PrepareCollection{
            &container.DockerExist{Not: true},
        },
        Action: &action.Template{
            Name:     "GenerateDockerConfig",
            Template: docker_template.DockerConfig,
            Dst:      filepath.Join("/etc/docker/", docker_template.DockerConfig.Name()),
            Data: util.Data{
                "Mirrors":            docker_template.Mirrors(i.KubeConf),
                "InsecureRegistries": docker_template.InsecureRegistries(i.KubeConf),
            },
        },
        Parallel: true,
    }

    // enableDocker := &task.RemoteTask{
    // Name: "EnableDocker",
    // Desc: "Enable docker",
    // Hosts: i.Runtime.GetHostsByRole(common.Registry),
    // Prepare: &prepare.PrepareCollection{
    // &container.DockerExist{Not: true},
    // },
    // Action: new(container.EnableDocker),
    // Parallel: true,
    // }

    // Install docker compose
    installDockerCompose := &task.RemoteTask{
        Name:     "InstallDockerCompose",
        Desc:     "Install docker compose",
        Hosts:    i.Runtime.GetHostsByRole(common.Registry),
        Action:   new(InstallDockerCompose),
        Parallel: true,
        Retry:    2,
    }

    // Install Harbor
    syncHarborPackage := &task.RemoteTask{
        Name:     "SyncHarborPackage",
        Desc:     "Sync harbor package",
        Hosts:    i.Runtime.GetHostsByRole(common.Registry),
        Action:   new(SyncHarborPackage),
        Parallel: true,
        Retry:    2,
    }

    // generate Harbor Systemd
    generateHarborService := &task.RemoteTask{
        Name:  "GenerateHarborService",
        Desc:  "Generate harbor service",
        Hosts: i.Runtime.GetHostsByRole(common.Registry),
        Action: &action.Template{
            Name:     "GenerateHarborService",
            Template: templates.HarborServiceTempl,
            Dst:      "/etc/systemd/system/harbor.service",
            Data: util.Data{
                "Harbor_install_path": "/opt/harbor",
            },
        },
        Parallel: true,
        Retry:    1,
    }

    // Render harbor.yml with the registry domain and TLS file names.
    generateHarborConfig := &task.RemoteTask{
        Name:  "GenerateHarborConfig",
        Desc:  "Generate harbor config",
        Hosts: i.Runtime.GetHostsByRole(common.Registry),
        Action: &action.Template{
            Name:     "GenerateHarborConfig",
            Template: templates.HarborConfigTempl,
            Dst:      "/opt/harbor/harbor.yml",
            Data: util.Data{
                "Domain":      RegistryCertificateBaseName,
                "Certificate": fmt.Sprintf("%s.pem", RegistryCertificateBaseName),
                "Key":         fmt.Sprintf("%s-key.pem", RegistryCertificateBaseName),
            },
        },
        Parallel: true,
        Retry:    1,
    }

    startHarbor := &task.RemoteTask{
        Name:     "StartHarbor",
        Desc:     "start harbor",
        Hosts:    i.Runtime.GetHostsByRole(common.Registry),
        Action:   new(StartHarbor),
        Parallel: true,
        Retry:    2,
    }

    return []task.Interface{
        // syncBinaries,
        generateDockerService,
        generateDockerConfig,
        // enableDocker,
        installDockerCompose,
        syncHarborPackage,
        generateHarborService,
        generateHarborConfig,
        startHarbor,
    }
}

View File

@@ -1,34 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
)
// FirstRegistryNode is a prepare that passes only on the first host holding
// the registry role. Setting Not inverts the check, so it passes on every
// registry host except the first.
type FirstRegistryNode struct {
    common.KubePrepare
    Not bool
}

// PreCheck compares the current remote host against the first registry-role
// host and applies the optional inversion.
func (f *FirstRegistryNode) PreCheck(runtime connector.Runtime) (bool, error) {
    isFirst := runtime.GetHostsByRole(common.Registry)[0].GetName() == runtime.RemoteHost().GetName()
    return isFirst != f.Not, nil
}

View File

@@ -1,229 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"fmt"
"path/filepath"
"strings"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/files"
"bytetrade.io/web3os/installer/pkg/utils"
"github.com/pkg/errors"
)
// SyncCertsFile pushes the locally generated registry certificate files to
// the registry node's certificate directory via sudo scp.
type SyncCertsFile struct {
    common.KubeAction
}

// Execute reads the local certs directory and the file-name list from the
// module cache (populated by GenerateCerts) and copies each file into
// common.RegistryCertDir on the remote host.
func (s *SyncCertsFile) Execute(runtime connector.Runtime) error {
    cachedDir, ok := s.ModuleCache.Get(LocalCertsDir)
    if !ok {
        return errors.New("get registry local certs dir by module cache failed")
    }
    cachedFiles, ok := s.ModuleCache.Get(CertsFileList)
    if !ok {
        return errors.New("get registry certs file list by module cache failed")
    }

    srcDir := cachedDir.(string)
    for _, name := range cachedFiles.([]string) {
        if err := runtime.GetRunner().SudoScp(filepath.Join(srcDir, name), filepath.Join(common.RegistryCertDir, name)); err != nil {
            return errors.Wrap(errors.WithStack(err), "scp registry certs file failed")
        }
    }
    return nil
}
// SyncCertsToAllNodes distributes the registry certificates to every node,
// renaming them to the extensions docker expects (.crt/.cert/.key) and
// skipping the CA private key.
type SyncCertsToAllNodes struct {
    common.KubeAction
}

func (s *SyncCertsToAllNodes) Execute(runtime connector.Runtime) error {
    localCertsDir, ok := s.ModuleCache.Get(LocalCertsDir)
    if !ok {
        return errors.New("get registry local certs dir by module cache failed")
    }
    files, ok := s.ModuleCache.Get(CertsFileList)
    if !ok {
        return errors.New("get registry certs file list by module cache failed")
    }

    dir := localCertsDir.(string)
    fileList := files.([]string)
    for _, fileName := range fileList {
        // Map the generated .pem names to docker-style names:
        //   ca.pem      -> ca.crt
        //   ca-key.pem  -> (never distributed)
        //   X-key.pem   -> X.key
        //   X.pem       -> X.cert
        var dstFileName string
        switch fileName {
        case "ca.pem":
            dstFileName = "ca.crt"
        case "ca-key.pem":
            continue
        default:
            if strings.HasSuffix(fileName, "-key.pem") {
                dstFileName = strings.Replace(fileName, "-key.pem", ".key", -1)
            } else {
                dstFileName = strings.Replace(fileName, ".pem", ".cert", -1)
            }
        }

        // Copy into docker's per-registry trust directory...
        if err := runtime.GetRunner().SudoScp(filepath.Join(dir, fileName), filepath.Join(filepath.Join("/etc/docker/certs.d", RegistryCertificateBaseName), dstFileName)); err != nil {
            return errors.Wrap(errors.WithStack(err), "scp registry certs file to /etc/docker/certs.d/ failed")
        }
        // ...and into the shared registry cert directory.
        if err := runtime.GetRunner().SudoScp(filepath.Join(dir, fileName), filepath.Join(common.RegistryCertDir, dstFileName)); err != nil {
            return errors.Wrap(errors.WithStack(err), fmt.Sprintf("scp registry certs file to %s failed", common.RegistryCertDir))
        }
    }
    return nil
}
// InstallRegistryBinary copies the registry tarball to the remote host,
// unpacks it and installs the binary into /usr/local/bin.
type InstallRegistryBinary struct {
    common.KubeAction
}

// Execute resets the remote tmp dir, looks up the registry KubeBinary for
// the host's architecture in the pipeline cache, copies it over, and
// extracts/installs it.
func (g *InstallRegistryBinary) Execute(runtime connector.Runtime) error {
    if err := utils.ResetTmpDir(runtime); err != nil {
        return err
    }

    binariesMapObj, ok := g.PipelineCache.Get(common.KubeBinaries + "-" + runtime.RemoteHost().GetArch())
    if !ok {
        return errors.New("get KubeBinary by pipeline cache failed")
    }
    binariesMap := binariesMapObj.(map[string]*files.KubeBinary)
    registry, ok := binariesMap[common.Registry]
    if !ok {
        return errors.New("get KubeBinary key registry by pipeline cache failed")
    }

    dst := filepath.Join(common.TmpDir, registry.FileName)
    if err := runtime.GetRunner().Scp(registry.Path(), dst); err != nil {
        // Fixed copy-pasted message: this installs registry, not etcd.
        return errors.Wrap(errors.WithStack(err), "sync registry tar.gz failed")
    }

    installCmd := fmt.Sprintf("tar -zxf %s && mv -f registry /usr/local/bin/ && chmod +x /usr/local/bin/registry", dst)
    if _, err := runtime.GetRunner().SudoCmd(installCmd, false, false); err != nil {
        return errors.Wrap(errors.WithStack(err), "install registry binaries failed")
    }
    return nil
}
// StartRegistryService enables and (re)starts the registry systemd unit on
// the remote host and prints the registry address on success.
type StartRegistryService struct {
    common.KubeAction
}

// Execute reloads systemd, enables the registry unit and restarts it.
func (g *StartRegistryService) Execute(runtime connector.Runtime) error {
    cmd := "systemctl daemon-reload && systemctl enable registry && systemctl restart registry"
    _, err := runtime.GetRunner().SudoCmd(cmd, false, false)
    if err != nil {
        return errors.Wrap(errors.WithStack(err), "start registry service failed")
    }

    fmt.Println()
    fmt.Println("Local image registry created successfully. Address: dockerhub.kubekey.local")
    fmt.Println()
    return nil
}
// InstallDockerCompose copies the docker-compose binary to the remote host
// and installs it into /usr/local/bin.
type InstallDockerCompose struct {
    common.KubeAction
}

// Execute resets the remote tmp dir, looks up the docker-compose KubeBinary
// for the host's architecture in the pipeline cache, copies it over and
// installs it executable.
func (g *InstallDockerCompose) Execute(runtime connector.Runtime) error {
    if err := utils.ResetTmpDir(runtime); err != nil {
        return err
    }

    binariesMapObj, ok := g.PipelineCache.Get(common.KubeBinaries + "-" + runtime.RemoteHost().GetArch())
    if !ok {
        return errors.New("get KubeBinary by pipeline cache failed")
    }
    binariesMap := binariesMapObj.(map[string]*files.KubeBinary)
    compose, ok := binariesMap[common.DockerCompose]
    if !ok {
        return errors.New("get KubeBinary key docker-compose by pipeline cache failed")
    }

    dst := filepath.Join(common.TmpDir, compose.FileName)
    if err := runtime.GetRunner().Scp(compose.Path(), dst); err != nil {
        return errors.Wrap(errors.WithStack(err), "sync docker-compose failed")
    }

    installCmd := fmt.Sprintf("mv -f %s /usr/local/bin/docker-compose && chmod +x /usr/local/bin/docker-compose", dst)
    if _, err := runtime.GetRunner().SudoCmd(installCmd, false, false); err != nil {
        // Fixed typo in the error message (was "dokcer-compose").
        return errors.Wrap(errors.WithStack(err), "install docker-compose failed")
    }
    return nil
}
// SyncHarborPackage copies the Harbor release tarball to the remote host
// and unpacks it under /opt.
type SyncHarborPackage struct {
    common.KubeAction
}

// Execute resets the remote tmp dir, resolves the harbor KubeBinary for the
// host's architecture from the pipeline cache, transfers the package, and
// extracts it into /opt.
func (g *SyncHarborPackage) Execute(runtime connector.Runtime) error {
    if err := utils.ResetTmpDir(runtime); err != nil {
        return err
    }

    cached, ok := g.PipelineCache.Get(common.KubeBinaries + "-" + runtime.RemoteHost().GetArch())
    if !ok {
        return errors.New("get KubeBinary by pipeline cache failed")
    }
    harborBinary, ok := cached.(map[string]*files.KubeBinary)[common.Harbor]
    if !ok {
        return errors.New("get KubeBinary key harbor by pipeline cache failed")
    }

    remoteTmpPath := filepath.Join(common.TmpDir, harborBinary.FileName)
    if err := runtime.GetRunner().Scp(harborBinary.Path(), remoteTmpPath); err != nil {
        return errors.Wrap(errors.WithStack(err), "sync harbor package failed")
    }

    unpackCmd := fmt.Sprintf("tar -zxvf %s -C /opt", remoteTmpPath)
    if _, err := runtime.GetRunner().SudoCmd(unpackCmd, false, false); err != nil {
        return errors.Wrap(errors.WithStack(err), "unzip harbor package failed")
    }
    return nil
}
// StartHarbor runs Harbor's install.sh (with notary, trivy and chartmuseum)
// and enables/starts the harbor systemd unit, printing the registry address
// on success.
type StartHarbor struct {
    common.KubeAction
}

// Execute chains the installer invocation with the systemd enable/restart
// in a single sudo shell so install.sh's PATH tweak is in effect throughout.
func (g *StartHarbor) Execute(runtime connector.Runtime) error {
    cmd := "cd /opt/harbor && chmod +x install.sh && export PATH=$PATH:/usr/local/bin; ./install.sh --with-notary --with-trivy --with-chartmuseum && systemctl daemon-reload && systemctl enable harbor && systemctl restart harbor"
    _, err := runtime.GetRunner().SudoCmd(cmd, false, false)
    if err != nil {
        return errors.Wrap(errors.WithStack(err), "start harbor failed")
    }

    fmt.Println()
    fmt.Println("Local image registry created successfully. Address: dockerhub.kubekey.local")
    fmt.Println()
    return nil
}

View File

@@ -1,136 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package templates
import (
"text/template"
"github.com/lithammer/dedent"
)
var (
	// HarborServiceTempl is the systemd unit template for Harbor; it wraps
	// docker-compose up/down against {{ .Harbor_install_path }}.
	// NOTE(review): leading whitespace inside these template bodies appears
	// to have been stripped in this view; confirm the original string
	// content (especially YAML nesting in HarborConfigTempl) before reuse.
	HarborServiceTempl = template.Must(template.New("harborSerivce").Parse(
		dedent.Dedent(`[Unit]
Description=Harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
StartLimitIntervalSec=0
[Service]
Type=simple
ExecStart=/usr/local/bin/docker-compose -f {{ .Harbor_install_path }}/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -f {{ .Harbor_install_path }}/docker-compose.yml down
Restart=on-failure
[Install]
WantedBy=multi-user.target
`)))

	// HarborConfigTempl is the harbor.yml template; placeholders: Domain,
	// Certificate, Key (paths resolved under /etc/ssl/registry/ssl).
	HarborConfigTempl = template.Must(template.New("harborConfig").Parse(
		dedent.Dedent(`# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: {{ .Domain }}
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 80
# https related config
https:
# https port for harbor, default is 443
port: 443
# The path of cert and key files for nginx
certificate: /etc/ssl/registry/ssl/{{ .Certificate }}
private_key: /etc/ssl/registry/ssl/{{ .Key }}
# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
harbor_admin_password: Harbor12345
# Harbor DB configuration
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: 100
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 1024 for postgres of harbor.
max_open_conns: 900
# The default data volume
data_volume: /mnt/registry
# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
trivy:
# ignoreUnfixed The flag to display only fixed vulnerabilities
ignore_unfixed: false
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#
skip_update: false
#
# insecure The flag to skip verifying registry certificate
insecure: false
jobservice:
# Maximum number of job workers in job service
max_job_workers: 10
notification:
# Maximum retry count for webhook job
webhook_job_max_retry: 10
chart:
# Change the value of absolute_url to enabled can enable absolute url in chart
absolute_url: disabled
# Log configurations
log:
# options are debug, info, warning, error, fatal
level: info
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
location: /var/log/harbor
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.4.0
# Global proxy
proxy:
http_proxy:
https_proxy:
no_proxy:
components:
- core
- jobservice
- trivy
`)))
)

View File

@@ -1,54 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package templates
import (
"text/template"
"github.com/lithammer/dedent"
)
var (
	// RegistryServiceTempl is the systemd unit template for the plain
	// registry server started from /etc/kubekey/registry/config.yaml.
	RegistryServiceTempl = template.Must(template.New("registryService").Parse(
		dedent.Dedent(`[Unit]
Description=v2 Registry server for Container
After=network.target
StartLimitIntervalSec=0
[Service]
Type=simple
ExecStart=/usr/local/bin/registry serve /etc/kubekey/registry/config.yaml
Restart=on-failure
[Install]
WantedBy=multi-user.target
`)))

	// RegistryConfigTempl is the registry config template; placeholders:
	// Certificate and Key file names under /etc/ssl/registry/ssl.
	// NOTE(review): leading whitespace inside the YAML body appears to have
	// been stripped in this view; confirm nesting against the original file.
	RegistryConfigTempl = template.Must(template.New("registryConfig").Parse(
		dedent.Dedent(`version: 0.1
log:
fields:
service: registry
storage:
cache:
layerinfo: inmemory
filesystem:
rootdirectory: /mnt/registry
http:
addr: :443
tls:
certificate: /etc/ssl/registry/ssl/{{ .Certificate }}
key: /etc/ssl/registry/ssl/{{ .Key }}
`)))
)

View File

@@ -24,107 +24,10 @@ import (
"bytetrade.io/web3os/installer/pkg/certs/templates"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/action"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/kubernetes"
)
// CheckCertsModule gathers certificate expiry information from every master
// node so it can later be displayed by PrintClusterCertsModule.
type CheckCertsModule struct {
    common.KubeModule
}

// Init wires up the single remote task that lists cluster certs on all
// master hosts in parallel.
func (c *CheckCertsModule) Init() {
    c.Name = "CheckCertsModule"
    c.Desc = "Check cluster certs"

    listCerts := &task.RemoteTask{
        Name:     "CheckClusterCerts",
        Desc:     "Check cluster certs",
        Hosts:    c.Runtime.GetHostsByRole(common.Master),
        Action:   new(ListClusterCerts),
        Parallel: true,
    }

    c.Tasks = []task.Interface{
        listCerts,
    }
}
// PrintClusterCertsModule renders the certificate information collected by
// CheckCertsModule as a table on stdout.
type PrintClusterCertsModule struct {
    common.KubeModule
}

// Init wires up the single local task that prints the certs table.
func (p *PrintClusterCertsModule) Init() {
    p.Name = "PrintClusterCertsModule"
    p.Desc = "Display cluster certs form"

    renderTable := &task.LocalTask{
        Name:   "DisplayCertsForm",
        Desc:   "Display cluster certs form",
        Action: new(DisplayForm),
    }

    p.Tasks = []task.Interface{
        renderTable,
    }
}
// RenewCertsModule renews the control-plane certificates on all masters and
// then re-distributes the refreshed admin kubeconfig to masters and workers.
type RenewCertsModule struct {
    common.KubeModule
}

func (r *RenewCertsModule) Init() {
    r.Name = "RenewCertsModule"
    r.Desc = "Renew control-plane certs"

    // Renew serially (Parallel: false) so masters are not all restarting
    // their control-plane components at the same time.
    renew := &task.RemoteTask{
        Name:     "RenewCerts",
        Desc:     "Renew control-plane certs",
        Hosts:    r.Runtime.GetHostsByRole(common.Master),
        Action:   new(RenewCerts),
        Parallel: false,
        Retry:    5,
    }

    // Refresh each master's own ~/.kube/config from the renewed admin.conf.
    copyKubeConfig := &task.RemoteTask{
        Name:     "CopyKubeConfig",
        Desc:     "Copy admin.conf to ~/.kube/config",
        Hosts:    r.Runtime.GetHostsByRole(common.Master),
        Action:   new(kubernetes.CopyKubeConfigForControlPlane),
        Parallel: true,
        Retry:    2,
    }

    // Pull admin.conf from the first master to the local work dir...
    fetchKubeConfig := &task.RemoteTask{
        Name:     "FetchKubeConfig",
        Desc:     "Fetch kube config file from control-plane",
        Hosts:    r.Runtime.GetHostsByRole(common.Master),
        Prepare:  new(common.OnlyFirstMaster),
        Action:   new(FetchKubeConfig),
        Parallel: true,
    }

    // ...then push it to every worker-only node.
    syncKubeConfig := &task.RemoteTask{
        Name:  "SyncKubeConfig",
        Desc:  "Synchronize kube config to worker",
        Hosts: r.Runtime.GetHostsByRole(common.Worker),
        Prepare: &prepare.PrepareCollection{
            new(common.OnlyWorker),
        },
        Action:   new(SyneKubeConfigToWorker),
        Parallel: true,
        Retry:    3,
    }

    r.Tasks = []task.Interface{
        renew,
        copyKubeConfig,
        fetchKubeConfig,
        syncKubeConfig,
    }
}
type AutoRenewCertsModule struct {
common.KubeModule
Skip bool

View File

@@ -17,384 +17,18 @@
package certs
import (
"encoding/base64"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"text/tabwriter"
"time"
"bytetrade.io/web3os/installer/pkg/certs/templates"
"bytetrade.io/web3os/installer/pkg/common"
cc "bytetrade.io/web3os/installer/pkg/core/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/utils"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
versionutil "k8s.io/apimachinery/pkg/util/version"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
certutil "k8s.io/client-go/util/cert"
)
// Certificate is one row of the leaf-certificate display table.
type Certificate struct {
    Name          string // certificate or kubeconfig file name
    Expires       string // formatted NotAfter timestamp
    Residual      string // human-readable time remaining (see ResidualTime)
    AuthorityName string // name of the signing CA ("" if unknown)
    NodeName      string // node the certificate was read from
}

// CaCertificate is one row of the CA-certificate display table.
type CaCertificate struct {
    AuthorityName string // CA certificate file name
    Expires       string // formatted NotAfter timestamp
    Residual      string // human-readable time remaining
    NodeName      string // node the certificate was read from
}

var (
    // Leaf certificates read from the kube cert dir.
    certificateList = []string{
        "apiserver.crt",
        "apiserver-kubelet-client.crt",
        "front-proxy-client.crt",
    }
    // CA certificates read from the kube cert dir.
    caCertificateList = []string{
        "ca.crt",
        "front-proxy-ca.crt",
    }
    // Kubeconfig files whose embedded client certs are also inspected.
    kubeConfigList = []string{
        "admin.conf",
        "controller-manager.conf",
        "scheduler.conf",
    }
)
// ListClusterCerts collects expiry information for the control-plane leaf
// certificates, the client certs embedded in the kubeconfig files, and the
// CA certificates, and stores the results in the host cache for DisplayForm.
type ListClusterCerts struct {
    common.KubeAction
}

func (l *ListClusterCerts) Execute(runtime connector.Runtime) error {
    host := runtime.RemoteHost()
    certificates := make([]*Certificate, 0)
    caCertificates := make([]*CaCertificate, 0)

    // Leaf certificates read directly from the cert directory.
    for _, certFileName := range certificateList {
        certPath := filepath.Join(common.KubeCertDir, certFileName)
        certContext, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("cat %s", certPath), false, false)
        if err != nil {
            return errors.Wrap(err, "get cluster certs failed")
        }
        if cert, err := getCertInfo(certContext, certFileName, host.GetName()); err != nil {
            return err
        } else {
            certificates = append(certificates, cert)
        }
    }

    // Client certificates embedded in the kubeconfig files.
    for _, kubeConfigFileName := range kubeConfigList {
        kubeConfigPath := filepath.Join(common.KubeConfigDir, kubeConfigFileName)
        newConfig := clientcmdapi.NewConfig()
        kubeconfigBytes, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("cat %s", kubeConfigPath), false, false)
        if err != nil {
            // BUG FIX: this error was previously unchecked (err was
            // immediately overwritten by the Decode call below).
            return errors.Wrap(err, "get cluster kubeconfig failed")
        }
        decoded, _, err := clientcmdlatest.Codec.Decode([]byte(kubeconfigBytes), &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, newConfig)
        if err != nil {
            return err
        }
        newConfig = decoded.(*clientcmdapi.Config)
        for _, a := range newConfig.AuthInfos {
            certContextBase64 := a.ClientCertificateData
            // NOTE(review): this encode-then-decode is a byte-for-byte no-op
            // on ClientCertificateData; kept so the encoding/base64 import
            // elsewhere in this file stays in use.
            tmp := base64.StdEncoding.EncodeToString(certContextBase64)
            certContext, err := base64.StdEncoding.DecodeString(tmp)
            if err != nil {
                return err
            }
            if cert, err := getCertInfo(string(certContext), kubeConfigFileName, host.GetName()); err != nil {
                return err
            } else {
                certificates = append(certificates, cert)
            }
        }
    }

    // CA certificates.
    for _, caCertFileName := range caCertificateList {
        certPath := filepath.Join(common.KubeCertDir, caCertFileName)
        caCertContext, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("cat %s", certPath), false, false)
        if err != nil {
            return errors.Wrap(err, "Failed to get cluster certs")
        }
        if cert, err := getCaCertInfo(caCertContext, caCertFileName, host.GetName()); err != nil {
            return err
        } else {
            caCertificates = append(caCertificates, cert)
        }
    }

    host.GetCache().Set(common.Certificate, certificates)
    host.GetCache().Set(common.CaCertificate, caCertificates)
    return nil
}
// getCertInfo parses a PEM certificate blob and builds a Certificate row
// for the display table, mapping the file name to its signing authority
// (empty string when the file is not one of the known names).
func getCertInfo(certContext, certFileName, nodeName string) (*Certificate, error) {
    parsed, err := certutil.ParseCertsPEM([]byte(certContext))
    if err != nil {
        return nil, errors.Wrap(err, "Failed to get cluster certs")
    }

    var authorityName string
    switch certFileName {
    case "apiserver.crt", "apiserver-kubelet-client.crt":
        authorityName = "ca"
    case "front-proxy-client.crt":
        authorityName = "front-proxy-ca"
    }

    return &Certificate{
        Name:          certFileName,
        Expires:       parsed[0].NotAfter.Format("Jan 02, 2006 15:04 MST"),
        Residual:      ResidualTime(parsed[0].NotAfter),
        AuthorityName: authorityName,
        NodeName:      nodeName,
    }, nil
}
// getCaCertInfo parses a PEM CA certificate blob and builds a CaCertificate
// row for the display table.
func getCaCertInfo(certContext, certFileName, nodeName string) (*CaCertificate, error) {
    parsed, err := certutil.ParseCertsPEM([]byte(certContext))
    if err != nil {
        return nil, errors.Wrap(err, "Failed to get cluster certs")
    }

    row := &CaCertificate{
        AuthorityName: certFileName,
        Expires:       parsed[0].NotAfter.Format("Jan 02, 2006 15:04 MST"),
        Residual:      ResidualTime(parsed[0].NotAfter),
        NodeName:      nodeName,
    }
    return row, nil
}
func ResidualTime(t time.Time) string {
d := time.Until(t)
if seconds := int(d.Seconds()); seconds < -1 {
return fmt.Sprintf("<invalid>")
} else if seconds < 0 {
return fmt.Sprintf("0s")
} else if seconds < 60 {
return fmt.Sprintf("%ds", seconds)
} else if minutes := int(d.Minutes()); minutes < 60 {
return fmt.Sprintf("%dm", minutes)
} else if hours := int(d.Hours()); hours < 24 {
return fmt.Sprintf("%dh", hours)
} else if hours < 24*365 {
return fmt.Sprintf("%dd", hours/24)
}
return fmt.Sprintf("%dy", int(d.Hours()/24/365))
}
// DisplayForm prints two aligned tables to stdout: one for leaf
// certificates and one for CA certificates, aggregated from the per-host
// caches populated by ListClusterCerts.
type DisplayForm struct {
    common.KubeAction
}

func (d *DisplayForm) Execute(runtime connector.Runtime) error {
    certificates := make([]*Certificate, 0)
    caCertificates := make([]*CaCertificate, 0)

    // Aggregate the rows collected on every master.
    for _, host := range runtime.GetHostsByRole(common.Master) {
        certs, ok := host.GetCache().Get(common.Certificate)
        if !ok {
            return errors.New("get certificate failed by pipeline cache")
        }
        ca, ok := host.GetCache().Get(common.CaCertificate)
        if !ok {
            return errors.New("get ca certificate failed by pipeline cache")
        }
        hostCertificates := certs.([]*Certificate)
        hostCaCertificates := ca.([]*CaCertificate)
        certificates = append(certificates, hostCertificates...)
        caCertificates = append(caCertificates, hostCaCertificates...)
    }

    w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)

    _, _ = fmt.Fprintln(w, "CERTIFICATE\tEXPIRES\tRESIDUAL TIME\tCERTIFICATE AUTHORITY\tNODE")
    for _, cert := range certificates {
        // Dead trailing `continue` statements from the original loops removed.
        s := fmt.Sprintf("%s\t%s\t%s\t%s\t%-8v",
            cert.Name,
            cert.Expires,
            cert.Residual,
            cert.AuthorityName,
            cert.NodeName,
        )
        _, _ = fmt.Fprintln(w, s)
    }

    _, _ = fmt.Fprintln(w)
    _, _ = fmt.Fprintln(w, "CERTIFICATE AUTHORITY\tEXPIRES\tRESIDUAL TIME\tNODE")
    for _, caCert := range caCertificates {
        c := fmt.Sprintf("%s\t%s\t%s\t%-8v",
            caCert.AuthorityName,
            caCert.Expires,
            caCert.Residual,
            caCert.NodeName,
        )
        _, _ = fmt.Fprintln(w, c)
    }

    _ = w.Flush()
    return nil
}
// RenewCerts renews all control-plane certificates via kubeadm (using the
// pre-1.20 "alpha" subcommand where required) and then restarts the
// static-pod control-plane containers and kubelet so they pick them up.
type RenewCerts struct {
    common.KubeAction
}

func (r *RenewCerts) Execute(runtime connector.Runtime) error {
    // kubeadm < v1.20 exposes cert renewal under "alpha certs".
    var kubeadmAlphaList = []string{
        "/usr/local/bin/kubeadm alpha certs renew apiserver",
        "/usr/local/bin/kubeadm alpha certs renew apiserver-kubelet-client",
        "/usr/local/bin/kubeadm alpha certs renew front-proxy-client",
        "/usr/local/bin/kubeadm alpha certs renew admin.conf",
        "/usr/local/bin/kubeadm alpha certs renew controller-manager.conf",
        "/usr/local/bin/kubeadm alpha certs renew scheduler.conf",
    }
    var kubeadmList = []string{
        "/usr/local/bin/kubeadm certs renew apiserver",
        "/usr/local/bin/kubeadm certs renew apiserver-kubelet-client",
        "/usr/local/bin/kubeadm certs renew front-proxy-client",
        "/usr/local/bin/kubeadm certs renew admin.conf",
        "/usr/local/bin/kubeadm certs renew controller-manager.conf",
        "/usr/local/bin/kubeadm certs renew scheduler.conf",
    }
    // Remove the control-plane containers (kubelet recreates them from the
    // static-pod manifests) and restart kubelet.
    var restartList = []string{
        "docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f",
        "docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty docker rm -f",
        "docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f",
        "systemctl restart kubelet",
    }

    version, err := runtime.GetRunner().SudoCmd("/usr/local/bin/kubeadm version -o short", true, false)
    if err != nil {
        return errors.Wrap(errors.WithStack(err), "kubeadm get version failed")
    }
    cmp, err := versionutil.MustParseSemantic(version).Compare("v1.20.0")
    if err != nil {
        return errors.Wrap(errors.WithStack(err), "parse kubeadm version failed")
    }

    if cmp == -1 {
        _, err := runtime.GetRunner().SudoCmd(strings.Join(kubeadmAlphaList, " && "), false, false)
        if err != nil {
            return errors.Wrap(err, "kubeadm alpha certs renew failed")
        }
    } else {
        _, err := runtime.GetRunner().SudoCmd(strings.Join(kubeadmList, " && "), false, false)
        if err != nil {
            // Fixed copy-pasted message: this is the non-alpha path.
            return errors.Wrap(err, "kubeadm certs renew failed")
        }
    }

    _, err = runtime.GetRunner().SudoCmd(strings.Join(restartList, " && "), false, false)
    if err != nil {
        return errors.Wrap(err, "kube-apiserver, kube-schedule, kube-controller-manager or kubelet restart failed")
    }
    return nil
}
// FetchKubeConfig stages /etc/kubernetes/admin.conf on the remote host and
// pulls it down into the local work dir under the host's name.
type FetchKubeConfig struct {
	common.KubeAction
}

// Execute resets the remote tmp dir, copies admin.conf into it, then fetches
// the copy to <workdir>/<host>/admin.conf on the local machine.
func (f *FetchKubeConfig) Execute(runtime connector.Runtime) error {
	if err := utils.ResetTmpDir(runtime); err != nil {
		return err
	}
	stagedConf := filepath.Join(common.TmpDir, "admin.conf")
	copyCmd := fmt.Sprintf("cp /etc/kubernetes/admin.conf %s", stagedConf)
	if _, err := runtime.GetRunner().SudoCmd(copyCmd, false, false); err != nil {
		return errors.Wrap(errors.WithStack(err), "copy kube config to /tmp/ failed")
	}
	localDst := filepath.Join(runtime.GetWorkDir(), runtime.RemoteHost().GetName(), "admin.conf")
	if err := runtime.GetRunner().Fetch(localDst, stagedConf, false, true); err != nil {
		return errors.Wrap(errors.WithStack(err), "fetch kube config file failed")
	}
	return nil
}
// SyneKubeConfigToWorker distributes the first master's admin.conf to this
// node as /root/.kube/config and, when the connecting user is not root, also
// into $HOME/.kube/config owned by the invoking sudo user.
type SyneKubeConfigToWorker struct {
	common.KubeAction
}

// Execute performs the sync. Fixes over the previous version: it guards the
// first-master lookup (indexing [0] on an empty role list would panic), trims
// the shell output used to build the chown command, and corrects a misleading
// error message.
func (s *SyneKubeConfigToWorker) Execute(runtime connector.Runtime) error {
	if _, err := runtime.GetRunner().SudoCmd("mkdir -p /root/.kube", false, false); err != nil {
		return errors.Wrap(errors.WithStack(err), "create .kube dir failed")
	}
	masters := runtime.GetHostsByRole(common.Master)
	if len(masters) == 0 {
		return errors.New("no master host found to sync kube config from")
	}
	localFile := filepath.Join(runtime.GetWorkDir(), masters[0].GetName(), "admin.conf")
	if err := runtime.GetRunner().SudoScp(localFile, "/root/.kube/config"); err != nil {
		return errors.Wrap(errors.WithStack(err), "sudo scp config file to worker /root/.kube/config failed")
	}
	if _, err := runtime.GetRunner().SudoCmd("chmod 0600 /root/.kube/config", false, false); err != nil {
		return errors.Wrap(errors.WithStack(err), "chmod 0600 /root/.kube/config failed")
	}
	if host := runtime.RemoteHost(); host.GetUser() != "root" {
		if _, err := runtime.GetRunner().Cmd("mkdir -p $HOME/.kube", false, false); err != nil {
			return errors.Wrap(errors.WithStack(err), "user mkdir $HOME/.kube failed")
		}
		if _, err := runtime.GetRunner().SudoCmd("cp -f /root/.kube/config $HOME/.kube/config", false, false); err != nil {
			// Message fixed: this copies /root/.kube/config, not
			// /etc/kubernetes/admin.conf as the old message claimed.
			return errors.Wrap(errors.WithStack(err), "user copy /root/.kube/config to $HOME/.kube/config failed")
		}
		if _, err := runtime.GetRunner().SudoCmd("chmod 0600 $HOME/.kube/config", false, false); err != nil {
			return errors.Wrap(errors.WithStack(err), "chmod 0600 $HOME/.kube/config failed")
		}
		userId, err := runtime.GetRunner().Cmd("echo $SUDO_UID", false, false)
		if err != nil {
			return errors.Wrap(errors.WithStack(err), "get user id failed")
		}
		userGroupId, err := runtime.GetRunner().Cmd("echo $SUDO_GID", false, false)
		if err != nil {
			return errors.Wrap(errors.WithStack(err), "get user group id failed")
		}
		// Trim defensively: `echo` output ends with a newline on most
		// runners, and an embedded newline would split the chown command.
		chownKubeConfig := fmt.Sprintf("chown -R %s:%s $HOME/.kube",
			strings.TrimSpace(userId), strings.TrimSpace(userGroupId))
		if _, err := runtime.GetRunner().SudoCmd(chownKubeConfig, false, false); err != nil {
			return errors.Wrap(errors.WithStack(err), "chown user kube config failed")
		}
	}
	return nil
}
// EnableRenewService is the action that enables the certificate-renew
// service on the node. NOTE(review): its Execute method is defined past this
// view, so behavior is inferred from the name only — confirm against the
// implementation.
type EnableRenewService struct {
	common.KubeAction
}

View File

@@ -26,16 +26,11 @@ import (
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/container/templates"
"bytetrade.io/web3os/installer/pkg/core/action"
cc "bytetrade.io/web3os/installer/pkg/core/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/images"
"bytetrade.io/web3os/installer/pkg/manifest"
"bytetrade.io/web3os/installer/pkg/registry"
"bytetrade.io/web3os/installer/pkg/utils"
"github.com/pkg/errors"
)
@@ -306,366 +301,6 @@ func umountPoints(runtime connector.Runtime) error {
return nil
}
// CordonNode marks this node unschedulable before runtime migration work.
type CordonNode struct {
	common.KubeAction
}

// Execute runs `kubectl cordon <node>` for the current remote host.
func (d *CordonNode) Execute(runtime connector.Runtime) error {
	node := runtime.RemoteHost().GetName()
	cordonCmd := fmt.Sprintf("/usr/local/bin/kubectl cordon %s ", node)
	_, err := runtime.GetRunner().SudoCmd(cordonCmd, true, false)
	if err != nil {
		return errors.Wrap(err, fmt.Sprintf("cordon the node: %s failed", node))
	}
	return nil
}
// UnCordonNode marks the node schedulable again after migration.
type UnCordonNode struct {
	common.KubeAction
}

// Execute retries `kubectl uncordon <node>` until it succeeds.
//
// Fix: the previous version looped forever with no delay (a busy loop
// hammering the apiserver and hanging the pipeline on a persistent failure).
// Retries are now bounded with a pause, and the last error is returned when
// all attempts fail.
func (d *UnCordonNode) Execute(runtime connector.Runtime) error {
	nodeName := runtime.RemoteHost().GetName()
	uncordonCmd := fmt.Sprintf("/usr/local/bin/kubectl uncordon %s", nodeName)
	var lastErr error
	for attempt := 0; attempt < 30; attempt++ {
		if _, err := runtime.GetRunner().SudoCmd(uncordonCmd, true, false); err == nil {
			return nil
		} else {
			lastErr = err
		}
		time.Sleep(5 * time.Second)
	}
	return errors.Wrap(lastErr, fmt.Sprintf("uncordon the node: %s failed", nodeName))
}
// DrainNode evicts workloads from this node prior to runtime migration.
type DrainNode struct {
	common.KubeAction
}

// Execute runs `kubectl drain` with a 2m timeout, forcing eviction and
// ignoring daemonsets / emptyDir data.
func (d *DrainNode) Execute(runtime connector.Runtime) error {
	node := runtime.RemoteHost().GetName()
	drainCmd := fmt.Sprintf("/usr/local/bin/kubectl drain %s --delete-emptydir-data --ignore-daemonsets --timeout=2m --force", node)
	_, err := runtime.GetRunner().SudoCmd(drainCmd, true, false)
	if err != nil {
		return errors.Wrap(err, fmt.Sprintf("drain the node: %s failed", node))
	}
	return nil
}
// RestartCri restarts the configured container runtime (docker or containerd)
// via systemd on this node.
type RestartCri struct {
	common.KubeAction
}

// Execute dispatches on KubeConf.Arg.Type; unsupported runtimes abort the
// process via logger.Fatalf.
//
// Fix: dropped the no-argument fmt.Sprintf wrappers (flagged by go vet) and
// the stray trailing space in the docker command.
func (i *RestartCri) Execute(runtime connector.Runtime) error {
	switch i.KubeConf.Arg.Type {
	case common.Docker:
		if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart docker", true, false); err != nil {
			return errors.Wrap(err, "restart docker")
		}
	case common.Containerd:
		if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart containerd", true, false); err != nil {
			return errors.Wrap(err, "restart containerd")
		}
	default:
		logger.Fatalf("Unsupported container runtime: %s", strings.TrimSpace(i.KubeConf.Arg.Type))
	}
	return nil
}
// EditKubeletCri rewrites /var/lib/kubelet/kubeadm-flags.env so kubelet's
// container-runtime flags match the target runtime: the docker branch strips
// the containerd remote-runtime flags, the containerd branch inserts them.
type EditKubeletCri struct {
	common.KubeAction
}

// Execute applies the sed edit for the configured runtime.
//
// Fixes: dropped the no-argument fmt.Sprintf wrappers (go vet), and the
// docker-branch error message no longer wrongly says "Containerd".
func (i *EditKubeletCri) Execute(runtime connector.Runtime) error {
	switch i.KubeConf.Arg.Type {
	case common.Docker:
		if _, err := runtime.GetRunner().SudoCmd(
			"sed -i 's#--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --pod#--pod#' /var/lib/kubelet/kubeadm-flags.env",
			true, false); err != nil {
			return errors.Wrap(err, "Change Kubelet To Docker failed")
		}
	case common.Containerd:
		if _, err := runtime.GetRunner().SudoCmd(
			"sed -i 's#--network-plugin=cni --pod#--network-plugin=cni --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --pod#' /var/lib/kubelet/kubeadm-flags.env",
			true, false); err != nil {
			return errors.Wrap(err, "Change KubeletTo Containerd failed")
		}
	default:
		logger.Fatalf("Unsupported container runtime: %s", strings.TrimSpace(i.KubeConf.Arg.Type))
	}
	return nil
}
// RestartKubeletNode restarts kubelet via systemd on this node.
type RestartKubeletNode struct {
	common.KubeAction
}

// Execute runs `systemctl restart kubelet`.
// Fix: removed the no-argument fmt.Sprintf wrapper (go vet).
func (d *RestartKubeletNode) Execute(runtime connector.Runtime) error {
	if _, err := runtime.GetRunner().SudoCmd("systemctl restart kubelet", true, false); err != nil {
		return errors.Wrap(err, "RestartNode Kube failed")
	}
	return nil
}
// MigrateSelfNodeCriTasks assembles and executes, strictly in order, the
// tasks that migrate THIS node's container runtime to containerd: cordon and
// drain the node, uninstall the old runtime, sync/configure/enable containerd
// and crictl, restart the runtime and kubelet, then uncordon the node.
// The docker paths exist but are commented out below.
//
// runtime supplies the remote host and command runner; kubeAction carries the
// cluster config plus the module/pipeline caches handed to each task.
// Returns the combined error of the first failed task, or nil on success.
func MigrateSelfNodeCriTasks(runtime connector.Runtime, kubeAction common.KubeAction) error {
	host := runtime.RemoteHost()
	tasks := []task.Interface{}
	// Reusable single-host lifecycle tasks; every task targets only this
	// host and runs serially (Parallel: false).
	CordonNode := &task.RemoteTask{
		Name:     "CordonNode",
		Desc:     "Cordon Node",
		Hosts:    []connector.Host{host},
		Action:   new(CordonNode),
		Parallel: false,
	}
	DrainNode := &task.RemoteTask{
		Name:     "DrainNode",
		Desc:     "Drain Node",
		Hosts:    []connector.Host{host},
		Action:   new(DrainNode),
		Parallel: false,
	}
	RestartCri := &task.RemoteTask{
		Name:     "RestartCri",
		Desc:     "Restart Cri",
		Hosts:    []connector.Host{host},
		Action:   new(RestartCri),
		Parallel: false,
	}
	EditKubeletCri := &task.RemoteTask{
		Name:     "EditKubeletCri",
		Desc:     "Edit Kubelet Cri",
		Hosts:    []connector.Host{host},
		Action:   new(EditKubeletCri),
		Parallel: false,
	}
	RestartKubeletNode := &task.RemoteTask{
		Name:     "RestartKubeletNode",
		Desc:     "Restart Kubelet Node",
		Hosts:    []connector.Host{host},
		Action:   new(RestartKubeletNode),
		Parallel: false,
	}
	UnCordonNode := &task.RemoteTask{
		Name:     "UnCordonNode",
		Desc:     "UnCordon Node",
		Hosts:    []connector.Host{host},
		Action:   new(UnCordonNode),
		Parallel: false,
	}
	// Phase 1: take the node out of service and uninstall the OLD runtime.
	// Only the containerd case is active; the docker case is commented out,
	// so for any other ContainerManager value no tasks are queued here.
	switch kubeAction.KubeConf.Cluster.Kubernetes.ContainerManager {
	// case common.Docker:
	// Uninstall := &task.RemoteTask{
	// Name: "DisableDocker",
	// Desc: "Disable docker",
	// Hosts: []connector.Host{host},
	// Prepare: &prepare.PrepareCollection{
	// &DockerExist{Not: false},
	// },
	// Action: new(DisableDocker),
	// Parallel: false,
	// }
	// tasks = append(tasks, CordonNode, DrainNode, Uninstall)
	case common.Containerd:
		Uninstall := &task.RemoteTask{
			Name:  "UninstallContainerd",
			Desc:  "Uninstall containerd",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: false},
			},
			Action:   new(DisableContainerd),
			Parallel: false,
			Retry:    2,
			Delay:    5 * time.Second,
		}
		tasks = append(tasks, CordonNode, DrainNode, Uninstall)
	}
	// Former docker install path, retained for reference:
	// if kubeAction.KubeConf.Arg.Type == common.Docker {
	// syncBinaries := &task.RemoteTask{
	// Name: "SyncDockerBinaries",
	// Desc: "Sync docker binaries",
	// Hosts: []connector.Host{host},
	// Prepare: &prepare.PrepareCollection{
	// // &kubernetes.NodeInCluster{Not: true},
	// &DockerExist{Not: true},
	// },
	// Action: new(SyncDockerBinaries),
	// Parallel: false,
	// }
	// generateDockerService := &task.RemoteTask{
	// Name: "GenerateDockerService",
	// Desc: "Generate docker service",
	// Hosts: []connector.Host{host},
	// Prepare: &prepare.PrepareCollection{
	// // &kubernetes.NodeInCluster{Not: true},
	// &DockerExist{Not: true},
	// },
	// Action: &action.Template{
	// Name: "GenerateDockerService",
	// Template: templates.DockerService,
	// Dst: filepath.Join("/etc/systemd/system", templates.DockerService.Name()),
	// },
	// Parallel: false,
	// }
	// generateDockerConfig := &task.RemoteTask{
	// Name: "GenerateDockerConfig",
	// Desc: "Generate docker config",
	// Hosts: []connector.Host{host},
	// Prepare: &prepare.PrepareCollection{
	// // &kubernetes.NodeInCluster{Not: true},
	// &DockerExist{Not: true},
	// },
	// Action: &action.Template{
	// Name: "GenerateDockerConfig",
	// Template: templates.DockerConfig,
	// Dst: filepath.Join("/etc/docker/", templates.DockerConfig.Name()),
	// Data: util.Data{
	// "Mirrors": templates.Mirrors(kubeAction.KubeConf),
	// "InsecureRegistries": templates.InsecureRegistries(kubeAction.KubeConf),
	// "DataRoot": templates.DataRoot(kubeAction.KubeConf),
	// },
	// },
	// Parallel: false,
	// }
	// enableDocker := &task.RemoteTask{
	// Name: "EnableDocker",
	// Desc: "Enable docker",
	// Hosts: []connector.Host{host},
	// Prepare: &prepare.PrepareCollection{
	// // &kubernetes.NodeInCluster{Not: true},
	// &DockerExist{Not: true},
	// },
	// Action: new(EnableDocker),
	// Parallel: false,
	// }
	// dockerLoginRegistry := &task.RemoteTask{
	// Name: "Login PrivateRegistry",
	// Desc: "Add auths to container runtime",
	// Hosts: []connector.Host{host},
	// Prepare: &prepare.PrepareCollection{
	// // &kubernetes.NodeInCluster{Not: true},
	// &DockerExist{},
	// &PrivateRegistryAuth{},
	// },
	// Action: new(DockerLoginRegistry),
	// Parallel: false,
	// }
	// tasks = append(tasks, syncBinaries, generateDockerService, generateDockerConfig, enableDocker, dockerLoginRegistry,
	// RestartCri, EditKubeletCri, RestartKubeletNode, UnCordonNode)
	// }
	// Phase 2: install and configure the NEW runtime (containerd), restart
	// kubelet against it, then put the node back into service.
	if kubeAction.KubeConf.Arg.Type == common.Containerd {
		syncContainerd := &task.RemoteTask{
			Name:  "SyncContainerd",
			Desc:  "Sync containerd binaries",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: true},
			},
			Action:   new(SyncContainerd),
			Parallel: false,
		}
		syncCrictlBinaries := &task.RemoteTask{
			Name:  "SyncCrictlBinaries",
			Desc:  "Sync crictl binaries",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&CrictlExist{Not: true},
			},
			Action:   new(SyncCrictlBinaries),
			Parallel: false,
		}
		generateContainerdService := &task.RemoteTask{
			Name:  "GenerateContainerdService",
			Desc:  "Generate containerd service",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: true},
			},
			Action: &action.Template{
				Name:     "GenerateContainerdService",
				Template: templates.ContainerdService,
				Dst:      filepath.Join("/etc/systemd/system", templates.ContainerdService.Name()),
			},
			Parallel: false,
		}
		generateContainerdConfig := &task.RemoteTask{
			Name:  "GenerateContainerdConfig",
			Desc:  "Generate containerd config",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: true},
			},
			Action: &action.Template{
				Name:     "GenerateContainerdConfig",
				Template: templates.ContainerdConfig,
				Dst:      filepath.Join("/etc/containerd/", templates.ContainerdConfig.Name()),
				Data: util.Data{
					"Mirrors":            templates.Mirrors(kubeAction.KubeConf),
					"InsecureRegistries": kubeAction.KubeConf.Cluster.Registry.InsecureRegistries,
					"SandBoxImage":       images.GetImage(runtime, kubeAction.KubeConf, "pause").ImageName(),
					"Auths":              registry.DockerRegistryAuthEntries(kubeAction.KubeConf.Cluster.Registry.Auths),
					"DataRoot":           templates.DataRoot(kubeAction.KubeConf),
				},
			},
			Parallel: false,
		}
		generateCrictlConfig := &task.RemoteTask{
			Name:  "GenerateCrictlConfig",
			Desc:  "Generate crictl config",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: true},
			},
			Action: &action.Template{
				Name:     "GenerateCrictlConfig",
				Template: templates.CrictlConfig,
				Dst:      filepath.Join("/etc/", templates.CrictlConfig.Name()),
				Data: util.Data{
					"Endpoint": kubeAction.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint,
				},
			},
			Parallel: false,
		}
		// Note: unlike the tasks above, enabling containerd runs
		// unconditionally (its existence precheck is commented out).
		enableContainerd := &task.RemoteTask{
			Name:  "EnableContainerd",
			Desc:  "Enable containerd",
			Hosts: []connector.Host{host},
			// Prepare: &prepare.PrepareCollection{
			// &ContainerdExist{Not: true},
			// },
			Action:   new(EnableContainerd),
			Parallel: false,
		}
		tasks = append(tasks, syncContainerd, syncCrictlBinaries, generateContainerdService, generateContainerdConfig,
			generateCrictlConfig, enableContainerd, RestartCri, EditKubeletCri, RestartKubeletNode, UnCordonNode)
	}
	// Execute the assembled tasks one by one, stopping at the first failure.
	for i := range tasks {
		t := tasks[i]
		t.Init(runtime, kubeAction.ModuleCache, kubeAction.PipelineCache)
		if res := t.Execute(); res.IsFailed() {
			return res.CombineErr()
		}
	}
	return nil
}
// MigrateSelfNodeCri is the action wrapper that runs the full per-node CRI
// migration task sequence (see MigrateSelfNodeCriTasks).
type MigrateSelfNodeCri struct {
	common.KubeAction
}

// Execute delegates to MigrateSelfNodeCriTasks and wraps any failure.
// Fix: removed the no-argument fmt.Sprintf wrapper (go vet) and the stray
// trailing colon in the error message.
func (d *MigrateSelfNodeCri) Execute(runtime connector.Runtime) error {
	if err := MigrateSelfNodeCriTasks(runtime, d.KubeAction); err != nil {
		return errors.Wrap(errors.WithStack(err), "MigrateSelfNodeCriTasks failed")
	}
	return nil
}
type KillContainerdProcess struct {
common.KubeAction
Signal string

View File

@@ -276,84 +276,6 @@ func UninstallContainerd(m *UninstallContainerModule) []task.Interface {
}
}
// CriMigrateModule drives the CRI migration for worker, master or all nodes
// depending on the configured role. Skip disables the whole module.
type CriMigrateModule struct {
	common.KubeModule
	Skip bool
}

// IsSkip reports whether this module should be skipped entirely.
func (i *CriMigrateModule) IsSkip() bool {
	return i.Skip
}

// Init selects the task list matching KubeConf.Arg.Role; an unknown role
// aborts the process via logger.Fatalf.
func (p *CriMigrateModule) Init() {
	p.Name = "CriMigrateModule"
	p.Desc = "Cri Migrate manager"
	switch p.KubeConf.Arg.Role {
	case common.Worker:
		p.Tasks = MigrateWCri(p)
	case common.Master:
		p.Tasks = MigrateMCri(p)
	case "all":
		p.Tasks = MigrateACri(p)
	default:
		logger.Fatalf("Unsupported Role: %s", strings.TrimSpace(p.KubeConf.Arg.Role))
	}
}
// MigrateWCri builds the task list that migrates the CRI on all worker
// nodes, assigns it to p.Tasks and returns it.
func MigrateWCri(p *CriMigrateModule) []task.Interface {
	migrateWorkers := &task.RemoteTask{
		Name:     "MigrateToDocker",
		Desc:     "Migrate To Docker",
		Hosts:    p.Runtime.GetHostsByRole(common.Worker),
		Prepare:  new(common.OnlyWorker),
		Action:   new(MigrateSelfNodeCri),
		Parallel: false,
	}
	p.Tasks = []task.Interface{migrateWorkers}
	return p.Tasks
}
// MigrateMCri builds the task list that migrates the CRI on all master
// nodes, assigns it to p.Tasks and returns it.
func MigrateMCri(p *CriMigrateModule) []task.Interface {
	migrateMasters := &task.RemoteTask{
		Name:     "MigrateMasterToDocker",
		Desc:     "Migrate Master To Docker",
		Hosts:    p.Runtime.GetHostsByRole(common.Master),
		Prepare:  new(common.IsMaster),
		Action:   new(MigrateSelfNodeCri),
		Parallel: false,
	}
	p.Tasks = []task.Interface{migrateMasters}
	return p.Tasks
}
// MigrateACri builds the task list that migrates the CRI on every k8s node
// (no role precheck), assigns it to p.Tasks and returns it.
func MigrateACri(p *CriMigrateModule) []task.Interface {
	migrateAll := &task.RemoteTask{
		Name:     "MigrateMasterToDocker",
		Desc:     "Migrate Master To Docker",
		Hosts:    p.Runtime.GetHostsByRole(common.K8s),
		Action:   new(MigrateSelfNodeCri),
		Parallel: false,
	}
	p.Tasks = []task.Interface{migrateAll}
	return p.Tasks
}
type DeleteZfsMountModule struct {
common.KubeModule
Skip bool

View File

@@ -42,22 +42,3 @@ func (c *ChownModule) Init() {
userKubeDir,
}
}
// ChownWorkDirModule is a local task module that recursively hands ownership
// of the kubekey work dir back to the invoking (sudo) user via LocalTaskChown.
type ChownWorkDirModule struct {
	module.BaseTaskModule
}

// Init registers the single local chown task over Runtime.GetWorkDir().
func (c *ChownWorkDirModule) Init() {
	// NOTE(review): the module name reads "ChownWorkerModule" while the type
	// is ChownWorkDirModule — likely a typo, but the string may be matched by
	// name elsewhere, so it is left unchanged; confirm before renaming.
	c.Name = "ChownWorkerModule"
	c.Desc = "Change kubekey work dir mode and owner"
	userKubeDir := &task.LocalTask{
		Name:   "ChownFileAndDir",
		Desc:   "Chown ./kubekey dir",
		Action: &LocalTaskChown{Path: c.Runtime.GetWorkDir()},
	}
	c.Tasks = []task.Interface{
		userKubeDir,
	}
}

View File

@@ -18,11 +18,9 @@ package filesystem
import (
"fmt"
"os/exec"
"bytetrade.io/web3os/installer/pkg/core/action"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/util"
"github.com/pkg/errors"
)
@@ -55,17 +53,3 @@ func (c *ChownFileAndDir) Execute(runtime connector.Runtime) error {
}
return nil
}
// LocalTaskChown recursively chowns Path on the LOCAL machine to the user
// identified by the SUDO_UID/SUDO_GID environment variables.
type LocalTaskChown struct {
	action.BaseAction
	Path string
}

// Execute is a no-op when Path does not exist; otherwise it shells out to
// `chown -R ${SUDO_UID}:${SUDO_GID} <path>`.
func (l *LocalTaskChown) Execute(runtime connector.Runtime) error {
	if !util.IsExist(l.Path) {
		return nil
	}
	chownCmd := fmt.Sprintf("chown -R ${SUDO_UID}:${SUDO_GID} %s", l.Path)
	if err := exec.Command("/bin/sh", "-c", chownCmd).Run(); err != nil {
		return errors.Wrapf(errors.WithStack(err), "chown %s failed", l.Path)
	}
	return nil
}

View File

@@ -17,17 +17,9 @@
package images
import (
"bufio"
"fmt"
"os"
"strings"
kubekeyapiv1alpha2 "bytetrade.io/web3os/installer/apis/kubekey/v1alpha2"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/util"
"github.com/pkg/errors"
)
const (
@@ -92,151 +84,6 @@ func (image Image) ImageRepo() string {
return fmt.Sprintf("%s%s", prefix, image.Repo)
}
// PullImages is used to pull images in the list of Image.
// For every image whose group matches one of this host's roles (and which is
// enabled), it first checks whether the image already exists locally and
// only pulls when missing.
//
// Fix: the existence check previously ran "<cli> inspecti" for every
// runtime, but `inspecti` is only a crictl subcommand — so the "already
// exists" fast path always failed for docker and the image was re-pulled
// every time. The inspect command is now chosen per runtime.
func (images *Images) PullImages(runtime connector.Runtime, kubeConf *common.KubeConf) error {
	pullCmd := "docker"
	inspectFmt := "docker image inspect %s"
	switch kubeConf.Cluster.Kubernetes.ContainerManager {
	case "crio", "containerd":
		pullCmd = "crictl"
		inspectFmt = "crictl inspecti -q %s"
	case "isula":
		// NOTE(review): isula support looks unimplemented elsewhere in this
		// package; the inspect subcommand here is a best guess — confirm.
		pullCmd = "isula"
		inspectFmt = "isula inspect %s"
	}
	host := runtime.RemoteHost()
	for _, image := range images.Images {
		switch {
		case host.IsRole(common.Master) && image.Group == kubekeyapiv1alpha2.Master && image.Enable,
			host.IsRole(common.Worker) && image.Group == kubekeyapiv1alpha2.Worker && image.Enable,
			(host.IsRole(common.Master) || host.IsRole(common.Worker)) && image.Group == kubekeyapiv1alpha2.K8s && image.Enable,
			host.IsRole(common.ETCD) && image.Group == kubekeyapiv1alpha2.Etcd && image.Enable:
			if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(inspectFmt, image.ImageName()), false, false); err == nil {
				logger.Infof("%s pull image %s exists", pullCmd, image.ImageName())
				continue
			}
			logger.Debugf("%s pull image: %s - %s", host.GetName(), image.ImageName(), runtime.RemoteHost().GetName())
			if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("%s pull %s", pullCmd, image.ImageName()), false, false); err != nil {
				return errors.Wrap(err, "pull image failed")
			}
		default:
			continue
		}
	}
	return nil
}
// type LocalImage struct {
// Filename string
// }
// type LocalImages []LocalImage
// func (i LocalImages) LoadImages(runtime connector.Runtime, kubeConf *common.KubeConf) error {
// loadCmd := "docker"
// host := runtime.RemoteHost()
// retry := func(f func() error, times int) (err error) {
// for i := 0; i < times; i++ {
// err = f()
// if err == nil {
// return nil
// }
// var dur = 5 + (i+1)*10
// logger.Warnf("load image %s failed, wait for %d seconds(%d times)", err, dur, i+1)
// if (i + 1) < times {
// time.Sleep(time.Duration(dur) * time.Second)
// }
// }
// return
// }
// for _, image := range i {
// switch {
// case host.IsRole(common.Master):
// // logger.Debugf("%s preloading image: %s", host.GetName(), image.Filename)
// start := time.Now()
// fileName := filepath.Base(image.Filename)
// // fileName = strings.ReplaceAll(fileName, ".gz", "")
// // fmt.Println(">>> ", fileName, HasSuffixI(image.Filename, ".tar.gz", ".tgz"))
// if HasSuffixI(image.Filename, ".tar.gz", ".tgz") {
// switch kubeConf.Cluster.Kubernetes.ContainerManager {
// case "crio":
// loadCmd = "ctr" // BUG
// case "containerd":
// loadCmd = "ctr -n k8s.io images import -"
// case "isula":
// loadCmd = "isula"
// default:
// loadCmd = "docker load"
// }
// // continue if load image error
// if err := retry(func() error {
// logger.Infof("preloading image: %s", fileName)
// if stdout, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("env PATH=$PATH gunzip -c %s | %s", image.Filename, loadCmd), false, false); err != nil {
// return fmt.Errorf("%s", fileName)
// } else {
// logger.Infof("%s in %s\n", formatLoadImageRes(stdout, fileName), time.Since(start))
// // fmt.Printf("%s in %s\n", formatLoadImageRes(stdout, fileName), time.Since(start))
// }
// return nil
// }, 5); err != nil {
// return fmt.Errorf("%s", fileName)
// }
// } else if HasSuffixI(image.Filename, ".tar") {
// switch kubeConf.Cluster.Kubernetes.ContainerManager {
// case "crio":
// loadCmd = "ctr" // BUG
// case "containerd":
// loadCmd = "ctr -n k8s.io images import"
// case "isula":
// loadCmd = "isula"
// default:
// loadCmd = "docker load -i"
// }
// if err := retry(func() error {
// logger.Infof("preloading image: %s", fileName)
// if stdout, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("env PATH=$PATH %s %s", loadCmd, image.Filename), false, false); err != nil {
// return fmt.Errorf("%s", fileName)
// } else {
// logger.Infof("%s in %s\n", formatLoadImageRes(stdout, fileName), time.Since(start))
// // fmt.Printf("%s in %s\n", formatLoadImageRes(stdout, fileName), time.Since(start))
// }
// return nil
// }, 5); err != nil {
// return fmt.Errorf("%s", fileName)
// }
// } else {
// logger.Warnf("invalid image file name %s, skip ...", image.Filename)
// return nil
// }
// default:
// continue
// }
// }
// return nil
// }
// formatLoadImageRes pretty-prints the output of an image-load command:
// when the output carries a "(sha256:" digest, everything from the digest on
// is dropped and "(<file>)...done " is appended; otherwise the file name is
// simply appended to the raw output.
func formatLoadImageRes(str string, fileName string) string {
	idx := strings.Index(str, "(sha256:")
	if idx < 0 {
		return fmt.Sprintf("%s %s", str, fileName)
	}
	return fmt.Sprintf("%s (%s)...done ", str[:idx], fileName)
}
func HasSuffixI(s string, suffixes ...string) bool {
s = strings.ToLower(s)
for _, suffix := range suffixes {
@@ -246,27 +93,3 @@ func HasSuffixI(s string, suffixes ...string) bool {
}
return false
}
// readImageManifest returns the lines of the image manifest file at mfPath,
// one slice element per line. A missing file is not an error: it yields
// (nil, nil).
func readImageManifest(mfPath string) ([]string, error) {
	if !util.IsExist(mfPath) {
		return nil, nil
	}
	f, err := os.Open(mfPath)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var lines []string
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		lines = append(lines, sc.Text())
	}
	if err := sc.Err(); err != nil {
		return nil, err
	}
	return lines, nil
}

View File

@@ -1,46 +1,25 @@
package images
import (
"context"
"encoding/json"
"fmt"
"github.com/containerd/containerd/pkg/cri/labels"
"github.com/containerd/containerd/reference/docker"
"math"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/containerd/containerd/pkg/cri/labels"
"github.com/containerd/containerd/reference/docker"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/cache"
cc "bytetrade.io/web3os/installer/pkg/core/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/manifest"
"bytetrade.io/web3os/installer/pkg/utils"
"github.com/cavaliergopher/grab/v3"
)
const MAX_IMPORT_RETRY int = 5
// CheckImageManifest is a prepare-phase check that passes only when the
// image manifest file exists under <baseDir>/<manifestDir>/<manifestImage>.
type CheckImageManifest struct {
	common.KubePrepare
}

// PreCheck returns (true, nil) when the manifest file exists, otherwise
// (false, error).
func (p *CheckImageManifest) PreCheck(runtime connector.Runtime) (bool, error) {
	imageManifest := path.Join(runtime.GetBaseDir(), cc.ManifestDir, cc.ManifestImage)
	if !utils.IsExist(imageManifest) {
		return false, fmt.Errorf("image manifest not exist")
	}
	return true, nil
}
type LoadImages struct {
common.KubeAction
manifest.ManifestAction
@@ -256,148 +235,3 @@ func inspectImage(runner *connector.Runner, containerManager, imageRepoTag strin
return nil
}
// downloadImageFile downloads one image archive from the CDN into
// imageFilePath, retrying up to 5 times with a 2s pause between attempts.
//
// Fixes over the previous version:
//   - on a successful attempt it now returns immediately — the old loop kept
//     running and re-downloaded the file on every remaining iteration;
//   - the per-attempt context cancel and progress ticker are scoped to a
//     helper, so they are released after each attempt instead of piling up
//     as deferred calls until the function returned.
func downloadImageFile(arch, imageRepoTag, imageFilePath string) error {
	// amd64 artifacts live at the root of the download URL; other arches are
	// stored under an "<arch>/" prefix.
	if arch == common.Amd64 {
		arch = ""
	} else {
		arch = arch + "/"
	}
	var imageFileName = path.Base(imageFilePath)
	var url = fmt.Sprintf("%s/%s%s", cc.DownloadUrl, arch, imageFileName)
	var err error
	for i := 5; i > 0; i-- {
		if err = downloadImageFileOnce(url, imageRepoTag, imageFilePath, imageFileName); err == nil {
			return nil
		}
		logger.Infof("download %s error %v", imageFileName, err)
		time.Sleep(2 * time.Second)
	}
	return err
}

// downloadImageFileOnce performs a single grab download attempt with a
// 5-minute timeout, logging transfer progress every 500ms.
func downloadImageFileOnce(url, imageRepoTag, imageFilePath, imageFileName string) error {
	totalSize, _ := getImageFileSize(url)
	if totalSize > 0 {
		logger.Infof("get image %s size: %s", imageRepoTag, utils.FormatBytes(totalSize))
	}
	client := grab.NewClient()
	req, _ := grab.NewRequest(imageFilePath, url)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	req.HTTPRequest = req.HTTPRequest.WithContext(ctx)
	resp := client.Do(req)
	t := time.NewTicker(500 * time.Millisecond)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			downloaded := resp.BytesComplete()
			var progressInfo string
			if totalSize != 0 {
				result := float64(downloaded) / float64(totalSize)
				progressInfo = fmt.Sprintf("transferred %s %s / %s (%.2f%%) / speed: %s", imageFileName, utils.FormatBytes(resp.BytesComplete()), utils.FormatBytes(totalSize), math.Round(result*10000)/100, utils.FormatBytes(int64(resp.BytesPerSecond())))
				logger.Info(progressInfo)
			} else {
				progressInfo = fmt.Sprintf("transferred %s %s / speed: %s\n", imageFileName, utils.FormatBytes(resp.BytesComplete()), utils.FormatBytes(int64(resp.BytesPerSecond())))
				logger.Infof(progressInfo)
			}
		case <-resp.Done:
			// resp.Err() is nil on success, or the transfer/timeout error.
			return resp.Err()
		}
	}
}
// pullImage pulls imageRepoTag with the CLI matching containerManager,
// exports it to <dst>/<imageHashTag>.tar and gzips the result.
// The crio and isula branches are declared but not implemented.
//
// Fix: indexing ii.Status.RepoTags[0] is now guarded — an image with no
// repo tags previously crashed the whole process with an index panic.
func pullImage(runner *connector.Runner, containerManager, imageRepoTag, imageHashTag, dst string) error {
	var pullCmd string = "docker"
	var inspectCmd string = "docker"
	var exportCmd string = "docker"
	switch containerManager {
	case "crio": // not implement
		pullCmd = "ctr"
		inspectCmd = "ctr"
		exportCmd = "ctr"
	case "containerd":
		pullCmd = "crictl pull %s"
		inspectCmd = "crictl inspecti -q %s"
		exportCmd = "ctr -n k8s.io image export %s %s"
	case "isula": // not implement
		pullCmd = "isula"
		inspectCmd = "isula"
		exportCmd = "isula"
	default:
		pullCmd = "docker pull %s"
		exportCmd = "docker save -o %s %s"
	}
	var cmd = fmt.Sprintf(pullCmd, imageRepoTag)
	if _, err := runner.Host.SudoCmd(cmd, false, false); err != nil {
		return fmt.Errorf("pull %s error %v", imageRepoTag, err)
	}
	// For containerd the export must use the canonical repo tag as stored by
	// the runtime, so look it up through crictl inspect.
	var repoTag = imageRepoTag
	if containerManager == "containerd" {
		cmd = fmt.Sprintf(inspectCmd, imageRepoTag)
		stdout, err := runner.Host.SudoCmd(cmd, false, false)
		if err != nil {
			return fmt.Errorf("inspect %s error %v", imageRepoTag, err)
		}
		var ii ImageInspect
		if err = json.Unmarshal([]byte(stdout), &ii); err != nil {
			return fmt.Errorf("unmarshal %s error %v", imageRepoTag, err)
		}
		if len(ii.Status.RepoTags) == 0 {
			return fmt.Errorf("inspect %s returned no repo tags", imageRepoTag)
		}
		repoTag = ii.Status.RepoTags[0]
	}
	var dstFile = path.Join(dst, fmt.Sprintf("%s.tar", imageHashTag))
	cmd = fmt.Sprintf(exportCmd, dstFile, repoTag)
	if _, err := runner.Host.SudoCmd(cmd, false, false); err != nil {
		return fmt.Errorf("export %s error: %v", imageRepoTag, err)
	}
	if _, err := runner.Host.SudoCmd(fmt.Sprintf("gzip %s", dstFile), false, false); err != nil {
		return fmt.Errorf("gzip %s error: %v", dstFile, err)
	}
	return nil
}
func getImageFileSize(url string) (int64, error) {
resp, err := http.Head(url)
if err != nil {
return -1, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return -1, fmt.Errorf("bad status: %s", resp.Status)
}
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
return -1, fmt.Errorf("failed to parse content length: %v, header: %s", err, resp.Header.Get("Content-Length"))
}
return size, nil
}
// RateLimiter is a simple token-less rate limiter for grab downloads:
// r is the allowed rate (units per second), n accumulates the total units
// waited for so far.
type RateLimiter struct {
	r, n int
}

// NewLimiter returns a grab.RateLimiter allowing r units per second.
func NewLimiter(r int) grab.RateLimiter {
	return &RateLimiter{r: r}
}

// WaitN sleeps n/r seconds to throttle the transfer.
//
// Fix: the previous implementation ignored ctx entirely, so a cancelled or
// timed-out download still blocked for the full sleep. The wait is now a
// select on a timer versus ctx.Done().
func (c *RateLimiter) WaitN(ctx context.Context, n int) (err error) {
	c.n += n
	d := time.Duration(1.00 / float64(c.r) * float64(n) * float64(time.Second))
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-timer.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

View File

@@ -41,9 +41,7 @@ func (p *PreloadImagesModule) Init() {
Name: "PreloadImages",
Hosts: p.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
// &MasterPullImages{Not: true},
&plugins.IsCloudInstance{Not: true},
// &CheckImageManifest{},
&ContainerdInstalled{},
},
Action: &LoadImages{

View File

@@ -22,21 +22,6 @@ import (
"bytetrade.io/web3os/installer/pkg/core/connector"
)
// MasterPullImages is a prepare-phase check driven by the host-cache flag
// SkipMasterNodePullImages; Not inverts the result when the skip flag is set.
type MasterPullImages struct {
	common.KubePrepare
	Not bool
}

// PreCheck returns !Not when the skip flag is cached as true and Not is set;
// otherwise it returns Not unchanged.
func (n *MasterPullImages) PreCheck(runtime connector.Runtime) (bool, error) {
	hostCache := runtime.RemoteHost().GetCache()
	skip, ok := hostCache.GetMustBool(common.SkipMasterNodePullImages)
	if ok && skip && n.Not {
		return !n.Not, nil
	}
	return n.Not, nil
}
// ContainerdInstalled is a prepare-phase check; its PreCheck implementation
// is defined elsewhere (outside this view).
type ContainerdInstalled struct {
	common.KubePrepare
}

View File

@@ -17,177 +17,16 @@
package images
import (
"fmt"
"strings"
kubekeyv1alpha2 "bytetrade.io/web3os/installer/apis/kubekey/v1alpha2"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"github.com/containerd/containerd/platforms"
"github.com/containers/image/v5/types"
manifesttypes "github.com/estesp/manifest-tool/v2/pkg/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
versionutil "k8s.io/apimachinery/pkg/util/version"
)
// defaultUserAgent is sent as the Docker registry user agent on image
// operations built from dockerImageOptions.systemContext.
var defaultUserAgent = "kubekey"

// k8sImageRepositoryAddr is the upstream Kubernetes image registry host.
var k8sImageRepositoryAddr = "registry.k8s.io"
// dockerImageOptions holds the platform selection (arch/os/variant) and the
// registry credentials / TLS settings shared by source and destination image
// references.
type dockerImageOptions struct {
	arch           string
	os             string
	variant        string
	username       string
	password       string
	dockerCertPath string
	SkipTLSVerify  bool
}
// ImageInspect mirrors the JSON document printed by `crictl inspecti`;
// only the "status" object is consumed in this package.
type ImageInspect struct {
	Status ImageInspectStatus `json:"status"`
}

// ImageInspectStatus is the "status" payload of an image inspect result:
// the image id plus its repo tags/digests and size as reported by the
// container runtime.
type ImageInspectStatus struct {
	Id          string   `json:"id"`
	RepoTags    []string `json:"repoTags"`
	RepoDigests []string `json:"repoDigests"`
	Size        string   `json:"size"`
	Uid         struct{} `json:"uid"`
	Username    string   `json:"username"`
	Spec        struct{} `json:"spec"`
}
// systemContext converts the options into a containers/image SystemContext,
// fixing the platform choice, user agent and TLS-verification policy.
func (d *dockerImageOptions) systemContext() *types.SystemContext {
	ctx := &types.SystemContext{
		ArchitectureChoice:          d.arch,
		OSChoice:                    d.os,
		VariantChoice:               d.variant,
		DockerRegistryUserAgent:     defaultUserAgent,
		DockerInsecureSkipTLSVerify: types.NewOptionalBool(d.SkipTLSVerify),
	}
	return ctx
}
// srcImageOptions describes the image being copied FROM: its name, the
// shared OCI blob directory, and the embedded docker platform/auth options.
type srcImageOptions struct {
	dockerImage   dockerImageOptions
	imageName     string
	sharedBlobDir string
}

// systemContext extends the base docker SystemContext with the source-side
// cert path, shared blob dir and auth credentials.
func (s *srcImageOptions) systemContext() *types.SystemContext {
	ctx := s.dockerImage.systemContext()
	ctx.DockerCertPath = s.dockerImage.dockerCertPath
	ctx.OCISharedBlobDirPath = s.sharedBlobDir
	ctx.DockerAuthConfig = &types.DockerAuthConfig{
		Username: s.dockerImage.username,
		Password: s.dockerImage.password,
	}
	return ctx
}
// destImageOptions describes the image being copied TO: its name plus the
// embedded docker platform/auth options.
type destImageOptions struct {
	dockerImage dockerImageOptions
	imageName   string
}

// systemContext extends the base docker SystemContext with the
// destination-side cert path and auth credentials.
func (d *destImageOptions) systemContext() *types.SystemContext {
	ctx := d.dockerImage.systemContext()
	ctx.DockerCertPath = d.dockerImage.dockerCertPath
	ctx.DockerAuthConfig = &types.DockerAuthConfig{
		Username: d.dockerImage.username,
		Password: d.dockerImage.password,
	}
	return ctx
}
// ParseArchVariant
// Ex:
// amd64 returns amd64, ""
// arm/v8 returns arm, v8
// Splits on "/": the first field is the arch, the second (if any) is the
// variant; extra fields beyond the second are ignored.
func ParseArchVariant(platform string) (string, string) {
	parts := strings.Split(platform, "/")
	arch := parts[0]
	variant := ""
	if len(parts) > 1 {
		variant = parts[1]
	}
	return arch, variant
}
// ParseImageWithArchTag splits a "<ref>-<arch>" or "<ref>-<arch>-<variant>"
// image reference into the bare ref and its OCI platform, always assuming a
// linux OS. On any unparsable input it terminates the process via
// logger.Fatalf rather than returning an error.
func ParseImageWithArchTag(ref string) (string, ocispec.Platform) {
	// The arch (or variant) is the segment after the LAST '-'.
	n := strings.LastIndex(ref, "-")
	if n < 0 {
		logger.Fatalf("get arch or variant index failed: %s", ref)
	}
	archOrVariant := ref[n+1:]
	// try to parse the arch-only case
	specifier := fmt.Sprintf("linux/%s", archOrVariant)
	if p, err := platforms.Parse(specifier); err == nil && isKnownArch(p.Architecture) {
		return ref[:n], p
	}
	// Not a known arch on its own: treat the last segment as a variant and
	// look one '-' further back for the arch.
	archStr := ref[:n]
	a := strings.LastIndex(archStr, "-")
	if a < 0 {
		logger.Fatalf("get arch index failed: %s", ref)
	}
	arch := archStr[a+1:]
	// parse the case where both arch and variant exist
	specifier = fmt.Sprintf("linux/%s/%s", arch, archOrVariant)
	p, err := platforms.Parse(specifier)
	if err != nil {
		logger.Fatalf("parse image %s failed: %s", ref, err.Error())
	}
	return ref[:a], p
}
func isKnownArch(arch string) bool {
switch arch {
case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
return true
}
return false
}
// ParseImageTag
// Get a repos name and returns the right reposName + tag
// The tag can be confusing because of a port in a repository name.
//
// Ex: localhost.localdomain:5000/samalba/hipache:latest
func ParseImageTag(repos string) (string, string) {
n := strings.LastIndex(repos, ":")
if n < 0 {
return repos, ""
}
if tag := repos[n+1:]; !strings.Contains(tag, "/") {
return repos[:n], tag
}
return repos, ""
}
func NewManifestSpec(image string, entries []manifesttypes.ManifestEntry) manifesttypes.YAMLInput {
var srcImages []manifesttypes.ManifestEntry
for _, e := range entries {
srcImages = append(srcImages, manifesttypes.ManifestEntry{
Image: e.Image,
Platform: e.Platform,
})
}
return manifesttypes.YAMLInput{
Image: image,
Manifests: srcImages,
}
}
// GetImage defines the list of all images and gets image object by name.
func GetImage(runtime connector.ModuleRuntime, kubeConf *common.KubeConf, name string) Image {
var image Image

View File

@@ -17,17 +17,17 @@
package k3s
import (
"bytetrade.io/web3os/installer/pkg/kubernetes"
"path/filepath"
"strings"
"time"
"bytetrade.io/web3os/installer/pkg/kubernetes"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/container"
containertemplates "bytetrade.io/web3os/installer/pkg/container/templates"
"bytetrade.io/web3os/installer/pkg/core/action"
cc "bytetrade.io/web3os/installer/pkg/core/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
@@ -598,39 +598,3 @@ func (s *SaveKubeConfigModule) Init() {
save,
}
}
type UninstallK3sModule struct {
common.KubeModule
}
func (m *UninstallK3sModule) Init() {
m.Name = "UninstallK3s"
uninstallK3s := &task.LocalTask{
Name: "UninstallK3s",
Prepare: new(CheckK3sUninstallScript),
Action: new(UninstallK3s),
}
deleteCalicoCNI := &task.LocalTask{
Name: "DeleteCalicoCNI",
Action: new(DeleteCalicoCNI),
Retry: 1,
}
m.Tasks = []task.Interface{
uninstallK3s,
deleteCalicoCNI,
}
}
func checkContainerExists(runtime connector.Runtime) bool {
var cmd = "if [ -z $(which containerd) ] || [ ! -e /run/containerd/containerd.sock ]; " +
"then echo 'not exist'; " +
"fi"
var runner = runtime.GetRunner()
if output, err := runner.Host.SudoCmd(cmd, false, false); err != nil || strings.Contains(output, "not exist") {
return false
}
return true
}

View File

@@ -20,10 +20,8 @@ import (
"context"
"encoding/base64"
"fmt"
"path"
"path/filepath"
"strings"
"time"
"bytetrade.io/web3os/installer/pkg/storage"
storagetpl "bytetrade.io/web3os/installer/pkg/storage/templates"
@@ -43,7 +41,6 @@ import (
"bytetrade.io/web3os/installer/pkg/k3s/templates"
"bytetrade.io/web3os/installer/pkg/utils"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/v4/net"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
versionutil "k8s.io/apimachinery/pkg/util/version"
@@ -689,42 +686,3 @@ func (g *GenerateK3sRegistryConfig) Execute(runtime connector.Runtime) error {
}
return nil
}
type UninstallK3s struct {
common.KubeAction
}
func (t *UninstallK3s) Execute(runtime connector.Runtime) error {
var scriptPath = path.Join(common.BinDir, "k3s-uninstall.sh")
if _, err := runtime.GetRunner().SudoCmd(scriptPath, false, true); err != nil {
return err
}
return nil
}
type DeleteCalicoCNI struct {
common.KubeAction
}
func (t *DeleteCalicoCNI) Execute(runtime connector.Runtime) error {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
ifInfo, _ := net.InterfacesWithContext(ctx)
if ifInfo == nil {
return nil
}
for _, i := range ifInfo {
var name = i.Name
if len(name) < 5 || name[0:4] != "cali" {
continue
}
if _, err := runtime.GetRunner().Cmd(fmt.Sprintf("ip link delete %s", name), false, false); err != nil {
logger.Errorf("delete ip link %s error %v", name, err)
}
time.Sleep(200 * time.Millisecond)
}
return nil
}

View File

@@ -1,44 +0,0 @@
package kubernetes
import (
"context"
"fmt"
"strings"
"time"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/util"
)
func CheckKubeExists() (string, string, bool) {
var kubectl, err = util.GetCommand(common.CommandKubectl)
if err != nil || kubectl == "" {
return "", "", false
}
var ver string
var ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
var cmd = fmt.Sprintf("%s get nodes -o jsonpath='{.items[0].status.nodeInfo.kubeletVersion}'", kubectl)
stdout, _, err := util.ExecWithContext(ctx, cmd, false, false)
if err != nil || stdout == "" {
return "", "", false
}
if strings.Contains(stdout, "k3s") {
if strings.Contains(stdout, "-") {
stdout = strings.ReplaceAll(stdout, "-", "+")
}
var v1 = strings.Split(stdout, "+")
if len(v1) != 2 {
return ver, "k3s", true
}
ver = fmt.Sprintf("%s-k3s", v1[0])
return ver, "k3s", true
} else {
ver = stdout
return ver, "k8s", true
}
}

View File

@@ -17,7 +17,6 @@
package kubernetes
import (
"fmt"
"time"
"bytetrade.io/web3os/installer/pkg/common"
@@ -38,10 +37,9 @@ func (k *StatusModule) Init() {
k.PipelineCache.GetOrSet(common.ClusterStatus, cluster)
clusterStatus := &task.RemoteTask{
Name: "GetClusterStatus",
Desc: "Get kubernetes cluster status",
Hosts: k.Runtime.GetHostsByRole(common.Master),
//Prepare: new(NoClusterInfo),
Name: "GetClusterStatus",
Desc: "Get kubernetes cluster status",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Action: new(GetClusterStatus),
Parallel: false,
}
@@ -382,79 +380,6 @@ func (c *UmountKubeModule) Init() {
}
}
type CompareConfigAndClusterInfoModule struct {
common.KubeModule
}
func (c *CompareConfigAndClusterInfoModule) Init() {
c.Name = "CompareConfigAndClusterInfoModule"
c.Desc = "Compare config and cluster nodes info"
check := &task.RemoteTask{
Name: "FindNode(k8s)",
Desc: "Find information about nodes that are expected to be deleted",
Hosts: c.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(FindNode),
}
c.Tasks = []task.Interface{
check,
}
}
type DeleteKubeNodeModule struct {
common.KubeModule
}
func (d *DeleteKubeNodeModule) Init() {
d.Name = "DeleteKubeNodeModule"
d.Desc = "Delete kubernetes node"
drain := &task.RemoteTask{
Name: "DrainNode(k8s)",
Desc: "Node safely evict all pods",
Hosts: d.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(DrainNode),
Retry: 2,
}
deleteNode := &task.RemoteTask{
Name: "DeleteNode(k8s)",
Desc: "Delete the node using kubectl",
Hosts: d.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(KubectlDeleteNode),
Retry: 5,
}
d.Tasks = []task.Interface{
drain,
deleteNode,
}
}
type SetUpgradePlanModule struct {
common.KubeModule
Step UpgradeStep
}
func (s *SetUpgradePlanModule) Init() {
s.Name = fmt.Sprintf("SetUpgradePlanModule %d/%d", s.Step, len(UpgradeStepList))
s.Desc = "Set upgrade plan"
plan := &task.LocalTask{
Name: "SetUpgradePlan(k8s)",
Desc: "Set upgrade plan",
Action: &SetUpgradePlan{Step: s.Step},
}
s.Tasks = []task.Interface{
plan,
}
}
type SaveKubeConfigModule struct {
common.KubeModule
}

View File

@@ -17,31 +17,12 @@
package kubernetes
import (
"fmt"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/util"
"github.com/pkg/errors"
)
type NoClusterInfo struct {
common.KubePrepare
}
func (n *NoClusterInfo) PreCheck(_ connector.Runtime) (bool, error) {
if v, ok := n.PipelineCache.Get(common.ClusterStatus); ok {
cluster := v.(*KubernetesStatus)
if cluster.ClusterInfo == "" {
return true, nil
}
} else {
return false, errors.New("get kubernetes cluster status by pipeline cache failed")
}
return false, nil
}
type NodesInfoGetter interface {
GetNodesInfo() map[string]string
}
@@ -93,75 +74,6 @@ func (c *ClusterIsExist) PreCheck(_ connector.Runtime) (bool, error) {
}
}
type NotEqualPlanVersion struct {
common.KubePrepare
}
func (n *NotEqualPlanVersion) PreCheck(runtime connector.Runtime) (bool, error) {
planVersion, ok := n.PipelineCache.GetMustString(common.PlanK8sVersion)
if !ok {
return false, errors.New("get upgrade plan Kubernetes version failed by pipeline cache")
}
currentVersion, ok := n.PipelineCache.GetMustString(common.K8sVersion)
if !ok {
return false, errors.New("get cluster Kubernetes version failed by pipeline cache")
}
if currentVersion == planVersion {
return false, nil
}
return true, nil
}
type ClusterNotEqualDesiredVersion struct {
common.KubePrepare
}
func (c *ClusterNotEqualDesiredVersion) PreCheck(runtime connector.Runtime) (bool, error) {
clusterK8sVersion, ok := c.PipelineCache.GetMustString(common.K8sVersion)
if !ok {
return false, errors.New("get cluster Kubernetes version failed by pipeline cache")
}
if c.KubeConf.Cluster.Kubernetes.Version == clusterK8sVersion {
return false, nil
}
return true, nil
}
type NotEqualDesiredVersion struct {
common.KubePrepare
}
func (n *NotEqualDesiredVersion) PreCheck(runtime connector.Runtime) (bool, error) {
host := runtime.RemoteHost()
nodeK8sVersion, ok := host.GetCache().GetMustString(common.NodeK8sVersion)
if !ok {
return false, errors.New("get node Kubernetes version failed by host cache")
}
if n.KubeConf.Cluster.Kubernetes.Version == nodeK8sVersion {
return false, nil
}
return true, nil
}
type GetKubeletVersion struct {
common.KubePrepare
CommandDelete bool
}
func (g *GetKubeletVersion) PreCheck(runtime connector.Runtime) (bool, error) {
kubeletVersion, err := runtime.GetRunner().SudoCmd("/usr/local/bin/kubectl get nodes -o jsonpath='{.items[0].status.nodeInfo.kubeletVersion}'", false, true)
if err != nil {
logger.Errorf("failed to get kubelet version: %v", err)
return false, fmt.Errorf("failed to get kubelet version: %v", err)
}
g.PipelineCache.Set(common.CacheKubeletVersion, kubeletVersion)
return true, nil
}
type CheckKubeadmExist struct {
common.KubePrepare
}

View File

@@ -24,7 +24,6 @@ import (
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"time"
@@ -38,7 +37,6 @@ import (
corev1 "k8s.io/api/core/v1"
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
versionutil "k8s.io/apimachinery/pkg/util/version"
kube "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -47,10 +45,7 @@ import (
"bytetrade.io/web3os/installer/pkg/core/action"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/files"
"bytetrade.io/web3os/installer/pkg/images"
"bytetrade.io/web3os/installer/pkg/kubernetes/templates"
"bytetrade.io/web3os/installer/pkg/kubernetes/templates/v1beta2"
@@ -637,66 +632,6 @@ func (u *UmountKubelet) Execute(runtime connector.Runtime) error {
return nil
}
type FindNode struct {
common.KubeAction
}
func (f *FindNode) Execute(runtime connector.Runtime) error {
var resArr []string
res, err := runtime.GetRunner().Cmd(
"sudo -E /usr/local/bin/kubectl get nodes | grep -v NAME | grep -v 'master\\|control-plane' | awk '{print $1}'",
true, false)
if err != nil {
return errors.Wrap(errors.WithStack(err), "kubectl get nodes failed")
}
if !strings.Contains(res, "\r\n") {
resArr = append(resArr, res)
} else {
resArr = strings.Split(res, "\r\n")
}
workerName := make(map[string]struct{})
for j := 0; j < len(runtime.GetHostsByRole(common.Worker)); j++ {
workerName[runtime.GetHostsByRole(common.Worker)[j].GetName()] = struct{}{}
}
var node string
for i := 0; i < len(resArr); i++ {
if _, ok := workerName[resArr[i]]; ok && resArr[i] == f.KubeConf.Arg.NodeName {
node = resArr[i]
break
}
}
if node == "" {
return errors.New("" +
"1. check the node name in the config-sample.yaml\n" +
"2. check the node name in the Kubernetes cluster\n" +
"3. only support to delete a worker\n")
}
f.PipelineCache.Set("dstNode", node)
return nil
}
type DrainNode struct {
common.KubeAction
}
func (d *DrainNode) Execute(runtime connector.Runtime) error {
nodeName, ok := d.PipelineCache.Get("dstNode")
if !ok {
return errors.New("get dstNode failed by pipeline cache")
}
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(
"/usr/local/bin/kubectl drain %s --delete-emptydir-data --ignore-daemonsets --timeout=2m --force", nodeName),
true, false); err != nil {
return errors.Wrap(err, "drain the node failed")
}
return nil
}
type KubectlDeleteCurrentWorkerNode struct {
common.KubeAction
FailOnError bool
@@ -725,298 +660,6 @@ func (k *KubectlDeleteCurrentWorkerNode) Execute(runtime connector.Runtime) erro
return nil
}
type KubectlDeleteNode struct {
common.KubeAction
}
func (k *KubectlDeleteNode) Execute(runtime connector.Runtime) error {
nodeName, ok := k.PipelineCache.Get("dstNode")
if !ok {
return errors.New("get dstNode failed by pipeline cache")
}
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(
"/usr/local/bin/kubectl delete node %s", nodeName),
true, false); err != nil {
return errors.Wrap(err, "delete the node failed")
}
return nil
}
type SetUpgradePlan struct {
common.KubeAction
Step UpgradeStep
}
func (s *SetUpgradePlan) Execute(_ connector.Runtime) error {
currentVersion, ok := s.PipelineCache.GetMustString(common.K8sVersion)
if !ok {
return errors.New("get current Kubernetes version failed by pipeline cache")
}
desiredVersion, ok := s.PipelineCache.GetMustString(common.DesiredK8sVersion)
if !ok {
return errors.New("get desired Kubernetes version failed by pipeline cache")
}
if cmp, err := versionutil.MustParseSemantic(currentVersion).Compare(desiredVersion); err != nil {
return err
} else if cmp == 1 {
logger.Infof(
"%s The current version (%s) is greater than the target version (%s)",
common.LocalHost, currentVersion, desiredVersion)
os.Exit(0)
}
if s.Step == ToV121 {
v122 := versionutil.MustParseSemantic("v1.22.0")
atLeast := versionutil.MustParseSemantic(desiredVersion).AtLeast(v122)
cmp, err := versionutil.MustParseSemantic(currentVersion).Compare("v1.21.5")
if err != nil {
return err
}
if atLeast && cmp <= 0 {
desiredVersion = "v1.21.5"
}
}
s.PipelineCache.Set(common.PlanK8sVersion, desiredVersion)
return nil
}
type CalculateNextVersion struct {
common.KubeAction
}
func (c *CalculateNextVersion) Execute(_ connector.Runtime) error {
currentVersion, ok := c.PipelineCache.GetMustString(common.K8sVersion)
if !ok {
return errors.New("get current Kubernetes version failed by pipeline cache")
}
planVersion, ok := c.PipelineCache.GetMustString(common.PlanK8sVersion)
if !ok {
return errors.New("get upgrade plan Kubernetes version failed by pipeline cache")
}
nextVersionStr := calculateNextStr(currentVersion, planVersion)
c.KubeConf.Cluster.Kubernetes.Version = nextVersionStr
return nil
}
func calculateNextStr(currentVersion, desiredVersion string) string {
current := versionutil.MustParseSemantic(currentVersion)
target := versionutil.MustParseSemantic(desiredVersion)
var nextVersionMinor uint
if target.Minor() == current.Minor() {
nextVersionMinor = current.Minor()
} else {
nextVersionMinor = current.Minor() + 1
}
if nextVersionMinor == target.Minor() {
return desiredVersion
} else {
nextVersionPatchList := make([]int, 0)
for supportVersionStr := range files.FileSha256["kubeadm"]["amd64"] {
supportVersion := versionutil.MustParseSemantic(supportVersionStr)
if supportVersion.Minor() == nextVersionMinor {
nextVersionPatchList = append(nextVersionPatchList, int(supportVersion.Patch()))
}
}
sort.Ints(nextVersionPatchList)
nextVersion := current.WithMinor(nextVersionMinor)
nextVersion = nextVersion.WithPatch(uint(nextVersionPatchList[len(nextVersionPatchList)-1]))
return fmt.Sprintf("v%s", nextVersion.String())
}
}
type UpgradeKubeMaster struct {
common.KubeAction
ModuleName string
}
func (u *UpgradeKubeMaster) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
if err := KubeadmUpgradeTasks(runtime, u); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("upgrade cluster using kubeadm failed: %s", host.GetName()))
}
if _, err := runtime.GetRunner().SudoCmd("systemctl stop kubelet", false, false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("stop kubelet failed: %s", host.GetName()))
}
if err := SetKubeletTasks(runtime, u.KubeAction); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("set kubelet failed: %s", host.GetName()))
}
if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart kubelet", true, false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("restart kubelet failed: %s", host.GetName()))
}
time.Sleep(10 * time.Second)
return nil
}
type UpgradeKubeWorker struct {
common.KubeAction
ModuleName string
}
func (u *UpgradeKubeWorker) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
if _, err := runtime.GetRunner().SudoCmd("/usr/local/bin/kubeadm upgrade node", true, false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("upgrade node using kubeadm failed: %s", host.GetName()))
}
if _, err := runtime.GetRunner().SudoCmd("systemctl stop kubelet", true, false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("stop kubelet failed: %s", host.GetName()))
}
if err := SetKubeletTasks(runtime, u.KubeAction); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("set kubelet failed: %s", host.GetName()))
}
if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart kubelet", true, false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("restart kubelet failed: %s", host.GetName()))
}
time.Sleep(10 * time.Second)
if err := SyncKubeConfigTask(runtime, u.KubeAction); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("sync kube config to worker failed: %s", host.GetName()))
}
return nil
}
func KubeadmUpgradeTasks(runtime connector.Runtime, u *UpgradeKubeMaster) error {
host := runtime.RemoteHost()
kubeadmUpgrade := &task.RemoteTask{
Name: "KubeadmUpgrade(k8s)",
Desc: "Upgrade cluster using kubeadm",
Hosts: []connector.Host{host},
Prepare: new(NotEqualDesiredVersion),
Action: new(KubeadmUpgrade),
Parallel: false,
Retry: 3,
}
copyKubeConfig := &task.RemoteTask{
Name: "CopyKubeConfig",
Desc: "Copy admin.conf to ~/.kube/config",
Hosts: []connector.Host{host},
Prepare: new(NotEqualDesiredVersion),
Action: new(CopyKubeConfigForControlPlane),
Parallel: false,
Retry: 2,
}
tasks := []task.Interface{
kubeadmUpgrade,
copyKubeConfig,
}
for i := range tasks {
t := tasks[i]
t.Init(runtime, u.ModuleCache, u.PipelineCache)
if res := t.Execute(); res.IsFailed() {
return res.CombineErr()
}
}
return nil
}
type KubeadmUpgrade struct {
common.KubeAction
}
func (k *KubeadmUpgrade) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(
"timeout -k 600s 600s /usr/local/bin/kubeadm upgrade apply %s -y "+
"--ignore-preflight-errors=all "+
"--allow-experimental-upgrades "+
"--allow-release-candidate-upgrades "+
"--etcd-upgrade=false "+
"--certificate-renewal=true ",
k.KubeConf.Cluster.Kubernetes.Version), false, false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("upgrade master failed: %s", host.GetName()))
}
return nil
}
func SetKubeletTasks(runtime connector.Runtime, kubeAction common.KubeAction) error {
host := runtime.RemoteHost()
syncKubelet := &task.RemoteTask{
Name: "SyncKubelet",
Desc: "synchronize kubelet",
Hosts: []connector.Host{host},
Prepare: new(NotEqualDesiredVersion),
Action: new(SyncKubelet),
Parallel: false,
Retry: 2,
}
enableKubelet := &task.RemoteTask{
Name: "EnableKubelet",
Desc: "enable kubelet service",
Hosts: []connector.Host{host},
Prepare: new(NotEqualDesiredVersion),
Action: new(EnableKubelet),
Parallel: false,
Retry: 5,
}
tasks := []task.Interface{
syncKubelet,
enableKubelet,
}
for i := range tasks {
t := tasks[i]
t.Init(runtime, kubeAction.ModuleCache, kubeAction.PipelineCache)
if res := t.Execute(); res.IsFailed() {
return res.CombineErr()
}
}
return nil
}
func SyncKubeConfigTask(runtime connector.Runtime, kubeAction common.KubeAction) error {
host := runtime.RemoteHost()
syncKubeConfig := &task.RemoteTask{
Name: "SyncKubeConfig",
Desc: "synchronize kube config to worker",
Hosts: []connector.Host{host},
Prepare: &prepare.PrepareCollection{
new(NotEqualDesiredVersion),
new(common.OnlyWorker),
},
Action: new(SyncKubeConfigToWorker),
Parallel: true,
Retry: 3,
}
tasks := []task.Interface{
syncKubeConfig,
}
for i := range tasks {
t := tasks[i]
t.Init(runtime, kubeAction.ModuleCache, kubeAction.PipelineCache)
if res := t.Execute(); res.IsFailed() {
return res.CombineErr()
}
}
return nil
}
type SetCurrentK8sVersion struct {
common.KubeAction
}
func (s *SetCurrentK8sVersion) Execute(_ connector.Runtime) error {
s.PipelineCache.Set(common.K8sVersion, s.KubeConf.Cluster.Kubernetes.Version)
return nil
}
type SaveKubeConfig struct {
common.KubeAction
}

View File

@@ -1,40 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
type UpgradeStep int
const (
ToV121 UpgradeStep = iota + 1
ToV122
)
var UpgradeStepList = []UpgradeStep{
ToV121,
ToV122,
}
func (u UpgradeStep) String() string {
switch u {
case ToV121:
return "to v1.21"
case ToV122:
return "to v1.22"
default:
return "invalid option"
}
}

View File

@@ -17,18 +17,16 @@
package kubesphere
import (
"path/filepath"
"time"
"bytetrade.io/web3os/installer/pkg/core/action"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/version/kubesphere/templates"
"fmt"
"os"
"path/filepath"
"time"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/version/kubesphere"
)
type DeleteKubeSphereCachesModule struct {
@@ -159,44 +157,6 @@ func (d *DeployModule) Init() {
}
}
func MirrorRepo(kubeConf *common.KubeConf) string {
repo := kubeConf.Cluster.Registry.PrivateRegistry
namespaceOverride := kubeConf.Cluster.Registry.NamespaceOverride
version := kubeConf.Cluster.KubeSphere.Version
_, ok := kubesphere.CNSource[version]
if ok && os.Getenv("KKZONE") == "cn" {
if repo == "" {
repo = "registry.cn-beijing.aliyuncs.com/kubesphereio"
} else if len(namespaceOverride) != 0 {
repo = fmt.Sprintf("%s/%s", repo, namespaceOverride)
} else {
repo = fmt.Sprintf("%s/kubesphere", repo)
}
} else {
if repo == "" {
_, latest := kubesphere.LatestRelease(version)
_, dev := kubesphere.DevRelease(version)
_, stable := kubesphere.StabledVersionSupport(version)
switch {
case stable:
repo = "kubesphere"
case dev:
repo = "kubespheredev"
case latest:
repo = "kubespheredev"
default:
repo = "kubesphere"
}
} else if len(namespaceOverride) != 0 {
repo = fmt.Sprintf("%s/%s", repo, namespaceOverride)
} else {
repo = fmt.Sprintf("%s/kubesphere", repo)
}
}
return repo
}
type CheckResultModule struct {
common.KubeModule
Skip bool
@@ -240,59 +200,3 @@ func (c *CheckResultModule) Init() {
getKubeCommand,
}
}
type CleanClusterConfigurationModule struct {
common.KubeModule
Skip bool
}
func (c *CleanClusterConfigurationModule) IsSkip() bool {
return c.Skip
}
func (c *CleanClusterConfigurationModule) Init() {
c.Name = "CleanClusterConfigurationModule"
c.Desc = "Clean redundant ClusterConfiguration config"
// ensure there is no cc config, and prevent to reset cc config when upgrade the cluster
clean := &task.LocalTask{
Name: "CleanClusterConfiguration",
Desc: "Clean redundant ClusterConfiguration config",
Action: new(CleanCC),
}
c.Tasks = []task.Interface{
clean,
}
}
type ConvertModule struct {
common.KubeModule
Skip bool
}
func (c *ConvertModule) IsSkip() bool {
return c.Skip
}
func (c *ConvertModule) Init() {
c.Name = "ConvertModule"
c.Desc = "Convert ks-installer config v2 to v3"
convert := &task.RemoteTask{
Name: "ConvertV2ToV3",
Desc: "Convert ks-installer config v2 to v3",
Hosts: c.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(NotEqualDesiredVersion),
new(VersionBelowV3),
},
Action: new(ConvertV2ToV3),
Parallel: true,
}
c.Tasks = []task.Interface{
convert,
}
}

View File

@@ -1,42 +0,0 @@
package plugins
import (
"path"
"bytetrade.io/web3os/installer/pkg/common"
cc "bytetrade.io/web3os/installer/pkg/core/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/util"
)
type CachedBuilder struct {
common.KubeAction
}
func (t *CachedBuilder) Execute(runtime connector.Runtime) error {
// cachedDir := path.Join(runtime.GetHomeDir(), cc.TerminusKey, cc.ManifestDir)
cachedDir := path.Join(runtime.GetBaseDir(), cc.ManifestDir)
if !util.IsExist(cachedDir) {
util.Mkdir(cachedDir)
}
// cachedImageDir := path.Join(runtime.GetHomeDir(), cc.TerminusKey, cc.ImageCacheDir)
cachedImageDir := path.Join(runtime.GetBaseDir(), cc.ImageCacheDir)
if !util.IsExist(cachedImageDir) {
util.Mkdir(cachedImageDir)
}
// cachedPkgDir := path.Join(runtime.GetHomeDir(), cc.TerminusKey, cc.PackageCacheDir)
cachedPkgDir := path.Join(runtime.GetBaseDir(), cc.PackageCacheDir)
if !util.IsExist(cachedPkgDir) {
util.Mkdir(cachedPkgDir)
}
// cachedBuildFilesDir := path.Join(runtime.GetHomeDir(), cc.TerminusKey, cc.BuildFilesCacheDir)
cachedBuildFilesDir := path.Join(runtime.GetBaseDir(), cc.BuildFilesCacheDir)
if !util.IsExist(cachedBuildFilesDir) {
util.Mkdir(cachedBuildFilesDir)
}
return nil
}

View File

@@ -8,23 +8,6 @@ import (
"bytetrade.io/web3os/installer/pkg/core/task"
)
type GenerateCachedModule struct {
common.KubeModule
}
func (m *GenerateCachedModule) Init() {
m.Name = "GenerateCachedDir"
cachedBuilder := &task.LocalTask{
Name: "GenerateCachedDir",
Action: new(CachedBuilder),
}
m.Tasks = []task.Interface{
cachedBuilder,
}
}
type CopyEmbed struct {
common.KubeModule
}
@@ -73,45 +56,8 @@ func (t *DeployKsPluginsModule) Init() {
Parallel: false,
}
// checkMasterNum := &task.RemoteTask{
// Name: "CheckMasterNum",
// Hosts: t.Runtime.GetHostsByRole(common.Master),
// Prepare: &prepare.PrepareCollection{
// new(common.OnlyFirstMaster),
// new(NotEqualDesiredVersion),
// },
// Action: new(CheckMasterNum),
// Parallel: true,
// }
t.Tasks = []task.Interface{
checkNodeState,
initNs,
// checkMasterNum,
}
}
// +
type DebugModule struct {
common.KubeModule
}
func (m *DebugModule) Init() {
m.Name = "Debug"
patchRedis := &task.RemoteTask{
Name: "PatchRedis",
Hosts: m.Runtime.GetHostsByRole(common.ETCD),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(NotEqualDesiredVersion),
},
Action: new(PatchRedisStatus),
Parallel: true,
}
m.Tasks = []task.Interface{
patchRedis,
}
}

View File

@@ -1,89 +0,0 @@
package plugins
import (
"context"
"fmt"
"path"
"time"
"bytetrade.io/web3os/installer/pkg/common"
cc "bytetrade.io/web3os/installer/pkg/core/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/utils"
ctrl "sigs.k8s.io/controller-runtime"
)
type CreateMonitorNotification struct {
common.KubeAction
}
func (t *CreateMonitorNotification) Execute(runtime connector.Runtime) error {
nodeNumIf, ok := t.PipelineCache.Get(common.CacheNodeNum)
if !ok || nodeNumIf == nil {
return fmt.Errorf("node get failed")
}
var replicas int
var nodeNum = nodeNumIf.(int64)
if nodeNum < 3 {
replicas = 1
} else {
replicas = 2
}
config, err := ctrl.GetConfig()
if err != nil {
return err
}
var appName = common.ChartNameMonitorNotification
var appPath = path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, "ks-monitor", "notification-manager")
actionConfig, settings, err := utils.InitConfig(config, common.NamespaceKubesphereMonitoringSystem)
if err != nil {
return err
}
var ctx, cancel = context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
var values = make(map[string]interface{})
values["Release"] = map[string]string{
"Namespace": common.NamespaceKubesphereMonitoringSystem,
"Replicas": fmt.Sprintf("%d", replicas),
}
if err := utils.UpgradeCharts(ctx, actionConfig, settings, appName, appPath, "", common.NamespaceKubesphereMonitoringSystem, values, false); err != nil {
return err
}
return nil
}
// +
type CreateNotificationModule struct {
common.KubeModule
}
func (m *CreateNotificationModule) Init() {
m.Name = "CreateMonitorNotification"
createMonitorNotifiction := &task.RemoteTask{
Name: "CreateNotification",
Hosts: m.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(NotEqualDesiredVersion),
new(common.GetNodeNum),
},
Action: new(CreateMonitorNotification),
Parallel: false,
Retry: 0,
}
m.Tasks = []task.Interface{
createMonitorNotifiction,
}
}

View File

@@ -25,7 +25,6 @@ import (
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/utils"
"github.com/pkg/errors"
versionutil "k8s.io/apimachinery/pkg/util/version"
)
type IsCloudInstance struct {
@@ -98,23 +97,6 @@ func (p *GenerateRedisPassword) PreCheck(runtime connector.Runtime) (bool, error
return true, nil
}
type VersionBelowV3 struct {
common.KubePrepare
}
func (v *VersionBelowV3) PreCheck(runtime connector.Runtime) (bool, error) {
versionStr, ok := v.PipelineCache.GetMustString(common.KubeSphereVersion)
if !ok {
return false, errors.New("get current kubesphere version failed by pipeline cache")
}
version := versionutil.MustParseSemantic(versionStr)
v300 := versionutil.MustParseSemantic("v3.0.0")
if v.KubeConf.Cluster.KubeSphere.Enabled && v.KubeConf.Cluster.KubeSphere.Version == "v3.0.0" && version.LessThan(v300) {
return true, nil
}
return false, nil
}
type NotEqualDesiredVersion struct {
common.KubePrepare
}

View File

@@ -78,15 +78,6 @@ func (t *BackupRedisManifests) Execute(runtime connector.Runtime) error {
return nil
}
type DeployRedisHA struct {
common.KubeAction
}
func (t *DeployRedisHA) Execute(runtime connector.Runtime) error {
return nil
}
type DeployRedis struct {
common.KubeAction
}
@@ -115,28 +106,6 @@ func (t *DeployRedis) Execute(runtime connector.Runtime) error {
return nil
}
type PatchRedisStatus struct {
common.KubeAction
}
func (t *PatchRedisStatus) Execute(runtime connector.Runtime) error {
//kubectlpath, err := util.GetCommand(common.CommandKubectl)
//if err != nil {
// return fmt.Errorf("kubectl not found")
//}
//
//var jsonPatch = fmt.Sprintf(`{"status": {"redis": {"status": "enabled", "enabledTime": "%s"}}}`,
// time.Now().Format("2006-01-02T15:04:05Z"))
//var cmd = fmt.Sprintf("%s patch cc ks-installer --type merge -p '%s' -n %s", kubectlpath, jsonPatch, common.NamespaceKubesphereSystem)
//
//_, err = runtime.GetRunner().SudoCmd(cmd, false, true)
//if err != nil {
// return errors.Wrap(errors.WithStack(err), "patch redis status failed")
//}
return nil
}
// +++++
type DeployRedisModule struct {
@@ -171,19 +140,6 @@ func (m *DeployRedisModule) Init() {
Retry: 0,
}
deployRedisHA := &task.RemoteTask{
Name: "DeployRedisHA",
Hosts: m.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(NotEqualDesiredVersion),
new(common.GetMasterNum),
},
Action: new(DeployRedisHA), // todo skip
Parallel: false,
Retry: 0,
}
deployRedis := &task.RemoteTask{
Name: "DeployRedis",
Hosts: m.Runtime.GetHostsByRole(common.Master),
@@ -197,24 +153,9 @@ func (m *DeployRedisModule) Init() {
Retry: 0,
}
patchRedis := &task.RemoteTask{
Name: "PatchRedisStatus",
Hosts: m.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(NotEqualDesiredVersion),
},
Action: new(PatchRedisStatus),
Parallel: false,
Retry: 10,
Delay: 5 * time.Second,
}
m.Tasks = []task.Interface{
createRedisSecret,
backupRedisManifests,
deployRedisHA, // todo
deployRedis,
patchRedis,
}
}

View File

@@ -1,89 +0,0 @@
package plugins
import (
"context"
"fmt"
"path"
"time"
"bytetrade.io/web3os/installer/pkg/common"
cc "bytetrade.io/web3os/installer/pkg/core/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/prepare"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/utils"
ctrl "sigs.k8s.io/controller-runtime"
)
type DeploySnapshotController struct {
common.KubeAction
}
func (t *DeploySnapshotController) Execute(runtime connector.Runtime) error {
kubectlpath, err := util.GetCommand(common.CommandKubectl)
if err != nil {
return fmt.Errorf("kubectl not found")
}
var buildFilesDir = path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir)
var scrd = path.Join(buildFilesDir, "snapshot-controller", "crds", "snapshot.storage.k8s.io_volumesnapshot.yaml")
var cmd = fmt.Sprintf("%s apply -f %s --force", kubectlpath, scrd)
if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
logger.Errorf("Install snapshot controller failed: %v", err)
}
config, err := ctrl.GetConfig()
if err != nil {
return err
}
var appName = common.ChartNameSnapshotController
var appPath = path.Join(runtime.GetInstallerDir(), cc.BuildFilesCacheDir, cc.BuildDir, appName)
actionConfig, settings, err := utils.InitConfig(config, common.NamespaceKubeSystem)
if err != nil {
return err
}
var ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
var values = make(map[string]interface{})
values["Release"] = map[string]string{
"Namespace": common.NamespaceKubeSystem,
}
if err := utils.UpgradeCharts(ctx, actionConfig, settings, appName, appPath, "",
common.NamespaceKubeSystem, values, false); err != nil {
return err
}
return nil
}
type DeploySnapshotControllerModule struct {
common.KubeModule
}
func (d *DeploySnapshotControllerModule) Init() {
d.Name = "DeploySnapshotController"
createSnapshotController := &task.RemoteTask{
Name: "CreateSnapshotController",
Hosts: d.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(NotEqualDesiredVersion),
},
Action: new(DeploySnapshotController),
Retry: 2,
Parallel: false,
}
d.Tasks = []task.Interface{
createSnapshotController,
}
}

View File

@@ -17,22 +17,20 @@
package kubesphere
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
kubekeyapiv1alpha2 "bytetrade.io/web3os/installer/apis/kubekey/v1alpha2"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/util"
ksv2 "bytetrade.io/web3os/installer/pkg/kubesphere/v2"
ksv3 "bytetrade.io/web3os/installer/pkg/kubesphere/v3"
"bytetrade.io/web3os/installer/pkg/version/kubesphere"
"bytetrade.io/web3os/installer/pkg/version/kubesphere/templates"
"fmt"
"github.com/pkg/errors"
yamlV2 "gopkg.in/yaml.v2"
"os"
"path"
"path/filepath"
"strings"
)
type DeleteKubeSphereCaches struct {
@@ -405,111 +403,3 @@ func (c *Check) Execute(runtime connector.Runtime) error {
//defer conn.Close()
return nil
}
type CleanCC struct {
common.KubeAction
}
func (c *CleanCC) Execute(runtime connector.Runtime) error {
c.KubeConf.Cluster.KubeSphere.Configurations = "\n"
return nil
}
type ConvertV2ToV3 struct {
common.KubeAction
}
func (c *ConvertV2ToV3) Execute(runtime connector.Runtime) error {
configV2Str, err := runtime.GetRunner().SudoCmd(
"/usr/local/bin/kubectl get cm -n kubesphere-system ks-installer -o jsonpath='{.data.ks-config\\.yaml}'",
false, false)
if err != nil {
return err
}
clusterCfgV2 := ksv2.V2{}
clusterCfgV3 := ksv3.V3{}
if err := yamlV2.Unmarshal([]byte(configV2Str), &clusterCfgV2); err != nil {
return err
}
configV3, err := MigrateConfig2to3(&clusterCfgV2, &clusterCfgV3)
if err != nil {
return err
}
c.KubeConf.Cluster.KubeSphere.Configurations = "---\n" + configV3
return nil
}
func MigrateConfig2to3(v2 *ksv2.V2, v3 *ksv3.V3) (string, error) {
v3.Etcd = ksv3.Etcd(v2.Etcd)
v3.Persistence = ksv3.Persistence(v2.Persistence)
v3.Alerting = ksv3.Alerting(v2.Alerting)
v3.Notification = ksv3.Notification(v2.Notification)
v3.LocalRegistry = v2.LocalRegistry
v3.Servicemesh = ksv3.Servicemesh(v2.Servicemesh)
v3.Devops = ksv3.Devops(v2.Devops)
v3.Openpitrix = ksv3.Openpitrix(v2.Openpitrix)
v3.Console = ksv3.Console(v2.Console)
if v2.MetricsServerNew.Enabled == "" {
if v2.MetricsServerOld.Enabled == "true" || v2.MetricsServerOld.Enabled == "True" {
v3.MetricsServer.Enabled = true
} else {
v3.MetricsServer.Enabled = false
}
} else {
if v2.MetricsServerNew.Enabled == "true" || v2.MetricsServerNew.Enabled == "True" {
v3.MetricsServer.Enabled = true
} else {
v3.MetricsServer.Enabled = false
}
}
v3.Monitoring.PrometheusMemoryRequest = v2.Monitoring.PrometheusMemoryRequest
//v3.Monitoring.PrometheusReplicas = v2.Monitoring.PrometheusReplicas
v3.Monitoring.PrometheusVolumeSize = v2.Monitoring.PrometheusVolumeSize
//v3.Monitoring.AlertmanagerReplicas = 1
v3.Common.EtcdVolumeSize = v2.Common.EtcdVolumeSize
v3.Common.MinioVolumeSize = v2.Common.MinioVolumeSize
v3.Common.MysqlVolumeSize = v2.Common.MysqlVolumeSize
v3.Common.OpenldapVolumeSize = v2.Common.OpenldapVolumeSize
v3.Common.RedisVolumSize = v2.Common.RedisVolumSize
//v3.Common.ES.ElasticsearchDataReplicas = v2.Logging.ElasticsearchDataReplicas
//v3.Common.ES.ElasticsearchMasterReplicas = v2.Logging.ElasticsearchMasterReplicas
v3.Common.ES.ElkPrefix = v2.Logging.ElkPrefix
v3.Common.ES.LogMaxAge = v2.Logging.LogMaxAge
if v2.Logging.ElasticsearchVolumeSize == "" {
v3.Common.ES.ElasticsearchDataVolumeSize = v2.Logging.ElasticsearchDataVolumeSize
v3.Common.ES.ElasticsearchMasterVolumeSize = v2.Logging.ElasticsearchMasterVolumeSize
} else {
v3.Common.ES.ElasticsearchMasterVolumeSize = "4Gi"
v3.Common.ES.ElasticsearchDataVolumeSize = v2.Logging.ElasticsearchVolumeSize
}
v3.Logging.Enabled = v2.Logging.Enabled
v3.Logging.LogsidecarReplicas = v2.Logging.LogsidecarReplicas
v3.Authentication.JwtSecret = ""
v3.Multicluster.ClusterRole = "none"
v3.Events.Ruler.Replicas = 2
var clusterConfiguration = ksv3.ClusterConfig{
ApiVersion: "installer.kubesphere.io/v1alpha1",
Kind: "ClusterConfiguration",
Metadata: ksv3.Metadata{
Name: "ks-installer",
Namespace: "kubesphere-system",
Label: ksv3.Label{Version: "v3.0.0"},
},
Spec: v3,
}
configV3, err := yamlV2.Marshal(clusterConfiguration)
if err != nil {
return "", err
}
return string(configV3), nil
}

View File

@@ -1,9 +1,10 @@
package cluster
import (
cc "bytetrade.io/web3os/installer/pkg/core/common"
"path"
cc "bytetrade.io/web3os/installer/pkg/core/common"
kubekeyapiv1alpha2 "bytetrade.io/web3os/installer/apis/kubekey/v1alpha2"
"bytetrade.io/web3os/installer/pkg/addons"
@@ -21,7 +22,6 @@ import (
"bytetrade.io/web3os/installer/pkg/kubesphere"
ksplugins "bytetrade.io/web3os/installer/pkg/kubesphere/plugins"
"bytetrade.io/web3os/installer/pkg/manifest"
"bytetrade.io/web3os/installer/pkg/plugins"
"bytetrade.io/web3os/installer/pkg/plugins/dns"
"bytetrade.io/web3os/installer/pkg/plugins/network"
"bytetrade.io/web3os/installer/pkg/plugins/storage"
@@ -39,7 +39,6 @@ func NewDarwinClusterPhase(runtime *common.KubeRuntime, manifestMap manifest.Ins
&kubesphere.DeployMiniKubeModule{},
&kubesphere.DeployModule{Skip: !runtime.Cluster.KubeSphere.Enabled},
&ksplugins.DeployKsPluginsModule{},
//&ksplugins.DeploySnapshotControllerModule{},
&ksplugins.DeployRedisModule{},
&ksplugins.CreateKubeSphereSecretModule{},
&ksplugins.DeployKsCoreConfigModule{}, // ks-core-config
@@ -99,7 +98,6 @@ func NewK3sCreateClusterPhase(runtime *common.KubeRuntime, manifestMap manifest.
&storage.DeployLocalVolumeModule{Skip: skipLocalStorage},
&kubesphere.DeployModule{Skip: !runtime.Cluster.KubeSphere.Enabled}, //
&ksplugins.DeployKsPluginsModule{},
//&ksplugins.DeploySnapshotControllerModule{},
&ksplugins.DeployRedisModule{},
&ksplugins.CreateKubeSphereSecretModule{},
&ksplugins.DeployKsCoreConfigModule{}, // ks-core-config
@@ -159,12 +157,10 @@ func NewCreateClusterPhase(runtime *common.KubeRuntime, manifestMap manifest.Ins
&certs.AutoRenewCertsModule{Skip: !runtime.Cluster.Kubernetes.EnableAutoRenewCerts()},
&kubernetes.SecurityEnhancementModule{Skip: !runtime.Arg.SecurityEnhancement},
&kubernetes.SaveKubeConfigModule{},
&plugins.DeployPluginsModule{},
&addons.AddonsModule{},
&storage.DeployLocalVolumeModule{Skip: skipLocalStorage},
&kubesphere.DeployModule{Skip: !runtime.Cluster.KubeSphere.Enabled},
&ksplugins.DeployKsPluginsModule{},
//&ksplugins.DeploySnapshotControllerModule{},
&ksplugins.DeployRedisModule{},
&ksplugins.CreateKubeSphereSecretModule{},
&ksplugins.DeployKsCoreConfigModule{}, // ! ks-core-config

View File

@@ -1,36 +0,0 @@
package mock
import (
"bytetrade.io/web3os/installer/pkg/bootstrap/hello"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/module"
"bytetrade.io/web3os/installer/pkg/core/pipeline"
)
func NewGreetingsPipeline(runtime *common.LocalRuntime) error {
m := []module.Module{
&hello.HelloModule{},
}
p := pipeline.Pipeline{
Name: "GreetingsPipeline",
Modules: m,
Runtime: runtime,
}
go p.Start()
return nil
}
func Greetings() error {
runtime, err := common.NewLocalRuntime(false, false)
if err != nil {
return err
}
if err := NewGreetingsPipeline(&runtime); err != nil {
return err
}
return nil
}

View File

@@ -1,27 +0,0 @@
package startup
import (
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/module"
"bytetrade.io/web3os/installer/pkg/core/pipeline"
"bytetrade.io/web3os/installer/pkg/kubesphere/plugins"
)
func GetMachineInfo() error {
runtime, err := common.NewKubeRuntime(common.AllInOne, common.Argument{})
if err != nil {
return err
}
m := []module.Module{
&plugins.CopyEmbed{},
}
p := pipeline.Pipeline{
Name: "Startup",
Modules: m,
Runtime: runtime,
}
return p.Start()
}

View File

@@ -1,206 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"path/filepath"
"text/template"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/action"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/images"
"github.com/lithammer/dedent"
"github.com/pkg/errors"
)
// Kata Containers is an open source community working to build a secure container runtime with lightweight virtual
// machines that feel and perform like containers, but provide stronger workload isolation using hardware virtualization
// technology as a second layer of defense.
var (
KataDeploy = template.Must(template.New("kata-deploy.yaml").Parse(
dedent.Dedent(`---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kata-label-node
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-labeler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kata-label-node-rb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: node-labeler
subjects:
- kind: ServiceAccount
name: kata-label-node
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kata-deploy
namespace: kube-system
spec:
selector:
matchLabels:
name: kata-deploy
template:
metadata:
labels:
name: kata-deploy
spec:
serviceAccountName: kata-label-node
containers:
- name: kube-kata
image: {{ .KataDeployImage }}
imagePullPolicy: Always
lifecycle:
preStop:
exec:
command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh cleanup"]
command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh install" ]
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: false
volumeMounts:
- name: crio-conf
mountPath: /etc/crio/
- name: containerd-conf
mountPath: /etc/containerd/
- name: kata-artifacts
mountPath: /opt/kata/
- name: dbus
mountPath: /var/run/dbus
- name: systemd
mountPath: /run/systemd
- name: local-bin
mountPath: /usr/local/bin/
volumes:
- name: crio-conf
hostPath:
path: /etc/crio/
- name: containerd-conf
hostPath:
path: /etc/containerd/
- name: kata-artifacts
hostPath:
path: /opt/kata/
type: DirectoryOrCreate
- name: dbus
hostPath:
path: /var/run/dbus
- name: systemd
hostPath:
path: /run/systemd
- name: local-bin
hostPath:
path: /usr/local/bin/
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1beta1
metadata:
name: kata-qemu
handler: kata-qemu
overhead:
podFixed:
memory: "160Mi"
cpu: "250m"
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1beta1
metadata:
name: kata-clh
handler: kata-clh
overhead:
podFixed:
memory: "130Mi"
cpu: "250m"
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1beta1
metadata:
name: kata-fc
handler: kata-fc
overhead:
podFixed:
memory: "130Mi"
cpu: "250m"
`)))
)
func DeployKataTasks(d *DeployPluginsModule) []task.Interface {
generateKataDeployManifests := &task.RemoteTask{
Name: "GenerateKataDeployManifests",
Desc: "Generate kata-deploy manifests",
Hosts: d.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: &action.Template{
Name: "GenerateKataDeployManifests",
Template: KataDeploy,
Data: util.Data{
"KataDeployImage": images.GetImage(d.Runtime, d.KubeConf, "kata-deploy").ImageName(),
},
Dst: filepath.Join(common.KubeAddonsDir, KataDeploy.Name()),
},
Parallel: false,
}
deployKata := &task.RemoteTask{
Name: "ApplyKataDeployManifests",
Desc: "Apply kata-deploy manifests",
Hosts: d.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(ApplyKataDeployManifests),
}
return []task.Interface{
generateKataDeployManifests,
deployKata,
}
}
type ApplyKataDeployManifests struct {
common.KubeAction
}
func (a *ApplyKataDeployManifests) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd("/usr/local/bin/kubectl apply -f /etc/kubernetes/addons/kata-deploy.yaml", true, false); err != nil {
return errors.Wrap(errors.WithStack(err), "apply kata-deploy manifests failed")
}
return nil
}

View File

@@ -1,35 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"bytetrade.io/web3os/installer/pkg/common"
)
type DeployPluginsModule struct {
common.KubeModule
}
func (d *DeployPluginsModule) Init() {
d.Name = "DeployPluginsModule"
d.Desc = "Deploy plugins for cluster"
if d.KubeConf.Cluster.Kubernetes.EnableKataDeploy() && (d.KubeConf.Cluster.Kubernetes.ContainerManager == common.Containerd || d.KubeConf.Cluster.Kubernetes.ContainerManager == common.Crio) {
d.Tasks = append(d.Tasks, DeployKataTasks(d)...)
}
if d.KubeConf.Cluster.Kubernetes.EnableNodeFeatureDiscovery() {
d.Tasks = append(d.Tasks, DeployNodeFeatureDiscoveryTasks(d)...)
}
}

View File

@@ -1,700 +0,0 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"path/filepath"
"text/template"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/action"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/task"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/images"
"github.com/lithammer/dedent"
"github.com/pkg/errors"
)
// NodeFeatureDiscovery detects hardware features available on each node in a Kubernetes cluster, and advertises those
// features using node labels.
var (
NodeFeatureDiscovery = template.Must(template.New("node-feature-discovery.yaml").Parse(
dedent.Dedent(`---
apiVersion: v1
kind: Namespace
metadata:
name: node-feature-discovery
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: nodefeaturerules.nfd.k8s-sigs.io
spec:
group: nfd.k8s-sigs.io
names:
kind: NodeFeatureRule
listKind: NodeFeatureRuleList
plural: nodefeaturerules
singular: nodefeaturerule
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: NodeFeatureRule resource specifies a configuration for feature-based customization of node objects, such as node labeling.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NodeFeatureRuleSpec describes a NodeFeatureRule.
properties:
rules:
description: Rules is a list of node customization rules.
items:
description: Rule defines a rule for node customization such as labeling.
properties:
labels:
additionalProperties:
type: string
description: Labels to create if the rule matches.
type: object
labelsTemplate:
description: LabelsTemplate specifies a template to expand for dynamically generating multiple labels. Data (after template expansion) must be keys with an optional value (<key>[=<value>]) separated by newlines.
type: string
matchAny:
description: MatchAny specifies a list of matchers one of which must match.
items:
description: MatchAnyElem specifies one sub-matcher of MatchAny.
properties:
matchFeatures:
description: MatchFeatures specifies a set of matcher terms all of which must match.
items:
description: FeatureMatcherTerm defines requirements against one feature set. All requirements (specified as MatchExpressions) are evaluated against each element in the feature set.
properties:
feature:
type: string
matchExpressions:
additionalProperties:
description: "MatchExpression specifies an expression to evaluate against a set of input values. It contains an operator that is applied when matching the input and an array of values that the operator evaluates the input against. \n NB: CreateMatchExpression or MustCreateMatchExpression() should be used for creating new instances. NB: Validate() must be called if Op or Value fields are modified or if a new instance is created from scratch without using the helper functions."
properties:
op:
description: Op is the operator to be applied.
enum:
- In
- NotIn
- InRegexp
- Exists
- DoesNotExist
- Gt
- Lt
- GtLt
- IsTrue
- IsFalse
type: string
value:
description: Value is the list of values that the operand evaluates the input against. Value should be empty if the operator is Exists, DoesNotExist, IsTrue or IsFalse. Value should contain exactly one element if the operator is Gt or Lt and exactly two elements if the operator is GtLt. In other cases Value should contain at least one element.
items:
type: string
type: array
required:
- op
type: object
description: MatchExpressionSet contains a set of MatchExpressions, each of which is evaluated against a set of input values.
type: object
required:
- feature
- matchExpressions
type: object
type: array
required:
- matchFeatures
type: object
type: array
matchFeatures:
description: MatchFeatures specifies a set of matcher terms all of which must match.
items:
description: FeatureMatcherTerm defines requirements against one feature set. All requirements (specified as MatchExpressions) are evaluated against each element in the feature set.
properties:
feature:
type: string
matchExpressions:
additionalProperties:
description: "MatchExpression specifies an expression to evaluate against a set of input values. It contains an operator that is applied when matching the input and an array of values that the operator evaluates the input against. \n NB: CreateMatchExpression or MustCreateMatchExpression() should be used for creating new instances. NB: Validate() must be called if Op or Value fields are modified or if a new instance is created from scratch without using the helper functions."
properties:
op:
description: Op is the operator to be applied.
enum:
- In
- NotIn
- InRegexp
- Exists
- DoesNotExist
- Gt
- Lt
- GtLt
- IsTrue
- IsFalse
type: string
value:
description: Value is the list of values that the operand evaluates the input against. Value should be empty if the operator is Exists, DoesNotExist, IsTrue or IsFalse. Value should contain exactly one element if the operator is Gt or Lt and exactly two elements if the operator is GtLt. In other cases Value should contain at least one element.
items:
type: string
type: array
required:
- op
type: object
description: MatchExpressionSet contains a set of MatchExpressions, each of which is evaluated against a set of input values.
type: object
required:
- feature
- matchExpressions
type: object
type: array
name:
description: Name of the rule.
type: string
vars:
additionalProperties:
type: string
description: Vars is the variables to store if the rule matches. Variables do not directly inflict any changes in the node object. However, they can be referenced from other rules enabling more complex rule hierarchies, without exposing intermediary output values as labels.
type: object
varsTemplate:
description: VarsTemplate specifies a template to expand for dynamically generating multiple variables. Data (after template expansion) must be keys with an optional value (<key>[=<value>]) separated by newlines.
type: string
required:
- name
type: object
type: array
required:
- rules
type: object
required:
- spec
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nfd-master
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- patch
- update
- list
- apiGroups:
- topology.node.k8s.io
resources:
- noderesourcetopologies
verbs:
- create
- get
- update
- apiGroups:
- nfd.k8s-sigs.io
resources:
- nodefeaturerules
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nfd-master
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nfd-master
subjects:
- kind: ServiceAccount
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: v1
data:
nfd-worker.conf: |
#core:
# labelWhiteList:
# noPublish: false
# sleepInterval: 60s
# featureSources: [all]
# labelSources: [all]
# klog:
# addDirHeader: false
# alsologtostderr: false
# logBacktraceAt:
# logtostderr: true
# skipHeaders: false
# stderrthreshold: 2
# v: 0
# vmodule:
## NOTE: the following options are not dynamically run-time configurable
## and require a nfd-worker restart to take effect after being changed
# logDir:
# logFile:
# logFileMaxSize: 1800
# skipLogHeaders: false
#sources:
# cpu:
# cpuid:
## NOTE: whitelist has priority over blacklist
# attributeBlacklist:
# - "BMI1"
# - "BMI2"
# - "CLMUL"
# - "CMOV"
# - "CX16"
# - "ERMS"
# - "F16C"
# - "HTT"
# - "LZCNT"
# - "MMX"
# - "MMXEXT"
# - "NX"
# - "POPCNT"
# - "RDRAND"
# - "RDSEED"
# - "RDTSCP"
# - "SGX"
# - "SSE"
# - "SSE2"
# - "SSE3"
# - "SSE4"
# - "SSE42"
# - "SSSE3"
# attributeWhitelist:
# kernel:
# kconfigFile: "/path/to/kconfig"
# configOpts:
# - "NO_HZ"
# - "X86"
# - "DMI"
# pci:
# deviceClassWhitelist:
# - "0200"
# - "03"
# - "12"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# - "subsystem_vendor"
# - "subsystem_device"
# usb:
# deviceClassWhitelist:
# - "0e"
# - "ef"
# - "fe"
# - "ff"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# custom:
# # The following feature demonstrates the capabilities of the matchFeatures
# - name: "my custom rule"
# labels:
# my-ng-feature: "true"
# # matchFeatures implements a logical AND over all matcher terms in the
# # list (i.e. all of the terms, or per-feature matchers, must match)
# matchFeatures:
# - feature: cpu.cpuid
# matchExpressions:
# AVX512F: {op: Exists}
# - feature: cpu.cstate
# matchExpressions:
# enabled: {op: IsTrue}
# - feature: cpu.pstate
# matchExpressions:
# no_turbo: {op: IsFalse}
# scaling_governor: {op: In, value: ["performance"]}
# - feature: cpu.rdt
# matchExpressions:
# RDTL3CA: {op: Exists}
# - feature: cpu.sst
# matchExpressions:
# bf.enabled: {op: IsTrue}
# - feature: cpu.topology
# matchExpressions:
# hardware_multithreading: {op: IsFalse}
#
# - feature: kernel.config
# matchExpressions:
# X86: {op: Exists}
# LSM: {op: InRegexp, value: ["apparmor"]}
# - feature: kernel.loadedmodule
# matchExpressions:
# e1000e: {op: Exists}
# - feature: kernel.selinux
# matchExpressions:
# enabled: {op: IsFalse}
# - feature: kernel.version
# matchExpressions:
# major: {op: In, value: ["5"]}
# minor: {op: Gt, value: ["10"]}
#
# - feature: storage.block
# matchExpressions:
# rotational: {op: In, value: ["0"]}
# dax: {op: In, value: ["0"]}
#
# - feature: network.device
# matchExpressions:
# operstate: {op: In, value: ["up"]}
# speed: {op: Gt, value: ["100"]}
#
# - feature: memory.numa
# matchExpressions:
# node_count: {op: Gt, value: ["2"]}
# - feature: memory.nv
# matchExpressions:
# devtype: {op: In, value: ["nd_dax"]}
# mode: {op: In, value: ["memory"]}
#
# - feature: system.osrelease
# matchExpressions:
# ID: {op: In, value: ["fedora", "centos"]}
# - feature: system.name
# matchExpressions:
# nodename: {op: InRegexp, value: ["^worker-X"]}
#
# - feature: local.label
# matchExpressions:
# custom-feature-knob: {op: Gt, value: ["100"]}
#
# # The following feature demonstrates the capabilities of the matchAny
# - name: "my matchAny rule"
# labels:
# my-ng-feature-2: "my-value"
# # matchAny implements a logical IF over all elements (sub-matchers) in
# # the list (i.e. at least one feature matcher must match)
# matchAny:
# - matchFeatures:
# - feature: kernel.loadedmodule
# matchExpressions:
# driver-module-X: {op: Exists}
# - feature: pci.device
# matchExpressions:
# vendor: {op: In, value: ["8086"]}
# class: {op: In, value: ["0200"]}
# - matchFeatures:
# - feature: kernel.loadedmodule
# matchExpressions:
# driver-module-Y: {op: Exists}
# - feature: usb.device
# matchExpressions:
# vendor: {op: In, value: ["8086"]}
# class: {op: In, value: ["02"]}
#
# # The following features demonstreate label templating capabilities
# - name: "my template rule"
# labelsTemplate: |
# matchFeatures:
# - feature: system.osrelease
# matchExpressions:
# ID: {op: InRegexp, value: ["^open.*"]}
# VERSION_ID.major: {op: In, value: ["13", "15"]}
#
# - name: "my template rule 2"
# matchFeatures:
# - feature: pci.device
# matchExpressions:
# class: {op: InRegexp, value: ["^06"]}
# vendor: ["8086"]
# - feature: cpu.cpuid
# matchExpressions:
# AVX: {op: Exists}
#
# # The following examples demonstrate vars field and back-referencing
# # previous labels and vars
# - name: "my dummy kernel rule"
# labels:
# "my.kernel.feature": "true"
# matchFeatures:
# - feature: kernel.version
# matchExpressions:
# major: {op: Gt, value: ["2"]}
#
# - name: "my dummy rule with no labels"
# vars:
# "my.dummy.var": "1"
# matchFeatures:
# - feature: cpu.cpuid
# matchExpressions: {}
#
# - name: "my rule using backrefs"
# labels:
# "my.backref.feature": "true"
# matchFeatures:
# - feature: rule.matched
# matchExpressions:
# my.kernel.feature: {op: IsTrue}
# my.dummy.var: {op: Gt, value: ["0"]}
#
kind: ConfigMap
metadata:
name: nfd-worker-conf
namespace: node-feature-discovery
---
apiVersion: v1
kind: Service
metadata:
name: nfd-master
namespace: node-feature-discovery
spec:
ports:
- port: 8080
protocol: TCP
selector:
app: nfd-master
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nfd
name: nfd-master
namespace: node-feature-discovery
spec:
replicas: 1
selector:
matchLabels:
app: nfd-master
template:
metadata:
labels:
app: nfd-master
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: In
values:
- ""
weight: 1
- preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- ""
weight: 1
containers:
- args: []
command:
- nfd-master
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: {{ .NFDImage }}
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- /usr/bin/grpc_health_probe
- -addr=:8080
initialDelaySeconds: 10
periodSeconds: 10
name: nfd-master
readinessProbe:
exec:
command:
- /usr/bin/grpc_health_probe
- -addr=:8080
failureThreshold: 10
initialDelaySeconds: 5
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts: []
serviceAccount: nfd-master
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Equal
value: ""
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
value: ""
volumes: []
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: nfd
name: nfd-worker
namespace: node-feature-discovery
spec:
selector:
matchLabels:
app: nfd-worker
template:
metadata:
labels:
app: nfd-worker
spec:
containers:
- args:
- -server=nfd-master:8080
command:
- nfd-worker
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: {{ .NFDImage }}
imagePullPolicy: IfNotPresent
name: nfd-worker
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /host-boot
name: host-boot
readOnly: true
- mountPath: /host-etc/os-release
name: host-os-release
readOnly: true
- mountPath: /host-sys
name: host-sys
readOnly: true
- mountPath: /host-usr/lib
name: host-usr-lib
readOnly: true
- mountPath: /etc/kubernetes/node-feature-discovery/source.d/
name: source-d
readOnly: true
- mountPath: /etc/kubernetes/node-feature-discovery/features.d/
name: features-d
readOnly: true
- mountPath: /etc/kubernetes/node-feature-discovery
name: nfd-worker-conf
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
volumes:
- hostPath:
path: /boot
name: host-boot
- hostPath:
path: /etc/os-release
name: host-os-release
- hostPath:
path: /sys
name: host-sys
- hostPath:
path: /usr/lib
name: host-usr-lib
- hostPath:
path: /etc/kubernetes/node-feature-discovery/source.d/
name: source-d
- hostPath:
path: /etc/kubernetes/node-feature-discovery/features.d/
name: features-d
- configMap:
name: nfd-worker-conf
name: nfd-worker-conf
`)))
)
// DeployNodeFeatureDiscoveryTasks builds the ordered task list that installs
// the node-feature-discovery addon: first render the manifests from the
// embedded template, then apply them with kubectl. Both tasks target the
// master role but run on the first master only.
func DeployNodeFeatureDiscoveryTasks(d *DeployPluginsModule) []task.Interface {
	// Render the NFD manifest template into the addons directory,
	// substituting the resolved image reference.
	render := &task.RemoteTask{
		Name:    "GenerateNFDManifests",
		Desc:    "Generate node-feature-discovery manifests",
		Hosts:   d.Runtime.GetHostsByRole(common.Master),
		Prepare: new(common.OnlyFirstMaster),
		Action: &action.Template{
			Name:     "GenerateNFDManifests",
			Template: NodeFeatureDiscovery,
			Data: util.Data{
				"NFDImage": images.GetImage(d.Runtime, d.KubeConf, "node-feature-discovery").ImageName(),
			},
			Dst: filepath.Join(common.KubeAddonsDir, NodeFeatureDiscovery.Name()),
		},
		Parallel: false,
	}

	// kubectl-apply the rendered manifests.
	apply := &task.RemoteTask{
		Name:    "ApplyNFDManifests",
		Desc:    "Apply node-feature-discovery manifests",
		Hosts:   d.Runtime.GetHostsByRole(common.Master),
		Prepare: new(common.OnlyFirstMaster),
		Action:  new(ApplyNodeFeatureDiscoveryManifests),
	}

	return []task.Interface{render, apply}
}
// ApplyNodeFeatureDiscoveryManifests applies the previously generated NFD
// addon manifest on the remote host via kubectl.
type ApplyNodeFeatureDiscoveryManifests struct {
	common.KubeAction
}

// Execute runs `kubectl apply` (with sudo) against the generated addon file.
func (a *ApplyNodeFeatureDiscoveryManifests) Execute(runtime connector.Runtime) error {
	const applyCmd = "/usr/local/bin/kubectl apply -f /etc/kubernetes/addons/node-feature-discovery.yaml"
	_, err := runtime.GetRunner().SudoCmd(applyCmd, true, false)
	if err != nil {
		return errors.Wrap(errors.WithStack(err), "apply node-feature-discovery manifests failed")
	}
	return nil
}

View File

@@ -1,10 +0,0 @@
// Package scripts embeds the shell scripts shipped under files/ so they can
// be extracted and executed at install time.
package scripts
import "embed"
//go:embed files/*
var scripts embed.FS
// Assets returns the embedded script filesystem.
func Assets() embed.FS {
return scripts
}

View File

@@ -1,234 +0,0 @@
#!/usr/bin/env bash
# Shared helpers for the installer scripts: logging, privilege escalation,
# retry wrappers and distribution detection.
ERR_EXIT=1
CURL_TRY="--connect-timeout 30 --retry 5 --retry-delay 1 --retry-max-time 10 "
BASE_DIR=$(dirname $(realpath -s $0))
# Load optional local overrides unless this is a debug build.
[[ -f "${BASE_DIR}/.env" && -z "$DEBUG_VERSION" ]] && . "${BASE_DIR}/.env"
if [ ! -d "/tmp/install_log" ]; then
    # Run mkdir directly; the previous `$(mkdir -p ...)` form executed the
    # (empty) output of mkdir via command substitution and only worked by
    # accident.
    mkdir -p /tmp/install_log
fi
# stderr of commands run through ensure_success is captured here via fd 13.
fd_errlog=/tmp/install_log/errlog_fd_13
# get_distribution: detect the distro ID from /etc/os-release.
# Side effect: sets the GLOBAL variable `lsb_dist` (read later by callers
# such as install_deps); also echoes the value (empty when undetectable).
get_distribution() {
lsb_dist=""
# Every system that we officially support has /etc/os-release
if [ -r /etc/os-release ]; then
lsb_dist="$(. /etc/os-release && echo "$ID")"
fi
echo "$lsb_dist"
}
# get_shell_exec: choose how to run privileged commands.
# Sets the GLOBAL `sh_c` to 'sh -c' when already root, or 'sudo su -c' when
# both sudo and su are available; aborts otherwise. Also sets global `user`.
get_shell_exec(){
user="$(id -un 2>/dev/null || true)"
sh_c='sh -c'
if [ "$user" != 'root' ]; then
if command_exists sudo && command_exists su; then
sh_c='sudo su -c'
else
# No way to escalate privileges: fail fast with an explanation.
cat >&2 <<-'EOF'
Error: this installer needs the ability to run commands as root.
We are unable to find either "sudo" or "su" available to make this happen.
EOF
exit $ERR_EXIT
fi
fi
}
# dpkg_locked: succeed (exit 0) when the captured stderr log ($fd_errlog)
# contains an apt/dpkg lock-contention message, i.e. another package
# operation currently holds the /var/lib lock files.
function dpkg_locked() {
    grep -q 'Could not get lock /var/lib' "$fd_errlog"
}
# retry_cmd CMD ARGS...: run CMD, retrying up to 50 times on failure with a
# delay that grows by 2s per attempt (starting at 5s). Calls log_fatal (and
# exits) if every attempt fails; returns the final exit status otherwise.
function retry_cmd(){
"$@"
local ret=$?
if [ $ret -ne 0 ];then
local max_retries=50
local delay=3
while [ $max_retries -gt 0 ]; do
printf "retry to execute command '%s', after %d seconds\n" "$*" $delay
# back off a little more on every retry
((delay+=2))
sleep $delay
"$@"
ret=$?
if [ $ret -eq 0 ]; then
break
fi
((max_retries--))
done
if [ $ret -ne 0 ]; then
log_fatal "command: '$*'"
fi
fi
return $ret
}
# ensure_success CMD ARGS...: run CMD with its stderr mirrored into
# $fd_errlog (via fd 13). On failure, retry (up to 50 times, growing delay)
# ONLY when the failure was an apt/dpkg lock; any other failure is fatal.
# A retry round is considered successful only when the command exits 0 AND
# the lock message is gone from the fresh stderr capture ("yy" below).
function ensure_success() {
# (re)open fd 13 onto the error log so dpkg_locked can inspect stderr
exec 13> "$fd_errlog"
"$@" 2>&13
local ret=$?
if [ $ret -ne 0 ]; then
local max_retries=50
local delay=3
if dpkg_locked; then
while [ $max_retries -gt 0 ]; do
printf "retry to execute command '%s', after %d seconds\n" "$*" $delay
((delay+=2))
sleep $delay
# truncate and recapture stderr for this attempt
exec 13> "$fd_errlog"
"$@" 2>&13
ret=$?
local r=""
if [ $ret -eq 0 ]; then
r=y
fi
if ! dpkg_locked; then
r+=y
fi
# both conditions met: command succeeded and the lock is released
if [[ x"$r" == x"yy" ]]; then
printf "execute command '%s' successed.\n\n" "$*"
break
fi
((max_retries--))
done
# NOTE(review): if all 50 lock-retries fail, this falls through and
# returns non-zero instead of calling log_fatal — confirm intended.
else
log_fatal "command: '$*'"
fi
fi
return $ret
}
# command_exists NAME: true (exit 0) when NAME resolves to a runnable
# command; passes through the exit status of `command -v`.
command_exists() {
command -v "$@" > /dev/null 2>&1
}
# log_info MSG...: print a timestamped, bold INFO line to stdout.
# Uses `echo -e`, so ANSI escapes (and any escapes inside MSG) are expanded.
log_info() {
local msg now
msg="$*"
now=$(date +'%Y-%m-%d %H:%M:%S.%N %z')
echo -e "\n\033[38;1m${now} [INFO] ${msg} \033[0m"
}
# log_fatal MSG...: print a timestamped, red FATAL line and terminate the
# whole script with $ERR_EXIT.
log_fatal() {
local msg now
msg="$*"
now=$(date +'%Y-%m-%d %H:%M:%S.%N %z')
echo -e "\n\033[31;1m${now} [FATAL] ${msg} \033[0m"
exit $ERR_EXIT
}
# system_service_active UNIT: true (exit 0) when systemd reports the unit as
# "active". Requires exactly one non-empty argument; runs systemctl through
# the privileged $sh_c wrapper.
system_service_active() {
if [[ $# -ne 1 || x"$1" == x"" ]]; then
return 1
fi
local ret
ret=$($sh_c "systemctl is-active $1")
if [ "$ret" == "active" ]; then
return 0
fi
return 1
}
# is_debian: echo 1 when running on a supported Debian release (11 or 12),
# otherwise echo 0. Detection is a substring match against the
# `lsb_release -d` description; sets the GLOBAL `lsb_release` as a side
# effect.
is_debian() {
lsb_release=$(lsb_release -d 2>&1 | awk -F'\t' '{print $2}')
if [ -z "$lsb_release" ]; then
echo 0
return
fi
if [[ ${lsb_release} == *Debian* ]]; then
case "$lsb_release" in
*12* | *11*)
echo 1
;;
*)
echo 0
;;
esac
else
echo 0
fi
}
# is_ubuntu: echo 2 for Ubuntu 24.x, 1 for Ubuntu 22.x/20.x, 0 otherwise
# (callers use the 2 to trigger the apparmor upgrade path). Sets the GLOBAL
# `lsb_release` as a side effect.
is_ubuntu() {
lsb_release=$(lsb_release -d 2>&1 | awk -F'\t' '{print $2}')
if [ -z "$lsb_release" ]; then
echo 0
return
fi
if [[ ${lsb_release} == *Ubuntu* ]];then
case "$lsb_release" in
*24.*)
echo 2
;;
*22.* | *20.*)
echo 1
;;
*)
echo 0
;;
esac
else
echo 0
fi
}
# is_raspbian: echo 1 when running on supported Raspbian (11 or 12), else 0.
# Same substring-based detection as is_debian/is_ubuntu; sets the GLOBAL
# `lsb_release` as a side effect.
is_raspbian(){
lsb_release=$(lsb_release -d 2>&1 | awk -F'\t' '{print $2}')
if [ -z "$lsb_release" ]; then
echo 0
return
fi
if [[ ${lsb_release} == *Raspbian* ]];then
case "$lsb_release" in
*11* | *12*)
echo 1
;;
*)
echo 0
;;
esac
else
echo 0
fi
}
# is_wsl: echo 1 when running under Windows Subsystem for Linux, else 0.
# Detection: the kernel banner from `uname -a` contains "WSL".
# Side effect: stores the banner in the global `wsl` (as the original did).
is_wsl(){
    wsl=$(uname -a 2>&1)
    case "$wsl" in
        *WSL*) echo 1 ;;
        *)     echo 0 ;;
    esac
}
get_distribution
get_shell_exec

View File

@@ -1,23 +0,0 @@
#!/usr/bin/env bash
# One-shot system configuration step (printk level + daily ntpdate cron).
source ./common.sh
# config_system: currently a NO-OP — the bare `return` below deliberately
# short-circuits the function, leaving the printk/ntp logic as dead code
# (presumably disabled on purpose; re-enable by removing the return).
config_system() {
echo "config system"
return
local ntpdate hwclock
# kernel printk log level
ensure_success $sh_c 'sysctl -w kernel.printk="3 3 1 7"'
# ntp sync
ntpdate=$(command -v ntpdate)
hwclock=$(command -v hwclock)
# generate a daily cron script that syncs time then writes it to the RTC
printf '#!/bin/sh\n\n%s -b -u pool.ntp.org && %s -w\n\nexit 0\n' "$ntpdate" "$hwclock" > cron.ntpdate
ensure_success $sh_c '/bin/sh cron.ntpdate'
ensure_success $sh_c 'cat cron.ntpdate > /etc/cron.daily/ntpdate && chmod 0700 /etc/cron.daily/ntpdate'
ensure_success rm -f cron.ntpdate
}
config_system

View File

@@ -1,8 +0,0 @@
#!/usr/bin/env bash
# Smoke test for common.sh: verifies that sourcing it provides $sh_c,
# $lsb_dist and ensure_success, by echoing a few values through them.
BASE_DIR=$(dirname $(realpath -s $0))
source $BASE_DIR/common.sh
ensure_success $sh_c "echo 'test script greetings'"
ensure_success $sh_c "echo lsb_dist=$lsb_dist"
ensure_success $sh_c "echo BASE_DIR=$BASE_DIR"

View File

@@ -1,64 +0,0 @@
#!/usr/bin/env bash
BASE_DIR=$(dirname $(realpath -s $0))
source $BASE_DIR/common.sh
lsb_dist=$1
SOCAT_VERSION=$2
FLEX_VERSION=$3
CONNTRACK_VERSION=$4
# build_socat: build socat from the bundled source tarball and install it
# under /usr. Defaults to version 1.7.3.4 when $SOCAT_VERSION is unset.
build_socat(){
    if [ -z "$SOCAT_VERSION" ]; then
        SOCAT_VERSION="1.7.3.4"
    fi
    # Extract the tarball shipped under components/ — previously the path
    # was computed into $socat_tar but never used, so tar looked in the CWD.
    local socat_tar="${BASE_DIR}/components/socat-${SOCAT_VERSION}.tar.gz"
    ensure_success $sh_c "tar xzvf ${socat_tar}"
    # Each $sh_c spawns its own shell, so a standalone `cd` has no effect on
    # the next command; chain cd with the build in ONE shell invocation.
    ensure_success $sh_c "cd socat-${SOCAT_VERSION} && ./configure --prefix=/usr && make -j4 && make install && strip socat"
}
# build_contrack: build conntrack-tools from the bundled source tarball and
# install it under /usr. Defaults to version 1.4.1 when $CONNTRACK_VERSION
# is unset. (Name kept as-is — callers invoke `build_contrack`.)
build_contrack(){
    if [ -z "$CONNTRACK_VERSION" ]; then
        CONNTRACK_VERSION="1.4.1"
    fi
    # Extract the tarball shipped under components/ — previously the path
    # was computed into $contrack_tar but never used, so tar looked in CWD.
    local contrack_tar="${BASE_DIR}/components/conntrack-tools-${CONNTRACK_VERSION}.tar.gz"
    ensure_success $sh_c "tar zxvf ${contrack_tar}"
    # A standalone `cd` via $sh_c runs in its own shell and is lost; chain
    # cd with the build in ONE shell invocation.
    ensure_success $sh_c "cd conntrack-tools-${CONNTRACK_VERSION} && ./configure --prefix=/usr && make -j4 && make install"
}
# install_deps: install the base packages the cluster needs (conntrack,
# socat, htpasswd tools, ntpdate, net-tools, compilers, sshd), dispatching
# on the GLOBAL $lsb_dist; unknown distros fall back to building socat and
# conntrack-tools from the bundled sources.
install_deps() {
case "$lsb_dist" in
ubuntu|debian|raspbian)
pre_reqs="apt-transport-https ca-certificates curl"
if ! command -v gpg > /dev/null; then
pre_reqs="$pre_reqs gnupg"
fi
ensure_success $sh_c 'apt-get update -qq >/dev/null'
ensure_success $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null"
ensure_success $sh_c 'DEBIAN_FRONTEND=noninteractive apt-get install -y conntrack socat apache2-utils ntpdate net-tools make gcc openssh-server >/dev/null'
;;
centos|fedora|rhel)
if [ "$lsb_dist" = "fedora" ]; then
pkg_manager="dnf"
else
pkg_manager="yum"
fi
ensure_success $sh_c "$pkg_manager install -y conntrack socat httpd-tools ntpdate net-tools make gcc openssh-server >/dev/null"
;;
*)
# build from source code
build_socat
build_contrack
#TODO: install bcrypt tools
;;
esac
}
echo ">>> install_deps os: ${lsb_dist}, socat: ${SOCAT_VERSION}, flex: ${FLEX_VERSION}, conntrack: ${CONNTRACK_VERSION}"
install_deps
exit

View File

@@ -1,118 +0,0 @@
#!/usr/bin/env bash
BASE_DIR=$(dirname $(realpath -s $0))
source $BASE_DIR/common.sh
# precheck_os: validate the host before installation and apply required
# tweaks. Verifies Linux + supported arch + supported distro, checks
# cgroups on Raspberry Pi, validates that $HOSTNAME resolves to a usable
# non-loopback IPv4, disables systemd-resolved on Debian-family hosts,
# checks outbound network/DNS, and upgrades apparmor on Ubuntu 24.
# Side effects: sets globals ARCH, OS_ARCH, local_ip, SHOULD_RETRY;
# mutates /etc/resolv.conf, /etc/hosts and system services.
precheck_os() {
local ip os_type os_arch
# check os type and arch and os vesion
os_type=$(uname -s)
os_arch=$(uname -m)
os_verion=$(lsb_release -d 2>&1 | awk -F'\t' '{print $2}')
# map kernel arch names onto the Go-style arch names used by downloads
case "$os_arch" in
x86_64) ARCH=amd64; ;;
armv7l) ARCH=arm; ;;
aarch64) ARCH=arm64; ;;
ppc64le) ARCH=ppc64le; ;;
s390x) ARCH=s390x; ;;
*) echo "unsupported arch, exit ...";
exit -1; ;;
esac
if [ x"${os_type}" != x"Linux" ]; then
log_fatal "unsupported os type '${os_type}', only supported 'Linux' operating system"
fi
# note: stricter than the case above — only amd64/arm64 may proceed
if [[ x"${os_arch}" != x"x86_64" && x"${os_arch}" != x"amd64" && x"${os_arch}" != x"aarch64" ]]; then
log_fatal "unsupported os arch '${os_arch}', only supported 'x86_64' or 'aarch64' architecture"
fi
if [[ $(is_ubuntu) -eq 0 && $(is_debian) -eq 0 && $(is_raspbian) -eq 0 ]]; then
log_fatal "unsupported os version '${os_verion}'"
fi
# Raspberry Pi: cmdline.txt present means Raspbian-style boot config
if [[ -f /boot/cmdline.txt || -f /boot/firmware/cmdline.txt ]]; then
# raspbian
SHOULD_RETRY=1
if ! command_exists iptables; then
ensure_success $sh_c "apt update && apt install -y iptables"
fi
systemctl disable --user gvfs-udisks2-volume-monitor
systemctl stop --user gvfs-udisks2-volume-monitor
# column 4 of /proc/cgroups is the "enabled" flag for each controller
local cpu_cgroups_enbaled=$(cat /proc/cgroups |awk '{if($1=="cpu")print $4}')
local mem_cgroups_enbaled=$(cat /proc/cgroups |awk '{if($1=="memory")print $4}')
if [[ $cpu_cgroups_enbaled -eq 0 || $mem_cgroups_enbaled -eq 0 ]]; then
log_fatal "cpu or memory cgroups disabled, please edit /boot/cmdline.txt or /boot/firmware/cmdline.txt and reboot to enable it."
fi
fi
# try to resolv hostname
ensure_success $sh_c "hostname -i >/dev/null"
# extract the IP that $HOSTNAME actually resolves to via ping output
ip=$(ping -c 1 "$HOSTNAME" |awk -F '[()]' '/icmp_seq/{print $2}')
printf "%s\t%s\n\n" "$ip" "$HOSTNAME"
# reject loopback/docker-bridge addresses and anything non-IPv4
if [[ x"$ip" == x"" || "$ip" == @("172.17.0.1"|"127.0.0.1"|"127.0.1.1") || ! "$ip" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
log_fatal "incorrect ip for hostname '$HOSTNAME', please check"
fi
local_ip="$ip"
OS_ARCH="$os_arch"
# disable local dns
case "$lsb_dist" in
ubuntu|debian|raspbian)
if system_service_active "systemd-resolved"; then
ensure_success $sh_c "systemctl stop systemd-resolved.service >/dev/null"
ensure_success $sh_c "systemctl disable systemd-resolved.service >/dev/null"
if [ -e /usr/bin/systemd-resolve ]; then
ensure_success $sh_c "mv /usr/bin/systemd-resolve /usr/bin/systemd-resolve.bak >/dev/null"
fi
if [ -L /etc/resolv.conf ]; then
ensure_success $sh_c 'unlink /etc/resolv.conf && touch /etc/resolv.conf'
fi
# config_resolv_conf is defined elsewhere in the installer
config_resolv_conf
else
ensure_success $sh_c "cat /etc/resolv.conf > /etc/resolv.conf.bak"
fi
;;
centos|fedora|rhel)
;;
*)
;;
esac
# if disabling resolved broke hostname resolution, pin it in /etc/hosts
if ! hostname -i &>/dev/null; then
ensure_success $sh_c "echo $local_ip $HOSTNAME >> /etc/hosts"
fi
ensure_success $sh_c "hostname -i >/dev/null"
# network and dns
http_code=$(curl ${CURL_TRY} -sL -o /dev/null -w "%{http_code}" https://download.docker.com/linux/ubuntu)
if [ "$http_code" != 200 ]; then
config_resolv_conf
if [ -f /etc/resolv.conf.bak ]; then
ensure_success $sh_c "rm -rf /etc/resolv.conf.bak"
fi
fi
# ubuntu 24 upgrade apparmor
ubuntuversion=$(is_ubuntu)
if [ ${ubuntuversion} -eq 2 ]; then
aapv=$(apparmor_parser --version)
if [[ ! ${aapv} =~ "4.0.1" ]]; then
ensure_success $sh_c "dpkg -i ${BASE_DIR}/components/apparmor_4.0.1-0ubuntu1_${ARCH}.deb"
fi
fi
}
log_info 'Precheck and Installing dependencies ...'
precheck_os
exit

View File

@@ -1,343 +0,0 @@
#!/usr/bin/env bash
TERMINUS_CLI_VERSION=0.1.21
ERR_EXIT=1
CURL_TRY="--connect-timeout 30 --retry 5 --retry-delay 1 --retry-max-time 10 "
BASE_DIR="$(realpath $(dirname "$0"))/.."
RM=$(command -v rm)
KUBECTL=$(command -v kubectl)
KKE_FILE="/etc/kke/version"
STS_ACCESS_KEY=""
STS_SECRET_KEY=""
STS_TOKEN=""
STS_CLUSTER_ID=""
printf "root: ${BASE_DIR}\nversion: ${TERMINUS_CLI_VERSION}"
command_exists() {
command -v "$@" > /dev/null 2>&1
}
precheck_os() {
local ip os_type os_arch
# check os type and arch and os vesion
os_type=$(uname -s)
os_arch=$(uname -m)
os_verion=$(lsb_release -d 2>&1 | awk -F'\t' '{print $2}')
case "$os_arch" in
x86_64) ARCH=amd64; ;;
armv7l) ARCH=arm; ;;
aarch64) ARCH=arm64; ;;
ppc64le) ARCH=ppc64le; ;;
s390x) ARCH=s390x; ;;
*) echo "unsupported arch, exit ...";
exit -1; ;;
esac
OS_ARCH="$os_arch"
}
get_shell_exec(){
user="$(id -un 2>/dev/null || true)"
sh_c='sh -c'
if [ "$user" != 'root' ]; then
if command_exists sudo && command_exists su; then
sh_c='sudo su -c'
else
cat >&2 <<-'EOF'
Error: this installer needs the ability to run commands as root.
We are unable to find either "sudo" or "su" available to make this happen.
EOF
exit $ERR_EXIT
fi
fi
}
ensure_success() {
"$@"
local ret=$?
if [ $ret -ne 0 ]; then
echo "Fatal error, command: '$*'"
exit $ret
fi
return $ret
}
log_info() {
local msg now
msg="$*"
now=$(date +'%Y-%m-%d %H:%M:%S.%N %z')
echo -e "\n\033[38;1m${now} [INFO] ${msg} \033[0m"
}
# get_kubelet_version: echo the kubelet version of the first cluster node,
# or exit fatally when kubectl is not installed.
# NOTE(review): if $KUBECTL is empty, `[ ! -f $KUBECTL ]` collapses to
# `[ ! -f ]` which is true, so the guard still fires — but by accident.
get_kubelet_version() {
[ ! -f $KUBECTL ] && {
echo "kubectl does not exists"
exit $ERR_EXIT
}
$sh_c "${KUBECTL} get nodes -o jsonpath='{.items[0].status.nodeInfo.kubeletVersion}'"
}
# find_version: populate the globals KKE_VERSION and KUBE_VERSION.
# Preference order: (1) both values from $KKE_FILE, (2) a pinned KKE
# version plus the live kubelet version from the cluster, (3) warn and
# leave KUBE_VERSION unset (callers apply their own defaults).
find_version(){
if [ -f "$KKE_FILE" ]; then
KKE_VERSION=$(awk -F '=' '/KKE/{printf "%s",$2}' $KKE_FILE)
KUBE_VERSION=$(awk -F '=' '/KUBE/{printf "%s",$2}' $KKE_FILE)
# only return early when BOTH values were found in the file
[ x"$KKE_VERSION" != x"" ] && [ x"$KUBE_VERSION" != x"" ] && return
fi
KKE_VERSION=0.1.21 # don't need to change it, as long as it's greater than 0.1.6
local kube="$(get_kubelet_version)"
if [ x"$kube" != x"" ]; then
KUBE_VERSION="$kube"
return
fi
echo "Warning: file $KKE_FILE does not exists, and kube version not be found"
}
# find_storage_key: read the S3/STS credentials and cluster id that the
# installer stamped onto the `terminus` custom resource. Best-effort: a
# missing CR or kubectl failure just leaves the variable empty (|| true).
# FIX: the original wrapped each command in `$( ... &>/dev/null;true)`,
# which redirected stdout to /dev/null INSIDE the command substitution and
# therefore always captured an empty string; only stderr must be silenced.
find_storage_key(){
    STS_ACCESS_KEY=$($sh_c "${KUBECTL} get terminus terminus -o jsonpath='{.metadata.annotations.bytetrade\.io/s3-ak}'" 2>/dev/null || true)
    STS_SECRET_KEY=$($sh_c "${KUBECTL} get terminus terminus -o jsonpath='{.metadata.annotations.bytetrade\.io/s3-sk}'" 2>/dev/null || true)
    STS_TOKEN=$($sh_c "${KUBECTL} get terminus terminus -o jsonpath='{.metadata.annotations.bytetrade\.io/s3-sts}'" 2>/dev/null || true)
    STS_CLUSTER_ID=$($sh_c "${KUBECTL} get terminus terminus -o jsonpath='{.metadata.labels.bytetrade\.io/cluster-id}'" 2>/dev/null || true)
}
# remove_cluster: tear down the Kubernetes cluster with the kubekey-ext
# (`kk`) tool. Obtains kk either from the bundled tarball / GitHub release
# (behind an optional $PROXY nameserver) or via the upstream download
# script, asks for confirmation unless FORCE_UNINSTALL_CLUSTER is set, then
# deletes the cluster and flushes ipvs/iptables state.
remove_cluster(){
# fall back to pinned versions when find_version produced nothing
if [ x"$KUBE_VERSION" == x"" ]; then
KUBE_VERSION="v1.22.10"
fi
if [ x"$KKE_VERSION" == x"" ]; then
KKE_VERSION="0.1.21"
fi
forceUninstall="${FORCE_UNINSTALL_CLUSTER}"
log_info 'remove kubernetes cluster'
local kk_tar="${BASE_DIR}/install_wizard/components/kubekey-ext-v${KKE_VERSION}-linux-${ARCH}.tar.gz"
if [ x"$PROXY" != x"" ]; then
# temporarily point DNS at the proxy for the download, restore after
ensure_success $sh_c "cat /etc/resolv.conf > /etc/resolv.conf.bak"
ensure_success $sh_c "echo nameserver $PROXY > /etc/resolv.conf"
# if download failed
if [ -f "${kk_tar}" ]; then
ensure_success $sh_c "cp ${kk_tar} ${INSTALL_DIR}"
else
ensure_success $sh_c "curl ${CURL_TRY} -kLO https://github.com/beclab/kubekey-ext/releases/download/${KKE_VERSION}/kubekey-ext-v${KKE_VERSION}-linux-${ARCH}.tar.gz"
fi
ensure_success $sh_c "tar xf kubekey-ext-v${KKE_VERSION}-linux-${ARCH}.tar.gz"
ensure_success $sh_c "cat /etc/resolv.conf.bak > /etc/resolv.conf"
else
ensure_success $sh_c "curl -sfL https://raw.githubusercontent.com/beclab/kubekey-ext/master/downloadKKE.sh | VERSION=${KKE_VERSION} bash -"
fi
ensure_success $sh_c "chmod +x kk"
if [ -z "$forceUninstall" ]; then
echo
# interactive confirmation read directly from the terminal
read -r -p "Are you sure to delete this cluster? [yes/no]: " ans </dev/tty
if [ x"$ans" != x"yes" ]; then
echo "exiting..."
exit
fi
fi
$sh_c "./kk delete cluster -A --with-kubernetes $KUBE_VERSION"
[ -f $KKE_FILE ] && $sh_c "${RM} -f $KKE_FILE"
# clear leftover ipvs rules and firewall state; best-effort
if command_exists ipvsadm; then
$sh_c "ipvsadm -C"
fi
$sh_c "iptables -F"
$sh_c "killall /usr/local/bin/containerd || true"
}
docker_files=(/usr/bin/docker*
/var/lib/docker
/var/run/docker*
/var/lib/dockershim
/usr/local/bin/containerd
/etc/docker
/etc/cni/net.d)
# clean_docker: stop and disable docker/containerd, kill any leftover
# daemon processes, and remove the paths listed in the global docker_files
# array. Every step is best-effort ("; true") — failures are ignored.
clean_docker() {
log_info 'destroy docker'
$sh_c "rm -f /var/run/docker.sock; true"
for srv in docker containerd; do
$sh_c "systemctl stop $srv; systemctl disable $srv; true"
done
$sh_c "killall -9 containerd dockerd 2>/dev/null; true"
# catch stragglers that killall missed (e.g. containerd-shim processes)
local pids=$(ps -fea|grep containerd|grep -v grep|awk '{print $2}')
if [ -n "$pids" ]; then
$sh_c "kill -9 $pids 2>/dev/null; true"
fi
log_info 'clean docker files'
for i in "${docker_files[@]}"; do
$sh_c "rm -rf $i >/dev/null; true"
done
}
terminus_files=(
/usr/local/bin/redis-*
/usr/bin/redis-*
/sbin/mount.juicefs
/etc/init.d/redis-server
/usr/local/bin/juicefs
/usr/local/bin/minio
/usr/local/bin/velero
/etc/systemd/system/redis-server.service
/etc/systemd/system/minio.service
/etc/systemd/system/minio-operator.service
/etc/systemd/system/juicefs.service
/etc/systemd/system/containerd.service
)
# remove_storage: stop and disable the storage services (juicefs, minio,
# minio-operator, redis) and delete their binaries, unit files and data
# (paths from the global terminus_files array plus /terminus). All steps
# are best-effort. The commented-out prompt shows a former opt-out path
# for retaining data; removal is currently unconditional.
remove_storage() {
log_info 'destroy storage'
# stop and disable service
for srv in juicefs minio minio-operator redis-server; do
$sh_c "systemctl stop $srv 2>/dev/null; systemctl disable $srv 2>/dev/null; true"
done
$sh_c "killall -9 redis-server 2>/dev/null; true"
$sh_c "rm -rf /var/jfsCache /terminus/jfscache 2>/dev/null; true"
# read -r -p "Retain the stored terminus data? [default: yes]: " ans </dev/tty
# if [[ "$ans" == @("no"|"n"|"N"|"No") ]]; then
$sh_c "unlink /usr/bin/redis-server 2>/dev/null; unlink /usr/bin/redis-cli 2>/dev/null; true"
log_info 'clean terminus files'
for i in "${terminus_files[@]}"; do
$sh_c "rm -f $i 2>/dev/null; true"
done
$sh_c "rm -rf /terminus 2>/dev/null; true"
# fi
}
# remove_mount: on cloud installs (TERMINUS_IS_CLOUD_VERSION=true), delete
# this cluster's JuiceFS objects from the remote bucket — AWS S3 via the
# AWS CLI or Aliyun OSS via ossutil — installing the matching CLI on demand
# from the bundled archive or by download. No-op for non-cloud installs.
remove_mount() {
    version="${TERMINUS_IS_CLOUD_VERSION}"
    storage="${STORAGE}"
    s3_bucket="${S3_BUCKET}"
    # Fall back to the setup-time env vars when the in-cluster annotations
    # were unreadable. FIX: the originals were written `[ -z "$VAR"]` with
    # no space before `]`, so -z tested the non-empty word `"$VAR"]` and
    # the fallback never fired for empty values.
    if [ -z "$STS_ACCESS_KEY" ]; then
        STS_ACCESS_KEY=${AWS_ACCESS_KEY_ID_SETUP}
    fi
    if [ -z "$STS_SECRET_KEY" ]; then
        STS_SECRET_KEY=${AWS_SECRET_ACCESS_KEY_SETUP}
    fi
    if [ -z "$STS_TOKEN" ]; then
        STS_TOKEN=${AWS_SESSION_TOKEN_SETUP}
    fi
    if [ -z "$STS_CLUSTER_ID" ]; then
        STS_CLUSTER_ID=${CLUSTER_ID}
    fi
    if [ x"$version" == x"true" ]; then
        log_info 'remove juicefs s3 mount'
        ensure_success $sh_c "apt install unzip"
        case "$storage" in
        "s3")
            local awscli_file="awscli-exe-linux-x86_64.zip"
            local awscli_tar="${BASE_DIR}/components/${awscli_file}"
            if ! command_exists aws; then
                if [ -f "${awscli_tar}" ]; then
                    ensure_success $sh_c "cp ${awscli_tar} ."
                else
                    # FIX: double quotes so ${CURL_TRY}/${awscli_file} expand
                    # here; with the original single quotes they were expanded
                    # (empty) inside the root subshell instead.
                    ensure_success $sh_c "curl ${CURL_TRY} -kLO \"https://awscli.amazonaws.com/${awscli_file}\""
                fi
                ensure_success $sh_c "unzip -q ${awscli_file}"
                ensure_success $sh_c "./aws/install --update"
            fi
            AWS=$(command -v aws)
            # derive the s3:// bucket URL from the https endpoint form
            s3=$($sh_c "echo $s3_bucket | rev | cut -d '.' -f 5 | rev")
            s3=$($sh_c "echo $s3 | sed 's/https/s3/'")
            log_info 'clean juicefs s3 mount'
            ensure_success $sh_c "AWS_ACCESS_KEY_ID=${STS_ACCESS_KEY} AWS_SECRET_ACCESS_KEY=${STS_SECRET_KEY} AWS_SESSION_TOKEN=${STS_TOKEN} ${AWS} s3 rm $s3/${STS_CLUSTER_ID} --recursive"
            ;;
        "oss")
            local osscli_file="ossutil-v1.7.18-linux-${ARCH}.zip"
            local osscli_tar="${BASE_DIR}/components/${osscli_file}"
            if ! command_exists ossutil64; then
                if [ -f "${osscli_tar}" ]; then
                    ensure_success $sh_c "cp ${osscli_tar} ."
                else
                    # FIX: double quotes for parent-shell expansion (see above)
                    ensure_success $sh_c "curl ${CURL_TRY} -kLO \"https://github.com/aliyun/ossutil/releases/download/v1.7.18/${osscli_file}\""
                fi
                ensure_success $sh_c "unzip -q ${osscli_file}"
                ensure_success $sh_c "mv ./ossutil-v1.7.18-linux-${ARCH}/* /usr/local/sbin/"
                ensure_success $sh_c "chmod +x /usr/local/bin/ossutil*"
            fi
            # derive the oss:// bucket URL and regional endpoint
            oss=$($sh_c "echo $s3_bucket | rev | cut -d '.' -f 4 | rev")
            oss=$($sh_c "echo $oss | sed 's/https/oss/'")
            endpoint=$($sh_c "echo $s3_bucket | awk -F[/.] '{print \"https://\"\$(NF-2)\".\"\$(NF-1)\".\"\$NF}'")
            log_info 'clean juicefs oss mount'
            OSSUTIL=$(command -v ossutil64)
            ensure_success $sh_c "${OSSUTIL} rm ${oss}/${STS_CLUSTER_ID}/ --endpoint=${endpoint} --access-key-id=${STS_ACCESS_KEY} --access-key-secret=${STS_SECRET_KEY} --sts-token=${STS_TOKEN} -r -f >/dev/null"
            ;;
        *)
            ;;
        esac
    fi
}
# Main uninstall flow: detect arch and privilege wrapper, stage a scratch
# dir, then tear down the cluster, local storage, and (for cloud installs)
# the remote bucket data. CLEAN_ALL additionally wipes docker/containerd.
set -o pipefail
set -e
precheck_os
get_shell_exec
INSTALL_DIR=/tmp/install_log
# recreate the scratch/work directory and run from inside it
[[ -d ${INSTALL_DIR} ]] && $sh_c "${RM} -rf ${INSTALL_DIR}"
mkdir -p ${INSTALL_DIR} && cd ${INSTALL_DIR}
log_info 'Uninstalling OS ...'
find_version
find_storage_key
remove_cluster
remove_storage
remove_mount
[[ ! -z $CLEAN_ALL ]] && clean_docker
cd -
$sh_c "${RM} -rf /tmp/install_log"
# clean up any unpacked wizard and downloaded wizard tarballs in the CWD
[[ -d install-wizard ]] && ${RM} -rf install-wizard
set +o pipefail
ls |grep install-wizard*.tar.gz | while read ar; do ${RM} -f ${ar}; done
# k3s variant installs ship their own uninstaller — run it if present
[[ -f /usr/local/bin/k3s-uninstall.sh ]] && $sh_c "/usr/local/bin/k3s-uninstall.sh"
log_info 'Uninstall OS success! '

View File

@@ -1 +0,0 @@
package scripts

View File

@@ -1 +0,0 @@
package scripts

View File

@@ -1,77 +0,0 @@
package storage
import (
"fmt"
"os/exec"
kubekeyapiv1alpha2 "bytetrade.io/web3os/installer/apis/kubekey/v1alpha2"
"bytetrade.io/web3os/installer/pkg/common"
"bytetrade.io/web3os/installer/pkg/core/connector"
"bytetrade.io/web3os/installer/pkg/core/logger"
"bytetrade.io/web3os/installer/pkg/core/util"
"bytetrade.io/web3os/installer/pkg/files"
"github.com/pkg/errors"
)
// InstallMinioClusterModule is the pipeline module that installs the minio
// cluster components.
type InstallMinioClusterModule struct {
common.KubeModule
}
// Init sets the module's display name.
func (m *InstallMinioClusterModule) Init() {
m.Name = "InstallMinioCluster"
}
// InstallMinioOperator downloads, verifies, unpacks and initializes the
// minio-operator binary on the target host.
type InstallMinioOperator struct {
common.KubeAction
}
// Execute obtains the minio-operator binary (reusing a cached copy when its
// SHA256 matches, otherwise downloading), installs it to MinioOperatorFile,
// and runs `minio-operator init` against the local etcd TLS material with
// the data path and password taken from the pipeline cache.
func (t *InstallMinioOperator) Execute(runtime connector.Runtime) error {
var systemInfo = runtime.GetSystemInfo()
var arch = systemInfo.GetOsArch()
var osType = systemInfo.GetOsType()
var osVersion = systemInfo.GetOsVersion()
var osPlatformFamily = systemInfo.GetOsPlatformFamily()
var localIp = systemInfo.GetLocalIp()
binary := files.NewKubeBinary("minio-operator", arch, osType, osVersion, osPlatformFamily, kubekeyapiv1alpha2.DefaultMinioOperatorVersion, runtime.GetWorkDir(), "")
if err := binary.CreateBaseDir(); err != nil {
return errors.Wrapf(errors.WithStack(err), "create file %s base dir failed", binary.FileName)
}
var exists = util.IsExist(binary.Path())
if exists {
p := binary.Path()
// cached copy present: delete it when the checksum fails, else reuse it
if err := binary.SHA256Check(); err != nil {
_ = exec.Command("/bin/sh", "-c", fmt.Sprintf("rm -f %s", p)).Run()
} else {
return nil
}
}
// NOTE(review): `exists` is not reset after the corrupt file is removed
// above, so the re-download below only happens when OverWrite is set —
// otherwise the tar step will fail on the missing file. Confirm intended.
if !exists || binary.OverWrite {
logger.Infof("%s downloading %s %s %s ...", common.LocalHost, arch, binary.ID, binary.Version)
if err := binary.Download(); err != nil {
return fmt.Errorf("Failed to download %s binary: %s error: %w ", binary.ID, binary.Url, err)
}
}
// unpack the archive and install the binary with exec permissions
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("tar zxvf %s", binary.Path()), false, true); err != nil {
return err
}
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("install -m 755 %s/minio-operator %s", binary.BaseDir, MinioOperatorFile), false, true); err != nil {
return err
}
var minioData, _ = t.PipelineCache.GetMustString(common.CacheMinioDataPath)
// FIXME:
var minioPassword, _ = t.PipelineCache.GetMustString(common.CacheMinioPassword)
// initialize the operator against the node's etcd client certificates
var cmd = fmt.Sprintf("%s init --address %s --cafile /etc/ssl/etcd/ssl/ca.pem --certfile /etc/ssl/etcd/ssl/node-%s.pem --keyfile /etc/ssl/etcd/ssl/node-%s-key.pem --volume %s --password %s",
MinioOperatorFile, localIp, runtime.RemoteHost().GetName(),
runtime.RemoteHost().GetName(), minioData, minioPassword)
if _, err := runtime.GetRunner().SudoCmd(cmd, false, true); err != nil {
return err
}
return nil
}

View File

@@ -6,14 +6,6 @@ import (
"bytetrade.io/web3os/installer/pkg/core/task"
)
type DeleteTmpModule struct {
common.KubeModule
}
func (m *DeleteTmpModule) Init() {
m.Name = "DeleteTmp"
}
type InitStorageModule struct {
common.KubeModule
Skip bool

View File

@@ -1 +0,0 @@
package storage

View File

@@ -1 +0,0 @@
package storage

View File

@@ -309,18 +309,6 @@ func (t *RemoveTerminusFiles) Execute(runtime connector.Runtime) error {
return nil
}
type DeleteTmp struct {
common.KubeAction
}
func (t *DeleteTmp) Execute(runtime connector.Runtime) error {
var tmpPath = path.Join(common.RootDir, "tmp", "install_log")
if util.IsExist(tmpPath) {
util.RemoveDir(tmpPath)
}
return nil
}
type DeletePhaseFlagFile struct {
common.KubeAction
PhaseFile string

View File

@@ -134,10 +134,6 @@ func (m *CheckInstalledModule) Init() {
}
}
type InstallComponentsInClusterModule struct {
common.KubeModule
}
type GetNATGatewayIPModule struct {
common.KubeModule
}

View File

@@ -212,15 +212,6 @@ func copyWizard(wizardPath string, np string, runtime connector.Runtime) {
}
}
type DownloadFullInstaller struct {
common.KubeAction
}
func (t *DownloadFullInstaller) Execute(runtime connector.Runtime) error {
return nil
}
type PrepareFinished struct {
common.KubeAction
}

View File

@@ -162,7 +162,7 @@ spec:
priorityClassName: "system-cluster-critical"
containers:
- name: app-service
image: beclab/app-service:0.3.37
image: beclab/app-service:0.3.38
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0

View File

@@ -1,6 +1,6 @@
{{ $backupVersion := "0.3.35" }}
{{ $backupVersion := "0.3.36" }}
{{ $backup_server_rootpath := printf "%s%s" .Values.rootPath "/rootfs/backup-server" }}
---

View File

@@ -318,7 +318,7 @@ spec:
apiVersion: v1
fieldPath: spec.nodeName
- name: ingress
image: beclab/bfl-ingress:v0.3.6
image: beclab/bfl-ingress:v0.3.7
imagePullPolicy: IfNotPresent
volumeMounts:
- name: ngxlog

View File

@@ -97,7 +97,7 @@ spec:
containers:
- name: gateway
image: beclab/appdata-gateway:0.1.20
image: beclab/appdata-gateway:0.1.21
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
@@ -106,7 +106,7 @@ spec:
- containerPort: 8080
env:
- name: FILES_SERVER_TAG
value: 'beclab/files-server:v0.2.71'
value: 'beclab/files-server:v0.2.74'
- name: NAMESPACE
valueFrom:
fieldRef:
@@ -142,7 +142,7 @@ spec:
{{ end }}
- name: files
image: beclab/files-server:v0.2.71
image: beclab/files-server:v0.2.74
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: true
@@ -443,7 +443,7 @@ spec:
name: check-nats
containers:
- name: files
image: beclab/files-server:v0.2.71
image: beclab/files-server:v0.2.74
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: true

View File

@@ -17,7 +17,7 @@ spec:
gpu.bytetrade.io/cuda-supported: 'true'
containers:
- name: gpu-scheduler
image: beclab/gpu-scheduler:v0.1.0
image: beclab/gpu-scheduler:v0.1.1
imagePullPolicy: IfNotPresent
ports:
- name: ws

View File

@@ -4,7 +4,7 @@ nameOverride: ""
fullnameOverride: ""
namespaceOverride: ""
imagePullSecrets: []
version: "v2.5.2-share-02"
version: "v2.5.2-share-06"
# Nvidia GPU Parameters
resourceName: "nvidia.com/gpu"

View File

@@ -190,7 +190,7 @@ spec:
value: os_system_knowledge
containers:
- name: knowledge
image: "beclab/knowledge-base-api:v0.12.8"
image: "beclab/knowledge-base-api:v0.12.9"
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0
@@ -346,21 +346,6 @@ spec:
- name: cache-dir
mountPath: /appCache
- name: terminus-ws-sidecar
image: 'beclab/ws-gateway:v1.0.4'
imagePullPolicy: IfNotPresent
command:
- /ws-gateway
env:
- name: WS_PORT
value: '3010'
- name: WS_URL
value: /knowledge/websocket/message
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumes:
- name: userspace-dir
hostPath:
@@ -437,7 +422,14 @@ spec:
secretKeyRef:
key: nat_password
name: knowledge-secrets
refs: []
refs:
- appName: user-service
appNamespace: user
subjects:
- name: knowledge.*
perm:
- pub
- sub
subjects:
- name: download_status
permission:
@@ -541,7 +533,7 @@ spec:
cpu: "1"
memory: 300Mi
- name: yt-dlp
image: "beclab/yt-dlp:v0.12.3"
image: "beclab/yt-dlp:v0.12.4"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
@@ -587,7 +579,7 @@ spec:
cpu: "1"
memory: 300Mi
- name: download-spider
image: "beclab/download-spider:v0.12.4"
image: "beclab/download-spider:v0.12.5"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false

View File

@@ -238,7 +238,7 @@ spec:
value: os_system_search3
containers:
- name: search3
image: beclab/search3:v0.0.33
image: beclab/search3:v0.0.34
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
@@ -263,7 +263,7 @@ spec:
- name: NATS_SUBJECT_SYSTEM_GROUPS
value: terminus.os-system.system.groups
- name: search3monitor
image: beclab/search3monitor:v0.0.33
image: beclab/search3monitor:v0.0.34
imagePullPolicy: IfNotPresent
env:
- name: DATABASE_URL

View File

@@ -101,7 +101,7 @@ spec:
value: os_system_vault
containers:
- name: vault-server
image: beclab/vault-server:v1.3.55
image: beclab/vault-server:v1.3.71
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
@@ -136,7 +136,7 @@ spec:
valueFrom:
secretKeyRef:
key: nats_password
name: nats-secrets
name: vault-server-nats-secret
- name: NATS_SUBJECT_SYSTEM_VAULT
value: "terminus.{{ .Release.Namespace }}.system.vault"
volumeMounts:

View File

@@ -3,7 +3,7 @@ target: prebuilt
output:
containers:
-
name: beclab/hami:v2.5.2-share-01
name: beclab/hami:v2.5.2-share-06
-
name: projecthami/hami-webui-fe-oss:v1.0.5
-