Compare commits

...

18 Commits

Author SHA1 Message Date
eball
82a5cbe08b feat: add support for selecting GPU types in application installation (#2458)
* fix: failed release upgrade

* fix: helm upgrade do not use atomic param and allow upgrade failed release

* feat: add clickhouse support

* appservice image tag to 0.4.76

* feat: add icon filed to nats event

* chores: get all node gpu types

* feat: add support for selecting GPU types in application installation

* feat: enhance GPU type selection logic in application installation

* feat: replace hardcoded GPU type with constant for supported GPU selection

* feat: update app config methods to include selected GPU type and enhance validation for NVIDIA GPUs

* feat: update supported GPU handling to include default options and improve validation logic

* feat: update GPU resource handling to unset previous limits before setting new ones

* feat: refactor permission parsing to use exported function and update related calls

---------

Co-authored-by: hys <hysyeah@gmail.com>
2026-02-03 11:19:24 +08:00
hys
a88cedb0ce set appservice image tag to 0.4.77 2026-01-28 20:27:10 +08:00
hys
06d0d36042 fix: add spec ports 2026-01-28 20:27:10 +08:00
hys
67deaf16ea fix: check k8s request before into installing state 2026-01-28 20:27:10 +08:00
hys
b27854b863 fix: v2 app stop 2026-01-28 20:27:10 +08:00
hys
4fd22c4e20 feat: add icon filed to nats event 2026-01-28 20:27:10 +08:00
hys
031d8164ff fix: helm upgrade do not use atomic param and allow upgrade failed release 2026-01-28 20:27:10 +08:00
hys
0c6def8f43 fix: failed release upgrade 2026-01-28 20:27:10 +08:00
simon
ddbb13533d download-server:add download err category && modify aria2 max concurrent (#2445)
download server
2026-01-27 11:47:55 +08:00
Meow33
b65a3e3514 docs: add storage expansion via CLI (#2409)
* docs: add storage expansion method

* docs: add guide to access Olares terminal

* Update zh.ts

* fix formatting and file directory

---------

Co-authored-by: yajing wang <413741312@qq.com>
2026-01-26 17:10:23 +08:00
Power-One-2025
1f110184bd docs: update free backup storage info for Olares Space (#2428) 2026-01-23 10:58:14 +08:00
hysyeah
90eed09f10 cli: upgrade l4-bfl-proxy to v0.3.10 (#2442) 2026-01-22 23:18:34 +08:00
hysyeah
44ccf86032 l4: skip invalid expose port (#2441)
fix: skip invalid expose port (#2434)
2026-01-22 21:45:56 +08:00
hysyeah
67425162c2 appservice: add clickhouse support (#2440)
* fix: failed release upgrade

* fix: helm upgrade do not use atomic param and allow upgrade failed release

* feat: add clickhouse support

* appservice image tag to 0.4.76
2026-01-22 21:43:04 +08:00
eball
8a786c7c5a daemon: change pcap open timeout to 1 millisecond to prevent close hang (#2439) 2026-01-22 21:42:33 +08:00
hysyeah
c026e82615 tapr: add clickhouse support (#2437)
* feat: add clickhouse support

* fix: dependabot alerts

* middleware-operator 0.2.32
2026-01-22 21:42:13 +08:00
dkeven
e29c7f264e feat(gpu): supports dynamic detection of hot plugged-in GPUs (#2435) 2026-01-22 21:41:26 +08:00
Power-One-2025
3091e40ff0 docs/update/olares-space-storage-info 2026-01-21 11:29:53 +08:00
80 changed files with 1855 additions and 622 deletions

View File

@@ -0,0 +1,32 @@
package upgrade
import (
"time"
"github.com/Masterminds/semver/v3"
"github.com/beclab/Olares/cli/pkg/core/task"
)
// upgrader_1_12_5_20260122 is a daily upgrader that runs additional
// system-component upgrade tasks (see UpgradeSystemComponents) on top of
// the embedded breakingUpgraderBase behavior.
type upgrader_1_12_5_20260122 struct {
breakingUpgraderBase
}
// Version returns the target version this daily upgrader applies to.
// NOTE(review): the type name says 1_12_5 but the version string parses as
// "1.12.3-20260122" — confirm which number is intended; the mismatch looks
// accidental.
func (u upgrader_1_12_5_20260122) Version() *semver.Version {
return semver.MustParse("1.12.3-20260122")
}
// UpgradeSystemComponents returns the ordered upgrade tasks for this version:
// first a local task that bumps the L4 BFL proxy image to v0.3.10 (retried up
// to 3 times with a 5-second delay between attempts), followed by the base
// upgrader's standard system-component tasks.
func (u upgrader_1_12_5_20260122) UpgradeSystemComponents() []task.Interface {
	proxyUpgrade := &task.LocalTask{
		Name:   "UpgradeL4BFLProxy",
		Action: &upgradeL4BFLProxy{Tag: "v0.3.10"},
		Retry:  3,
		Delay:  5 * time.Second,
	}
	tasks := make([]task.Interface, 0, 1)
	tasks = append(tasks, proxyUpgrade)
	return append(tasks, u.upgraderBase.UpgradeSystemComponents()...)
}
// init registers this upgrader with the daily-upgrader registry so it is
// considered when selecting upgrade paths.
func init() {
registerDailyUpgrader(upgrader_1_12_5_20260122{})
}

View File

@@ -477,7 +477,7 @@ func (d *DSRProxy) regonfigure() error {
klog.Infof("Calico interface: %s", d.calicoInterface.Name)
var err error
d.pcapHandle, err = pcap.OpenLive(d.vipInterface.Name, 65536, false, pcap.BlockForever)
d.pcapHandle, err = pcap.OpenLive(d.vipInterface.Name, 65536, false, time.Millisecond)
if err != nil {
klog.Error("pcap openlive failed:", err)
return err

View File

@@ -642,6 +642,7 @@ const side = {
link: "/developer/install/cli/olares-cli",
collapsed: true,
items: [
{ text: "Access Olares terminal", link: "/developer/reference/access-olares-terminal" },
{
text: "backups",
link: "/developer/install/cli/backups",

View File

@@ -615,6 +615,7 @@ const side = {
link: "/zh/developer/install/cli/olares-cli",
collapsed: true,
items: [
{ text: "访问 Olares 终端", link: "/zh/developer/reference/access-olares-terminal" },
{
text: "backups",
link: "/zh/developer/install/cli/backups",

View File

@@ -0,0 +1,74 @@
---
outline: [2, 3]
description: Complete guide to accessing the Olares host terminal via SSH or Control Hub.
---
# Access the Olares Terminal
Some development and operational tasks require running commands on the Olares host, such as inspecting disks, verifying host state, or updating host-level configuration. Since Olares hosts are commonly deployed without a monitor or keyboard, terminal access is provided remotely.
You can access the host shell using one of the following methods:
- **Secure Shell (SSH)** for standard remote management.
- **Control Hub terminal** for direct root access from the Olares web interface.
## Method 1: Access via SSH
SSH is the standard protocol for operating the Olares host from a remote development machine. This method establishes a secure session over the network.
### Prerequisites
Before connecting, ensure that you have:
- Network connectivity to the Olares host:
- In most setups, your computer and the Olares host are on the same local network.
- If you need to connect outside the local network, configure VPN access first. See [Connect over VPN using LarePass](#optional-connect-over-vpn-using-larepass).
- Host IP address, typically `192.168.x.x`.
- Valid login credentials.
### Connect over local network
1. Open a terminal on your computer.
2. Run the SSH command using the following format:
```bash
ssh <username>@<host_ip_address>
```
Example:
```bash
ssh olares@192.168.31.155
```
3. Enter the host password when prompted.
### Optional: Connect over VPN using LarePass
If your computer is not on the same local network as your Olares device, enable LarePass VPN.
1. In Olares, open Settings, then navigate to **VPN**.
2. Enable **Allow SSH via VPN**.
3. Open the LarePass desktop client, and click your avatar in the top-left corner to open the user menu.
4. Toggle on the switch for **VPN connection**.
5. Open a terminal on your computer.
6. Run the SSH command using the following format:
```bash
ssh <username>@<host_ip_address>
```
Example:
```bash
ssh olares@192.168.31.155
```
7. Enter the host password when prompted.
## Method 2: Access via Control Hub
You can open a terminal session directly from Control Hub.
1. Open Control Hub.
2. In the left sidebar, click **Olares** in the **Terminal** section.
:::info Root access
The Control Hub terminal runs commands as `root` by default.
Do not use `sudo` before commands. For example, run `apt update` instead of `sudo apt update`.
:::

View File

@@ -1,10 +1,15 @@
---
outline: [2, 3]
description: Complete guide to expanding storage in Olares. Learn how to connect to SMB servers, use USB auto-mount, and manually mount HDDs or SSDs to increase local storage capacity and manage large AI model files efficiently.
description: Complete guide to expanding storage in Olares. Learn how to connect to SMB servers, use USB auto-mount, use CLI commands, and manually mount HDDs or SSDs to increase local storage capacity and manage large AI model files efficiently.
---
# Expand storage in Olares
This document describes how to expand storage in Olares, including connecting to an SMB server, using automatically mounted USB storage devices, and manually mounting HDDs or SSDs from the Linux hosting environment.
This document describes how to expand storage in Olares using different approaches. Choose the method that best matches your scenario:
- **Connect to an SMB server** to access shared files on a NAS or another computer over the network.
- **Use USB auto-mount** for plug-and-play external storage. No Linux commands required.
- **Manually mount an HDD or SSD** under `/olares/share` to keep a disk as independent external storage for large files.
- **Expand system storage via Olares CLI** (`disk extend`) to increase system capacity on LVM-based setups by merging new disk(s) into the system volume.
## Connect to an SMB server
@@ -26,7 +31,9 @@ For details, please refer to [Mount SMB shares](../olares/files/mount-SMB.md).
- You can access it in **Files** > **External** from both Olares and Larepass.
- When the USB device is unplugged, the system automatically unmounts it.
- The system automatically unmounts the device when you unplug it.
- You can manually eject the device via the Olares web interface. Right-click the USB drive in Files and select **Unmount**.
## Manually mount an HDD or SSD
@@ -187,4 +194,124 @@ You can unmount partitions mounted using either temporary or permanent methods.
Ensure the directory is empty and fully unmounted before deleting.
:::
You can also view and remove this directory from **Files** in Olares.
You can also view and remove this directory from **Files** in Olares.
## Expand system storage via Olares CLI
If your Olares system uses LVM-based storage, you can expand its system storage capacity using the `disk` command.
Manual mounting adds an external drive under `/olares/share`. In contrast, `disk extend` expands Olares system storage. After extension, the added drive is no longer shown as an independent mount point.
:::warning Data loss
`disk extend` will destroy all data on the selected disk.
Make sure the disk does not contain important data, or back up the data before continuing.
:::
### Before you begin
- Connect the external drive to the Olares host machine.
- [SSH](/developer/reference/access-olares-terminal.md) into the Olares terminal.
### Identify the unmounted disk
List block devices on the host:
```bash
lsblk | grep -v loop
```
Identify the newly added disk by checking its size and confirming it has no mount points. Do not select the disk that contains `/` or `/boot`.
**Example output**:
```text
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 931.5G 0 disk
├─sda1 8:1 0 512M 0 part /boot
└─sda2 8:2 0 931G 0 part /
nvme1n1 259:3 0 931.5G 0 disk
```
In this example, `sda` is the system drive which is mounted at `/` and `/boot`, while `nvme1n1` is the newly connected disk.
### Extend system storage
1. Verify that Olares recognizes the unmounted disk:
```bash
olares-cli disk list-unmounted
```
2. Add the disk to the system volume:
```bash
olares-cli disk extend
```
3. Type `YES` to proceed when the command prompts for confirmation.
```text
WARNING: This will DESTROY all data on /dev/<device>
Type 'YES' to continue, CTRL+C to abort:
```
**Example output**:
```text
Selected volume group to extend: olares-vg
Selected logical volume to extend: data
Selected unmounted device to use: /dev/nvme0n1
Extending logical volume data in volume group olares-vg using device /dev/nvme0n1
WARNING: This will DESTROY all data on /dev/nvme0n1
Type 'YES' to continue, CTRL+C to abort: YES
Selected device /dev/nvme0n1 has existing partitions. Cleaning up...
Deleting existing partitions on device /dev/nvme0n1...
Creating partition on device /dev/nvme0n1...
Creating physical volume on device /dev/nvme0n1...
Extending volume group olares-vg with logic volume data on device /dev/nvme0n1...
Disk extension completed successfully.
id LV VG LSize Mountpoints
1 data olares-vg <3.63t /var,/olares
2 root olares-vg 100.00g /
3 swap olares-vg 1.00g
...
```
### Verify the extension
You can verify the storage increase in both terminal and UI.
#### In terminal
- Check the size of the `/olares` directory where data is stored to confirm expansion:
```bash
df -h /olares
```
**Example output**:
```text
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/olares--vg-root 1.8T 285G 1.4T 17% /olares
```
- Confirm if the new disk is now part of the `olares--vg-data` volume:
```bash
lsblk | grep -v loop
```
**Example output**:
```text
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
nvme0n1 259:0 0 1.9T 0 disk
└─nvme0n1p1 259:2 0 1.9T 0 part
└─olares--vg-data 252:2 0 3.6T 0 lvm /olares /var
nvme1n1 259:3 0 1.9T 0 disk
├─nvme1n1p1 259:4 0 512M 0 part /boot/efi
└─nvme1n1p2 259:5 0 1.9T 0 part
├─olares--vg-root 252:1 0 100G 0 lvm /
└─olares--vg-swap 252:0 0 1G 0 lvm [SWAP]
```
#### In UI
Open Dashboard from Launchpad and confirm that total system storage capacity has increased.
![Check disk volume in Dashboard](/public/images/manual/tutorials/expand-dashboard-disk.png#bordered)
For full command usage and options, please refer to [`disk`](/developer/install/cli/disk.md).

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

View File

@@ -5,10 +5,6 @@ description: Protect your Olares instances with cloud backup and restore feature
Olares Space is the official solution to back up snapshots for your Olares instances. You can restore an Olares to its most recent state whenever needed. This section provides instructions for managing backups and restores in Olares Space.
:::tip
Each Olares is provided with 10 GB of free backup space. Any usage beyond this will be charged according to the cloud provider's pricing.
:::
## View backup list
The backup task list shows information for each backup task, including:
@@ -17,7 +13,7 @@ The backup task list shows information for each backup task, including:
- Most recent snapshot time
- Overall storage usage
![alt text](/images/how-to/space/backup_list.jpg#bordered)
![Backup task list in Olares Space](/images/how-to/space/backup_list.jpg#bordered)
Click **View Details** on a task to see its detail page. The detail page shows the storage usage since the task was created and a list of all successful snapshots.
@@ -27,7 +23,7 @@ Currently, only restoring from the most recent snapshot is supported.
## Restore backup to the Olares Space
![alt text](/images/how-to/space/restore_backup_to_the_olares_space.jpg#bordered)
![Snapshots in Olares Space](/images/how-to/space/restore_backup_to_the_olares_space.jpg#bordered)
Restoring a snapshot to the cloud is similar to setting up a new cloud-based Olares.
@@ -39,7 +35,7 @@ Restoring a snapshot to the cloud is similar to setting up a new cloud-based Ola
c. Confirm the snapshot details and enter the backup password.
2. Understand charges for storage and bandwidth. <br>Each instance includes a certain amount of free storage and traffic. Any usage exceeding these quotas will incur charges.
2. Understand charges for storage and bandwidth. <br>Each instance includes a certain amount of free traffic. Any usage exceeding the quota will incur charges. For more information, see [Billing](billing.md).
3. Confirm the order and complete the payment. After that, the Olares begins to install.

View File

@@ -0,0 +1,75 @@
---
outline: [2, 3]
description: 通过 SSH 或 Control Hub 访问 Olares 主机终端的完整指南。
---
# 访问 Olares 主机终端
某些开发和运维任务需要在 Olares 主机上运行命令,例如检查磁盘、验证主机状态或更新主机级配置。由于 Olares 主机通常以无显示器、无键盘的方式部署,因此终端访问主要通过远程方式进行。
你可以通过以下两种方式访问主机终端:
- **Secure Shell (SSH)**:标准的远程管理。
- **控制面板**:通过 Olares 网页端直接获取 root 权限。
## 方式一:通过 SSH 访问
SSH 是从远程开发设备操作 Olares 主机的标准协议,可在网络上建立安全的终端会话。
### 前提条件
连接之前,请确保满足以下条件:
- 与 Olares 主机建立网络连接:
- 在大多数情况下,你的电脑和 Olares 主机位于同一局域网内。
- 如果需要从局域网外部连接,请先配置 VPN 访问。详见[通过 LarePass VPN 连接](#可选-通过-larepass-vpn-连接)。
- Olares 主机 IP 地址,通常为 `192.168.x.x`。
- 有效的登录凭证。
### 通过局域网连接
1. 打开计算机上的终端。
2. 使用以下格式运行 SSH 命令:
```bash
ssh <username>@<host_ip_address>
```
示例:
```bash
ssh olares@192.168.31.155
```
3. 根据提示输入主机的登录密码。
### 可选:通过 LarePass VPN 连接
如果你的电脑与 Olares 主机不在同一局域网,执行 SSH 命令之前先启用 LarePass VPN。
1. 在 Olares 中,打开设置应用。
2. 启用**允许通过 VPN 进行 SSH 连接**。
3. 打开 LarePass 桌面客户端,点击左上角头像打开用户菜单。
4. 打开**专用网络连接**开关。
5. 打开计算机上的终端。
6. 使用以下格式运行 SSH 命令:
```bash
ssh <username>@<host_ip_address>
```
示例:
```bash
ssh olares@192.168.31.155
```
7. 根据提示输入主机密码。
## 方式二:通过控制面板访问
你可以直接通过 Olares 控制面板打开主机终端。
1. 打开控制面板。
2. 在左侧边栏的**终端**部分,点击 **Olares**。
:::info `root` 权限
通过控制面板打开的终端默认以 `root` 身份运行。
请勿在命令前使用 `sudo`。例如,直接运行 `apt update`,而不是 `sudo apt update`。
:::

View File

@@ -4,6 +4,13 @@ description: Olares 存储扩展指南,涵盖 SMB 服务器连接、USB 自动
---
# 在 Olares 中扩展存储空间
本文档介绍如何通过不同方式在 Olares 中扩展存储空间。请根据你的使用场景选择最合适的方案:
- **连接 SMB 服务器**:通过网络访问 NAS 或其他计算机上的共享文件。
- **使用 USB 自动挂载**:即插即用的外部存储方式,无需任何 Linux 命令。
- **手动挂载 HDD 或 SSD**:将硬盘挂载到 `/olares/share` 下,作为独立的外部存储,适合存放大型文件。
- **通过 Olares CLI 扩展系统存储**:在基于 LVM 的系统上使用 `disk extend` 命令,将新磁盘合并入系统卷,从而增加系统容量。
本文档介绍如何在 Olares 中扩展存储空间,包括通过 SMB 服务器连接、使用 USB 存储设备自动挂载,以及在 Linux 宿主系统中手动挂载 HDD/SSD。
## 通过 SMB 服务器连接
@@ -26,7 +33,9 @@ description: Olares 存储扩展指南,涵盖 SMB 服务器连接、USB 自动
- 你可以在 Olares 网页端或 Larepass 中,点击**文件管理器** > **外部设备**直接访问。
- 断开 USB 设备,系统会自动将其卸载
- 直接拔出 USB 设备,系统会自动完成卸载
- 你也可以在 Olares 网页端手动移除设备。在**文件管理器**中右键点击该 USB 硬盘,选择**卸载**即可断开连接。
## 手动挂载 HDD/SSD
@@ -186,4 +195,127 @@ Linux 或 Olares 重启后,挂载配置将失效。
删除前,请确认卸载已成功且目录为空。
:::
你也可以在 Olares 的**文件管理器**里查看并删除该目录。
你也可以在 Olares 的**文件管理器**里查看并删除该目录。
## 通过 Olares CLI 扩展系统存储
如果你的 Olares 系统使用基于 LVM 的存储方式,可以使用 `disk` 命令扩展系统存储容量。
手动挂载会将磁盘作为外部存储挂载到 `/olares/share` 目录下。相比之下,`disk extend` 命令用于扩展 Olares 的系统存储空间。扩展完成后,新增磁盘不再显示为独立挂载点。
:::warning 数据丢失警告
`disk extend` 命令将销毁所选磁盘上的所有数据。
在继续操作之前,请确保磁盘中没有重要数据或已完成备份。
:::
### 开始之前
- 将外部硬盘连接到 Olares 主机。
- [SSH](/zh/developer/reference/access-olares-terminal.md) 登录到 Olares 主机终端。
### 识别未挂载的磁盘
列出主机上的所有块设备:
```bash
lsblk | grep -v loop
```
通过磁盘容量判断新接入的磁盘,并确认该磁盘当前没有挂载点。请勿选择包含 `/` 或 `/boot` 的磁盘。
**示例输出**
```text
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 931.5G 0 disk
├─sda1 8:1 0 512M 0 part /boot
└─sda2 8:2 0 931G 0 part /
nvme1n1 259:3 0 931.5G 0 disk
```
示例中的 `sda` 是系统盘,挂载点为 `/` 和 `/boot`;`nvme1n1` 是新连接的磁盘。
### 扩展系统存储
1. 确认 Olares 已识别但未挂载该磁盘:
```bash
olares-cli disk list-unmounted
```
2. 将检测到的未挂载磁盘加入系统存储:
```bash
olares-cli disk extend
```
3. 当命令行提示确认时,输入 `YES` 继续。
```text
WARNING: This will DESTROY all data on /dev/<device>
Type 'YES' to continue, CTRL+C to abort:
```
**示例输出**
```text
Selected volume group to extend: olares-vg
Selected logical volume to extend: data
Selected unmounted device to use: /dev/nvme0n1
Extending logical volume data in volume group olares-vg using device /dev/nvme0n1
WARNING: This will DESTROY all data on /dev/nvme0n1
Type 'YES' to continue, CTRL+C to abort: YES
Selected device /dev/nvme0n1 has existing partitions. Cleaning up...
Deleting existing partitions on device /dev/nvme0n1...
Creating partition on device /dev/nvme0n1...
Creating physical volume on device /dev/nvme0n1...
Extending volume group olares-vg with logic volume data on device /dev/nvme0n1...
Disk extension completed successfully.
id LV VG LSize Mountpoints
1 data olares-vg <3.63t /var,/olares
2 root olares-vg 100.00g /
3 swap olares-vg 1.00g
...
```
### 验证扩展结果
你可以在终端和 UI 界面中验证存储空间是否已增加:
#### 在终端
- 检查数据存储位置 `/olares` 目录的大小以确认扩容成功:
```bash
df -h /olares
```
**示例输出**
```text
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/olares--vg-root 1.8T 285G 1.4T 17% /olares
```
- 查看新硬盘是否已合并入 `olares--vg-data` 卷:
```bash
lsblk | grep -v loop
```
**示例输出**
```text
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
nvme0n1 259:0 0 1.9T 0 disk
└─nvme0n1p1 259:2 0 1.9T 0 part
└─olares--vg-data 252:2 0 3.6T 0 lvm /olares /var
nvme1n1 259:3 0 1.9T 0 disk
├─nvme1n1p1 259:4 0 512M 0 part /boot/efi
└─nvme1n1p2 259:5 0 1.9T 0 part
├─olares--vg-root 252:1 0 100G 0 lvm /
└─olares--vg-swap 252:0 0 1G 0 lvm [SWAP]
```
#### 在 UI 界面
从启动台打开仪表盘,确认系统总存储容量已增加。
![Check disk volume in Dashboard](/public/images/zh/manual/tutorials/expand-dashboard-disk.png#bordered)
如需查看完整用法与选项,请参考 [`disk`](/zh/developer/install/cli/disk.md)。

View File

@@ -5,10 +5,6 @@ description: 介绍 Olares Space 的数据备份工具,支持查看备份记
Olares Space 是为 Olares 实例提供快照备份的官方解决方案。你可以随时将 Olares 恢复到最近的状态。本节介绍如何在 Olares Space 中管理备份和恢复操作。
:::info 注意
每个 Olares 实例可以获得 10GB 的免费备份空间。超出部分将按照云服务商的定价收费。
:::
## 查看备份列表
备份任务列表显示每个备份任务的信息,包括:
@@ -17,7 +13,7 @@ Olares Space 是为 Olares 实例提供快照备份的官方解决方案。你
- 最近快照时间
- 总体存储用量
![alt text](/images/how-to/space/backup_list.jpg#bordered)
![Olares Space 中的任务列表](/images/how-to/space/backup_list.jpg#bordered)
点击任务的**查看详情**可以进入详情页面。详情页面展示了该任务创建以来的存储用量变化,以及所有成功的快照记录。
@@ -27,7 +23,7 @@ Olares Space 是为 Olares 实例提供快照备份的官方解决方案。你
## 将备份恢复至 Olares Space
![alt text](/images/how-to/space/restore_backup_to_the_olares_space.jpg#bordered)
![Olares Space 中的快照列表](/images/how-to/space/restore_backup_to_the_olares_space.jpg#bordered)
将快照恢复到云端的操作流程与新建云端 Olares 类似。
@@ -39,7 +35,7 @@ Olares Space 是为 Olares 实例提供快照备份的官方解决方案。你
c. 确认快照信息并输入备份密码。
2. 了解存储和带宽费用。<br>每个实例都包含一定额度的免费存储和流量配额。超出部分将产生费用。
2. 了解存储和带宽费用。<br>每个实例都包含一定额度的免费流量。超出配额部分将产生费用。更多详情,请见[计费说明](billing.md)。
3. 确认订单并完成支付。之后 Olares 开始安装。

View File

@@ -170,7 +170,7 @@ spec:
priorityClassName: "system-cluster-critical"
containers:
- name: app-service
image: beclab/app-service:0.4.75
image: beclab/app-service:0.4.77
imagePullPolicy: IfNotPresent
ports:
- containerPort: 6755
@@ -196,7 +196,7 @@ spec:
- name: SYS_APPS
value: "market,auth,citus,desktop,did,docs,files,fsnotify,headscale,infisical,intentprovider,ksserver,message,mongo,monitoring,notifications,profile,redis,recommend,seafile,search,search-admin,settings,systemserver,tapr,vault,video,zinc,accounts,control-hub,dashboard,nitro,olares-app"
- name: KB_MIDDLEWARES
value: "mongodb,minio,mysql,mariadb,elasticsearch,rabbitmq"
value: "mongodb,minio,mysql,mariadb,elasticsearch,rabbitmq,clickhouse"
- name: GENERATED_APPS
value: "citus,mongo-cluster-cfg,mongo-cluster-mongos,mongo-cluster-rs0,frp-agent,l4-bfl-proxy,drc-redis-cluster,appdata-backend,argoworkflows,argoworkflow-workflow-controller,velero,kvrocks"
- name: WS_CONTAINER_IMAGE

View File

@@ -193,6 +193,7 @@ func (r *ApplicationManagerController) publishStateChangeEvent(am *appv1alpha1.A
RawAppName: am.Spec.RawAppName,
Type: am.Spec.Type.String(),
Title: apputils.AppTitle(am.Spec.Config),
Icon: apputils.AppIcon(am.Spec.Config),
Reason: am.Status.Reason,
Message: am.Status.Message,
})

View File

@@ -252,6 +252,7 @@ func (r *EntranceStatusManagerController) updateEntranceStatus(ctx context.Conte
RawAppName: appCopy.Spec.RawAppName,
Type: am.Spec.Type.String(),
Title: app.AppTitle(am.Spec.Config),
Icon: app.AppIcon(am.Spec.Config),
SharedEntrances: appCopy.Spec.SharedEntrances,
})
}

View File

@@ -4,9 +4,11 @@ import (
"context"
"os"
"strconv"
"strings"
"time"
appv1alpha1 "github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/appstate"
"github.com/beclab/Olares/framework/app-service/pkg/constants"
apputils "github.com/beclab/Olares/framework/app-service/pkg/utils/app"
@@ -120,7 +122,7 @@ func (r *PodAbnormalSuspendAppController) Reconcile(ctx context.Context, req ctr
if pod.Status.Reason == "Evicted" {
klog.Infof("pod evicted name=%s namespace=%s, attempting to suspend app=%s owner=%s", pod.Name, pod.Namespace, appName, owner)
ok, err := r.trySuspendApp(ctx, owner, appName, constants.AppStopDueToEvicted, "evicted pod: "+pod.Namespace+"/"+pod.Name)
ok, err := r.trySuspendApp(ctx, owner, appName, constants.AppStopDueToEvicted, "evicted pod: "+pod.Namespace+"/"+pod.Name, pod.Namespace)
if err != nil {
klog.Errorf("suspend attempt failed for app=%s owner=%s: %v", appName, owner, err)
return ctrl.Result{}, err
@@ -147,7 +149,7 @@ func (r *PodAbnormalSuspendAppController) Reconcile(ctx context.Context, req ctr
}
klog.Infof("attempting to suspend app=%s owner=%s due to pending unschedulable timeout", appName, owner)
ok, err := r.trySuspendApp(ctx, owner, appName, constants.AppUnschedulable, "pending unschedulable timeout on pod: "+pod.Namespace+"/"+pod.Name)
ok, err := r.trySuspendApp(ctx, owner, appName, constants.AppUnschedulable, "pending unschedulable timeout on pod: "+pod.Namespace+"/"+pod.Name, pod.Namespace)
if err != nil {
klog.Errorf("suspend attempt failed for app=%s owner=%s: %v", appName, owner, err)
return ctrl.Result{}, err
@@ -191,7 +193,7 @@ func pendingUnschedulableSince(pod *corev1.Pod) (time.Time, bool) {
// trySuspendApp attempts to suspend the app and returns (true, nil) if a suspend request was issued.
// If the app is not suspendable yet, returns (false, nil) to trigger a short requeue.
func (r *PodAbnormalSuspendAppController) trySuspendApp(ctx context.Context, owner, appName, reason, message string) (bool, error) {
func (r *PodAbnormalSuspendAppController) trySuspendApp(ctx context.Context, owner, appName, reason, message, podNamespace string) (bool, error) {
name, err := apputils.FmtAppMgrName(appName, owner, "")
if err != nil {
klog.Errorf("failed to format app manager name app=%s owner=%s: %v", appName, owner, err)
@@ -215,6 +217,11 @@ func (r *PodAbnormalSuspendAppController) trySuspendApp(ctx context.Context, own
return false, nil
}
isServerPod := strings.HasSuffix(podNamespace, "-shared")
if isServerPod {
am.Annotations[api.AppStopAllKey] = "true"
}
am.Spec.OpType = appv1alpha1.StopOp
if err := r.Update(ctx, &am); err != nil {
klog.Errorf("failed to update applicationmanager spec to StopOp name=%s app=%s owner=%s: %v", am.Name, appName, owner, err)

View File

@@ -126,15 +126,16 @@ type UpgradeRequest struct {
// InstallRequest represents a request to install an application.
type InstallRequest struct {
Dev bool `json:"devMode"`
RepoURL string `json:"repoUrl"`
CfgURL string `json:"cfgUrl"`
Source AppSource `json:"source"`
Images []Image `json:"images"`
Envs []sysv1alpha1.AppEnvVar `json:"envs"`
RawAppName string `json:"rawAppName"`
Title string `json:"title"`
Entrances []EntranceClone `json:"entrances"`
Dev bool `json:"devMode"`
RepoURL string `json:"repoUrl"`
CfgURL string `json:"cfgUrl"`
Source AppSource `json:"source"`
Images []Image `json:"images"`
Envs []sysv1alpha1.AppEnvVar `json:"envs"`
RawAppName string `json:"rawAppName"`
Title string `json:"title"`
Entrances []EntranceClone `json:"entrances"`
SelectedGpuType string `json:"selectedGpuType"`
}
type Image struct {

View File

@@ -3,11 +3,14 @@ package apiserver
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"strconv"
"strings"
"golang.org/x/exp/maps"
"github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/appcfg"
@@ -426,6 +429,11 @@ func (h *Handler) apps(req *restful.Request, resp *restful.Response) {
api.HandleError(resp, req, err)
return
}
for i := range appconfig.Entrances {
if appconfig.Entrances[i].AuthLevel == "" {
appconfig.Entrances[i].AuthLevel = "private"
}
}
now := metav1.Now()
name, _ := apputils.FmtAppMgrName(am.Spec.AppName, owner, appconfig.Namespace)
app := &v1alpha1.Application{
@@ -443,6 +451,7 @@ func (h *Handler) apps(req *restful.Request, resp *restful.Response) {
Owner: owner,
Entrances: appconfig.Entrances,
SharedEntrances: appconfig.SharedEntrances,
Ports: appconfig.Ports,
Icon: appconfig.Icon,
Settings: map[string]string{
"title": am.Annotations[constants.ApplicationTitleLabel],
@@ -477,6 +486,8 @@ func (h *Handler) apps(req *restful.Request, resp *restful.Response) {
}
if v, ok := appsMap[a.Name]; ok {
v.Spec.Settings = a.Spec.Settings
v.Spec.Entrances = a.Spec.Entrances
v.Spec.Ports = a.Spec.Ports
}
}
}
@@ -738,6 +749,11 @@ func (h *Handler) allUsersApps(req *restful.Request, resp *restful.Response) {
api.HandleError(resp, req, err)
return
}
for i := range appconfig.Entrances {
if appconfig.Entrances[i].AuthLevel == "" {
appconfig.Entrances[i].AuthLevel = "private"
}
}
now := metav1.Now()
app := v1alpha1.Application{
@@ -754,6 +770,7 @@ func (h *Handler) allUsersApps(req *restful.Request, resp *restful.Response) {
Namespace: am.Spec.AppNamespace,
Owner: am.Spec.AppOwner,
Entrances: appconfig.Entrances,
Ports: appconfig.Ports,
SharedEntrances: appconfig.SharedEntrances,
Icon: appconfig.Icon,
Settings: map[string]string{
@@ -788,6 +805,8 @@ func (h *Handler) allUsersApps(req *restful.Request, resp *restful.Response) {
}
if v, ok := appsMap[a.Name]; ok {
v.Spec.Settings = a.Spec.Settings
v.Spec.Entrances = a.Spec.Entrances
v.Spec.Ports = a.Spec.Ports
}
}
@@ -930,12 +949,37 @@ func (h *Handler) oamValues(req *restful.Request, resp *restful.Response) {
api.HandleError(resp, req, err)
return
}
gpuType, err := utils.FindGpuTypeFromNodes(&nodes)
gpuTypes, err := utils.GetAllGpuTypesFromNodes(&nodes)
if err != nil {
klog.Errorf("get gpu type failed %v", gpuType)
klog.Errorf("get gpu type failed %v", err)
api.HandleError(resp, req, err)
return
}
gpuType := "none"
selectedGpuType := req.QueryParameter("gputype")
if len(gpuTypes) > 0 {
if selectedGpuType != "" {
if _, ok := gpuTypes[selectedGpuType]; ok {
gpuType = selectedGpuType
} else {
err := fmt.Errorf("selected gpu type %s not found in cluster", selectedGpuType)
klog.Error(err)
api.HandleError(resp, req, err)
return
}
} else {
if len(gpuTypes) == 1 {
gpuType = maps.Keys(gpuTypes)[0]
} else {
err := fmt.Errorf("multiple gpu types found in cluster, please specify one")
klog.Error(err)
api.HandleError(resp, req, err)
return
}
}
}
values["GPU"] = map[string]interface{}{
"Type": gpuType,
"Cuda": os.Getenv("OLARES_SYSTEM_CUDA_VERSION"),
@@ -984,6 +1028,9 @@ func (h *Handler) oamValues(req *restful.Request, resp *restful.Response) {
values["mongodb"] = map[string]interface{}{
"databases": map[string]interface{}{},
}
values["clickhouse"] = map[string]interface{}{
"databases": map[string]interface{}{},
}
values["svcs"] = map[string]interface{}{}
values["nats"] = map[string]interface{}{
"subjects": map[string]interface{}{},

View File

@@ -1,174 +1,33 @@
package apiserver
import (
"fmt"
"sync"
"time"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/client/clientset"
"github.com/beclab/Olares/framework/app-service/pkg/constants"
"github.com/beclab/Olares/framework/app-service/pkg/utils"
"golang.org/x/exp/maps"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/emicklei/go-restful/v3"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
)
var running bool = false
var switchLock sync.Mutex
func (h *Handler) disableGpuManagedMemory(req *restful.Request, resp *restful.Response) {
if err := h.nvshareSwitch(req, false); err != nil {
api.HandleError(resp, req, &errors.StatusError{
ErrStatus: metav1.Status{Code: 400, Message: "operation failed, " + err.Error()},
})
func (h *Handler) getGpuTypes(req *restful.Request, resp *restful.Response) {
var nodes corev1.NodeList
err := h.ctrlClient.List(req.Request.Context(), &nodes, &client.ListOptions{})
if err != nil {
klog.Errorf("list node failed %v", err)
api.HandleError(resp, req, err)
return
}
resp.WriteAsJson(map[string]int{"code": 0})
}
func (h *Handler) enableGpuManagedMemory(req *restful.Request, resp *restful.Response) {
if err := h.nvshareSwitch(req, true); err != nil {
api.HandleError(resp, req, &errors.StatusError{
ErrStatus: metav1.Status{Code: 400, Message: "operation failed, " + err.Error()},
})
gpuTypes, err := utils.GetAllGpuTypesFromNodes(&nodes)
if err != nil {
klog.Errorf("get gpu type failed %v", err)
api.HandleError(resp, req, err)
return
}
resp.WriteAsJson(map[string]int{"code": 0})
}
func (h *Handler) nvshareSwitch(req *restful.Request, enable bool) error {
client := req.Attribute(constants.KubeSphereClientAttribute).(*clientset.ClientSet)
switchLock.Lock()
defer switchLock.Unlock()
if running {
return fmt.Errorf("last operation is still running")
}
deployments, err := client.KubeClient.Kubernetes().AppsV1().Deployments("").List(req.Request.Context(), metav1.ListOptions{})
if err != nil {
klog.Error("list deployment error, ", err)
return err
}
envValue := "0"
if enable {
envValue = "1"
}
for _, d := range deployments.Items {
shouldUpdate := false
for i, c := range d.Spec.Template.Spec.Containers {
found := false
for k := range c.Resources.Limits {
if k == constants.NvshareGPU {
found = true
break
}
}
if found {
// a gpu request container
addEnv := true
for n, env := range d.Spec.Template.Spec.Containers[i].Env {
if env.Name == constants.EnvNvshareManagedMemory {
addEnv = false
d.Spec.Template.Spec.Containers[i].Env[n].Value = envValue
break
}
}
if addEnv {
d.Spec.Template.Spec.Containers[i].Env =
append(d.Spec.Template.Spec.Containers[i].Env,
corev1.EnvVar{Name: constants.EnvNvshareManagedMemory, Value: envValue})
}
shouldUpdate = true
} // end found
} // end of container loop
if shouldUpdate {
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
deployment, err := client.KubeClient.Kubernetes().AppsV1().Deployments(d.Namespace).
Get(req.Request.Context(), d.Name, metav1.GetOptions{})
if err != nil {
return err
}
deployment.Spec.Template.Spec.Containers = d.Spec.Template.Spec.Containers
_, err = client.KubeClient.Kubernetes().AppsV1().Deployments(d.Namespace).
Update(req.Request.Context(), deployment, metav1.UpdateOptions{})
return err
})
if err != nil {
klog.Error("update deployment error, ", err, ", ", d.Name, ", ", d.Namespace)
return err
}
} // should update
} // end of deployment loop
// update terminus
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
terminus, err := utils.GetTerminus(req.Request.Context(), h.ctrlClient)
if err != nil {
return err
}
terminus.Spec.Settings[constants.EnvNvshareManagedMemory] = envValue
return h.ctrlClient.Update(req.Request.Context(), terminus)
})
if err != nil {
klog.Error("update terminus error, ", err)
return err
}
running = true
// delay 30s, assume the all pods will be reload in 30s.
delay := time.NewTimer(30 * time.Second)
go func() {
<-delay.C
switchLock.Lock()
defer switchLock.Unlock()
running = false
}()
return nil
}
func (h *Handler) getManagedMemoryValue(req *restful.Request, resp *restful.Response) {
terminus, err := utils.GetTerminus(req.Request.Context(), h.ctrlClient)
if err != nil {
klog.Error("get terminus value error, ", err)
api.HandleError(resp, req, &errors.StatusError{
ErrStatus: metav1.Status{Code: 400, Message: "get value error, " + err.Error()},
})
return
}
managed := true
if v, ok := terminus.Spec.Settings[constants.EnvNvshareManagedMemory]; ok && v == "0" {
managed = false
}
resp.WriteAsJson(&map[string]interface{}{
"managed_memory": managed,
"gpu_types": maps.Keys(gpuTypes),
},
)
}

View File

@@ -21,9 +21,12 @@ import (
"github.com/beclab/Olares/framework/app-service/pkg/utils"
apputils "github.com/beclab/Olares/framework/app-service/pkg/utils/app"
"github.com/beclab/Olares/framework/app-service/pkg/utils/config"
"golang.org/x/exp/maps"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/emicklei/go-restful/v3"
"helm.sh/helm/v3/pkg/time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -37,7 +40,7 @@ type depRequest struct {
type installHelperIntf interface {
getAdminUsers() (admin []string, isAdmin bool, err error)
getInstalledApps() (installed bool, app []*v1alpha1.Application, err error)
getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion string) (err error)
getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion, selectedGpuType string) (err error)
setAppConfig(req *api.InstallRequest, appName string)
validate(bool, []*v1alpha1.Application) error
setAppEnv(overrides []sysv1alpha1.AppEnvVar) error
@@ -105,6 +108,36 @@ func (h *Handler) install(req *restful.Request, resp *restful.Response) {
}
}
// check selected gpu type can be supported
// if selectedGpuType != "" , then check if the gpu type exists in cluster
// if selectedGpuType == "" , and only one gpu type exists in cluster, then use it
var nodes corev1.NodeList
err = h.ctrlClient.List(req.Request.Context(), &nodes, &client.ListOptions{})
if err != nil {
klog.Errorf("list node failed %v", err)
api.HandleError(resp, req, err)
return
}
gpuTypes, err := utils.GetAllGpuTypesFromNodes(&nodes)
if err != nil {
klog.Errorf("get gpu type failed %v", err)
api.HandleError(resp, req, err)
return
}
if insReq.SelectedGpuType != "" {
if _, ok := gpuTypes[insReq.SelectedGpuType]; !ok {
klog.Errorf("selected gpu type %s not found in cluster", insReq.SelectedGpuType)
api.HandleBadRequest(resp, req, fmt.Errorf("selected gpu type %s not found in cluster", insReq.SelectedGpuType))
return
}
} else {
if len(gpuTypes) == 1 {
insReq.SelectedGpuType = maps.Keys(gpuTypes)[0]
klog.Infof("only one gpu type %s found in cluster, use it as selected gpu type", insReq.SelectedGpuType)
}
}
apiVersion, appCfg, err := apputils.GetApiVersionFromAppConfig(req.Request.Context(), &apputils.ConfigOptions{
App: app,
RawAppName: rawAppName,
@@ -112,6 +145,7 @@ func (h *Handler) install(req *restful.Request, resp *restful.Response) {
RepoURL: insReq.RepoURL,
MarketSource: marketSource,
Version: chartVersion,
SelectedGpu: insReq.SelectedGpuType,
})
klog.Infof("chartVersion: %s", chartVersion)
if err != nil {
@@ -188,7 +222,7 @@ func (h *Handler) install(req *restful.Request, resp *restful.Response) {
return
}
err = helper.getAppConfig(adminUsers, marketSource, isAdmin, appInstalled, installedApps, chartVersion)
err = helper.getAppConfig(adminUsers, marketSource, isAdmin, appInstalled, installedApps, chartVersion, insReq.SelectedGpuType)
if err != nil {
klog.Errorf("Failed to get app config err=%v", err)
return
@@ -423,7 +457,7 @@ func (h *installHandlerHelper) getInstalledApps() (installed bool, app []*v1alph
return
}
func (h *installHandlerHelper) getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion string) (err error) {
func (h *installHandlerHelper) getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion, selectedGpuType string) (err error) {
var (
admin string
installAsAdmin bool
@@ -472,6 +506,7 @@ func (h *installHandlerHelper) getAppConfig(adminUsers []string, marketSource st
Admin: admin,
IsAdmin: installAsAdmin,
MarketSource: marketSource,
SelectedGpu: selectedGpuType,
})
if err != nil {
klog.Errorf("Failed to get appconfig err=%v", err)
@@ -685,7 +720,7 @@ func (h *installHandlerHelperV2) _validateClusterScope(isAdmin bool, installedAp
return nil
}
func (h *installHandlerHelperV2) getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion string) (err error) {
func (h *installHandlerHelperV2) getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion, selectedGpuType string) (err error) {
klog.Info("get app config for install handler v2")
var (
@@ -713,6 +748,7 @@ func (h *installHandlerHelperV2) getAppConfig(adminUsers []string, marketSource
Admin: admin,
MarketSource: marketSource,
IsAdmin: isAdmin,
SelectedGpu: selectedGpuType,
})
if err != nil {
klog.Errorf("Failed to get appconfig err=%v", err)

View File

@@ -187,6 +187,11 @@ func (h *Handler) listBackend(req *restful.Request, resp *restful.Response) {
api.HandleError(resp, req, err)
return
}
for i := range appconfig.Entrances {
if appconfig.Entrances[i].AuthLevel == "" {
appconfig.Entrances[i].AuthLevel = "private"
}
}
appconfig.SharedEntrances, err = appconfig.GenSharedEntranceURL(req.Request.Context())
if err != nil {
@@ -214,6 +219,7 @@ func (h *Handler) listBackend(req *restful.Request, resp *restful.Response) {
Namespace: am.Spec.AppNamespace,
Owner: am.Spec.AppOwner,
Entrances: appconfig.Entrances,
Ports: appconfig.Ports,
SharedEntrances: appconfig.SharedEntrances,
Icon: appconfig.Icon,
Settings: map[string]string{
@@ -274,6 +280,8 @@ func (h *Handler) listBackend(req *restful.Request, resp *restful.Response) {
}
if v, ok := appsMap[a.Name]; ok {
v.Spec.Settings = a.Spec.Settings
v.Spec.Entrances = a.Spec.Entrances
v.Spec.Ports = a.Spec.Ports
}
}
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/appcfg"
"github.com/beclab/Olares/framework/app-service/pkg/appinstaller"
"github.com/beclab/Olares/framework/app-service/pkg/appstate"
"github.com/beclab/Olares/framework/app-service/pkg/client/clientset"
"github.com/beclab/Olares/framework/app-service/pkg/constants"
@@ -520,6 +521,7 @@ type applicationPermission struct {
Permissions []permission `json:"permissions"`
}
// Deprecated
func (h *Handler) applicationPermissionList(req *restful.Request, resp *restful.Response) {
owner := req.Attribute(constants.UserContextAttribute).(string)
//token := req.HeaderParameter(constants.AuthorizationTokenKey)
@@ -572,46 +574,39 @@ func (h *Handler) applicationPermissionList(req *restful.Request, resp *restful.
func (h *Handler) getApplicationPermission(req *restful.Request, resp *restful.Response) {
app := req.PathParameter(ParamAppName)
owner := req.Attribute(constants.UserContextAttribute).(string)
client, err := dynamic.NewForConfig(h.kubeConfig)
name, err := apputils.FmtAppMgrName(app, owner, "")
if err != nil {
api.HandleError(resp, req, err)
return
}
var am v1alpha1.ApplicationManager
err = h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: name}, &am)
if err != nil {
api.HandleError(resp, req, err)
return
}
var appConfig appcfg.ApplicationConfig
err = am.GetAppConfig(&appConfig)
if err != nil {
klog.Errorf("Failed to get app config err=%v", err)
api.HandleError(resp, req, err)
return
}
var ret *applicationPermission
apClient := provider.NewApplicationPermissionRequest(client)
namespace := fmt.Sprintf("user-system-%s", owner)
aps, err := apClient.List(req.Request.Context(), namespace, metav1.ListOptions{})
if err != nil {
api.HandleError(resp, req, err)
return
}
for _, ap := range aps.Items {
if ap.Object == nil {
continue
}
appName, _, _ := unstructured.NestedString(ap.Object, "spec", "app")
if appName == app {
perms, _, _ := unstructured.NestedSlice(ap.Object, "spec", "permissions")
permissions := appinstaller.ParseAppPermission(appConfig.Permission)
for _, ap := range permissions {
if perms, ok := ap.([]appcfg.ProviderPermission); ok {
permissions := make([]permission, 0)
for _, p := range perms {
if perm, ok := p.(map[string]interface{}); ok {
ops := make([]string, 0)
for _, op := range perm["ops"].([]interface{}) {
if opStr, ok := op.(string); ok {
ops = append(ops, opStr)
}
}
permissions = append(permissions, permission{
DataType: perm["dataType"].(string),
Group: perm["group"].(string),
Version: perm["version"].(string),
Ops: ops,
})
}
permissions = append(permissions, permission{
DataType: p.ProviderName,
Group: p.AppName,
})
}
ret = &applicationPermission{
App: appName,
App: am.Spec.AppName,
Owner: owner,
Permissions: permissions,
}
@@ -642,6 +637,7 @@ type opApi struct {
URI string `json:"uri"`
}
// Deprecated
func (h *Handler) getProviderRegistry(req *restful.Request, resp *restful.Response) {
dataTypeReq := req.PathParameter(ParamDataType)
groupReq := req.PathParameter(ParamGroup)
@@ -708,56 +704,44 @@ func (h *Handler) getProviderRegistry(req *restful.Request, resp *restful.Respon
func (h *Handler) getApplicationProviderList(req *restful.Request, resp *restful.Response) {
owner := req.Attribute(constants.UserContextAttribute).(string)
app := req.PathParameter(ParamAppName)
client, err := dynamic.NewForConfig(h.kubeConfig)
name, err := apputils.FmtAppMgrName(app, owner, "")
if err != nil {
api.HandleError(resp, req, err)
return
}
var am v1alpha1.ApplicationManager
err = h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: name}, &am)
if err != nil {
api.HandleError(resp, req, err)
return
}
var appConfig appcfg.ApplicationConfig
err = am.GetAppConfig(&appConfig)
if err != nil {
klog.Errorf("Failed to get app config err=%v", err)
api.HandleError(resp, req, err)
return
}
ret := make([]providerRegistry, 0)
rClient := provider.NewRegistryRequest(client)
namespace := fmt.Sprintf("user-system-%s", owner)
prs, err := rClient.List(req.Request.Context(), namespace, metav1.ListOptions{})
if err != nil {
api.HandleError(resp, req, err)
return
}
for _, ap := range prs.Items {
if ap.Object == nil {
continue
}
deployment, _, _ := unstructured.NestedString(ap.Object, "spec", "deployment")
kind, _, _ := unstructured.NestedString(ap.Object, "spec", "kind")
if app == deployment && kind == "provider" {
dataType, _, _ := unstructured.NestedString(ap.Object, "spec", "dataType")
group, _, _ := unstructured.NestedString(ap.Object, "spec", "group")
description, _, _ := unstructured.NestedString(ap.Object, "spec", "description")
endpoint, _, _ := unstructured.NestedString(ap.Object, "spec", "endpoint")
ns, _, _ := unstructured.NestedString(ap.Object, "spec", "namespace")
version, _, _ := unstructured.NestedString(ap.Object, "spec", "version")
opApis := make([]opApi, 0)
opApiList, _, _ := unstructured.NestedSlice(ap.Object, "spec", "opApis")
for _, op := range opApiList {
if aop, ok := op.(map[string]interface{}); ok {
opApis = append(opApis, opApi{
Name: aop["name"].(string),
URI: aop["uri"].(string),
})
}
}
ret = append(ret, providerRegistry{
DataType: dataType,
Deployment: deployment,
Description: description,
Endpoint: endpoint,
Kind: kind,
Group: group,
Namespace: ns,
OpApis: opApis,
Version: version,
ns := am.Spec.AppNamespace
for _, ap := range appConfig.Provider {
dataType := ap.Name
endpoint := ap.Entrance
opApis := make([]opApi, 0)
for _, op := range ap.Paths {
opApis = append(opApis, opApi{
URI: op,
})
}
ret = append(ret, providerRegistry{
DataType: dataType,
Endpoint: endpoint,
Namespace: ns,
OpApis: opApis,
})
}
resp.WriteAsJson(ret)
}

View File

@@ -37,7 +37,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
@@ -308,36 +307,21 @@ func (h *Handler) gpuLimitMutate(ctx context.Context, req *admissionv1.Admission
return resp
}
GPUType, err := h.findNvidiaGpuFromNodes(ctx)
if err != nil && !errors.Is(err, api.ErrGPUNodeNotFound) {
return h.sidecarWebhook.AdmissionError(req.UID, err)
}
GPUType := appcfg.GetSelectedGpuTypeValue()
// no gpu found, no need to inject env, just return.
if GPUType == "" {
if GPUType == "none" || GPUType == "" {
return resp
}
terminus, err := utils.GetTerminus(ctx, h.ctrlClient)
if err != nil {
return h.sidecarWebhook.AdmissionError(req.UID, err)
}
nvshareManagedMemory := ""
if terminus.Spec.Settings != nil {
nvshareManagedMemory = terminus.Spec.Settings[constants.EnvNvshareManagedMemory]
envs := []webhook.EnvKeyValue{
{
Key: constants.EnvGPUType,
Value: GPUType,
},
}
envs := []webhook.EnvKeyValue{}
if nvshareManagedMemory != "" {
envs = append(envs, webhook.EnvKeyValue{
Key: constants.EnvNvshareManagedMemory,
Value: nvshareManagedMemory,
})
}
envs = append(envs, webhook.EnvKeyValue{Key: "NVSHARE_DEBUG", Value: "1"})
patchBytes, err := webhook.CreatePatchForDeployment(tpl, req.Namespace, gpuRequired, GPUType, envs)
patchBytes, err := webhook.CreatePatchForDeployment(tpl, h.getGPUResourceTypeKey(GPUType), envs)
if err != nil {
klog.Errorf("create patch error %v", err)
return h.sidecarWebhook.AdmissionError(req.UID, err)
@@ -347,33 +331,17 @@ func (h *Handler) gpuLimitMutate(ctx context.Context, req *admissionv1.Admission
return resp
}
func (h *Handler) findNvidiaGpuFromNodes(ctx context.Context) (string, error) {
var nodes corev1.NodeList
err := h.ctrlClient.List(ctx, &nodes, &client.ListOptions{})
if err != nil {
return "", err
func (h *Handler) getGPUResourceTypeKey(gpuType string) string {
switch gpuType {
case utils.NvidiaCardType:
return constants.NvidiaGPU
case utils.GB10ChipType:
return constants.NvidiaGB10GPU
case utils.AmdApuCardType:
return constants.AMDAPU
default:
return ""
}
// return nvshare gpu or virtaitech gpu in priority
gtype := ""
for _, n := range nodes.Items {
if _, ok := n.Status.Capacity[constants.NvidiaGPU]; ok {
if _, ok = n.Status.Capacity[constants.NvshareGPU]; ok {
return constants.NvshareGPU, nil
}
gtype = constants.NvidiaGPU
}
if _, ok := n.Status.Capacity[constants.VirtAiTechVGPU]; ok {
return constants.VirtAiTechVGPU, nil
}
}
if gtype != "" {
return gtype, nil
}
return "", api.ErrGPUNodeNotFound
}
func (h *Handler) providerRegistryValidate(req *restful.Request, resp *restful.Response) {

View File

@@ -340,7 +340,9 @@ func GetClusterResource(kubeConfig *rest.Config, token string) (*prometheus.Clus
arches.Insert(n.Labels["kubernetes.io/arch"])
if quantity, ok := n.Status.Capacity[constants.NvidiaGPU]; ok {
total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.VirtAiTechVGPU]; ok {
} else if quantity, ok = n.Status.Capacity[constants.NvidiaGB10GPU]; ok {
total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.AMDAPU]; ok {
total += quantity.AsApproximateFloat64()
}
}

View File

@@ -254,21 +254,9 @@ func addServiceToContainer(c *restful.Container, handler *Handler) error {
Param(ws.PathParameter(ParamEntranceName, "the name of a application entrance")).
Returns(http.StatusOK, "Success to set the application entrance policy", nil))
ws.Route(ws.POST("/gpu/disable/managed-memory").
To(handler.disableGpuManagedMemory).
Doc("disable nvshare's managed memory ").
Metadata(restfulspec.KeyOpenAPITags, MODULE_TAGS).
Returns(http.StatusOK, "Success to disable", nil))
ws.Route(ws.POST("/gpu/enable/managed-memory").
To(handler.enableGpuManagedMemory).
Doc("enable nvshare's managed memory ").
Metadata(restfulspec.KeyOpenAPITags, MODULE_TAGS).
Returns(http.StatusOK, "Success to enable", nil))
ws.Route(ws.GET("/gpu/managed-memory").
To(handler.getManagedMemoryValue).
Doc("get nvshare's managed memory enabled or not").
ws.Route(ws.GET("/gpu/types").
To(handler.getGpuTypes).
Doc("get all gpu types in the cluster").
Metadata(restfulspec.KeyOpenAPITags, MODULE_TAGS).
Returns(http.StatusOK, "Success to get ", &ResultResponse{}))

View File

@@ -56,14 +56,19 @@ type AppSpec struct {
Developer string `yaml:"developer" json:"developer"`
RequiredMemory string `yaml:"requiredMemory" json:"requiredMemory"`
RequiredDisk string `yaml:"requiredDisk" json:"requiredDisk"`
SupportClient SupportClient `yaml:"supportClient" json:"supportClient"`
RequiredGPU string `yaml:"requiredGpu" json:"requiredGpu"`
RequiredCPU string `yaml:"requiredCpu" json:"requiredCpu"`
LimitedMemory string `yaml:"limitedMemory" json:"limitedMemory"`
LimitedDisk string `yaml:"limitedDisk" json:"limitedDisk"`
LimitedGPU string `yaml:"limitedGPU" json:"limitedGPU"`
LimitedCPU string `yaml:"limitedCPU" json:"limitedCPU"`
SupportClient SupportClient `yaml:"supportClient" json:"supportClient"`
RunAsUser bool `yaml:"runAsUser" json:"runAsUser"`
RunAsInternal bool `yaml:"runAsInternal" json:"runAsInternal"`
PodGPUConsumePolicy string `yaml:"podGpuConsumePolicy" json:"podGpuConsumePolicy"`
SubCharts []Chart `yaml:"subCharts" json:"subCharts"`
Hardware Hardware `yaml:"hardware" json:"hardware"`
SupportedGpu []any `yaml:"supportedGpu,omitempty" json:"supportedGpu,omitempty"`
}
type Hardware struct {
@@ -188,6 +193,17 @@ type Provider struct {
Verbs []string `yaml:"verbs" json:"verbs"`
}
type SpecialResource struct {
RequiredMemory *string `yaml:"requiredMemory,omitempty" json:"requiredMemory,omitempty"`
RequiredDisk *string `yaml:"requiredDisk,omitempty" json:"requiredDisk,omitempty"`
RequiredGPU *string `yaml:"requiredGpu,omitempty" json:"requiredGpu,omitempty"`
RequiredCPU *string `yaml:"requiredCpu,omitempty" json:"requiredCpu,omitempty"`
LimitedMemory *string `yaml:"limitedMemory,omitempty" json:"limitedMemory,omitempty"`
LimitedDisk *string `yaml:"limitedDisk,omitempty" json:"limitedDisk,omitempty"`
LimitedGPU *string `yaml:"limitedGPU,omitempty" json:"limitedGPU,omitempty"`
LimitedCPU *string `yaml:"limitedCPU,omitempty" json:"limitedCPU,omitempty"`
}
func (c *Chart) Namespace(owner string) string {
if c.Shared {
return fmt.Sprintf("%s-%s", c.Name, "shared")

View File

@@ -100,6 +100,7 @@ type ApplicationConfig struct {
PodsSelectors []metav1.LabelSelector
HardwareRequirement Hardware
SharedEntrances []v1alpha1.Entrance
SelectedGpuType string
}
func (c *ApplicationConfig) IsMiddleware() bool {
@@ -159,6 +160,13 @@ func (c *ApplicationConfig) GenSharedEntranceURL(ctx context.Context) ([]v1alpha
return app.GenSharedEntranceURL(ctx)
}
func (c *ApplicationConfig) GetSelectedGpuTypeValue() string {
if c.SelectedGpuType == "" {
return "none"
}
return c.SelectedGpuType
}
func (p *ProviderPermission) GetNamespace(ownerName string) string {
if p.Namespace != "" {
if p.Namespace == "user-space" || p.Namespace == "user-system" {

View File

@@ -51,6 +51,7 @@ var (
tapr.TypeElasticsearch.String(),
tapr.TypeMariaDB.String(),
tapr.TypeMySQL.String(),
tapr.TypeClickHouse.String(),
}
)
@@ -563,7 +564,7 @@ func (h *HelmOps) WaitForStartUp() (bool, error) {
}
return true, nil
}
if errors.Is(err, errcode.ErrPodPending) {
if errors.Is(err, errcode.ErrPodPending) || errors.Is(err, errcode.ErrServerSidePodPending) {
return false, err
}
@@ -575,11 +576,41 @@ func (h *HelmOps) WaitForStartUp() (bool, error) {
}
func (h *HelmOps) isStartUp() (bool, error) {
pods, err := h.findAppSelectedPods()
if h.app.IsV2() && h.app.IsMultiCharts() {
serverPods, err := h.findServerPods()
if err != nil {
return false, err
}
podNames := make([]string, 0)
for _, p := range serverPods {
podNames = append(podNames, p.Name)
}
klog.Infof("podSErvers: %v", podNames)
serverStarted, err := checkIfStartup(serverPods, true)
if err != nil {
klog.Errorf("v2 app %s server pods not ready: %v", h.app.AppName, err)
return false, err
}
if !serverStarted {
klog.Infof("v2 app %s server pods not started yet, waiting...", h.app.AppName)
return false, nil
}
klog.Infof("v2 app %s server pods started, checking client pods", h.app.AppName)
}
clientPods, err := h.findV1OrClientPods()
if err != nil {
return false, err
}
return checkIfStartup(pods)
clientStarted, err := checkIfStartup(clientPods, false)
if err != nil {
return false, err
}
return clientStarted, nil
}
func (h *HelmOps) findAppSelectedPods() (*corev1.PodList, error) {
@@ -609,15 +640,49 @@ func (h *HelmOps) findAppSelectedPods() (*corev1.PodList, error) {
return pods, nil
}
func checkIfStartup(pods *corev1.PodList) (bool, error) {
if len(pods.Items) == 0 {
func (h *HelmOps) findV1OrClientPods() ([]corev1.Pod, error) {
podList, err := h.client.KubeClient.Kubernetes().CoreV1().Pods(h.app.Namespace).List(h.ctx, metav1.ListOptions{})
if err != nil {
klog.Errorf("app %s get pods err %v", h.app.AppName, err)
return nil, err
}
return podList.Items, nil
}
func (h *HelmOps) findServerPods() ([]corev1.Pod, error) {
pods := make([]corev1.Pod, 0)
for _, c := range h.app.SubCharts {
if !c.Shared {
continue
}
ns := c.Namespace(h.app.OwnerName)
podList, err := h.client.KubeClient.Kubernetes().CoreV1().Pods(ns).List(h.ctx, metav1.ListOptions{})
if err != nil {
klog.Errorf("app %s get pods err %v", h.app.AppName, err)
return nil, err
}
pods = append(pods, podList.Items...)
}
return pods, nil
}
func checkIfStartup(pods []corev1.Pod, isServerSide bool) (bool, error) {
if len(pods) == 0 {
return false, errors.New("no pod found")
}
for _, pod := range pods.Items {
startedPods := 0
totalPods := len(pods)
for _, pod := range pods {
creationTime := pod.GetCreationTimestamp()
pendingDuration := time.Since(creationTime.Time)
if pod.Status.Phase == corev1.PodPending && pendingDuration > time.Minute*10 {
if isServerSide {
return false, errcode.ErrServerSidePodPending
}
return false, errcode.ErrPodPending
}
totalContainers := len(pod.Spec.Containers)
@@ -629,9 +694,12 @@ func checkIfStartup(pods *corev1.PodList) (bool, error) {
}
}
if startedContainers == totalContainers {
return true, nil
startedPods++
}
}
if totalPods == startedPods {
return true, nil
}
return false, nil
}
@@ -684,7 +752,7 @@ func getApplicationPolicy(policies []appcfg.AppPolicy, entrances []appv1alpha1.E
return string(policyStr), nil
}
func parseAppPermission(data []appcfg.AppPermission) []appcfg.AppPermission {
func ParseAppPermission(data []appcfg.AppPermission) []appcfg.AppPermission {
permissions := make([]appcfg.AppPermission, 0)
for _, p := range data {
switch perm := p.(type) {
@@ -795,7 +863,7 @@ func (h *HelmOps) Install() error {
return nil
}
ok, err := h.WaitForStartUp()
if err != nil && errors.Is(err, errcode.ErrPodPending) {
if err != nil && (errors.Is(err, errcode.ErrPodPending) || errors.Is(err, errcode.ErrServerSidePodPending)) {
return err
}
if !ok {

View File

@@ -78,7 +78,7 @@ func (h *HelmOps) Uninstall_(client kubernetes.Interface, actionConfig *action.C
return err
}
h.app.Permission = parseAppPermission(h.app.Permission)
h.app.Permission = ParseAppPermission(h.app.Permission)
var perm []appcfg.ProviderPermission
for _, p := range h.app.Permission {
if t, ok := p.([]appcfg.ProviderPermission); ok {

View File

@@ -50,7 +50,7 @@ func (h *HelmOps) SetValues() (values map[string]interface{}, err error) {
values["domain"] = entries
userspace := make(map[string]interface{})
h.app.Permission = parseAppPermission(h.app.Permission)
h.app.Permission = ParseAppPermission(h.app.Permission)
for _, p := range h.app.Permission {
switch perm := p.(type) {
case appcfg.AppDataPermission, appcfg.AppCachePermission, appcfg.UserDataPermission:
@@ -170,17 +170,12 @@ func (h *HelmOps) SetValues() (values map[string]interface{}, err error) {
values["cluster"] = map[string]interface{}{
"arch": arch,
}
gpuType, err := utils.FindGpuTypeFromNodes(nodes)
if err != nil {
klog.Errorf("Failed to get gpuType err=%v", err)
return values, err
}
values["GPU"] = map[string]interface{}{
"Type": gpuType,
"Type": h.app.GetSelectedGpuTypeValue(),
"Cuda": os.Getenv("OLARES_SYSTEM_CUDA_VERSION"),
}
values["gpu"] = gpuType
values["gpu"] = h.app.GetSelectedGpuTypeValue()
if h.app.OIDC.Enabled {
err = h.createOIDCClient(values, zone, h.app.Namespace)

View File

@@ -51,7 +51,7 @@ func (h *HelmOpsV2) ApplyEnv() error {
}
ok, err := h.WaitForStartUp()
if err != nil && errors.Is(err, errcode.ErrPodPending) {
if err != nil && (errors.Is(err, errcode.ErrPodPending) || errors.Is(err, errcode.ErrServerSidePodPending)) {
return err
}

View File

@@ -119,7 +119,7 @@ func (h *HelmOpsV2) Install() error {
return nil
}
ok, err := h.WaitForStartUp()
if err != nil && errors.Is(err, errcode.ErrPodPending) {
if err != nil && (errors.Is(err, errcode.ErrPodPending) || errors.Is(err, errcode.ErrServerSidePodPending)) {
klog.Errorf("App %s is pending, err=%v", h.App().AppName, err)
return err
}

View File

@@ -91,7 +91,7 @@ func (h *HelmOpsV2) Upgrade() error {
}
ok, err := h.WaitForStartUp()
if err != nil && errors.Is(err, errcode.ErrPodPending) {
if err != nil && (errors.Is(err, errcode.ErrPodPending) || errors.Is(err, errcode.ErrServerSidePodPending)) {
return err
}

View File

@@ -13,9 +13,9 @@ import (
"github.com/beclab/Olares/framework/app-service/pkg/images"
"github.com/beclab/Olares/framework/app-service/pkg/kubesphere"
"github.com/beclab/Olares/framework/app-service/pkg/utils"
apputils "github.com/beclab/Olares/framework/app-service/pkg/utils/app"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -47,6 +47,29 @@ func (r *downloadingInProgressApp) WaitAsync(ctx context.Context) {
return
}
// Check Kubernetes request resources before transitioning to Installing
var appConfig *appcfg.ApplicationConfig
if err := json.Unmarshal([]byte(r.manager.Spec.Config), &appConfig); err != nil {
klog.Errorf("failed to unmarshal app config for %s: %v", r.manager.Spec.AppName, err)
updateErr := r.updateStatus(context.TODO(), r.manager, appsv1.InstallFailed, nil, fmt.Sprintf("invalid app config: %v", err), "")
if updateErr != nil {
klog.Errorf("update app manager %s to %s state failed %v", r.manager.Name, appsv1.InstallFailed.String(), updateErr)
}
return
}
_, conditionType, checkErr := apputils.CheckAppK8sRequestResource(appConfig, r.manager.Spec.OpType)
if checkErr != nil {
klog.Errorf("k8s request resource check failed for app %s: %v", r.manager.Spec.AppName, checkErr)
opRecord := makeRecord(r.manager, appsv1.InstallFailed, checkErr.Error())
updateErr := r.updateStatus(context.TODO(), r.manager, appsv1.InstallFailed, opRecord, checkErr.Error(), string(conditionType))
if updateErr != nil {
klog.Errorf("update app manager %s to %s state failed %v", r.manager.Name, appsv1.InstallFailed.String(), updateErr)
}
return
}
updateErr := r.updateStatus(context.TODO(), r.manager, appsv1.Installing, nil, appsv1.Installing.String(), "")
if updateErr != nil {
klog.Errorf("update app manager %s to %s state failed %v", r.manager.Name, appsv1.Installing.String(), updateErr)
@@ -152,19 +175,8 @@ func (p *DownloadingApp) exec(ctx context.Context) error {
},
}
var nodes corev1.NodeList
err = p.client.List(ctx, &nodes, &client.ListOptions{})
if err != nil {
klog.Errorf("list node failed %v", err)
return err
}
gpuType, err := utils.FindGpuTypeFromNodes(&nodes)
if err != nil {
klog.Errorf("get gpu type failed %v", gpuType)
return err
}
values["GPU"] = map[string]interface{}{
"Type": gpuType,
"Type": appConfig.GetSelectedGpuTypeValue(),
"Cuda": os.Getenv("OLARES_SYSTEM_CUDA_VERSION"),
}

View File

@@ -16,6 +16,7 @@ import (
apputils "github.com/beclab/Olares/framework/app-service/pkg/utils/app"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -104,8 +105,37 @@ func (p *InstallingApp) Exec(ctx context.Context) (StatefulInProgressApp, error)
err = ops.Install()
if err != nil {
klog.Errorf("install app %s failed %v", p.manager.Spec.AppName, err)
if errors.Is(err, errcode.ErrPodPending) {
if errors.Is(err, errcode.ErrServerSidePodPending) {
p.finally = func() {
klog.Infof("app %s server side pods is pending, set stop-all annotation and update app state to stopping", p.manager.Spec.AppName)
var am appsv1.ApplicationManager
if err := p.client.Get(context.TODO(), types.NamespacedName{Name: p.manager.Name}, &am); err != nil {
klog.Errorf("failed to get application manager: %v", err)
return
}
if am.Annotations == nil {
am.Annotations = make(map[string]string)
}
am.Annotations[api.AppStopAllKey] = "true"
if err := p.client.Update(ctx, &am); err != nil {
klog.Errorf("failed to set stop-all annotation: %v", err)
return
}
updateErr := p.updateStatus(ctx, &am, appsv1.Stopping, nil, err.Error(), constants.AppUnschedulable)
if updateErr != nil {
klog.Errorf("update status failed %v", updateErr)
return
}
}
return
}
if errors.Is(err, errcode.ErrPodPending) {
p.finally = func() {
klog.Infof("app %s pods is still pending, update app state to stopping", p.manager.Spec.AppName)
updateErr := p.updateStatus(context.TODO(), p.manager, appsv1.Stopping, nil, err.Error(), constants.AppUnschedulable)

View File

@@ -2,13 +2,11 @@ package appstate
import (
"context"
"encoding/json"
"fmt"
"time"
appsv1 "github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/appcfg"
"github.com/beclab/Olares/framework/app-service/pkg/constants"
"github.com/beclab/Olares/framework/app-service/pkg/kubeblocks"
"github.com/beclab/Olares/framework/app-service/pkg/users/userspace"
@@ -60,41 +58,24 @@ func (p *ResumingApp) Exec(ctx context.Context) (StatefulInProgressApp, error) {
}
func (p *ResumingApp) exec(ctx context.Context) error {
err := suspendOrResumeApp(ctx, p.client, p.manager, int32(1))
if err != nil {
klog.Errorf("resume %s %s failed %v", p.manager.Spec.Type, p.manager.Spec.AppName, err)
return fmt.Errorf("resume app %s failed %w", p.manager.Spec.AppName, err)
}
// If resume-all is requested, also resume v2 server-side shared charts by scaling them up
if p.manager.Annotations[api.AppResumeAllKey] == "true" {
var appCfg *appcfg.ApplicationConfig
if err := json.Unmarshal([]byte(p.manager.Spec.Config), &appCfg); err != nil {
klog.Errorf("unmarshal to appConfig failed %v", err)
return err
// Check if resume-all is requested for V2 apps to also resume server-side shared charts
resumeServer := p.manager.Annotations[api.AppResumeAllKey] == "true"
if resumeServer {
err := resumeV2AppAll(ctx, p.client, p.manager)
if err != nil {
klog.Errorf("resume v2 app %s %s failed %v", p.manager.Spec.Type, p.manager.Spec.AppName, err)
return fmt.Errorf("resume v2 app %s failed %w", p.manager.Spec.AppName, err)
}
if appCfg != nil && appCfg.IsV2() && appCfg.HasClusterSharedCharts() {
for _, chart := range appCfg.SubCharts {
if !chart.Shared {
continue
}
ns := chart.Namespace(appCfg.OwnerName)
// create a shallow copy with target namespace/name for scaling logic
amCopy := p.manager.DeepCopy()
amCopy.Spec.AppNamespace = ns
amCopy.Spec.AppName = chart.Name
klog.Infof("resume-amCopy.Spec.AppNamespace: %s", ns)
klog.Infof("resume-amCopy.Spec.AppName: %s", chart.Name)
if err := suspendOrResumeApp(ctx, p.client, amCopy, int32(1)); err != nil {
klog.Errorf("failed to resume shared chart %s in namespace %s: %v", chart.Name, ns, err)
return err
}
}
} else {
err := resumeV1AppOrV2AppClient(ctx, p.client, p.manager)
if err != nil {
klog.Errorf("resume v2 app %s %s failed %v", p.manager.Spec.Type, p.manager.Spec.AppName, err)
return fmt.Errorf("resume v2 app %s failed %w", p.manager.Spec.AppName, err)
}
}
if p.manager.Spec.Type == "middleware" && userspace.IsKbMiddlewares(p.manager.Spec.AppName) {
err = p.execMiddleware(ctx)
err := p.execMiddleware(ctx)
if err != nil {
klog.Errorf("failed to resume middleware %s,err=%v", p.manager.Spec.AppName, err)
return err

View File

@@ -2,10 +2,13 @@ package appstate
import (
"context"
"fmt"
"time"
appsv1 "github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/kubeblocks"
"github.com/beclab/Olares/framework/app-service/pkg/users/userspace"
kbopv1alpha1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1"
"k8s.io/klog/v2"
@@ -44,20 +47,30 @@ func (p *SuspendFailedApp) Exec(ctx context.Context) (StatefulInProgressApp, err
}
func (p *SuspendFailedApp) StateReconcile(ctx context.Context) error {
err := suspendOrResumeApp(ctx, p.client, p.manager, int32(0))
if err != nil {
klog.Errorf("stop-failed-app %s state reconcile failed %v", p.manager.Spec.AppName, err)
return err
stopServer := p.manager.Annotations[api.AppStopAllKey] == "true"
if stopServer {
err := suspendV2AppAll(ctx, p.client, p.manager)
if err != nil {
klog.Errorf("suspend v2 app %s %s failed %v", p.manager.Spec.Type, p.manager.Spec.AppName, err)
return fmt.Errorf("suspend v2 app %s failed %w", p.manager.Spec.AppName, err)
}
} else {
err := suspendV1AppOrV2Client(ctx, p.client, p.manager)
if err != nil {
klog.Errorf("suspend app %s %s failed %v", p.manager.Spec.Type, p.manager.Spec.AppName, err)
return fmt.Errorf("suspend app %s failed %w", p.manager.Spec.AppName, err)
}
}
if p.manager.Spec.Type == "middleware" {
if p.manager.Spec.Type == "middleware" && userspace.IsKbMiddlewares(p.manager.Spec.AppName) {
op := kubeblocks.NewOperation(ctx, kbopv1alpha1.StopType, p.manager, p.client)
err = op.Stop()
err := op.Stop()
if err != nil {
klog.Errorf("stop-failed-middleware %s state reconcile failed %v", p.manager.Spec.AppName, err)
return err
}
}
return err
return nil
}
func (p *SuspendFailedApp) Cancel(ctx context.Context) error {

View File

@@ -2,17 +2,17 @@ package appstate
import (
"context"
"encoding/json"
"fmt"
"strconv"
"time"
appsv1 "github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/appcfg"
"github.com/beclab/Olares/framework/app-service/pkg/constants"
"github.com/beclab/Olares/framework/app-service/pkg/kubeblocks"
"github.com/beclab/Olares/framework/app-service/pkg/users/userspace"
"github.com/beclab/Olares/framework/app-service/pkg/utils"
apputils "github.com/beclab/Olares/framework/app-service/pkg/utils/app"
kbopv1alpha1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -78,35 +78,68 @@ func (p *SuspendingApp) Exec(ctx context.Context) (StatefulInProgressApp, error)
}
func (p *SuspendingApp) exec(ctx context.Context) error {
err := suspendOrResumeApp(ctx, p.client, p.manager, int32(0))
if err != nil {
klog.Errorf("suspend %s %s failed %v", p.manager.Spec.Type, p.manager.Spec.AppName, err)
return fmt.Errorf("suspend app %s failed %w", p.manager.Spec.AppName, err)
}
// If stop-all is requested, also stop v2 server-side shared charts by scaling them down
if p.manager.Annotations[api.AppStopAllKey] == "true" {
var appCfg *appcfg.ApplicationConfig
if err := json.Unmarshal([]byte(p.manager.Spec.Config), &appCfg); err != nil {
klog.Errorf("unmarshal to appConfig failed %v", err)
return err
// Check if stop-all is requested for V2 apps to also stop server-side shared charts
stopServer := p.manager.Annotations[api.AppStopAllKey] == "true"
if stopServer {
err := suspendV2AppAll(ctx, p.client, p.manager)
if err != nil {
klog.Errorf("suspend v2 app %s %s failed %v", p.manager.Spec.Type, p.manager.Spec.AppName, err)
return fmt.Errorf("suspend v2 app %s failed %w", p.manager.Spec.AppName, err)
}
if appCfg != nil && appCfg.IsV2() && appCfg.HasClusterSharedCharts() {
for _, chart := range appCfg.SubCharts {
if !chart.Shared {
} else {
err := suspendV1AppOrV2Client(ctx, p.client, p.manager)
if err != nil {
klog.Errorf("suspend app %s %s failed %v", p.manager.Spec.Type, p.manager.Spec.AppName, err)
return fmt.Errorf("suspend app %s failed %w", p.manager.Spec.AppName, err)
}
}
if stopServer {
// For V2 cluster-scoped apps, when server is down, stop all other users' clients
// because they share the same server and cannot function without it
klog.Infof("stopping other users' clients for v2 app %s", p.manager.Spec.AppName)
var appManagerList appsv1.ApplicationManagerList
if err := p.client.List(ctx, &appManagerList); err != nil {
klog.Errorf("failed to list application managers: %v", err)
} else {
// find all ApplicationManagers with same AppName but different AppOwner
for _, am := range appManagerList.Items {
// Skip if same owner (already handled) or different app
if am.Spec.AppName != p.manager.Spec.AppName || am.Spec.AppOwner == p.manager.Spec.AppOwner {
continue
}
ns := chart.Namespace(appCfg.OwnerName)
// create a shallow copy with target namespace/name for scaling logic
amCopy := p.manager.DeepCopy()
amCopy.Spec.AppNamespace = ns
amCopy.Spec.AppName = chart.Name
klog.Infof("amCopy.Spec.AppNamespace: %s", ns)
klog.Infof("amCopy.Spec.AppName: %s", chart.Name)
if err := suspendOrResumeApp(ctx, p.client, amCopy, int32(0)); err != nil {
klog.Errorf("failed to stop shared chart %s in namespace %s: %v", chart.Name, ns, err)
if am.Spec.Type != appsv1.App && am.Spec.Type != appsv1.Middleware {
continue
}
if am.Status.State == appsv1.Stopped || am.Status.State == appsv1.Stopping {
klog.Infof("app %s owner %s already in stopped/stopping state, skip", am.Spec.AppName, am.Spec.AppOwner)
continue
}
if !IsOperationAllowed(am.Status.State, appsv1.StopOp) {
klog.Infof("app %s owner %s not allowed do stop operation, skip", am.Spec.AppName, am.Spec.AppOwner)
continue
}
opID := strconv.FormatInt(time.Now().Unix(), 10)
now := metav1.Now()
status := appsv1.ApplicationManagerStatus{
OpType: appsv1.StopOp,
OpID: opID,
State: appsv1.Stopping,
StatusTime: &now,
UpdateTime: &now,
Reason: p.manager.Status.Reason,
Message: p.manager.Status.Message,
}
if _, err := apputils.UpdateAppMgrStatus(am.Name, status); err != nil {
return err
}
klog.Infof("stopping client for user %s, app %s", am.Spec.AppOwner, am.Spec.AppName)
}
}
}

View File

@@ -22,7 +22,6 @@ import (
"github.com/pkg/errors"
"helm.sh/helm/v3/pkg/action"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -214,19 +213,8 @@ func (p *UpgradingApp) exec(ctx context.Context) error {
"username": p.manager.Spec.AppOwner,
},
}
var nodes corev1.NodeList
err = p.client.List(ctx, &nodes, &client.ListOptions{})
if err != nil {
klog.Errorf("list node failed %v", err)
return err
}
gpuType, err := utils.FindGpuTypeFromNodes(&nodes)
if err != nil {
klog.Errorf("get gpu type failed %v", gpuType)
return err
}
values["GPU"] = map[string]interface{}{
"Type": gpuType,
"Type": appConfig.GetSelectedGpuTypeValue(),
"Cuda": os.Getenv("OLARES_SYSTEM_CUDA_VERSION"),
}

View File

@@ -2,10 +2,13 @@ package appstate
import (
"context"
"encoding/json"
"fmt"
appv1alpha1 "github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
"github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
"github.com/beclab/Olares/framework/app-service/pkg/appcfg"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -21,25 +24,25 @@ import (
const suspendAnnotation = "bytetrade.io/suspend-by"
const suspendCauseAnnotation = "bytetrade.io/suspend-cause"
func suspendOrResumeApp(ctx context.Context, cli client.Client, am *appv1alpha1.ApplicationManager, replicas int32) error {
suspend := func(list client.ObjectList) error {
namespace := am.Spec.AppNamespace
err := cli.List(ctx, list, client.InNamespace(namespace))
if err != nil && !apierrors.IsNotFound(err) {
klog.Errorf("Failed to get workload namespace=%s err=%v", namespace, err)
// suspendOrResumeApp suspends or resumes an application.
func suspendOrResumeApp(ctx context.Context, cli client.Client, am *appv1alpha1.ApplicationManager, replicas int32, stopOrResumeServer bool) error {
suspendOrResume := func(list client.ObjectList, targetNamespace, targetAppName string) error {
err := cli.List(ctx, list, client.InNamespace(targetNamespace))
if err != nil {
klog.Errorf("Failed to get workload namespace=%s err=%v", targetNamespace, err)
return err
}
listObjects, err := apimeta.ExtractList(list)
if err != nil {
klog.Errorf("Failed to extract list namespace=%s err=%v", namespace, err)
klog.Errorf("Failed to extract list namespace=%s err=%v", targetNamespace, err)
return err
}
check := func(appName, deployName string) bool {
if namespace == fmt.Sprintf("user-space-%s", am.Spec.AppOwner) ||
namespace == fmt.Sprintf("user-system-%s", am.Spec.AppOwner) ||
namespace == "os-platform" ||
namespace == "os-framework" {
if targetNamespace == fmt.Sprintf("user-space-%s", am.Spec.AppOwner) ||
targetNamespace == fmt.Sprintf("user-system-%s", am.Spec.AppOwner) ||
targetNamespace == "os-platform" ||
targetNamespace == "os-framework" {
if appName == deployName {
return true
}
@@ -54,7 +57,7 @@ func suspendOrResumeApp(ctx context.Context, cli client.Client, am *appv1alpha1.
workloadName := ""
switch workload := w.(type) {
case *appsv1.Deployment:
if check(am.Spec.AppName, workload.Name) {
if check(targetAppName, workload.Name) {
if workload.Annotations == nil {
workload.Annotations = make(map[string]string)
}
@@ -64,7 +67,7 @@ func suspendOrResumeApp(ctx context.Context, cli client.Client, am *appv1alpha1.
workloadName = workload.Namespace + "/" + workload.Name
}
case *appsv1.StatefulSet:
if check(am.Spec.AppName, workload.Name) {
if check(targetAppName, workload.Name) {
if workload.Annotations == nil {
workload.Annotations = make(map[string]string)
}
@@ -92,15 +95,79 @@ func suspendOrResumeApp(ctx context.Context, cli client.Client, am *appv1alpha1.
} // end of suspend func
var deploymentList appsv1.DeploymentList
err := suspend(&deploymentList)
err := suspendOrResume(&deploymentList, am.Spec.AppNamespace, am.Spec.AppName)
if err != nil {
return err
}
var stsList appsv1.StatefulSetList
err = suspend(&stsList)
err = suspendOrResume(&stsList, am.Spec.AppNamespace, am.Spec.AppName)
if err != nil {
return err
}
return err
// If stopOrResumeServer is true, also suspend/resume shared server charts for V2 apps
if stopOrResumeServer {
var appCfg *appcfg.ApplicationConfig
if err := json.Unmarshal([]byte(am.Spec.Config), &appCfg); err != nil {
klog.Warningf("failed to unmarshal app config for stopServer check: %v", err)
return err
}
if appCfg != nil && appCfg.IsV2() && appCfg.HasClusterSharedCharts() {
for _, chart := range appCfg.SubCharts {
if !chart.Shared {
continue
}
ns := chart.Namespace(am.Spec.AppOwner)
if replicas == 0 {
klog.Infof("suspending shared chart %s in namespace %s", chart.Name, ns)
} else {
klog.Infof("resuming shared chart %s in namespace %s", chart.Name, ns)
}
var sharedDeploymentList appsv1.DeploymentList
if err := suspendOrResume(&sharedDeploymentList, ns, chart.Name); err != nil {
klog.Errorf("failed to scale deployments in shared chart %s namespace %s: %v", chart.Name, ns, err)
return err
}
var sharedStsList appsv1.StatefulSetList
if err := suspendOrResume(&sharedStsList, ns, chart.Name); err != nil {
klog.Errorf("failed to scale statefulsets in shared chart %s namespace %s: %v", chart.Name, ns, err)
return err
}
}
}
// Reset the stop-all/resume-all annotation after processing
if am.Annotations != nil {
delete(am.Annotations, api.AppStopAllKey)
delete(am.Annotations, api.AppResumeAllKey)
if err := cli.Update(ctx, am); err != nil {
klog.Warningf("failed to reset stop-all/resume-all annotations for app=%s owner=%s: %v", am.Spec.AppName, am.Spec.AppOwner, err)
// Don't return error, operation already succeeded
}
}
}
return nil
}
// suspendV1AppOrV2Client scales a V1 app's (or a V2 app's client-side)
// workloads down to zero replicas; shared server-side charts are left running.
func suspendV1AppOrV2Client(ctx context.Context, cli client.Client, am *appv1alpha1.ApplicationManager) error {
	return suspendOrResumeApp(ctx, cli, am, 0, false)
}

// suspendV2AppAll scales down the app's own workloads and, for V2 apps with
// cluster-shared charts, the shared server-side chart workloads as well.
func suspendV2AppAll(ctx context.Context, cli client.Client, am *appv1alpha1.ApplicationManager) error {
	return suspendOrResumeApp(ctx, cli, am, 0, true)
}

// resumeV1AppOrV2AppClient scales a V1 app's (or a V2 app's client-side)
// workloads back up to one replica; shared server-side charts are not touched.
func resumeV1AppOrV2AppClient(ctx context.Context, cli client.Client, am *appv1alpha1.ApplicationManager) error {
	return suspendOrResumeApp(ctx, cli, am, 1, false)
}

// resumeV2AppAll scales up the app's own workloads and, for V2 apps with
// cluster-shared charts, the shared server-side chart workloads as well.
func resumeV2AppAll(ctx context.Context, cli client.Client, am *appv1alpha1.ApplicationManager) error {
	return suspendOrResumeApp(ctx, cli, am, 1, true)
}
func isStartUp(am *appv1alpha1.ApplicationManager, cli client.Client) (bool, error) {

View File

@@ -78,13 +78,15 @@ const (
SidecarInitContainerName = "olares-sidecar-init"
EnvoyConfigWorkDirName = "envoy-config"
ByteTradeAuthor = "bytetrade.io"
NvshareGPU = "nvshare.com/gpu"
NvidiaGPU = "nvidia.com/gpu"
VirtAiTechVGPU = "virtaitech.com/gpu"
PatchOpAdd = "add"
PatchOpReplace = "replace"
EnvNvshareManagedMemory = "NVSHARE_MANAGED_MEMORY"
ByteTradeAuthor = "bytetrade.io"
PatchOpAdd = "add"
PatchOpReplace = "replace"
EnvGPUType = "GPU_TYPE"
// gpu resource keys
NvidiaGPU = "nvidia.com/gpu"
NvidiaGB10GPU = "nvidia.com/gb10"
AMDAPU = "amd.com/apu"
AuthorizationLevelOfPublic = "public"
AuthorizationLevelOfPrivate = "private"

View File

@@ -3,5 +3,6 @@ package errcode
import "errors"
var (
ErrPodPending = errors.New("pod is pending")
ErrServerSidePodPending = errors.New("server side pod is pending")
ErrPodPending = errors.New("pod is pending")
)

View File

@@ -160,6 +160,7 @@ func PublishAppEventToQueue(p utils.EventParams) {
return p.RawAppName
}(),
Title: p.Title,
Icon: p.Icon,
Reason: p.Reason,
Message: p.Message,
SharedEntrances: p.SharedEntrances,

View File

@@ -233,6 +233,7 @@ func (imc *ImageManagerClient) updateProgress(ctx context.Context, am *appv1alph
RawAppName: am.Spec.RawAppName,
Type: am.Spec.Type.String(),
Title: apputils.AppTitle(am.Spec.Config),
Icon: apputils.AppIcon(am.Spec.Config),
})
}
klog.Infof("app %s download progress.... %v", am.Spec.AppName, progressStr)

View File

@@ -42,6 +42,9 @@ const (
// TypeMySQL indicates the middleware is mysql
TypeMySQL MiddlewareType = "mysql"
// TypeClickHouse indicates the middleware is ClickHouse
TypeClickHouse MiddlewareType = "clickhouse"
)
func (mr MiddlewareType) String() string {
@@ -323,6 +326,27 @@ func Apply(middleware *Middleware, kubeConfig *rest.Config, appName, appNamespac
}
klog.Infof("values.mysql: %v", vals["mysql"])
}
if middleware.ClickHouse != nil {
username := fmt.Sprintf("%s-%s-%s", middleware.ClickHouse.Username, ownerName, appName)
err := process(kubeConfig, appName, appNamespace, namespace, username, TypeClickHouse, ownerName, middleware)
if err != nil {
return err
}
resp, err := getMiddlewareRequest(TypeClickHouse)
if err != nil {
klog.Errorf("failed to get clickHouse middleware request info %v", err)
return err
}
vals["clickhouse"] = map[string]interface{}{
"host": resp.Host,
"port": resp.Port,
"username": resp.UserName,
"password": resp.Password,
"databases": resp.Databases,
}
klog.Infof("values.clickhouse: %v", vals["clickhouse"])
}
return nil
}
@@ -383,6 +407,8 @@ func getPassword(middleware *Middleware, middlewareType MiddlewareType) (string,
return middleware.MariaDB.Password, nil
case TypeMySQL:
return middleware.MySQL.Password, nil
case TypeClickHouse:
return middleware.ClickHouse.Password, nil
}
return "", fmt.Errorf("unsupported middleware type %v", middlewareType)
}

View File

@@ -287,6 +287,32 @@ spec:
user: {{ .Middleware.Username }}
`
// clickHouseRequest is the Go template rendered by genClickHouseRequest into a
// ClickHouse MiddlewareRequest custom resource. When .Middleware.Password is
// empty, the password is referenced from a per-app secret instead of being
// inlined.
// NOTE(review): YAML indentation below was reconstructed from the template
// structure — confirm against the sibling middleware request templates.
const clickHouseRequest = `apiVersion: apr.bytetrade.io/v1alpha1
kind: MiddlewareRequest
metadata:
  name: {{ .AppName }}-clickhouse
  namespace: {{ .Namespace }}
spec:
  app: {{ .AppName }}
  appNamespace: {{ .AppNamespace }}
  middleware: clickhouse
  clickhouse:
    databases:
    {{- range $k, $v := .Middleware.Databases }}
      - name: {{ $v.Name }}
    {{- end }}
    password:
    {{- if not (eq .Middleware.Password "") }}
      value: {{ .Middleware.Password }}
    {{- else }}
      valueFrom:
        secretKeyRef:
          name: {{ .AppName }}-{{ .Namespace }}-clickhouse-password
          key: "password"
    {{- end }}
    user: {{ .Middleware.Username }}
`
type RequestParams struct {
MiddlewareType MiddlewareType
AppName string
@@ -318,6 +344,8 @@ func GenMiddleRequest(p RequestParams) ([]byte, error) {
return genMariadbRequest(p)
case TypeMySQL:
return genMysqlRequest(p)
case TypeClickHouse:
return genClickHouseRequest(p)
default:
return []byte{}, fmt.Errorf("unsupported middleware type: %s", p.MiddlewareType)
}
@@ -512,3 +540,22 @@ func genElasticsearchRequest(p RequestParams) ([]byte, error) {
}
return renderTemplate(elasticsearchRequest, data)
}
// genClickHouseRequest renders the ClickHouse MiddlewareRequest manifest for
// the given request parameters.
func genClickHouseRequest(p RequestParams) ([]byte, error) {
	mw := &ClickHouseConfig{
		Username:  p.Username,
		Password:  p.Password,
		Databases: p.Middleware.ClickHouse.Databases,
	}
	data := struct {
		AppName      string
		AppNamespace string
		Namespace    string
		Middleware   *ClickHouseConfig
	}{
		AppName:      p.AppName,
		AppNamespace: p.AppNamespace,
		Namespace:    p.Namespace,
		Middleware:   mw,
	}
	return renderTemplate(clickHouseRequest, data)
}

View File

@@ -12,6 +12,7 @@ type Middleware struct {
MariaDB *MariaDBConfig `yaml:"mariadb,omitempty"`
MySQL *MySQLConfig `yaml:"mysql,omitempty"`
Argo *ArgoConfig `yaml:"argo,omitempty"`
ClickHouse *ClickHouseConfig `yaml:"clickHouse,omitempty"`
}
// Database specify database name and if distributed.
@@ -92,6 +93,13 @@ type MySQLConfig struct {
Databases []Database `yaml:"databases" json:"databases"`
}
// ClickHouseConfig contains fields for clickhouse config: the user to
// provision, an optional password, and the databases to create.
type ClickHouseConfig struct {
	// Username is the base ClickHouse user name; the middleware request
	// combines it with the owner and app name.
	Username string `yaml:"username" json:"username"`
	// Password for the user. When empty, the rendered request template
	// references a secret-backed password instead of an inline value.
	Password string `yaml:"password,omitempty" json:"password"`
	// Databases to create for the app.
	Databases []Database `yaml:"databases" json:"databases"`
}
type NatsConfig struct {
Username string `yaml:"username" json:"username"`
Password string `yaml:"password,omitempty" json:"password,omitempty"`

View File

@@ -273,11 +273,7 @@ func (c *Creator) installSysApps(ctx context.Context, bflPod *corev1.Pod) error
"arch": arch,
}
gpuType, err := utils.FindGpuTypeFromNodes(&nodes)
if err != nil {
return err
}
vals["gpu"] = gpuType
vals["gpu"] = "none" // unused currently
userIndex, userSubnet, err := c.getUserSubnet(ctx)
if err != nil {

View File

@@ -16,6 +16,7 @@ import (
corev1 "k8s.io/api/core/v1"
sysv1alpha1 "github.com/beclab/Olares/framework/app-service/api/sys.bytetrade.io/v1alpha1"
"github.com/go-viper/mapstructure/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/beclab/Olares/framework/app-service/api/app.bytetrade.io/v1alpha1"
@@ -674,6 +675,7 @@ type ConfigOptions struct {
MarketSource string
IsAdmin bool
RawAppName string
SelectedGpu string
}
// GetAppConfig get app installation configuration from app store
@@ -740,7 +742,7 @@ func getAppConfigFromRepo(ctx context.Context, options *ConfigOptions) (*appcfg.
return getAppConfigFromConfigurationFile(options, chartPath)
}
func toApplicationConfig(app, chart, rawAppName string, cfg *appcfg.AppConfiguration) (*appcfg.ApplicationConfig, string, error) {
func toApplicationConfig(app, chart, rawAppName, selectedGpu string, cfg *appcfg.AppConfiguration) (*appcfg.ApplicationConfig, string, error) {
var permission []appcfg.AppPermission
if cfg.Permission.AppData {
permission = append(permission, appcfg.AppDataRW)
@@ -788,6 +790,57 @@ func toApplicationConfig(app, chart, rawAppName string, cfg *appcfg.AppConfigura
return nil, chart, err
}
// set suppertedGpu to ["nvidia","nvidia-gb10"] by default
if len(cfg.Spec.SupportedGpu) == 0 {
cfg.Spec.SupportedGpu = []interface{}{utils.NvidiaCardType, utils.GB10ChipType}
}
// try to get selected GPU type special resource requirement
if selectedGpu != "" {
found := false
for _, supportedGpu := range cfg.Spec.SupportedGpu {
if str, ok := supportedGpu.(string); ok && str == selectedGpu {
found = true
break
}
if supportedGpuResourceMap, ok := supportedGpu.(map[string]interface{}); ok {
if resourceRequirement, ok := supportedGpuResourceMap[selectedGpu].(map[string]interface{}); ok {
found = true
var specialResource appcfg.SpecialResource
err := mapstructure.Decode(resourceRequirement, &specialResource)
if err != nil {
return nil, chart, fmt.Errorf("failed to decode special resource for selected GPU type %s: %v", selectedGpu, err)
}
for _, resSetter := range []struct {
v **resource.Quantity
s *string
}{
{v: &mem, s: specialResource.RequiredMemory},
{v: &disk, s: specialResource.RequiredDisk},
{v: &cpu, s: specialResource.RequiredCPU},
{v: &gpu, s: specialResource.RequiredGPU},
} {
if resSetter.s != nil && *resSetter.s != "" {
*resSetter.v, err = valuePtr(resource.ParseQuantity(*resSetter.s))
if err != nil {
return nil, chart, fmt.Errorf("failed to parse special resource quantity %s: %v", *resSetter.s, err)
}
}
}
break
} // end if selected gpu's resource requirement found
} // end if supportedGpu is map
} // end for supportedGpu
if !found {
return nil, chart, fmt.Errorf("selected GPU type %s is not supported", selectedGpu)
}
}
// transform from Policy to AppPolicy
var policies []appcfg.AppPolicy
for _, p := range cfg.Options.Policies {
@@ -877,6 +930,7 @@ func toApplicationConfig(app, chart, rawAppName string, cfg *appcfg.AppConfigura
PodsSelectors: podSelectors,
HardwareRequirement: cfg.Spec.Hardware,
SharedEntrances: cfg.SharedEntrances,
SelectedGpuType: selectedGpu,
}, chart, nil
}
@@ -890,7 +944,7 @@ func getAppConfigFromConfigurationFile(opt *ConfigOptions, chartPath string) (*a
return nil, chartPath, err
}
return toApplicationConfig(opt.App, chartPath, opt.RawAppName, &cfg)
return toApplicationConfig(opt.App, chartPath, opt.RawAppName, opt.SelectedGpu, &cfg)
}
func checkVersionFormat(constraint string) error {
@@ -1080,13 +1134,28 @@ func IsClonedApp(appName, rawAppName string) bool {
}
func AppTitle(config string) string {
var cfg appcfg.ApplicationConfig
err := json.Unmarshal([]byte(config), &cfg)
if err != nil {
cfg := unmarshalApplicationConfig(config)
if cfg == nil {
return ""
}
return cfg.Title
}
// AppIcon extracts the icon field from a serialized ApplicationConfig.
// It returns the empty string when the config cannot be parsed.
func AppIcon(config string) string {
	if cfg := unmarshalApplicationConfig(config); cfg != nil {
		return cfg.Icon
	}
	return ""
}
// unmarshalApplicationConfig decodes a JSON-serialized ApplicationConfig,
// returning nil when the input is not valid JSON for that type.
func unmarshalApplicationConfig(config string) *appcfg.ApplicationConfig {
	cfg := &appcfg.ApplicationConfig{}
	if err := json.Unmarshal([]byte(config), cfg); err != nil {
		return nil
	}
	return cfg
}
func GetRawAppName(AppName, rawAppName string) string {
if rawAppName == "" {

View File

@@ -24,6 +24,7 @@ import (
"github.com/beclab/Olares/framework/app-service/pkg/utils"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -233,7 +234,9 @@ func CheckAppRequirement(token string, appConfig *appcfg.ApplicationConfig, op v
return constants.CPU, constants.SystemCPUPressure, fmt.Errorf(constants.SystemCPUPressureMessage, op)
}
}
if appConfig.Requirement.GPU != nil {
// only support nvidia gpu managment by HAMi for now
if appConfig.Requirement.GPU != nil && appConfig.GetSelectedGpuTypeValue() == utils.NvidiaCardType {
if !appConfig.Requirement.GPU.IsZero() && metrics.GPU.Total <= 0 {
return constants.GPU, constants.SystemGPUNotAvailable, fmt.Errorf(constants.SystemGPUNotAvailableMessage, op)
@@ -260,42 +263,10 @@ func CheckAppRequirement(token string, appConfig *appcfg.ApplicationConfig, op v
}
}
allocatedResources, err := getRequestResources()
if err != nil {
return "", "", err
}
if len(allocatedResources) == 1 {
sufficientCPU, sufficientMemory := false, false
if appConfig.Requirement.CPU == nil {
sufficientCPU = true
}
if appConfig.Requirement.Memory == nil {
sufficientMemory = true
}
for _, v := range allocatedResources {
if appConfig.Requirement.CPU != nil {
if v.cpu.allocatable.Cmp(*appConfig.Requirement.CPU) > 0 {
sufficientCPU = true
}
}
if appConfig.Requirement.Memory != nil {
if v.memory.allocatable.Cmp(*appConfig.Requirement.Memory) > 0 {
sufficientMemory = true
}
}
}
if !sufficientCPU {
return constants.CPU, constants.K8sRequestCPUPressure, fmt.Errorf(constants.K8sRequestCPUPressureMessage, op)
}
if !sufficientMemory {
return constants.Memory, constants.K8sRequestMemoryPressure, fmt.Errorf(constants.K8sRequestMemoryPressureMessage, op)
}
}
return "", "", nil
return CheckAppK8sRequestResource(appConfig, op)
}
func getRequestResources() (map[string]resources, error) {
func GetRequestResources() (map[string]resources, error) {
config, err := ctrl.GetConfig()
if err != nil {
return nil, err
@@ -429,7 +400,9 @@ func GetClusterResource(token string) (*prometheus.ClusterMetrics, []string, err
arches.Insert(n.Labels["kubernetes.io/arch"])
if quantity, ok := n.Status.Capacity[constants.NvidiaGPU]; ok {
total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.VirtAiTechVGPU]; ok {
} else if quantity, ok = n.Status.Capacity[constants.NvidiaGB10GPU]; ok {
total += quantity.AsApproximateFloat64()
} else if quantity, ok = n.Status.Capacity[constants.AMDAPU]; ok {
total += quantity.AsApproximateFloat64()
}
}
@@ -960,3 +933,83 @@ func CheckCloneEntrances(ctrlClient client.Client, appConfig *appcfg.Application
return nil, nil
}
// GetClusterAvailableResource computes the cluster-wide schedulable headroom:
// the sum of allocatable CPU and memory across Ready, schedulable nodes, minus
// the container resource requests of all non-terminated pods on those nodes.
//
// It returns an error when the kube config cannot be loaded, an API call
// fails, or no node is available for scheduling.
func GetClusterAvailableResource() (*resources, error) {
	config, err := ctrl.GetConfig()
	if err != nil {
		return nil, err
	}
	// Named clientset to avoid shadowing the imported controller-runtime
	// "client" package.
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	// BUG FIX: the previous code initialized both cpu and memory with the SAME
	// *resource.Quantity (&initAllocatable), so CPU and memory arithmetic
	// mutated one shared value. Each dimension needs its own accumulator,
	// because Quantity.Add/Sub mutate the receiver in place.
	cpuAllocatable := resource.MustParse("0")
	memAllocatable := resource.MustParse("0")
	availableResources := resources{
		cpu:    usage{allocatable: &cpuAllocatable},
		memory: usage{allocatable: &memAllocatable},
	}
	// Keep only nodes that can actually host new pods.
	nodeList := make([]corev1.Node, 0, len(nodes.Items))
	for _, node := range nodes.Items {
		if !utils.IsNodeReady(&node) || node.Spec.Unschedulable {
			continue
		}
		nodeList = append(nodeList, node)
	}
	if len(nodeList) == 0 {
		return nil, errors.New("cluster has no suitable node to schedule")
	}
	for _, node := range nodeList {
		availableResources.cpu.allocatable.Add(*node.Status.Allocatable.Cpu())
		availableResources.memory.allocatable.Add(*node.Status.Allocatable.Memory())
		// Subtract requests of every non-terminated pod scheduled on this node.
		fieldSelector := fmt.Sprintf("spec.nodeName=%s,status.phase!=Failed,status.phase!=Succeeded", node.Name)
		pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
			FieldSelector: fieldSelector,
		})
		if err != nil {
			return nil, err
		}
		for _, pod := range pods.Items {
			for _, container := range pod.Spec.Containers {
				availableResources.cpu.allocatable.Sub(*container.Resources.Requests.Cpu())
				availableResources.memory.allocatable.Sub(*container.Resources.Requests.Memory())
			}
		}
	}
	return &availableResources, nil
}
// CheckAppK8sRequestResource verifies that the cluster's aggregate available
// CPU and memory (allocatable minus existing pod requests) exceed the app's
// declared requirements. A nil requirement dimension is treated as satisfied.
// On shortage it returns the pressured resource type, its condition, and an
// error describing the rejected operation.
func CheckAppK8sRequestResource(appConfig *appcfg.ApplicationConfig, op v1alpha1.OpType) (constants.ResourceType, constants.ResourceConditionType, error) {
	// Validate input before the expensive cluster query.
	if appConfig == nil {
		return "", "", errors.New("nil appConfig")
	}
	availableResources, err := GetClusterAvailableResource()
	if err != nil {
		return "", "", err
	}
	// A dimension is insufficient only when it is requested and the available
	// amount is not strictly greater than the requirement (same comparison as
	// before, folded into direct guards).
	if appConfig.Requirement.CPU != nil && availableResources.cpu.allocatable.Cmp(*appConfig.Requirement.CPU) <= 0 {
		return constants.CPU, constants.K8sRequestCPUPressure, fmt.Errorf(constants.K8sRequestCPUPressureMessage, op)
	}
	if appConfig.Requirement.Memory != nil && availableResources.memory.allocatable.Cmp(*appConfig.Requirement.Memory) <= 0 {
		return constants.Memory, constants.K8sRequestMemoryPressure, fmt.Errorf(constants.K8sRequestMemoryPressureMessage, op)
	}
	return "", "", nil
}

View File

@@ -0,0 +1,12 @@
package utils

const (
	// NodeGPUTypeLabel is the node label advertising which GPU type a node
	// carries; GetAllGpuTypesFromNodes aggregates it across the cluster.
	NodeGPUTypeLabel = "gpu.bytetrade.io/type"
)

// Known GPU type values that may appear in the NodeGPUTypeLabel label and in
// an app's supportedGpu configuration.
const (
	NvidiaCardType = "nvidia"      // NVIDIA discrete GPU, handling by HAMi
	AmdGpuCardType = "amd-gpu"     // AMD discrete GPU
	AmdApuCardType = "amd-apu"     // AMD APU with integrated GPU, AI Max 395 etc.
	GB10ChipType   = "nvidia-gb10" // NVIDIA GB10 Superchip & unified system memory
)

View File

@@ -162,6 +162,9 @@ func GetResourceListFromChart(chartPath string, values map[string]interface{}) (
values["elasticsearch"] = map[string]interface{}{
"indexes": map[string]interface{}{},
}
values["clickhouse"] = map[string]interface{}{
"databases": map[string]interface{}{},
}
values["svcs"] = map[string]interface{}{}
values["nats"] = map[string]interface{}{
"subjects": map[string]interface{}{},

View File

@@ -103,24 +103,37 @@ func GetAllNodesTunnelIPCIDRs() (cidrs []string) {
return cidrs
}
func FindGpuTypeFromNodes(nodes *corev1.NodeList) (string, error) {
gpuType := "none"
// func FindGpuTypeFromNodes(nodes *corev1.NodeList) (string, error) {
// gpuType := "none"
// if nodes == nil {
// return gpuType, errors.New("empty node list")
// }
// for _, n := range nodes.Items {
// if _, ok := n.Status.Capacity[constants.NvidiaGPU]; ok {
// if _, ok = n.Status.Capacity[constants.NvshareGPU]; ok {
// return "nvshare", nil
// }
// gpuType = "nvidia"
// }
// if _, ok := n.Status.Capacity[constants.VirtAiTechVGPU]; ok {
// return "virtaitech", nil
// }
// }
// return gpuType, nil
// }
func GetAllGpuTypesFromNodes(nodes *corev1.NodeList) (map[string]struct{}, error) {
gpuTypes := make(map[string]struct{})
if nodes == nil {
return gpuType, errors.New("empty node list")
return gpuTypes, errors.New("empty node list")
}
for _, n := range nodes.Items {
if _, ok := n.Status.Capacity[constants.NvidiaGPU]; ok {
if _, ok = n.Status.Capacity[constants.NvshareGPU]; ok {
return "nvshare", nil
}
gpuType = "nvidia"
}
if _, ok := n.Status.Capacity[constants.VirtAiTechVGPU]; ok {
return "virtaitech", nil
if typeLabel, ok := n.Labels[NodeGPUTypeLabel]; ok {
gpuTypes[typeLabel] = struct{}{} // TODO: add driver version info
}
}
return gpuType, nil
return gpuTypes, nil
}
func IsNodeReady(node *corev1.Node) bool {

View File

@@ -25,6 +25,7 @@ type Event struct {
User string `json:"user"`
EntranceStatuses []v1alpha1.EntranceStatus `json:"entranceStatuses,omitempty"`
Title string `json:"title,omitempty"`
Icon string `json:"icon,omitempty"`
Reason string `json:"reason,omitempty"`
Message string `json:"message,omitempty"`
SharedEntrances []v1alpha1.Entrance `json:"sharedEntrances,omitempty"`
@@ -45,6 +46,7 @@ type EventParams struct {
Reason string
Message string
SharedEntrances []v1alpha1.Entrance
Icon string
}
func PublishEvent(nc *nats.Conn, subject string, data interface{}) error {

View File

@@ -30,7 +30,6 @@ import (
admissionv1 "k8s.io/api/admission/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
@@ -544,16 +543,21 @@ type EnvKeyValue struct {
}
// CreatePatchForDeployment add gpu env for deployment and returns patch bytes.
func CreatePatchForDeployment(tpl *corev1.PodTemplateSpec, namespace string, gpuRequired *resource.Quantity, typeKey string, envKeyValues []EnvKeyValue) ([]byte, error) {
patches, err := addResourceLimits(tpl, namespace, gpuRequired, typeKey, envKeyValues)
func CreatePatchForDeployment(tpl *corev1.PodTemplateSpec, typeKey string, envKeyValues []EnvKeyValue) ([]byte, error) {
patches, err := addResourceLimits(tpl, typeKey, envKeyValues)
if err != nil {
return []byte{}, err
}
return json.Marshal(patches)
}
func addResourceLimits(tpl *corev1.PodTemplateSpec, namespace string, gpuRequired *resource.Quantity, typeKey string, envKeyValues []EnvKeyValue) (patch []patchOp, err error) {
if typeKey == constants.NvidiaGPU || typeKey == constants.NvshareGPU {
func addResourceLimits(tpl *corev1.PodTemplateSpec, typeKey string, envKeyValues []EnvKeyValue) (patch []patchOp, err error) {
if typeKey == "" {
klog.Warning("No gpu type selected, skip adding resource limits")
return patch, nil
}
if typeKey == constants.NvidiaGPU || typeKey == constants.NvidiaGB10GPU {
if tpl.Spec.RuntimeClassName != nil {
patch = append(patch, patchOp{
Op: constants.PatchOpReplace,
@@ -584,7 +588,10 @@ func addResourceLimits(tpl *corev1.PodTemplateSpec, namespace string, gpuRequire
t := make(map[string]map[string]string)
t["limits"] = map[string]string{}
for k, v := range container.Resources.Limits {
if k.String() == constants.NvidiaGPU || k.String() == constants.NvshareGPU || k.String() == constants.VirtAiTechVGPU {
if k.String() == constants.NvidiaGPU ||
k.String() == constants.NvidiaGB10GPU ||
k.String() == constants.AMDAPU {
// unset all previous gpu limits
continue
}
t["limits"][k.String()] = v.String()

View File

@@ -304,7 +304,7 @@ spec:
- name: BACKUP_SERVER
value: backup-server.os-framework:8082
- name: L4_PROXY_IMAGE_VERSION
value: v0.3.9
value: v0.3.10
- name: L4_PROXY_SERVICE_ACCOUNT
value: os-network-internal
- name: L4_PROXY_NAMESPACE

View File

@@ -180,7 +180,7 @@ spec:
memory: 300Mi
- name: download-server
image: "beclab/download-server:v0.1.17"
image: "beclab/download-server:v0.1.20"
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0

View File

@@ -3,5 +3,5 @@ target: prebuilt
output:
containers:
-
name: beclab/l4-bfl-proxy:v0.3.9
name: beclab/l4-bfl-proxy:v0.3.10
# must have blank new line

View File

@@ -906,6 +906,9 @@ func (s *Server) generateStreamServers() ([]StreamServer, error) {
if bflHost == "" {
return nil, fmt.Errorf("can not find bfl service for user=%s", app.Spec.Owner)
}
if p.ExposePort < 1 || p.ExposePort > 65535 {
continue
}
server := StreamServer{
Protocol: p.Protocol,
Port: p.ExposePort,

View File

@@ -67,6 +67,9 @@ spec:
{{- toYaml . | nindent 6 }}
{{- end }}
volumes:
- name: hostdev
hostPath:
path: /dev
- name: "pod-gpu-resources"
hostPath:
path: '{{ .Values.dcgmExporter.kubeletPath }}'
@@ -124,6 +127,8 @@ spec:
- name: "metrics"
containerPort: {{ .Values.dcgmExporter.service.port }}
volumeMounts:
- name: hostdev
mountPath: /dev
- name: "pod-gpu-resources"
readOnly: true
mountPath: "/var/lib/kubelet/pod-resources"

View File

@@ -80,6 +80,8 @@ spec:
resources:
{{- toYaml .Values.devicePlugin.resources | nindent 12 }}
volumeMounts:
- name: hostdev
mountPath: /dev
- name: device-plugin
mountPath: /var/lib/kubelet/device-plugins
- name: lib
@@ -120,6 +122,8 @@ spec:
resources:
{{- toYaml .Values.devicePlugin.vgpuMonitor.resources | nindent 12 }}
volumeMounts:
- name: hostdev
mountPath: /dev
- name: ctrs
mountPath: {{ .Values.devicePlugin.monitorctrPath }}
- name: dockers
@@ -133,6 +137,9 @@ spec:
- name: hosttmp
mountPath: /tmp
volumes:
- name: hostdev
hostPath:
path: /dev
- name: ctrs
hostPath:
path: {{ .Values.devicePlugin.monitorctrPath }}

View File

@@ -4,7 +4,7 @@ nameOverride: ""
fullnameOverride: ""
namespaceOverride: ""
imagePullSecrets: []
version: "v2.6.9"
version: "v2.6.10"
# Nvidia GPU Parameters
resourceName: "nvidia.com/gpu"

View File

@@ -3,7 +3,7 @@ target: prebuilt
output:
containers:
-
name: beclab/hami:v2.6.9
name: beclab/hami:v2.6.10
-
name: beclab/hami-webui-fe-oss:v1.0.8
-

View File

@@ -139,4 +139,28 @@ spec:
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext2
installable:
autoInstall: true
type: Helm
---
apiVersion: extensions.kubeblocks.io/v1alpha1
kind: Addon
metadata:
annotations:
addon.kubeblocks.io/kubeblocks-version: '>=1.0.0'
labels:
addon.kubeblocks.io/model: RDBMS
addon.kubeblocks.io/name: clickhouse
addon.kubeblocks.io/provider: community
addon.kubeblocks.io/version: 1.0.1
name: clickhouse
spec:
defaultInstallValues:
- enabled: true
description: ClickHouse is an open-source column-oriented OLAP database management
system. Use it to boost your database performance while providing linear scalability
and hardware efficiency.
helm:
chartLocationURL: file:///clickhouse-1.0.1.tgz
chartsImage: beclab/kubeblock-addon-charts:v1.0.1-ext3
installable:
autoInstall: true
type: Helm

View File

@@ -54,6 +54,55 @@ spec:
type: string
appNamespace:
type: string
clickhouse:
properties:
databases:
items:
properties:
name:
type: string
required:
- name
type: object
type: array
password:
properties:
value:
description: Defaults to "".
type: string
valueFrom:
description: Source for the environment variable's value.
Cannot be used if value is not empty.
properties:
secretKeyRef:
description: Selects a key of a secret in the pod's namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: |-
Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
type: object
user:
type: string
required:
- databases
- user
type: object
elasticsearch:
properties:
allowNamespaceIndexes:

View File

@@ -57,7 +57,7 @@ spec:
path: '{{ $dbbackup_rootpath }}/pg_backup'
containers:
- name: operator-api
image: beclab/middleware-operator:0.2.31
image: beclab/middleware-operator:0.2.32
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9080

View File

@@ -2,24 +2,24 @@ package app
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"strconv"
aprv1 "bytetrade.io/web3os/tapr/pkg/apis/apr/v1alpha1"
"bytetrade.io/web3os/tapr/pkg/constants"
"bytetrade.io/web3os/tapr/pkg/workload/citus"
"bytetrade.io/web3os/tapr/pkg/workload/clickhouse"
"bytetrade.io/web3os/tapr/pkg/workload/elasticsearch"
"bytetrade.io/web3os/tapr/pkg/workload/mariadb"
wmysql "bytetrade.io/web3os/tapr/pkg/workload/mysql"
"bytetrade.io/web3os/tapr/pkg/workload/nats"
"bytetrade.io/web3os/tapr/pkg/workload/minio"
"bytetrade.io/web3os/tapr/pkg/workload/mongodb"
wmysql "bytetrade.io/web3os/tapr/pkg/workload/mysql"
"bytetrade.io/web3os/tapr/pkg/workload/nats"
"bytetrade.io/web3os/tapr/pkg/workload/rabbitmq"
rediscluster "bytetrade.io/web3os/tapr/pkg/workload/redis-cluster"
"bytetrade.io/web3os/tapr/pkg/workload/zinc"
"github.com/gofiber/fiber/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
@@ -426,6 +426,31 @@ func (s *Server) handleListMiddlewares(ctx *fiber.Ctx) error {
}
clusterResp = append(clusterResp, &cres)
}
case string(aprv1.TypeClickHouse):
dbs, err := clickhouse.ListClickHouseClusters(ctx.UserContext(), s.ctrlClient, "")
if err != nil {
return err
}
for _, m := range dbs {
user, pwd, err := clickhouse.FindClickHouseAdminUser(ctx.UserContext(), s.k8sClientSet, m.Namespace)
if err != nil {
return err
}
cres := MiddlewareClusterResp{
MiddlewareType: TypeClickHouse,
MetaInfo: MetaInfo{
Name: m.Name,
Namespace: m.Namespace,
},
AdminUser: user,
Password: pwd,
Proxy: Proxy{
Endpoint: "clickhouse-svc." + m.Namespace + ":" + "9000",
Size: m.Spec.ComponentSpecs[0].Replicas,
},
}
clusterResp = append(clusterResp, &cres)
}
default:
return fiber.ErrNotFound
}

View File

@@ -30,6 +30,9 @@ const (
// TypeMySQL indicates the middleware is mysql
TypeMySQL MiddlewareType = "mysql"
// TypeClickHouse indicates the middleware is clickhouse
TypeClickHouse MiddlewareType = "clickhouse"
)
type MiddlewareReq struct {

View File

@@ -5,6 +5,7 @@ import (
aprv1 "bytetrade.io/web3os/tapr/pkg/apis/apr/v1alpha1"
"bytetrade.io/web3os/tapr/pkg/workload/citus"
"bytetrade.io/web3os/tapr/pkg/workload/clickhouse"
"bytetrade.io/web3os/tapr/pkg/workload/elasticsearch"
"bytetrade.io/web3os/tapr/pkg/workload/mariadb"
"bytetrade.io/web3os/tapr/pkg/workload/minio"
@@ -235,6 +236,20 @@ func (s *Server) getMiddlewareInfo(ctx *fiber.Ctx, mwReq *MiddlewareReq, m *aprv
}
return resp, nil
case aprv1.TypeClickHouse:
resp.UserName = m.Spec.ClickHouse.User
resp.Password, err = m.Spec.ClickHouse.Password.GetVarValue(ctx.UserContext(), s.k8sClientSet, mwReq.Namespace)
if err != nil {
klog.Error("get middleware clickhouse password error, ", err)
return nil, err
}
resp.Port = 9000
resp.Host = "clickhouse-svc.clickhouse-middleware"
resp.Databases = make(map[string]string)
for _, v := range m.Spec.ClickHouse.Databases {
resp.Databases[v.Name] = clickhouse.GetDatabaseName(m.Spec.AppNamespace, v.Name)
}
return resp, nil
} // end of middleware type

View File

@@ -144,6 +144,20 @@ func (c *controller) handler(action Action, obj interface{}) error {
return err
}
}
case aprv1.TypeClickHouse:
switch action {
case ADD, UPDATE:
klog.Infof("create clickhouse user name: %s", request.Name)
if err := c.createOrUpdateClickHouseRequest(request); err != nil {
klog.Errorf("failed to process clickhouse create or update request %v", err)
return err
}
case DELETE:
if err := c.deleteClickHouseRequest(request); err != nil {
klog.Errorf("failed to process clickhouse delete request %v", err)
return err
}
}
}
return nil

View File

@@ -0,0 +1,122 @@
package middlewarerequest
import (
	"context"
	"fmt"
	"strings"
	"time"

	aprv1 "bytetrade.io/web3os/tapr/pkg/apis/apr/v1alpha1"
	wck "bytetrade.io/web3os/tapr/pkg/workload/clickhouse"
	"github.com/ClickHouse/clickhouse-go/v2"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/klog/v2"
)
const clickhouseNamespace = "clickhouse-middleware"
// createOrUpdateClickHouseRequest provisions the ClickHouse resources
// declared in the MiddlewareRequest: it creates the user (if missing),
// creates each requested database, and grants the user full privileges on
// it. All statements are idempotent (IF NOT EXISTS / re-GRANT), so repeated
// ADD/UPDATE events are safe to replay.
func (c *controller) createOrUpdateClickHouseRequest(req *aprv1.MiddlewareRequest) error {
	adminUser, adminPassword, err := wck.FindClickHouseAdminUser(c.ctx, c.k8sClientSet, clickhouseNamespace)
	if err != nil {
		klog.Errorf("failed to get clickhouse admin user %v", err)
		return err
	}
	userPassword, err := req.Spec.ClickHouse.Password.GetVarValue(c.ctx, c.k8sClientSet, req.Namespace)
	if err != nil {
		klog.Errorf("failed to get clickhouse user password %v", err)
		return err
	}
	conn, err := c.newClickHouseConn(c.ctx, adminUser, adminPassword)
	if err != nil {
		return err
	}
	defer func() { _ = conn.Close() }()
	// The password is spliced into the SQL text, so escape the characters
	// that would terminate or corrupt a ClickHouse single-quoted string
	// literal (backslash first, then the quote itself).
	quotedPassword := strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(userPassword)
	// Create the user if it does not exist yet.
	// NOTE(review): IF NOT EXISTS means an existing user's password is NOT
	// rotated on UPDATE events — confirm whether ALTER USER is needed here.
	createUserSQL := fmt.Sprintf("CREATE USER IF NOT EXISTS `%s` IDENTIFIED WITH plaintext_password BY '%s'", req.Spec.ClickHouse.User, quotedPassword)
	if err := conn.Exec(c.ctx, createUserSQL); err != nil {
		klog.Errorf("failed to create clickhouse user %v", err)
		return err
	}
	// Create each requested database and grant the user full access to it.
	// Database names are namespaced via GetDatabaseName to avoid collisions
	// between apps.
	for _, d := range req.Spec.ClickHouse.Databases {
		dbName := wck.GetDatabaseName(req.Spec.AppNamespace, d.Name)
		createDBSQL := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbName)
		if err := conn.Exec(c.ctx, createDBSQL); err != nil {
			klog.Errorf("failed to create database %s %v", dbName, err)
			return err
		}
		grantSQL := fmt.Sprintf("GRANT ALL ON `%s`.* TO `%s`", dbName, req.Spec.ClickHouse.User)
		if err := conn.Exec(c.ctx, grantSQL); err != nil {
			klog.Errorf("failed to grant privileges on %s %v", dbName, err)
			return err
		}
	}
	return nil
}
// deleteClickHouseRequest tears down the ClickHouse resources owned by the
// MiddlewareRequest: it drops the user and every database declared in the
// spec. If the admin secret is already gone (ClickHouse likely uninstalled),
// the deletion is treated as a no-op rather than an error.
func (c *controller) deleteClickHouseRequest(req *aprv1.MiddlewareRequest) error {
	adminUser, adminPassword, err := wck.FindClickHouseAdminUser(c.ctx, c.k8sClientSet, clickhouseNamespace)
	if err != nil {
		if apierrors.IsNotFound(err) {
			// Benign path: the admin secret is missing, so the service was
			// probably already removed. Log at info level, not error.
			klog.Infof("clickhouse admin secret not found, skipping deletion for user %s", req.Spec.ClickHouse.User)
			return nil
		}
		klog.Errorf("failed to get clickhouse admin user %v", err)
		return err
	}
	conn, err := c.newClickHouseConn(c.ctx, adminUser, adminPassword)
	if err != nil {
		return err
	}
	defer func() { _ = conn.Close() }()
	// Drop the user first so the databases cannot be accessed while they are
	// being removed. DROP ... IF EXISTS keeps the teardown idempotent.
	dropUserSQL := fmt.Sprintf("DROP USER IF EXISTS `%s`", req.Spec.ClickHouse.User)
	if err := conn.Exec(c.ctx, dropUserSQL); err != nil {
		klog.Errorf("failed to drop user %s %v", req.Spec.ClickHouse.User, err)
		return err
	}
	// Drop every database the request declared, using the same namespaced
	// name that createOrUpdateClickHouseRequest created.
	for _, d := range req.Spec.ClickHouse.Databases {
		dbName := wck.GetDatabaseName(req.Spec.AppNamespace, d.Name)
		dropDBSQL := fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", dbName)
		if err := conn.Exec(c.ctx, dropDBSQL); err != nil {
			klog.Errorf("failed to drop database %s %v", dbName, err)
			return err
		}
	}
	return nil
}
// newClickHouseConn opens a native-protocol connection to the in-cluster
// ClickHouse service, authenticating against the "default" database, and
// verifies it with a ping before handing it back. The caller owns the
// returned connection and must Close it.
func (c *controller) newClickHouseConn(ctx context.Context, user, password string) (clickhouse.Conn, error) {
	endpoint := fmt.Sprintf("clickhouse-svc.%s.svc.cluster.local:9000", clickhouseNamespace)
	opts := clickhouse.Options{
		Addr: []string{endpoint},
		Auth: clickhouse.Auth{
			Database: "default",
			Username: user,
			Password: password,
		},
		DialTimeout:      10 * time.Second,
		ConnOpenStrategy: clickhouse.ConnOpenInOrder,
		Compression: &clickhouse.Compression{
			Method: clickhouse.CompressionLZ4,
		},
	}
	conn, err := clickhouse.Open(&opts)
	if err != nil {
		klog.Errorf("open clickhouse native connection error %v", err)
		return nil, err
	}
	// Fail fast if the server is unreachable or the credentials are bad;
	// release the half-open connection before returning the error.
	if err := conn.Ping(ctx); err != nil {
		klog.Errorf("clickhouse ping error %v", err)
		_ = conn.Close()
		return nil, err
	}
	return conn, nil
}

View File

@@ -54,6 +54,55 @@ spec:
type: string
appNamespace:
type: string
clickhouse:
properties:
databases:
items:
properties:
name:
type: string
required:
- name
type: object
type: array
password:
properties:
value:
description: Defaults to "".
type: string
valueFrom:
description: Source for the environment variable's value.
Cannot be used if value is not empty.
properties:
secretKeyRef:
description: Selects a key of a secret in the pod's namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: |-
Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
type: object
user:
type: string
required:
- databases
- user
type: object
elasticsearch:
properties:
allowNamespaceIndexes:

View File

@@ -1,6 +1,6 @@
# Build the manager binary
FROM golang:1.23.5 as builder
FROM golang:1.24.0 as builder
WORKDIR /workspace
RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \
apt-get update && \

View File

@@ -1,13 +1,11 @@
module bytetrade.io/web3os/tapr
go 1.23.0
toolchain go1.23.3
go 1.24.0
require (
github.com/ClickHouse/clickhouse-go/v2 v2.42.0
github.com/apecloud/kubeblocks v1.0.0
github.com/coredns/corefile-migration/migration v1.0.1
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/elastic/go-elasticsearch/v8 v8.15.0
github.com/emicklei/go-restful v2.16.0+incompatible
github.com/go-redis/redis/v8 v8.11.5
@@ -15,7 +13,7 @@ require (
github.com/go-sql-driver/mysql v1.7.1
github.com/gofiber/contrib/websocket v1.2.2
github.com/gofiber/fiber/v2 v2.52.9
github.com/golang-jwt/jwt v3.2.2+incompatible
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/uuid v1.6.0
github.com/jmoiron/sqlx v1.3.5
github.com/json-iterator/go v1.1.12
@@ -24,7 +22,7 @@ require (
github.com/minio/madmin-go v1.7.5
github.com/minio/minio-go/v7 v7.0.95
github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4
github.com/nats-io/nats-server v1.4.1
github.com/nats-io/nats-server/v2 v2.2.0
github.com/nats-io/nats.go v1.36.0
github.com/percona/percona-backup-mongodb v0.0.0-20221024072933-3ec38a5fc670
github.com/percona/percona-server-mongodb-operator v1.14.0
@@ -35,7 +33,7 @@ require (
github.com/thoas/go-funk v0.9.3
github.com/vmware-tanzu/velero v1.13.2
go.mongodb.org/mongo-driver v1.11.7
golang.org/x/crypto v0.39.0
golang.org/x/crypto v0.46.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.29.14
k8s.io/apimachinery v0.29.14
@@ -50,10 +48,11 @@ require (
require (
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
github.com/Azure/azure-storage-blob-go v0.15.0 // indirect
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/ClickHouse/ch-go v0.69.0 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/aws/aws-sdk-go v1.50.8 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
@@ -63,8 +62,10 @@ require (
github.com/evanphx/json-patch/v5 v5.8.0 // indirect
github.com/fasthttp/websocket v1.5.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-faster/city v1.0.1 // indirect
github.com/go-faster/errors v0.7.1 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
@@ -79,7 +80,7 @@ require (
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/imdario/mergo v0.3.14 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jessevdk/go-flags v1.5.0 // indirect
@@ -107,8 +108,10 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/nats-io/nkeys v0.4.9 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/paulmach/orb v0.12.0 // indirect
github.com/philhofer/fwd v1.2.0 // indirect
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
github.com/prometheus/client_model v0.6.1 // indirect
@@ -117,11 +120,13 @@ require (
github.com/rs/xid v1.6.0 // indirect
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee // indirect
github.com/secure-io/sio-go v0.3.1 // indirect
github.com/segmentio/asm v1.2.1 // indirect
github.com/shirou/gopsutil/v3 v3.23.6 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/tinylib/msgp v1.3.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasthttp v1.51.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
@@ -129,20 +134,23 @@ require (
github.com/xdg-go/scram v1.1.1 // indirect
github.com/xdg-go/stringprep v1.0.3 // indirect
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/otel v1.25.0 // indirect
go.opentelemetry.io/otel/metric v1.25.0 // indirect
go.opentelemetry.io/otel/trace v1.25.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/oauth2 v0.19.0 // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.26.0 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.8.0 // indirect
golang.org/x/tools v0.33.0 // indirect
golang.org/x/tools v0.39.0 // indirect
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/protobuf v1.35.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

View File

@@ -28,6 +28,10 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/ClickHouse/ch-go v0.69.0 h1:nO0OJkpxOlN/eaXFj0KzjTz5p7vwP1/y3GN4qc5z/iM=
github.com/ClickHouse/ch-go v0.69.0/go.mod h1:9XeZpSAT4S0kVjOpaJ5186b7PY/NH/hhF8R6u0WIjwg=
github.com/ClickHouse/clickhouse-go/v2 v2.42.0 h1:MdujEfIrpXesQUH0k0AnuVtJQXk6RZmxEhsKUCcv5xk=
github.com/ClickHouse/clickhouse-go/v2 v2.42.0/go.mod h1:riWnuo4YMVdajYll0q6FzRBomdyCrXyFY3VXeXczA8s=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -40,8 +44,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/apecloud/kubeblocks v1.0.0 h1:ditcH7RNZyDiWiHP1V7IuXWHdcBJv3/X5N89Qlz+xew=
github.com/apecloud/kubeblocks v1.0.0/go.mod h1:Mk5xRLm2MpxoTNZKEdDcrIY3I1EpokQBU3Q9Zwse8MI=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@@ -61,8 +65,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/coredns/corefile-migration/migration v1.0.1 h1:M4oUj3atca7xruJcjY12SmulfqTkKS/X5ahzNLfh2SY=
@@ -88,7 +92,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
@@ -136,6 +139,10 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg=
github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -144,8 +151,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
@@ -228,8 +235,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -249,6 +254,7 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -263,6 +269,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
@@ -299,8 +306,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
@@ -342,6 +349,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
@@ -405,6 +414,8 @@ github.com/michaelklishin/rabbit-hole/v3 v3.2.0 h1:N4YdHFj36MP5059Csze9B4TTZPS6j
github.com/michaelklishin/rabbit-hole/v3 v3.2.0/go.mod h1:LTyucfaAV/Y++Y6aVfAmsc6lvKw3y0WEyQa+yPAXcXc=
github.com/minio/crc64nvme v1.0.2 h1:6uO1UxGAD+kwqWWp7mBFsi5gAse66C4NXO8cmcVculg=
github.com/minio/crc64nvme v1.0.2/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/highwayhash v1.0.0/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5dxBpaMbdc=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/madmin-go v1.7.5 h1:IF8j2HR0jWc7msiOcy0KJ8EyY7Q3z+j+lsmSDksQm+I=
github.com/minio/madmin-go v1.7.5/go.mod h1:3SO8SROxHN++tF6QxdTii2SSUaYSrr8lnE9EJWjvz0k=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@@ -440,10 +451,36 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nats-io/nats-server v1.4.1 h1:Ul1oSOGNV/L8kjr4v6l2f9Yet6WY+LevH1/7cRZ/qyA=
github.com/nats-io/nats-server v1.4.1/go.mod h1:c8f/fHd2B6Hgms3LtCaI7y6pC4WD1f4SUxcCud5vhBc=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/jwt v0.3.3-0.20200519195258-f2bf5ce574c7/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M=
github.com/nats-io/jwt v1.1.0/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M=
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
github.com/nats-io/jwt/v2 v2.0.0-20200916203241-1f8ce17dff02/go.mod h1:vs+ZEjP+XKy8szkBmQwCB7RjYdIlMaPsFPs4VdS4bTQ=
github.com/nats-io/jwt/v2 v2.0.0-20201015190852-e11ce317263c/go.mod h1:vs+ZEjP+XKy8szkBmQwCB7RjYdIlMaPsFPs4VdS4bTQ=
github.com/nats-io/jwt/v2 v2.0.0-20210125223648-1c24d462becc/go.mod h1:PuO5FToRL31ecdFqVjc794vK0Bj0CwzveQEDvkb7MoQ=
github.com/nats-io/jwt/v2 v2.0.0-20210208203759-ff814ca5f813/go.mod h1:PuO5FToRL31ecdFqVjc794vK0Bj0CwzveQEDvkb7MoQ=
github.com/nats-io/jwt/v2 v2.0.1/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
github.com/nats-io/nats-server/v2 v2.1.8-0.20200524125952-51ebd92a9093/go.mod h1:rQnBf2Rv4P9adtAs/Ti6LfFmVtFG6HLhl/H7cVshcJU=
github.com/nats-io/nats-server/v2 v2.1.8-0.20200601203034-f8d6dd992b71/go.mod h1:Nan/1L5Sa1JRW+Thm4HNYcIDcVRFc5zK9OpSZeI2kk4=
github.com/nats-io/nats-server/v2 v2.1.8-0.20200929001935-7f44d075f7ad/go.mod h1:TkHpUIDETmTI7mrHN40D1pzxfzHZuGmtMbtb83TGVQw=
github.com/nats-io/nats-server/v2 v2.1.8-0.20201129161730-ebe63db3e3ed/go.mod h1:XD0zHR/jTXdZvWaQfS5mQgsXj6x12kMjKLyAk/cOGgY=
github.com/nats-io/nats-server/v2 v2.1.8-0.20210205154825-f7ab27f7dad4/go.mod h1:kauGd7hB5517KeSqspW2U1Mz/jhPbTrE8eOXzUPk1m0=
github.com/nats-io/nats-server/v2 v2.1.8-0.20210227190344-51550e242af8/go.mod h1:/QQ/dpqFavkNhVnjvMILSQ3cj5hlmhB66adlgNbjuoA=
github.com/nats-io/nats-server/v2 v2.2.0 h1:QNeFmJRBq+O2zF8EmsR/JSvtL2zXb3GwICloHgskYBU=
github.com/nats-io/nats-server/v2 v2.2.0/go.mod h1:eKlAaGmSQHZMFQA6x56AaP5/Bl9N3mWF4awyT2TTpzc=
github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE=
github.com/nats-io/nats.go v1.10.1-0.20200531124210-96f2130e4d55/go.mod h1:ARiFsjW9DVxk48WJbO3OSZ2DG8fjkMi7ecLmXoY/n9I=
github.com/nats-io/nats.go v1.10.1-0.20200606002146-fc6fed82929a/go.mod h1:8eAIv96Mo9QW6Or40jUHejS7e4VwZ3VRYD6Sf0BTDp4=
github.com/nats-io/nats.go v1.10.1-0.20201021145452-94be476ad6e0/go.mod h1:VU2zERjp8xmF+Lw2NH4u2t5qWZxwc7jB3+7HVMWQXPI=
github.com/nats-io/nats.go v1.10.1-0.20210127212649-5b4924938a9a/go.mod h1:Sa3kLIonafChP5IF0b55i9uvGR10I3hPETFbi4+9kOI=
github.com/nats-io/nats.go v1.10.1-0.20210211000709-75ded9c77585/go.mod h1:uBWnCKg9luW1g7hgzPxUjHFRI40EuTSX7RCzgnc74Jk=
github.com/nats-io/nats.go v1.10.1-0.20210228004050-ed743748acac/go.mod h1:hxFvLNbNmT6UppX5B5Tr/r3g+XSwGjJzFn6mxPNJEHc=
github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU=
github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@@ -470,6 +507,9 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/paulmach/orb v0.12.0 h1:z+zOwjmG3MyEEqzv92UN49Lg1JFYx0L9GpGKNVDKk1s=
github.com/paulmach/orb v0.12.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
@@ -482,6 +522,8 @@ github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -522,8 +564,8 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rivo/uniseg v0.4.6 h1:Sovz9sDSwbOz9tgUy8JpT+KgCkPYJEN/oYzlJiYTNLg=
github.com/rivo/uniseg v0.4.6/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -533,6 +575,8 @@ github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee h1:8Iv5m6xEo1NR1Avp
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee/go.mod h1:qwtSXrKuJh/zsFQ12yEE89xfCrGKK63Rr7ctU/uCo4g=
github.com/secure-io/sio-go v0.3.1 h1:dNvY9awjabXTYGsTF1PiCySl9Ltofk9GA3VdWlo7rRc=
github.com/secure-io/sio-go v0.3.1/go.mod h1:+xbkjDzPjwh4Axd07pRKSNriS9SCiYksWnZqdnfpQxs=
github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil/v3 v3.23.6 h1:5y46WPI9QBKBbK7EEccUPNXpJpNrvPuTD0O2zHEHT08=
github.com/shirou/gopsutil/v3 v3.23.6/go.mod h1:j7QX50DrXYggrpN30W0Mo+I4/8U2UUIQrnrhqUeWrAU=
@@ -540,6 +584,8 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -579,18 +625,20 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw=
github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww=
github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
@@ -613,13 +661,16 @@ github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCO
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk=
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
@@ -627,17 +678,20 @@ go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mongodb.org/mongo-driver v1.11.7 h1:LIwYxASDLGUg/8wOhgOOZhX8tQa/9tgZPgzZoVqJvcs=
go.mongodb.org/mongo-driver v1.11.7/go.mod h1:G9TgswdsWjX4tmDA5zfs2+6AEPpYJwqblyjsfuh8oXY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo=
go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw=
go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -648,6 +702,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -655,15 +711,18 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
@@ -673,8 +732,8 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -699,17 +758,18 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -718,8 +778,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -727,6 +787,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -760,12 +821,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -773,11 +836,12 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -798,8 +862,12 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -826,6 +894,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=

View File

@@ -135,6 +135,9 @@ type MiddlewareSpec struct {
// +optional
Mysql Mysql `json:"mysql,omitempty"`
// +optional
ClickHouse ClickHouse `json:"clickhouse,omitempty"`
}
type Redis struct {
@@ -223,6 +226,16 @@ type MariaDatabase struct {
Name string `json:"name"`
}
type ClickHouse struct {
User string `json:"user"`
Password PasswordVar `json:"password,omitempty"`
Databases []ClickHouseDatabase `json:"databases"`
}
type ClickHouseDatabase struct {
Name string `json:"name"`
}
type Subject struct {
Name string `json:"name"`
//// default allow for appName equals spec.App, others is deny
@@ -283,6 +296,7 @@ const (
TypeElasticsearch MiddlewareType = "elasticsearch"
TypeMariaDB MiddlewareType = "mariadb"
TypeMysql MiddlewareType = "mysql"
TypeClickHouse MiddlewareType = "clickhouse"
)
func (c *CitusDatabase) IsDistributed() bool { return c.Distributed != nil && *c.Distributed }

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"bytetrade.io/web3os/tapr/pkg/constants"
"github.com/dgrijalva/jwt-go"
"github.com/golang-jwt/jwt/v4"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

View File

@@ -8,7 +8,7 @@ import (
"bytetrade.io/web3os/tapr/pkg/constants"
"github.com/gofiber/fiber/v2"
"github.com/golang-jwt/jwt"
"github.com/golang-jwt/jwt/v4"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"

View File

@@ -0,0 +1,37 @@
package clickhouse
import (
"context"
"fmt"
kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func ListClickHouseClusters(ctx context.Context, ctrlClient client.Client, namespace string) (clusters []kbappsv1.Cluster, err error) {
var clusterList kbappsv1.ClusterList
err = ctrlClient.List(ctx, &clusterList)
if err != nil {
return nil, err
}
for _, c := range clusterList.Items {
if c.Labels != nil && (c.Labels["clusterdefinition.kubeblocks.io/name"] == "clickhouse" || (c.Name == "clickhouse" && c.Spec.ClusterDef == "clickhouse")) {
clusters = append(clusters, c)
}
}
return clusters, nil
}
func FindClickHouseAdminUser(ctx context.Context, k8sClient *kubernetes.Clientset, namespace string) (user, password string, err error) {
secret, err := k8sClient.CoreV1().Secrets(namespace).Get(ctx, "clickhouse-account-info", metav1.GetOptions{})
if err != nil {
return "", "", err
}
return "admin", string(secret.Data["password"]), nil
}
func GetDatabaseName(appNamespace, name string) string {
return fmt.Sprintf("%s_%s", appNamespace, name)
}

View File

@@ -7,7 +7,7 @@ import (
"text/template"
"github.com/mitchellh/mapstructure"
load "github.com/nats-io/nats-server/conf"
load "github.com/nats-io/nats-server/v2/conf"
"k8s.io/klog/v2"
)