diff --git a/.github/workflows/windows-signed-artifacts.yml b/.github/workflows/windows-signed-artifacts.yml new file mode 100644 index 00000000..149290e5 --- /dev/null +++ b/.github/workflows/windows-signed-artifacts.yml @@ -0,0 +1,122 @@ +name: Windows Signed Artifacts + +on: + workflow_dispatch: + inputs: + ref: + description: Git ref to build + required: false + type: string + +permissions: + contents: read + +jobs: + build-and-sign-windows: + name: Build and sign Windows artifacts + runs-on: windows-latest + env: + TAURI_TARGET: x86_64-pc-windows-msvc + BUN_TARGET: bun-windows-x64 + WINDOWS_SIGNING_CERT_PASSWORD: ${{ secrets.WINDOWS_CERT_PASSWORD }} + WINDOWS_TIMESTAMP_URL: ${{ secrets.WINDOWS_TIMESTAMP_URL || 'http://timestamp.digicert.com' }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.ref || github.ref }} + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 20 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.27.0 + + - name: Setup Bun + uses: oven-sh/setup-bun@v1 + with: + bun-version: 1.3.10 + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + with: + targets: x86_64-pc-windows-msvc + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Prepare sidecars + run: pnpm -C apps/desktop prepare:sidecar + + - name: Import Windows signing certificate + shell: pwsh + env: + WINDOWS_CERT_PFX_BASE64: ${{ secrets.WINDOWS_CERT_PFX_BASE64 }} + run: | + if ([string]::IsNullOrWhiteSpace($env:WINDOWS_CERT_PFX_BASE64)) { + throw "WINDOWS_CERT_PFX_BASE64 is required for Windows signing." + } + if ([string]::IsNullOrWhiteSpace($env:WINDOWS_SIGNING_CERT_PASSWORD)) { + throw "WINDOWS_CERT_PASSWORD is required for Windows signing." 
+ } + $bytes = [Convert]::FromBase64String($env:WINDOWS_CERT_PFX_BASE64) + $certPath = Join-Path $env:RUNNER_TEMP "windows-codesign.pfx" + [IO.File]::WriteAllBytes($certPath, $bytes) + "WINDOWS_CERT_PATH=$certPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + + - name: Sign bundled Windows sidecars + shell: pwsh + run: | + $targets = @( + "apps/desktop/src-tauri/sidecars/opencode-$env:TAURI_TARGET.exe", + "apps/desktop/src-tauri/sidecars/opencode-router-$env:TAURI_TARGET.exe", + "apps/desktop/src-tauri/sidecars/openwork-server-v2-$env:TAURI_TARGET.exe" + ) + foreach ($target in $targets) { + if (!(Test-Path $target)) { + throw "Expected Windows sidecar missing: $target" + } + signtool sign /fd SHA256 /td SHA256 /tr $env:WINDOWS_TIMESTAMP_URL /f $env:WINDOWS_CERT_PATH /p $env:WINDOWS_SIGNING_CERT_PASSWORD $target + } + + - name: Build embedded Server V2 runtime + run: pnpm --filter openwork-server-v2 build:bin:embedded:windows --bundle-dir ../desktop/src-tauri/sidecars + working-directory: apps/server-v2 + + - name: Sign Server V2 executable + shell: pwsh + run: | + $serverPath = "apps/server-v2/dist/bin/openwork-server-v2-$env:BUN_TARGET.exe" + if (!(Test-Path $serverPath)) { + throw "Expected Server V2 executable missing: $serverPath" + } + signtool sign /fd SHA256 /td SHA256 /tr $env:WINDOWS_TIMESTAMP_URL /f $env:WINDOWS_CERT_PATH /p $env:WINDOWS_SIGNING_CERT_PASSWORD $serverPath + signtool verify /pa /v $serverPath + + - name: Build desktop Windows bundle + run: pnpm --filter @openwork/desktop exec tauri build --target x86_64-pc-windows-msvc + + - name: Sign desktop Windows artifacts + shell: pwsh + run: | + $artifacts = Get-ChildItem -Path "apps/desktop/src-tauri/target/x86_64-pc-windows-msvc/release/bundle" -Recurse -Include *.exe,*.msi + if ($artifacts.Count -eq 0) { + throw "No Windows desktop artifacts were produced to sign." 
+ } + foreach ($artifact in $artifacts) { + signtool sign /fd SHA256 /td SHA256 /tr $env:WINDOWS_TIMESTAMP_URL /f $env:WINDOWS_CERT_PATH /p $env:WINDOWS_SIGNING_CERT_PASSWORD $artifact.FullName + signtool verify /pa /v $artifact.FullName + } + + - name: Upload signed artifacts + uses: actions/upload-artifact@v4 + with: + name: windows-signed-artifacts + path: | + apps/server-v2/dist/bin/openwork-server-v2-*.exe + apps/desktop/src-tauri/target/x86_64-pc-windows-msvc/release/bundle/**/*.exe + apps/desktop/src-tauri/target/x86_64-pc-windows-msvc/release/bundle/**/*.msi diff --git a/.gitignore b/.gitignore index 21d0773a..c8b245a9 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,8 @@ apps/desktop/src-tauri/sidecars/ # Bun build artifacts *.bun-build apps/server/cli +apps/server-v2/openapi/openapi.json +packages/openwork-server-sdk/generated/ # pnpm store (created by Docker volume mounts) .pnpm-store/ diff --git a/apps/server-v2/bin/openwork-server-v2.mjs b/apps/server-v2/bin/openwork-server-v2.mjs new file mode 100644 index 00000000..76170677 --- /dev/null +++ b/apps/server-v2/bin/openwork-server-v2.mjs @@ -0,0 +1,23 @@ +#!/usr/bin/env node + +import { spawn } from "node:child_process"; +import { dirname, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; + +const binDir = dirname(fileURLToPath(import.meta.url)); +const packageDir = resolve(binDir, ".."); + +const child = spawn("bun", ["src/cli.ts", ...process.argv.slice(2)], { + cwd: packageDir, + stdio: "inherit", + env: process.env, +}); + +child.once("exit", (code, signal) => { + if (signal) { + process.kill(process.pid, signal); + return; + } + + process.exit(code ?? 
1); +}); diff --git a/apps/server-v2/package.json b/apps/server-v2/package.json new file mode 100644 index 00000000..b178f19d --- /dev/null +++ b/apps/server-v2/package.json @@ -0,0 +1,43 @@ +{ + "name": "openwork-server-v2", + "version": "0.11.206", + "private": true, + "type": "module", + "bin": { + "openwork-server-v2": "bin/openwork-server-v2.mjs" + }, + "scripts": { + "dev": "OPENWORK_DEV_MODE=1 bun --watch src/cli.ts", + "start": "bun src/cli.ts", + "openapi:generate": "bun ./scripts/generate-openapi.ts", + "openapi:watch": "node ./scripts/watch-openapi.mjs", + "test": "bun test", + "typecheck": "tsc -p tsconfig.json --noEmit", + "build:bin": "bun ./script/build.ts --outdir dist/bin --filename openwork-server-v2", + "build:bin:windows": "bun ./script/build.ts --outdir dist/bin --filename openwork-server-v2 --target bun-windows-x64", + "build:bin:embedded": "bun ./script/build.ts --outdir dist/bin --filename openwork-server-v2 --embed-runtime", + "build:bin:embedded:windows": "bun ./script/build.ts --outdir dist/bin --filename openwork-server-v2 --embed-runtime --target bun-windows-x64", + "build:bin:all": "bun ./script/build.ts --outdir dist/bin --filename openwork-server-v2 --target bun-darwin-arm64 --target bun-darwin-x64-baseline --target bun-linux-x64-baseline --target bun-linux-arm64 --target bun-windows-x64", + "build:bin:embedded:all": "bun ./script/build.ts --outdir dist/bin --filename openwork-server-v2 --embed-runtime --target bun-darwin-arm64 --target bun-darwin-x64-baseline --target bun-linux-x64-baseline --target bun-linux-arm64 --target bun-windows-x64", + "prepublishOnly": "pnpm openapi:generate && pnpm build:bin" + }, + "files": [ + "bin", + "openapi", + "src" + ], + "dependencies": { + "@opencode-ai/sdk": "1.2.27", + "hono": "4.12.12", + "hono-openapi": "1.3.0", + "jsonc-parser": "^3.3.1", + "yaml": "^2.8.1", + "zod": "^4.3.6" + }, + "devDependencies": { + "@types/node": "^22.10.2", + "bun-types": "^1.3.6", + "typescript": "^5.6.3" + }, + 
"packageManager": "pnpm@10.27.0" +} diff --git a/apps/server-v2/script/build.ts b/apps/server-v2/script/build.ts new file mode 100644 index 00000000..16e1173b --- /dev/null +++ b/apps/server-v2/script/build.ts @@ -0,0 +1,267 @@ +import { mkdtempSync, mkdirSync, readFileSync, rmSync, statSync, writeFileSync } from "node:fs"; +import os from "node:os"; +import { join, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; +import { spawnSync } from "node:child_process"; + +const bunRuntime = (globalThis as typeof globalThis & { + Bun?: { + argv?: string[]; + }; +}).Bun; + +if (!bunRuntime?.argv) { + console.error("This script must be run with Bun."); + process.exit(1); +} + +type BuildOptions = { + bundleDir: string | null; + embedRuntime: boolean; + filename: string; + outdir: string; + targets: string[]; +}; + +type RuntimeAssetPaths = { + manifestPath: string; + opencodePath: string; + routerPath: string; +}; + +const TARGET_TRIPLES: Record = { + "bun-darwin-arm64": "aarch64-apple-darwin", + "bun-darwin-x64": "x86_64-apple-darwin", + "bun-darwin-x64-baseline": "x86_64-apple-darwin", + "bun-linux-arm64": "aarch64-unknown-linux-gnu", + "bun-linux-x64": "x86_64-unknown-linux-gnu", + "bun-linux-x64-baseline": "x86_64-unknown-linux-gnu", + "bun-windows-arm64": "aarch64-pc-windows-msvc", + "bun-windows-x64": "x86_64-pc-windows-msvc", + "bun-windows-x64-baseline": "x86_64-pc-windows-msvc", +}; + +function readPackageVersion() { + const packageJsonPath = resolve("package.json"); + const contents = readFileSync(packageJsonPath, "utf8"); + const parsed = JSON.parse(contents) as { version?: unknown }; + const version = typeof parsed.version === "string" ? 
parsed.version.trim() : ""; + if (!version) { + throw new Error(`Missing package version in ${packageJsonPath}`); + } + return version; +} + +function fileExists(filePath: string) { + try { + return statSync(filePath).isFile(); + } catch { + return false; + } +} + +function readArgs(argv: string[]): BuildOptions { + const options: BuildOptions = { + bundleDir: process.env.OPENWORK_SERVER_V2_BUNDLE_DIR?.trim() ? resolve(process.env.OPENWORK_SERVER_V2_BUNDLE_DIR.trim()) : null, + embedRuntime: false, + filename: "openwork-server-v2", + outdir: resolve("dist", "bin"), + targets: [], + }; + + for (let index = 0; index < argv.length; index += 1) { + const value = argv[index]; + if (!value) continue; + + if (value === "--embed-runtime") { + options.embedRuntime = true; + continue; + } + + if (value === "--bundle-dir") { + const next = argv[index + 1]; + if (next) { + options.bundleDir = resolve(next); + index += 1; + } + continue; + } + + if (value.startsWith("--bundle-dir=")) { + const next = value.slice("--bundle-dir=".length).trim(); + if (next) options.bundleDir = resolve(next); + continue; + } + + if (value === "--target") { + const next = argv[index + 1]; + if (next) { + options.targets.push(next); + index += 1; + } + continue; + } + + if (value.startsWith("--target=")) { + const next = value.slice("--target=".length).trim(); + if (next) options.targets.push(next); + continue; + } + + if (value === "--outdir") { + const next = argv[index + 1]; + if (next) { + options.outdir = resolve(next); + index += 1; + } + continue; + } + + if (value.startsWith("--outdir=")) { + const next = value.slice("--outdir=".length).trim(); + if (next) options.outdir = resolve(next); + continue; + } + + if (value === "--filename") { + const next = argv[index + 1]; + if (next) { + options.filename = next; + index += 1; + } + continue; + } + + if (value.startsWith("--filename=")) { + const next = value.slice("--filename=".length).trim(); + if (next) options.filename = next; + } + } + + 
return options; +} + +function outputName(filename: string, target?: string) { + const needsExe = target ? target.includes("windows") : process.platform === "win32"; + const suffix = target ? `-${target}` : ""; + const ext = needsExe ? ".exe" : ""; + return `${filename}${suffix}${ext}`; +} + +function runtimeAssetCandidates(bundleDir: string, target?: string): RuntimeAssetPaths { + const triple = target ? TARGET_TRIPLES[target] ?? null : null; + const canonicalManifest = join(bundleDir, "manifest.json"); + const targetManifest = triple ? join(bundleDir, `manifest.json-${triple}`) : null; + const manifestPath = [targetManifest, canonicalManifest].find((candidate) => candidate && fileExists(candidate)) ?? null; + + const opencodeCandidates = [ + triple ? join(bundleDir, `opencode-${triple}${triple.includes("windows") ? ".exe" : ""}`) : null, + join(bundleDir, process.platform === "win32" || target?.includes("windows") ? "opencode.exe" : "opencode"), + ]; + const routerCandidates = [ + triple ? join(bundleDir, `opencode-router-${triple}${triple.includes("windows") ? ".exe" : ""}`) : null, + join(bundleDir, process.platform === "win32" || target?.includes("windows") ? "opencode-router.exe" : "opencode-router"), + ]; + + const opencodePath = opencodeCandidates.find((candidate) => candidate && fileExists(candidate)) ?? null; + const routerPath = routerCandidates.find((candidate) => candidate && fileExists(candidate)) ?? null; + + if (!manifestPath || !opencodePath || !routerPath) { + throw new Error( + `Missing runtime assets for embedded build in ${bundleDir} (target=${target ?? "current"}, manifest=${manifestPath ?? "missing"}, opencode=${opencodePath ?? "missing"}, router=${routerPath ?? 
"missing"}).`, + ); + } + + return { + manifestPath, + opencodePath, + routerPath, + }; +} + +function createEmbeddedEntrypoint(assets: RuntimeAssetPaths) { + const buildDir = mkdtempSync(join(os.tmpdir(), "openwork-server-v2-build-")); + const embeddedModulePath = join(buildDir, "embedded-runtime.ts"); + const entrypointPath = join(buildDir, "entry.ts"); + + writeFileSync( + embeddedModulePath, + [ + `import manifestPath from ${JSON.stringify(assets.manifestPath)} with { type: "file" };`, + `import opencodePath from ${JSON.stringify(assets.opencodePath)} with { type: "file" };`, + `import routerPath from ${JSON.stringify(assets.routerPath)} with { type: "file" };`, + "", + "export const embeddedRuntimeBundle = {", + " manifestPath,", + " opencodePath,", + " routerPath,", + "};", + "", + ].join("\n"), + "utf8", + ); + + writeFileSync( + entrypointPath, + [ + `import { registerEmbeddedRuntimeBundle } from ${JSON.stringify(resolve("src", "runtime", "embedded.ts"))};`, + `import { embeddedRuntimeBundle } from ${JSON.stringify(embeddedModulePath)};`, + "", + "registerEmbeddedRuntimeBundle(embeddedRuntimeBundle);", + "void (async () => {", + ` await import(${JSON.stringify(resolve("src", "cli.ts"))});`, + "})();", + "", + ].join("\n"), + "utf8", + ); + + return { + cleanup() { + rmSync(buildDir, { force: true, recursive: true }); + }, + entrypointPath, + }; +} + +function buildOnce(options: BuildOptions, target?: string) { + mkdirSync(options.outdir, { recursive: true }); + const outfile = join(options.outdir, outputName(options.filename, target)); + const version = readPackageVersion(); + const embedded = options.embedRuntime + ? createEmbeddedEntrypoint(runtimeAssetCandidates( + options.bundleDir ?? resolve("..", "desktop", "src-tauri", "sidecars"), + target, + )) + : null; + const entrypoint = embedded?.entrypointPath ?? 
resolve("src", "cli.ts"); + + const args = [ + "build", + entrypoint, + "--compile", + "--minify", + "--bytecode", + "--sourcemap", + "--outfile", + outfile, + "--define", + `__OPENWORK_SERVER_V2_VERSION__=${JSON.stringify(version)}`, + ]; + if (target) { + args.push("--target", target); + } + + const result = spawnSync("bun", args, { stdio: "inherit" }); + embedded?.cleanup(); + if (result.status !== 0) { + process.exit(result.status ?? 1); + } +} + +const options = readArgs(bunRuntime.argv.slice(2)); +const targets = options.targets.length ? options.targets : [undefined]; + +for (const target of targets) { + buildOnce(options, target); +} diff --git a/apps/server-v2/scripts/generate-openapi.ts b/apps/server-v2/scripts/generate-openapi.ts new file mode 100644 index 00000000..00383fe6 --- /dev/null +++ b/apps/server-v2/scripts/generate-openapi.ts @@ -0,0 +1,58 @@ +import { mkdir, mkdtemp, readFile, rm, writeFile } from "node:fs/promises"; +import os from "node:os"; +import { dirname, join, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; +import { createAppDependencies } from "../src/context/app-dependencies.js"; +import { createApp } from "../src/app-factory.js"; +import { resolveServerV2Version } from "../src/version.js"; + +const scriptDir = dirname(fileURLToPath(import.meta.url)); +const packageDir = resolve(scriptDir, ".."); +const outputPath = resolve(packageDir, "openapi/openapi.json"); + +async function writeIfChanged(filePath: string, contents: string) { + try { + const current = await readFile(filePath, "utf8"); + if (current === contents) { + return false; + } + } catch { + // ignore missing file + } + + await mkdir(dirname(filePath), { recursive: true }); + await writeFile(filePath, contents, "utf8"); + return true; +} + +async function main() { + const workingDirectory = await mkdtemp(join(os.tmpdir(), "openwork-server-v2-openapi-")); + const dependencies = createAppDependencies({ + environment: "test", + inMemory: true, + 
version: resolveServerV2Version(), + workingDirectory, + }); + try { + const app = createApp({ dependencies }); + const response = await app.request("http://openwork.local/openapi.json"); + + if (!response.ok) { + throw new Error(`Failed to generate OpenAPI document: ${response.status} ${response.statusText}`); + } + + const document = await response.json(); + const contents = `${JSON.stringify(document, null, 2)}\n`; + const changed = await writeIfChanged(outputPath, contents); + + process.stdout.write(`[openwork-server-v2] ${changed ? "wrote" : "verified"} ${outputPath}\n`); + } finally { + await dependencies.close(); + await rm(workingDirectory, { force: true, recursive: true }); + } +} + +main().catch((error) => { + process.stderr.write(`${error instanceof Error ? error.stack ?? error.message : String(error)}\n`); + process.exit(1); +}); diff --git a/apps/server-v2/scripts/watch-openapi.mjs b/apps/server-v2/scripts/watch-openapi.mjs new file mode 100644 index 00000000..f1c32fa2 --- /dev/null +++ b/apps/server-v2/scripts/watch-openapi.mjs @@ -0,0 +1,72 @@ +import { spawn } from "node:child_process"; +import { watch } from "node:fs"; +import path from "node:path"; +import process from "node:process"; +import { fileURLToPath } from "node:url"; + +const scriptDir = path.dirname(fileURLToPath(import.meta.url)); +const packageDir = path.resolve(scriptDir, ".."); +const watchedDir = path.join(packageDir, "src"); + +let activeChild = null; +let queued = false; +let timer = null; + +function runGenerate() { + if (activeChild) { + queued = true; + return; + } + + activeChild = spawn("bun", ["./scripts/generate-openapi.ts"], { + cwd: packageDir, + env: process.env, + stdio: "inherit", + }); + + activeChild.once("exit", (code) => { + activeChild = null; + + if (code && code !== 0) { + process.stderr.write(`[openwork-server-v2] OpenAPI generation failed with exit code ${code}.\n`); + } + + if (queued) { + queued = false; + scheduleGenerate(); + } + }); +} + +function 
scheduleGenerate() { + if (timer) { + clearTimeout(timer); + } + + timer = setTimeout(() => { + timer = null; + runGenerate(); + }, 120); +} + +runGenerate(); + +const watcher = watch(watchedDir, { recursive: true }, (_eventType, filename) => { + if (!filename || String(filename).includes(".DS_Store")) { + return; + } + + scheduleGenerate(); +}); + +for (const signal of ["SIGINT", "SIGTERM"]) { + process.on(signal, () => { + watcher.close(); + + if (activeChild && activeChild.exitCode === null) { + activeChild.kill("SIGTERM"); + } + + process.exit(0); + }); +} diff --git a/apps/server-v2/src/adapters/opencode/local.ts b/apps/server-v2/src/adapters/opencode/local.ts new file mode 100644 index 00000000..e2906022 --- /dev/null +++ b/apps/server-v2/src/adapters/opencode/local.ts @@ -0,0 +1,289 @@ +import { createOpencodeClient } from "@opencode-ai/sdk/v2/client"; +import { createBoundedOutputCollector, formatRuntimeOutput, type RuntimeOutputSnapshot } from "../../runtime/output-buffer.js"; + +type LocalOpencodeClient = ReturnType; + +export type CreateLocalOpencodeOptions = { + binary?: string; + client?: { + directory?: string; + fetch?: typeof fetch; + headers?: Record; + responseStyle?: "data"; + throwOnError?: boolean; + }; + config?: Record; + cwd?: string; + env?: Record; + hostname?: string; + port?: number; + signal?: AbortSignal; + timeout?: number; +}; + +export type LocalProcessExit = { + at: string; + code: number | null; + signal: string | null; +}; + +export type LocalOpencodeHandle = { + client: LocalOpencodeClient; + server: { + close(): void; + getOutput(): RuntimeOutputSnapshot; + proc: Bun.Subprocess<"ignore", "pipe", "pipe">; + url: string; + waitForExit(): Promise; + }; +}; + +export class LocalOpencodeStartupError extends Error { + constructor( + message: string, + readonly code: "aborted" | "early_exit" | "missing_binary" | "spawn_failed" | "timeout", + readonly binary: string, + readonly output: RuntimeOutputSnapshot, + ) { + super(message); + 
this.name = "LocalOpencodeStartupError"; + } +} + +function normalizeBinary(binary: string | undefined) { + const value = binary?.trim() ?? ""; + if (!value) { + throw new LocalOpencodeStartupError( + "Failed to start OpenCode: no explicit binary path was provided.", + "missing_binary", + value, + { combined: [], stderr: [], stdout: [], totalLines: 0, truncated: false }, + ); + } + return value; +} + +function parseReadinessUrl(line: string) { + const match = line.match(/https?:\/\/\S+/); + return match?.[0] ?? null; +} + +function buildSpawnErrorMessage(binary: string, error: unknown) { + const text = error instanceof Error ? error.message : String(error); + if (text.includes("ENOENT") || text.includes("executable file not found") || text.includes("No such file")) { + return `Failed to start OpenCode: executable not found at ${binary}`; + } + return `Failed to start OpenCode from ${binary}: ${text}`; +} + +export async function createLocalOpencode(options: CreateLocalOpencodeOptions = {}): Promise { + const binary = normalizeBinary(options.binary); + const hostname = options.hostname ?? "127.0.0.1"; + const port = options.port ?? 4096; + const timeoutMs = options.timeout ?? 5_000; + let resolveReady: ((url: string) => void) | null = null; + const output = createBoundedOutputCollector({ + maxBytes: 16_384, + maxLines: 200, + onLine(line) { + const readinessUrl = parseReadinessUrl(line.text); + if (readinessUrl && /listening/i.test(line.text)) { + resolveReady?.(readinessUrl); + } + }, + }); + + const args = [ + binary, + "serve", + `--hostname=${hostname}`, + `--port=${port}`, + ]; + + if (typeof options.config?.logLevel === "string" && options.config.logLevel.trim()) { + args.push(`--log-level=${options.config.logLevel.trim()}`); + } + + let proc: Bun.Subprocess<"ignore", "pipe", "pipe">; + try { + proc = Bun.spawn(args, { + cwd: options.cwd, + env: { + ...process.env, + ...options.env, + OPENCODE_CONFIG_CONTENT: JSON.stringify(options.config ?? 
{}), + }, + stderr: "pipe", + stdin: "ignore", + stdout: "pipe", + }); + } catch (error) { + throw new LocalOpencodeStartupError(buildSpawnErrorMessage(binary, error), "spawn_failed", binary, output.snapshot()); + } + + let settled = false; + let timeoutHandle: ReturnType | null = null; + let abortListener: (() => void) | null = null; + + const waitForExit = async (): Promise => { + const code = await proc.exited; + return { + at: new Date().toISOString(), + code, + signal: "signalCode" in proc && typeof proc.signalCode === "string" ? proc.signalCode : null, + }; + }; + + const pump = async (streamName: "stdout" | "stderr", stream: ReadableStream | null) => { + if (!stream) { + return; + } + + const reader = stream.getReader(); + const decoder = new TextDecoder(); + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + output.finish(streamName); + return; + } + const text = decoder.decode(value, { stream: true }); + output.pushChunk(streamName, text); + } + } finally { + output.finish(streamName); + reader.releaseLock(); + } + }; + + const startup = await new Promise<{ client: LocalOpencodeClient; url: string }>((resolve, reject) => { + const rejectOnce = (error: LocalOpencodeStartupError) => { + if (settled) { + return; + } + settled = true; + if (timeoutHandle) { + clearTimeout(timeoutHandle); + } + if (abortListener && options.signal) { + options.signal.removeEventListener("abort", abortListener); + } + reject(error); + }; + + const resolveOnce = (url: string) => { + if (settled) { + return; + } + settled = true; + if (timeoutHandle) { + clearTimeout(timeoutHandle); + } + if (abortListener && options.signal) { + options.signal.removeEventListener("abort", abortListener); + } + resolve({ + client: createOpencodeClient({ + baseUrl: url, + directory: options.client?.directory, + fetch: options.client?.fetch, + headers: options.client?.headers, + responseStyle: options.client?.responseStyle ?? 
"data", + throwOnError: options.client?.throwOnError ?? true, + }), + url, + }); + }; + + resolveReady = resolveOnce; + void pump("stdout", proc.stdout); + void pump("stderr", proc.stderr); + + void waitForExit().then((exit) => { + if (settled) { + return; + } + + const snapshot = output.snapshot(); + rejectOnce( + new LocalOpencodeStartupError( + `OpenCode exited before becoming ready (${exit.code === null ? "no exit code" : `exit code ${exit.code}`}).\nCollected output:\n${formatRuntimeOutput(snapshot)}`, + "early_exit", + binary, + snapshot, + ), + ); + }); + + timeoutHandle = setTimeout(() => { + if (settled) { + return; + } + + proc.kill(); + const snapshot = output.snapshot(); + rejectOnce( + new LocalOpencodeStartupError( + `OpenCode did not become ready within ${timeoutMs}ms.\nCollected output:\n${formatRuntimeOutput(snapshot)}`, + "timeout", + binary, + snapshot, + ), + ); + }, timeoutMs); + + if (options.signal) { + if (options.signal.aborted) { + proc.kill(); + rejectOnce( + new LocalOpencodeStartupError( + `OpenCode startup aborted for ${binary}.`, + "aborted", + binary, + output.snapshot(), + ), + ); + return; + } + + abortListener = () => { + proc.kill(); + rejectOnce( + new LocalOpencodeStartupError( + `OpenCode startup aborted for ${binary}.`, + "aborted", + binary, + output.snapshot(), + ), + ); + }; + options.signal.addEventListener("abort", abortListener, { once: true }); + } + }).catch((error) => { + if (!settled) { + proc.kill(); + } + + if (error instanceof LocalOpencodeStartupError) { + throw error; + } + + throw new LocalOpencodeStartupError(buildSpawnErrorMessage(binary, error), "spawn_failed", binary, output.snapshot()); + }); + + return { + client: startup.client, + server: { + close() { + proc.kill(); + }, + getOutput() { + return output.snapshot(); + }, + proc, + url: startup.url, + waitForExit, + }, + }; +} diff --git a/apps/server-v2/src/adapters/process-info.ts b/apps/server-v2/src/adapters/process-info.ts new file mode 100644 index 
00000000..4d6d69b6 --- /dev/null +++ b/apps/server-v2/src/adapters/process-info.ts @@ -0,0 +1,21 @@ +import os from "node:os"; + +export type ProcessInfoAdapter = { + environment: string; + hostname: string; + pid: number; + platform: NodeJS.Platform; + runtime: "bun"; + runtimeVersion: string | null; +}; + +export function createProcessInfoAdapter(environment: string = process.env.NODE_ENV ?? "development"): ProcessInfoAdapter { + return { + environment, + hostname: os.hostname(), + pid: process.pid, + platform: process.platform, + runtime: "bun", + runtimeVersion: globalThis.Bun?.version ?? null, + }; +} diff --git a/apps/server-v2/src/adapters/remote-openwork.ts b/apps/server-v2/src/adapters/remote-openwork.ts new file mode 100644 index 00000000..0a72cd2d --- /dev/null +++ b/apps/server-v2/src/adapters/remote-openwork.ts @@ -0,0 +1,158 @@ +import { HTTPException } from "hono/http-exception"; +import type { ServerRecord, WorkspaceRecord } from "../database/types.js"; +import { RouteError } from "../http.js"; + +function encodeBasicAuth(username: string, password: string) { + return Buffer.from(`${username}:${password}`, "utf8").toString("base64"); +} + +function pickString(record: Record | null | undefined, keys: string[]) { + for (const key of keys) { + const value = record?.[key]; + if (typeof value === "string" && value.trim()) { + return value.trim(); + } + } + return null; +} + +export function buildRemoteOpenworkHeaders(server: ServerRecord) { + const auth = server.auth && typeof server.auth === "object" ? 
server.auth as Record : null; + const headers: Record = { + Accept: "application/json", + }; + const bearer = pickString(auth, ["openworkClientToken", "openworkToken", "authToken", "token", "bearerToken"]); + const hostToken = pickString(auth, ["openworkHostToken", "hostToken"]); + const username = pickString(auth, ["username", "user"]); + const password = pickString(auth, ["password", "pass"]); + + if (bearer) { + headers.Authorization = `Bearer ${bearer}`; + } else if (username && password) { + headers.Authorization = `Basic ${encodeBasicAuth(username, password)}`; + } + + if (hostToken) { + headers["X-OpenWork-Host-Token"] = hostToken; + } + + return headers; +} + +function normalizeBaseUrl(value: string) { + return value.replace(/\/+$/, ""); +} + +function unwrapEnvelope(payload: unknown): T { + if (payload && typeof payload === "object" && "ok" in (payload as Record)) { + const record = payload as Record; + if (record.ok === true && "data" in record) { + return record.data as T; + } + if (record.ok === false) { + const error = record.error && typeof record.error === "object" ? record.error as Record : {}; + const code = typeof error.code === "string" ? error.code : "bad_gateway"; + const message = typeof error.message === "string" ? 
error.message : "Remote OpenWork request failed."; + throw new RouteError(502, code as any, message); + } + } + return payload as T; +} + +export function resolveRemoteWorkspaceTarget(server: ServerRecord, workspace: WorkspaceRecord) { + const serverBaseUrl = server.baseUrl?.trim(); + if (!serverBaseUrl) { + throw new RouteError(502, "bad_gateway", `Remote server ${server.id} is missing a base URL.`); + } + const remoteWorkspaceId = workspace.remoteWorkspaceId?.trim(); + if (!remoteWorkspaceId) { + throw new RouteError(502, "bad_gateway", `Remote workspace ${workspace.id} is missing a remote workspace identifier.`); + } + return { + remoteWorkspaceId, + serverBaseUrl: normalizeBaseUrl(serverBaseUrl), + }; +} + +export async function requestRemoteOpenwork(input: { + body?: unknown; + method?: string; + path: string; + server: ServerRecord; + timeoutMs?: number; +}): Promise { + const baseUrl = input.server.baseUrl?.trim(); + if (!baseUrl) { + throw new RouteError(502, "bad_gateway", `Remote server ${input.server.id} is missing a base URL.`); + } + + const response = await fetch(`${normalizeBaseUrl(baseUrl)}${input.path}`, { + body: input.body === undefined ? undefined : JSON.stringify(input.body), + headers: { + ...buildRemoteOpenworkHeaders(input.server), + ...(input.body === undefined ? {} : { "Content-Type": "application/json" }), + }, + method: input.method ?? (input.body === undefined ? "GET" : "POST"), + signal: AbortSignal.timeout(input.timeoutMs ?? 10_000), + }); + + const text = await response.text(); + const payload = text.trim() ? JSON.parse(text) : null; + + if (response.status === 404) { + throw new HTTPException(404, { message: typeof (payload as any)?.message === "string" ? (payload as any).message : "Remote resource not found." 
}); + } + if (response.status === 401) { + throw new RouteError(502, "bad_gateway", "Remote OpenWork server rejected the stored credentials."); + } + if (response.status === 403) { + throw new RouteError(502, "bad_gateway", "Remote OpenWork server rejected the stored permissions."); + } + if (!response.ok) { + const message = typeof (payload as any)?.error?.message === "string" + ? (payload as any).error.message + : typeof (payload as any)?.message === "string" + ? (payload as any).message + : `Remote OpenWork request failed with status ${response.status}.`; + throw new RouteError(502, "bad_gateway", message); + } + + return unwrapEnvelope(payload); +} + +export async function requestRemoteOpenworkRaw(input: { + body?: BodyInit | null; + contentType?: string | null; + method?: string; + path: string; + server: ServerRecord; + timeoutMs?: number; +}) { + const baseUrl = input.server.baseUrl?.trim(); + if (!baseUrl) { + throw new RouteError(502, "bad_gateway", `Remote server ${input.server.id} is missing a base URL.`); + } + + const response = await fetch(`${normalizeBaseUrl(baseUrl)}${input.path}`, { + body: input.body ?? undefined, + headers: { + ...buildRemoteOpenworkHeaders(input.server), + ...(input.contentType ? { "Content-Type": input.contentType } : {}), + }, + method: input.method ?? (input.body ? "POST" : "GET"), + signal: AbortSignal.timeout(input.timeoutMs ?? 15_000), + }); + + if (response.status === 404) { + throw new HTTPException(404, { message: "Remote resource not found." 
}); + } + if (response.status === 401 || response.status === 403) { + throw new RouteError(502, "bad_gateway", "Remote OpenWork server rejected the stored credentials."); + } + if (!response.ok) { + const text = await response.text().catch(() => ""); + throw new RouteError(502, "bad_gateway", text.trim() || `Remote OpenWork request failed with status ${response.status}.`); + } + + return response; +} diff --git a/apps/server-v2/src/adapters/sessions/local-opencode.ts b/apps/server-v2/src/adapters/sessions/local-opencode.ts new file mode 100644 index 00000000..70b11ec5 --- /dev/null +++ b/apps/server-v2/src/adapters/sessions/local-opencode.ts @@ -0,0 +1,23 @@ +import { RouteError } from "../../http.js"; +import type { RuntimeService } from "../../services/runtime-service.js"; +import type { WorkspaceRecord } from "../../database/types.js"; +import { createOpenCodeSessionBackend } from "./opencode-backend.js"; + +export function createLocalOpencodeSessionAdapter(input: { + runtime: RuntimeService; + workspace: WorkspaceRecord; +}) { + const runtimeHealth = input.runtime.getOpencodeHealth(); + if (!runtimeHealth.baseUrl || !runtimeHealth.running) { + throw new RouteError( + 503, + "service_unavailable", + "Local OpenCode runtime is not available for session operations.", + ); + } + + return createOpenCodeSessionBackend({ + baseUrl: runtimeHealth.baseUrl, + directory: input.workspace.dataDir, + }); +} diff --git a/apps/server-v2/src/adapters/sessions/opencode-backend.ts b/apps/server-v2/src/adapters/sessions/opencode-backend.ts new file mode 100644 index 00000000..7ddf86be --- /dev/null +++ b/apps/server-v2/src/adapters/sessions/opencode-backend.ts @@ -0,0 +1,293 @@ +import { createOpencodeClient } from "@opencode-ai/sdk/v2/client"; +import type { + SessionMessageRecord, + SessionRecord, + SessionSnapshotRecord, + SessionStatusRecord, + SessionTodoRecord, + WorkspaceEventRecord, +} from "../../schemas/sessions.js"; +import { + parseSessionData, + parseSessionListData, 
+ parseSessionMessageData, + parseSessionMessagesData, + parseSessionStatusesData, + parseSessionTodosData, + parseWorkspaceEventData, +} from "../../schemas/sessions.js"; + +export class OpenCodeBackendError extends Error { + constructor( + readonly status: number, + readonly code: string, + message: string, + readonly details?: unknown, + ) { + super(message); + this.name = "OpenCodeBackendError"; + } +} + +type OpenCodeBackendOptions = { + baseUrl: string; + directory?: string | null; + headers?: Record; +}; + +type RequestOptions = { + body?: unknown; + method?: string; + query?: Record; + signal?: AbortSignal; +}; + +function buildDirectoryHeader(directory?: string | null) { + const trimmed = directory?.trim() ?? ""; + if (!trimmed) { + return null; + } + return /[^\x00-\x7F]/.test(trimmed) ? encodeURIComponent(trimmed) : trimmed; +} + +function buildUrl(baseUrl: string, path: string, query?: RequestOptions["query"]) { + const url = new URL(path, `${baseUrl.replace(/\/+$/, "")}/`); + for (const [key, value] of Object.entries(query ?? {})) { + if (value === undefined) { + continue; + } + url.searchParams.set(key, String(value)); + } + return url; +} + +async function parseJsonResponse(response: Response) { + const text = await response.text(); + if (!text.trim()) { + return null; + } + try { + return JSON.parse(text) as unknown; + } catch { + return text; + } +} + +function toBackendError(response: Response, payload: unknown) { + const record = payload && typeof payload === "object" ? payload as Record : null; + const code = typeof record?.code === "string" ? record.code : "opencode_request_failed"; + const message = typeof record?.message === "string" ? 
record.message : response.statusText || "OpenCode request failed."; + const details = record?.details; + return new OpenCodeBackendError(response.status, code, message, details); +} + +export type OpenCodeSessionBackend = ReturnType; + +export function createOpenCodeSessionBackend(options: OpenCodeBackendOptions) { + const normalizedBaseUrl = options.baseUrl.replace(/\/+$/, ""); + const baseHeaders = { ...(options.headers ?? {}) }; + const directoryHeader = buildDirectoryHeader(options.directory); + if (directoryHeader) { + baseHeaders["x-opencode-directory"] = directoryHeader; + } + + const eventClient = createOpencodeClient({ + baseUrl: normalizedBaseUrl, + directory: options.directory ?? undefined, + headers: Object.keys(baseHeaders).length ? baseHeaders : undefined, + responseStyle: "data", + throwOnError: true, + }); + + async function requestJson(path: string, request: RequestOptions = {}) { + const url = buildUrl(normalizedBaseUrl, path, request.query); + const response = await fetch(url, { + method: request.method ?? "GET", + headers: { + ...(request.body !== undefined ? { "Content-Type": "application/json" } : {}), + ...baseHeaders, + }, + body: request.body !== undefined ? JSON.stringify(request.body) : undefined, + signal: request.signal, + }); + + const payload = await parseJsonResponse(response); + if (!response.ok) { + throw toBackendError(response, payload); + } + return payload; + } + + async function requestVoid(path: string, request: RequestOptions = {}) { + const url = buildUrl(normalizedBaseUrl, path, request.query); + const response = await fetch(url, { + method: request.method ?? "POST", + headers: { + ...(request.body !== undefined ? { "Content-Type": "application/json" } : {}), + ...baseHeaders, + }, + body: request.body !== undefined ? 
JSON.stringify(request.body) : undefined, + signal: request.signal, + }); + + if (!response.ok) { + throw toBackendError(response, await parseJsonResponse(response)); + } + } + + return { + async abortSession(sessionId: string) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/abort`, { method: "POST" }); + }, + + async command(sessionId: string, body: Record) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/command`, { body, method: "POST" }); + }, + + async createSession(body: Record) { + return parseSessionData(await requestJson("/session", { body, method: "POST" })); + }, + + async deleteMessage(sessionId: string, messageId: string) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/message/${encodeURIComponent(messageId)}`, { + method: "DELETE", + }); + }, + + async deleteMessagePart(sessionId: string, messageId: string, partId: string) { + await requestVoid( + `/session/${encodeURIComponent(sessionId)}/message/${encodeURIComponent(messageId)}/part/${encodeURIComponent(partId)}`, + { method: "DELETE" }, + ); + }, + + async deleteSession(sessionId: string) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}`, { method: "DELETE" }); + }, + + async forkSession(sessionId: string, body: Record) { + return parseSessionData(await requestJson(`/session/${encodeURIComponent(sessionId)}/fork`, { body, method: "POST" })); + }, + + async getMessage(sessionId: string, messageId: string) { + return parseSessionMessageData( + await requestJson(`/session/${encodeURIComponent(sessionId)}/message/${encodeURIComponent(messageId)}`), + ); + }, + + async getSession(sessionId: string) { + return parseSessionData(await requestJson(`/session/${encodeURIComponent(sessionId)}`)); + }, + + async getSessionSnapshot(sessionId: string, input?: { limit?: number }) { + const [session, messages, todos, statuses] = await Promise.all([ + this.getSession(sessionId), + this.listMessages(sessionId, input), + this.listTodos(sessionId), + 
this.listStatuses(), + ]); + + return { + messages, + session, + status: statuses[sessionId] ?? { type: "idle" }, + todos, + } satisfies SessionSnapshotRecord; + }, + + async initSession(sessionId: string, body: Record) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/init`, { body, method: "POST" }); + }, + + async listMessages(sessionId: string, input?: { limit?: number }) { + return parseSessionMessagesData( + await requestJson(`/session/${encodeURIComponent(sessionId)}/message`, { + query: { limit: input?.limit }, + }), + ); + }, + + async listSessions(input?: { limit?: number; roots?: boolean; search?: string; start?: number }) { + return parseSessionListData(await requestJson("/session", { query: input })); + }, + + async listStatuses() { + return parseSessionStatusesData(await requestJson("/session/status")); + }, + + async listTodos(sessionId: string) { + return parseSessionTodosData(await requestJson(`/session/${encodeURIComponent(sessionId)}/todo`)); + }, + + async promptAsync(sessionId: string, body: Record) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/prompt_async`, { body, method: "POST" }); + }, + + async revert(sessionId: string, body: { messageID: string }) { + return parseSessionData(await requestJson(`/session/${encodeURIComponent(sessionId)}/revert`, { body, method: "POST" })); + }, + + async sendMessage(sessionId: string, body: Record) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/message`, { body, method: "POST" }); + }, + + async shareSession(sessionId: string) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/share`, { method: "POST" }); + }, + + async shell(sessionId: string, body: Record) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/shell`, { body, method: "POST" }); + }, + + async streamEvents(signal?: AbortSignal): Promise> { + const subscription = await eventClient.event.subscribe(undefined, { signal }); + const source = subscription.stream as 
AsyncIterable; + const iterator = async function* () { + for await (const event of source) { + if (!event || typeof event !== "object") { + continue; + } + + const record = event as Record; + if (typeof record.type === "string") { + yield parseWorkspaceEventData({ + properties: record.properties, + type: record.type, + }); + continue; + } + + const payload = record.payload; + if (payload && typeof payload === "object" && typeof (payload as Record).type === "string") { + yield parseWorkspaceEventData(payload); + } + } + }; + + return iterator(); + }, + + async summarizeSession(sessionId: string, body: Record) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/summarize`, { body, method: "POST" }); + }, + + async unshareSession(sessionId: string) { + await requestVoid(`/session/${encodeURIComponent(sessionId)}/share`, { method: "DELETE" }); + }, + + async unrevert(sessionId: string) { + return parseSessionData(await requestJson(`/session/${encodeURIComponent(sessionId)}/unrevert`, { method: "POST" })); + }, + + async updateMessagePart(sessionId: string, messageId: string, partId: string, body: Record) { + await requestVoid( + `/session/${encodeURIComponent(sessionId)}/message/${encodeURIComponent(messageId)}/part/${encodeURIComponent(partId)}`, + { body, method: "PATCH" }, + ); + }, + + async updateSession(sessionId: string, body: Record) { + return parseSessionData(await requestJson(`/session/${encodeURIComponent(sessionId)}`, { body, method: "PATCH" })); + }, + }; +} diff --git a/apps/server-v2/src/adapters/sessions/remote-openwork.ts b/apps/server-v2/src/adapters/sessions/remote-openwork.ts new file mode 100644 index 00000000..1ad6452f --- /dev/null +++ b/apps/server-v2/src/adapters/sessions/remote-openwork.ts @@ -0,0 +1,33 @@ +import { RouteError } from "../../http.js"; +import type { ServerRecord, WorkspaceRecord } from "../../database/types.js"; +import { createOpenCodeSessionBackend } from "./opencode-backend.js"; +import { 
buildRemoteOpenworkHeaders } from "../remote-openwork.js"; + +export function createRemoteOpenworkSessionAdapter(input: { + server: ServerRecord; + workspace: WorkspaceRecord; +}) { + if (!input.server.baseUrl) { + throw new RouteError(502, "bad_gateway", "Remote workspace server is missing a base URL."); + } + + const remoteType = input.workspace.notes?.remoteType === "opencode" ? "opencode" : "openwork"; + const remoteWorkspaceId = input.workspace.remoteWorkspaceId?.trim() ?? ""; + + if (remoteType === "openwork") { + if (!remoteWorkspaceId) { + throw new RouteError(502, "bad_gateway", "Remote OpenWork workspace is missing its remote workspace identifier."); + } + + return createOpenCodeSessionBackend({ + baseUrl: `${input.server.baseUrl.replace(/\/+$/, "")}/w/${encodeURIComponent(remoteWorkspaceId)}/opencode`, + headers: buildRemoteOpenworkHeaders(input.server), + }); + } + + return createOpenCodeSessionBackend({ + baseUrl: input.server.baseUrl, + directory: typeof input.workspace.notes?.directory === "string" ? 
input.workspace.notes.directory : undefined, + headers: buildRemoteOpenworkHeaders(input.server), + }); +} diff --git a/apps/server-v2/src/app-factory.ts b/apps/server-v2/src/app-factory.ts new file mode 100644 index 00000000..da78b8ef --- /dev/null +++ b/apps/server-v2/src/app-factory.ts @@ -0,0 +1,44 @@ +import { Hono } from "hono"; +import type { AppDependencies } from "./context/app-dependencies.js"; +import { createAppDependencies } from "./context/app-dependencies.js"; +import type { AppBindings } from "./context/request-context.js"; +import { requestContextMiddleware } from "./context/request-context.js"; +import { buildErrorResponse } from "./http.js"; +import { errorHandlingMiddleware } from "./middleware/error-handler.js"; +import { requestLoggerMiddleware } from "./middleware/request-logger.js"; +import { requestIdMiddleware } from "./middleware/request-id.js"; +import { responseFinalizerMiddleware } from "./middleware/response-finalizer.js"; +import { registerRoutes } from "./routes/index.js"; + +export type CreateAppOptions = { + dependencies?: AppDependencies; +}; + +export function createApp(options: CreateAppOptions = {}) { + const dependencies = options.dependencies ?? 
createAppDependencies(); + const app = new Hono(); + + app.use("*", requestIdMiddleware); + app.use("*", requestContextMiddleware(dependencies)); + app.use("*", responseFinalizerMiddleware); + app.use("*", requestLoggerMiddleware); + app.use("*", errorHandlingMiddleware); + + registerRoutes(app, dependencies); + + app.notFound((c) => { + const requestId = c.get("requestId"); + return c.json( + buildErrorResponse({ + requestId, + code: "not_found", + message: `Route not found: ${new URL(c.req.url).pathname}`, + }), + 404, + ); + }); + + return app; +} + +export type AppType = ReturnType; diff --git a/apps/server-v2/src/app.test.ts b/apps/server-v2/src/app.test.ts new file mode 100644 index 00000000..8bf663bd --- /dev/null +++ b/apps/server-v2/src/app.test.ts @@ -0,0 +1,338 @@ +import { afterEach, expect, test } from "bun:test"; +import { createApp } from "./app.js"; +import { createAppDependencies } from "./context/app-dependencies.js"; + +afterEach(() => { + delete process.env.OPENWORK_TOKEN; + delete process.env.OPENWORK_HOST_TOKEN; +}); + +function createTestApp(options?: { requireAuth?: boolean; seedRegistry?: boolean }) { + if (options?.requireAuth) { + process.env.OPENWORK_TOKEN = "client-token"; + process.env.OPENWORK_HOST_TOKEN = "host-token"; + } + + const dependencies = createAppDependencies({ + environment: "test", + inMemory: true, + legacy: { + desktopDataDir: `/tmp/openwork-server-v2-test-desktop-${Math.random().toString(16).slice(2)}`, + orchestratorDataDir: `/tmp/openwork-server-v2-test-orchestrator-${Math.random().toString(16).slice(2)}`, + }, + runtime: { + bootstrapPolicy: "disabled", + }, + startedAt: new Date("2026-04-14T00:00:00.000Z"), + version: "0.0.0-test", + }); + + if (options?.seedRegistry) { + dependencies.persistence.registry.importLocalWorkspace({ + dataDir: "/tmp/openwork-phase5-local", + displayName: "Alpha Local", + status: "ready", + }); + dependencies.persistence.registry.importRemoteWorkspace({ + baseUrl: 
"https://remote.example.com/w/alpha", + directory: "/srv/remote-alpha", + displayName: "Remote Alpha", + legacyNotes: { + source: "test", + }, + remoteType: "openwork", + remoteWorkspaceId: "alpha", + serverAuth: { openworkToken: "secret" }, + serverBaseUrl: "https://remote.example.com", + serverHostingKind: "self_hosted", + serverLabel: "remote.example.com", + workspaceStatus: "ready", + }); + } + + return { + app: createApp({ dependencies }), + dependencies, + }; +} + +test("root info uses the shared success envelope and route conventions", async () => { + const { app } = createTestApp(); + const response = await app.request("http://openwork.local/"); + const body = await response.json(); + + expect(response.status).toBe(200); + expect(response.headers.get("x-request-id")).toBe(body.meta.requestId); + expect(body).toMatchObject({ + ok: true, + data: { + service: "openwork-server-v2", + routes: { + system: "/system", + workspaces: "/workspaces", + workspaceResource: "/workspaces/:workspaceId", + }, + contract: { + source: "hono-openapi", + sdkPackage: "@openwork/server-sdk", + }, + }, + }); +}); + +test("system health returns a consistent envelope", async () => { + const { app } = createTestApp(); + const response = await app.request("http://openwork.local/system/health"); + const body = await response.json(); + + expect(response.status).toBe(200); + expect(body.ok).toBe(true); + expect(body.data.status).toBe("ok"); + expect(body.data.database.kind).toBe("sqlite"); + expect(["ready", "warning"]).toContain(body.data.database.status); +}); + +test("system metadata includes phase 10 registry, runtime, and cutover state", async () => { + const { app } = createTestApp(); + const response = await app.request("http://openwork.local/system/meta"); + const body = await response.json(); + + expect(response.status).toBe(200); + expect(body.data.foundation.phase).toBe(10); + expect(body.data.foundation.startup.registry.localServerId).toBe("srv_local"); + 
expect(body.data.foundation.startup.registry.hiddenWorkspaceIds).toHaveLength(2); + expect(body.data.runtimeSupervisor.bootstrapPolicy).toBe("disabled"); +}); + +test("openapi route is generated from the live Hono app", async () => { + const { app } = createTestApp(); + const response = await app.request("http://openwork.local/openapi.json"); + const document = await response.json(); + + expect(response.status).toBe(200); + expect(document.openapi).toBe("3.1.0"); + expect(document.info.title).toBe("OpenWork Server V2"); + expect(document.paths["/system/health"].get.operationId).toBe("getSystemHealth"); + expect(document.paths["/system/meta"].get.operationId).toBe("getSystemMeta"); + expect(document.paths["/system/capabilities"].get.operationId).toBe("getSystemCapabilities"); + expect(document.paths["/system/status"].get.operationId).toBe("getSystemStatus"); + expect(document.paths["/system/opencode/health"].get.operationId).toBe("getSystemOpencodeHealth"); + expect(document.paths["/system/runtime/versions"].get.operationId).toBe("getSystemRuntimeVersions"); + expect(document.paths["/system/runtime/upgrade"].post.operationId).toBe("postSystemRuntimeUpgrade"); + expect(document.paths["/system/servers/connect"].post.operationId).toBe("postSystemServersConnect"); + expect(document.paths["/workspaces"].get.operationId).toBe("getWorkspaces"); + expect(document.paths["/workspaces/local"].post.operationId).toBe("postWorkspacesLocal"); + expect(document.paths["/workspaces/{workspaceId}/config"].get.operationId).toBe("getWorkspacesByWorkspaceIdConfig"); + expect(document.paths["/system/cloud-signin"].get.operationId).toBe("getSystemCloudSignin"); + expect(document.paths["/system/managed/mcps"].get.operationId).toBe("getSystemManagedMcps"); + expect(document.paths["/system/router/identities/telegram"].get.operationId).toBe("getSystemRouterIdentitiesTelegram"); + 
expect(document.paths["/workspaces/{workspaceId}/export"].get.operationId).toBe("getWorkspacesByWorkspaceIdExport"); + expect(document.paths["/workspaces/{workspaceId}/reload-events"].get.operationId).toBe("getWorkspacesByWorkspaceIdReloadEvents"); + expect(document.paths["/workspaces/{workspaceId}/sessions"].get.operationId).toBe("getWorkspacesByWorkspaceIdSessions"); + expect(document.paths["/workspaces/{workspaceId}/events"].get.operationId).toBe("getWorkspacesByWorkspaceIdEvents"); +}); + +test("runtime routes expose the initial server-owned status surfaces", async () => { + const { app } = createTestApp(); + + const [opencodeResponse, routerResponse, runtimeResponse] = await Promise.all([ + app.request("http://openwork.local/system/opencode/health"), + app.request("http://openwork.local/system/router/health"), + app.request("http://openwork.local/system/runtime/summary"), + ]); + + const opencodeBody = await opencodeResponse.json(); + const routerBody = await routerResponse.json(); + const runtimeBody = await runtimeResponse.json(); + + expect(opencodeResponse.status).toBe(200); + expect(opencodeBody.data.status).toBe("disabled"); + expect(routerBody.data.status).toBe("disabled"); + expect(runtimeBody.data.bootstrapPolicy).toBe("disabled"); +}); + +test("not found routes use the shared error envelope", async () => { + const { app } = createTestApp(); + const response = await app.request("http://openwork.local/nope"); + const body = await response.json(); + + expect(response.status).toBe(404); + expect(response.headers.get("x-request-id")).toBe(body.error.requestId); + expect(body).toMatchObject({ + ok: false, + error: { + code: "not_found", + }, + }); +}); + +test("system status reports registry summary and capabilities", async () => { + const { app } = createTestApp({ seedRegistry: true }); + const response = await app.request("http://openwork.local/system/status"); + const body = await response.json(); + + expect(response.status).toBe(200); + 
expect(body.data.registry).toMatchObject({ + hiddenWorkspaceCount: 2, + remoteServerCount: 1, + totalServers: 2, + visibleWorkspaceCount: 2, + }); + expect(body.data.capabilities.transport.v2).toBe(true); + expect(body.data.capabilities.registry.remoteServerConnections).toBe(true); + expect(body.data.auth.required).toBe(false); +}); + +test("workspace list excludes hidden workspaces by default", async () => { + const { app } = createTestApp({ seedRegistry: true }); + const response = await app.request("http://openwork.local/workspaces"); + const body = await response.json(); + + expect(response.status).toBe(200); + expect(body.data.items).toHaveLength(2); + expect(body.data.items.map((item: any) => item.displayName).sort()).toEqual(["Alpha Local", "Remote Alpha"]); + expect(body.data.items.find((item: any) => item.displayName === "Remote Alpha")?.backend.kind).toBe("remote_openwork"); +}); + +test("workspace detail hides internal workspaces from non-host readers", async () => { + const { app, dependencies } = createTestApp({ requireAuth: true, seedRegistry: true }); + const hiddenWorkspaceId = dependencies.persistence.registry.ensureHiddenWorkspace("control").id; + + const clientResponse = await app.request(`http://openwork.local/workspaces/${hiddenWorkspaceId}`, { + headers: { + Authorization: "Bearer client-token", + }, + }); + const hostResponse = await app.request(`http://openwork.local/workspaces/${hiddenWorkspaceId}`, { + headers: { + "X-OpenWork-Host-Token": "host-token", + }, + }); + + expect(clientResponse.status).toBe(404); + expect(hostResponse.status).toBe(200); +}); + +test("auth-protected registry reads require client or host scope", async () => { + const { app } = createTestApp({ requireAuth: true, seedRegistry: true }); + + const anonymous = await app.request("http://openwork.local/workspaces"); + const client = await app.request("http://openwork.local/workspaces", { + headers: { + Authorization: "Bearer client-token", + }, + }); + const 
clientHidden = await app.request("http://openwork.local/workspaces?includeHidden=true", { + headers: { + Authorization: "Bearer client-token", + }, + }); + const hostInventory = await app.request("http://openwork.local/system/servers", { + headers: { + "X-OpenWork-Host-Token": "host-token", + }, + }); + + expect(anonymous.status).toBe(401); + expect(client.status).toBe(200); + expect(clientHidden.status).toBe(403); + expect(hostInventory.status).toBe(200); +}); + +test("host-scoped remote server connect syncs remote workspaces into the local registry", async () => { + const remote = Bun.serve({ + fetch(request) { + const url = new URL(request.url); + if (url.pathname === "/workspaces") { + return Response.json({ + ok: true, + data: { + items: [ + { + backend: { + kind: "local_opencode", + local: { configDir: "/srv/config", dataDir: "/srv/project-alpha", opencodeProjectId: null }, + remote: null, + serverId: "srv_local", + }, + createdAt: new Date().toISOString(), + displayName: "Remote Project Alpha", + hidden: false, + id: "remote-alpha", + kind: "local", + notes: null, + preset: "starter", + runtime: { backendKind: "local_opencode", health: null, lastError: null, lastSessionRefreshAt: null, lastSyncAt: null, updatedAt: null }, + server: { auth: { configured: false, scheme: "none" }, baseUrl: null, capabilities: {}, hostingKind: "self_hosted", id: "srv_local", isEnabled: true, isLocal: true, kind: "local", label: "Remote", lastSeenAt: null, source: "seeded", updatedAt: new Date().toISOString() }, + slug: "remote-project-alpha", + status: "ready", + updatedAt: new Date().toISOString(), + }, + ], + }, + meta: { requestId: "owreq_remote_1", timestamp: new Date().toISOString() }, + }); + } + return new Response("not found", { status: 404 }); + }, + hostname: "127.0.0.1", + port: 0, + }); + + try { + const { app } = createTestApp({ requireAuth: true }); + const response = await app.request("http://openwork.local/system/servers/connect", { + method: "POST", + headers: { 
+ "Content-Type": "application/json", + "X-OpenWork-Host-Token": "host-token", + }, + body: JSON.stringify({ + baseUrl: `http://127.0.0.1:${remote.port}`, + token: "remote-token", + workspaceId: "remote-alpha", + }), + }); + const body = await response.json(); + + expect(response.status).toBe(200); + expect(body.data.server.kind).toBe("remote"); + expect(body.data.selectedWorkspaceId).toMatch(/^ws_/); + expect(body.data.workspaces[0].backend.kind).toBe("remote_openwork"); + expect(body.data.workspaces[0].backend.remote.remoteWorkspaceId).toBe("remote-alpha"); + } finally { + remote.stop(true); + } +}); + +test("remote server connect returns a gateway error when the remote server rejects credentials", async () => { + const remote = Bun.serve({ + fetch() { + return Response.json({ ok: false, error: { code: "unauthorized", message: "bad token", requestId: "owreq_remote_bad_auth" } }, { status: 401 }); + }, + hostname: "127.0.0.1", + port: 0, + }); + + try { + const { app } = createTestApp({ requireAuth: true }); + const response = await app.request("http://openwork.local/system/servers/connect", { + method: "POST", + headers: { + "Content-Type": "application/json", + "X-OpenWork-Host-Token": "host-token", + }, + body: JSON.stringify({ + baseUrl: `http://127.0.0.1:${remote.port}`, + token: "wrong-token", + }), + }); + const body = await response.json(); + + expect(response.status).toBe(502); + expect(body.ok).toBe(false); + expect(body.error.code).toBe("bad_gateway"); + } finally { + remote.stop(true); + } +}); diff --git a/apps/server-v2/src/app.ts b/apps/server-v2/src/app.ts new file mode 100644 index 00000000..011250d5 --- /dev/null +++ b/apps/server-v2/src/app.ts @@ -0,0 +1,4 @@ +export { createApp, type AppType, type CreateAppOptions } from "./app-factory.js"; +import { createApp } from "./app-factory.js"; + +export const app = createApp(); diff --git a/apps/server-v2/src/bootstrap/server.ts b/apps/server-v2/src/bootstrap/server.ts new file mode 100644 index 
00000000..e661f2be --- /dev/null +++ b/apps/server-v2/src/bootstrap/server.ts @@ -0,0 +1,83 @@ +import { createApp } from "../app.js"; +import { createAppDependencies, type AppDependencies } from "../context/app-dependencies.js"; +import { resolveServerV2Version } from "../version.js"; + +export type StartServerOptions = { + dependencies?: AppDependencies; + host?: string; + port?: number; + silent?: boolean; +}; + +export type StartedServer = { + app: ReturnType; + dependencies: AppDependencies; + host: string; + port: number; + server: Bun.Server; + stop(): Promise; + url: string; +}; + +function resolvePort(value: number | undefined) { + if (value === undefined) { + return 3100; + } + + if (!Number.isInteger(value) || value < 0 || value > 65535) { + throw new Error(`Invalid port: ${value}`); + } + + return value; +} + +export function startServer(options: StartServerOptions = {}): StartedServer { + const host = options.host ?? process.env.OPENWORK_SERVER_V2_HOST ?? "127.0.0.1"; + const port = resolvePort(options.port ?? Number.parseInt(process.env.OPENWORK_SERVER_V2_PORT ?? "3100", 10)); + const version = resolveServerV2Version(); + const dependencies = options.dependencies ?? createAppDependencies({ + localServer: { + baseUrl: port === 0 ? null : `http://${host}:${port}`, + hostingKind: process.env.OPENWORK_SERVER_V2_HOSTING_KIND === "desktop" ? 
"desktop" : "self_hosted", + label: "Local OpenWork Server", + }, + version, + }); + const app = createApp({ dependencies }); + const server = Bun.serve({ + fetch: app.fetch, + hostname: host, + port, + }); + const url = server.url.toString(); + const resolvedPort = new URL(url).port; + dependencies.services.registry.attachLocalServerBaseUrl(url); + if (dependencies.services.runtime.getBootstrapPolicy() === "eager") { + void dependencies.services.runtime.bootstrap().catch(() => undefined); + } + + if (!options.silent) { + console.info( + JSON.stringify({ + bootstrap: dependencies.database.getStartupDiagnostics(), + host, + port: Number(resolvedPort || port), + scope: "openwork-server-v2.start", + url, + }), + ); + } + + return { + app, + dependencies, + host, + port: Number(resolvedPort || port), + server, + async stop() { + server.stop(true); + await dependencies.close(); + }, + url, + }; +} diff --git a/apps/server-v2/src/cli.ts b/apps/server-v2/src/cli.ts new file mode 100644 index 00000000..80b2e8ee --- /dev/null +++ b/apps/server-v2/src/cli.ts @@ -0,0 +1,72 @@ +import process from "node:process"; +import { startServer } from "./bootstrap/server.js"; + +function printHelp() { + process.stdout.write([ + "openwork-server-v2", + "", + "Options:", + " --host Hostname to bind. Defaults to 127.0.0.1.", + " --port Port to bind. 
Defaults to 3100.", + " --help Show this help text.", + "", + ].join("\n")); +} + +function parseArgs(argv: Array) { + let host: string | undefined; + let port: number | undefined; + + for (let index = 0; index < argv.length; index += 1) { + const argument = argv[index]; + + if (argument === "--help") { + printHelp(); + process.exit(0); + } + + if (argument === "--host") { + host = argv[index + 1]; + index += 1; + continue; + } + + if (argument === "--port") { + const rawPort = argv[index + 1]; + if (!rawPort) { + throw new Error("Missing value for --port."); + } + port = Number.parseInt(rawPort, 10); + index += 1; + continue; + } + + throw new Error(`Unknown argument: ${argument}`); + } + + return { host, port }; +} + +async function main() { + const { host, port } = parseArgs(process.argv.slice(2)); + const runtime = startServer({ host, port }); + + const shutdown = async (signal: NodeJS.Signals) => { + console.info(JSON.stringify({ scope: "openwork-server-v2.stop", signal })); + await runtime.stop(); + process.exit(0); + }; + + for (const signal of ["SIGINT", "SIGTERM"] as const) { + process.on(signal, () => { + void shutdown(signal); + }); + } + + await new Promise(() => undefined); +} + +main().catch((error) => { + process.stderr.write(`${error instanceof Error ? error.stack ?? 
error.message : String(error)}\n`); + process.exit(1); +}); diff --git a/apps/server-v2/src/context/app-dependencies.ts b/apps/server-v2/src/context/app-dependencies.ts new file mode 100644 index 00000000..0f6c33eb --- /dev/null +++ b/apps/server-v2/src/context/app-dependencies.ts @@ -0,0 +1,214 @@ +import { createAuthService, type AuthService } from "../services/auth-service.js"; +import { createCapabilitiesService, type CapabilitiesService } from "../services/capabilities-service.js"; +import { createConfigMaterializationService, type ConfigMaterializationService } from "../services/config-materialization-service.js"; +import { createManagedResourceService, type ManagedResourceService } from "../services/managed-resource-service.js"; +import { createProcessInfoAdapter, type ProcessInfoAdapter } from "../adapters/process-info.js"; +import { createServerPersistence, type ServerPersistence } from "../database/persistence.js"; +import { createSqliteDatabaseStatusProvider, type DatabaseStatusProvider } from "../database/status-provider.js"; +import type { RuntimeAssetService } from "../runtime/assets.js"; +import type { RegistryService } from "../services/registry-service.js"; +import { createRouterProductService, type RouterProductService } from "../services/router-product-service.js"; +import { createServerRegistryService, type ServerRegistryService } from "../services/server-registry-service.js"; +import { createRuntimeService, type RuntimeService } from "../services/runtime-service.js"; +import { createWorkspaceFileService, type WorkspaceFileService } from "../services/workspace-file-service.js"; +import { createWorkspaceSessionService, type WorkspaceSessionService } from "../services/workspace-session-service.js"; +import { createSystemService, type SystemService } from "../services/system-service.js"; +import { createWorkspaceRegistryService, type WorkspaceRegistryService } from "../services/workspace-registry-service.js"; +import { createRemoteServerService, 
type RemoteServerService } from "../services/remote-server-service.js"; +import { createSchedulerService, type SchedulerService } from "../services/scheduler-service.js"; +import { resolveServerV2Version } from "../version.js"; + +export type AppDependencies = { + database: DatabaseStatusProvider; + environment: string; + persistence: ServerPersistence; + processInfo: ProcessInfoAdapter; + services: { + auth: AuthService; + capabilities: CapabilitiesService; + config: ConfigMaterializationService; + files: WorkspaceFileService; + managed: ManagedResourceService; + registry: RegistryService; + remoteServers: RemoteServerService; + router: RouterProductService; + runtime: RuntimeService; + scheduler: SchedulerService; + sessions: WorkspaceSessionService; + serverRegistry: ServerRegistryService; + system: SystemService; + workspaceRegistry: WorkspaceRegistryService; + }; + startedAt: Date; + version: string; + close(): Promise; +}; + +type CreateAppDependenciesOverrides = Partial> & { + inMemory?: boolean; + legacy?: { + cloudSigninJson?: string; + cloudSigninPath?: string; + desktopDataDir?: string; + orchestratorDataDir?: string; + }; + localServer?: { + baseUrl?: string | null; + hostingKind?: "cloud" | "desktop" | "self_hosted"; + label?: string; + }; + persistence?: ServerPersistence; + runtime?: { + assetService?: RuntimeAssetService; + bootstrapPolicy?: "disabled" | "eager" | "manual"; + restartPolicy?: { + backoffMs?: number; + maxAttempts?: number; + windowMs?: number; + }; + }; + workingDirectory?: string; +}; + +function isTruthy(value: string | undefined) { + if (!value) { + return false; + } + + return ["1", "true", "yes", "on"].includes(value.trim().toLowerCase()); +} + +function resolveLocalHostingKind(explicit?: "cloud" | "desktop" | "self_hosted") { + if (explicit) { + return explicit; + } + + const fromEnv = process.env.OPENWORK_SERVER_V2_HOSTING_KIND?.trim(); + if (fromEnv === "desktop" || fromEnv === "self_hosted" || fromEnv === "cloud") { + return 
fromEnv; + } + + if (isTruthy(process.env.OPENWORK_DESKTOP_HOSTED) || Boolean(process.env.TAURI_ENV_PLATFORM)) { + return "desktop"; + } + + return "self_hosted"; +} + +export function createAppDependencies(overrides: CreateAppDependenciesOverrides = {}): AppDependencies { + const environment = overrides.environment ?? process.env.NODE_ENV ?? "development"; + const startedAt = overrides.startedAt ?? new Date(); + const version = overrides.version ?? resolveServerV2Version(); + const processInfo = overrides.processInfo ?? createProcessInfoAdapter(environment); + const persistence = overrides.persistence ?? createServerPersistence({ + environment, + inMemory: overrides.inMemory, + legacy: overrides.legacy, + localServer: { + baseUrl: overrides.localServer?.baseUrl ?? null, + hostingKind: resolveLocalHostingKind(overrides.localServer?.hostingKind), + label: overrides.localServer?.label ?? "Local OpenWork Server", + }, + version, + workingDirectory: overrides.workingDirectory, + }); + const database = createSqliteDatabaseStatusProvider({ diagnostics: persistence.diagnostics }); + const auth = createAuthService(); + const serverRegistry = createServerRegistryService({ + localServerId: persistence.registry.localServerId, + repositories: persistence.repositories, + }); + const workspaceRegistry = createWorkspaceRegistryService({ + repositories: persistence.repositories, + servers: serverRegistry, + }); + const runtime = createRuntimeService({ + assetService: overrides.runtime?.assetService, + bootstrapPolicy: overrides.runtime?.bootstrapPolicy, + environment, + repositories: persistence.repositories, + restartPolicy: overrides.runtime?.restartPolicy, + serverId: persistence.registry.localServerId, + serverVersion: version, + workingDirectory: persistence.workingDirectory, + }); + const capabilities = createCapabilitiesService({ + auth, + runtime, + }); + const config = createConfigMaterializationService({ + repositories: persistence.repositories, + serverId: 
persistence.registry.localServerId, + workingDirectory: persistence.workingDirectory, + }); + const sessions = createWorkspaceSessionService({ + repositories: persistence.repositories, + runtime, + }); + const files = createWorkspaceFileService({ + config, + registry: persistence.registry, + repositories: persistence.repositories, + runtime, + serverId: persistence.registry.localServerId, + }); + const managed = createManagedResourceService({ + config, + files, + repositories: persistence.repositories, + serverId: persistence.registry.localServerId, + workingDirectory: persistence.workingDirectory, + }); + const router = createRouterProductService({ + repositories: persistence.repositories, + runtime, + serverId: persistence.registry.localServerId, + }); + const remoteServers = createRemoteServerService({ + repositories: persistence.repositories, + }); + const scheduler = createSchedulerService({ + workspaceRegistry, + }); + + return { + database, + environment, + persistence, + processInfo, + services: { + auth, + capabilities, + config, + files, + managed, + registry: persistence.registry, + remoteServers, + router, + runtime, + scheduler, + sessions, + serverRegistry, + system: createSystemService({ + auth, + capabilities, + database, + environment, + processInfo, + serverRegistry, + runtime, + startedAt, + version, + workspaceRegistry, + }), + workspaceRegistry, + }, + startedAt, + version, + async close() { + await files.dispose(); + await runtime.dispose(); + persistence.close(); + }, + }; +} diff --git a/apps/server-v2/src/context/request-context.ts b/apps/server-v2/src/context/request-context.ts new file mode 100644 index 00000000..6932ee26 --- /dev/null +++ b/apps/server-v2/src/context/request-context.ts @@ -0,0 +1,39 @@ +import type { Context, MiddlewareHandler } from "hono"; +import type { AppDependencies } from "./app-dependencies.js"; +import type { RequestActor } from "../services/auth-service.js"; + +export type RequestContext = { + actor: 
RequestActor; + dependencies: AppDependencies; + receivedAt: Date; + requestId: string; + services: AppDependencies["services"]; +}; + +export type AppBindings = { + Variables: { + requestContext: RequestContext; + requestId: string; + }; +}; + +export function createRequestContext(dependencies: AppDependencies, requestId: string, headers: Headers): RequestContext { + return { + actor: dependencies.services.auth.resolveActor(headers), + dependencies, + receivedAt: new Date(), + requestId, + services: dependencies.services, + }; +} + +export function requestContextMiddleware(dependencies: AppDependencies): MiddlewareHandler { + return async (c, next) => { + c.set("requestContext", createRequestContext(dependencies, c.get("requestId"), c.req.raw.headers)); + await next(); + }; +} + +export function getRequestContext(c: Pick, "get">): RequestContext { + return c.get("requestContext"); +} diff --git a/apps/server-v2/src/contract.test.ts b/apps/server-v2/src/contract.test.ts new file mode 100644 index 00000000..63bd0754 --- /dev/null +++ b/apps/server-v2/src/contract.test.ts @@ -0,0 +1,64 @@ +import { expect, test } from "bun:test"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +const packageDir = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); +const repoDir = path.resolve(packageDir, "../.."); + +async function runCommand(command: Array, cwd: string) { + const child = Bun.spawn(command, { + cwd, + env: process.env, + stderr: "pipe", + stdout: "pipe", + }); + + const [stdout, stderr, exitCode] = await Promise.all([ + new Response(child.stdout).text(), + new Response(child.stderr).text(), + child.exited, + ]); + + return { exitCode, stderr, stdout }; +} + +test("openapi generation writes the committed server-v2 contract", async () => { + const result = await runCommand(["bun", "./scripts/generate-openapi.ts"], packageDir); + + expect(result.exitCode).toBe(0); + 
expect(result.stdout).toContain("apps/server-v2/openapi/openapi.json"); + + const openApiContents = await Bun.file(path.join(packageDir, "openapi/openapi.json")).text(); + expect(openApiContents).toContain('"/system/health"'); + expect(openApiContents).toContain('"getSystemHealth"'); + expect(openApiContents).toContain('"/system/status"'); + expect(openApiContents).toContain('"/system/cloud-signin"'); + expect(openApiContents).toContain('"/system/managed/mcps"'); + expect(openApiContents).toContain('"/system/router/identities/telegram"'); + expect(openApiContents).toContain('"/workspaces"'); + expect(openApiContents).toContain('"/workspaces/{workspaceId}/export"'); + expect(openApiContents).toContain('"/workspaces/{workspaceId}/sessions"'); + expect(openApiContents).toContain('"/workspaces/{workspaceId}/events"'); + expect(openApiContents).toContain('"/system/opencode/health"'); + expect(openApiContents).toContain('"/system/runtime/versions"'); +}); + +test("sdk generation succeeds from the server-v2 openapi document", async () => { + const result = await runCommand(["pnpm", "--filter", "@openwork/server-sdk", "generate"], repoDir); + + expect(result.exitCode).toBe(0); + + const sdkIndex = await Bun.file(path.join(repoDir, "packages/openwork-server-sdk/generated/index.ts")).text(); + expect(sdkIndex).toContain("getSystemHealth"); + expect(sdkIndex).toContain("getSystemStatus"); + expect(sdkIndex).toContain("getSystemCloudSignin"); + expect(sdkIndex).toContain("getSystemManagedMcps"); + expect(sdkIndex).toContain("getSystemRouterIdentitiesTelegram"); + expect(sdkIndex).toContain("getWorkspaces"); + expect(sdkIndex).toContain("getWorkspacesByWorkspaceIdExport"); + expect(sdkIndex).toContain("getWorkspacesByWorkspaceIdSessions"); + expect(sdkIndex).toContain("getWorkspacesByWorkspaceIdEvents"); + expect(sdkIndex).toContain("GetSystemHealthResponse"); + expect(sdkIndex).toContain("getSystemOpencodeHealth"); + expect(sdkIndex).toContain("getSystemRuntimeVersions"); +}); 
diff --git a/apps/server-v2/src/database/identifiers.ts b/apps/server-v2/src/database/identifiers.ts new file mode 100644 index 00000000..981e85eb --- /dev/null +++ b/apps/server-v2/src/database/identifiers.ts @@ -0,0 +1,64 @@ +import { createHash } from "node:crypto"; +import path from "node:path"; + +function stableHash(value: string) { + return createHash("sha256").update(value).digest("hex"); +} + +export function createStableId(prefix: string, key: string) { + return `${prefix}_${stableHash(key).slice(0, 12)}`; +} + +export function createServerId(kind: "local" | "remote", key: string) { + if (kind === "local") { + return "srv_local"; + } + + return createStableId("srv", `remote::${key}`); +} + +export function createLocalWorkspaceId(dataDir: string) { + return createStableId("ws", dataDir); +} + +export function createRemoteWorkspaceId(input: { + baseUrl: string; + directory?: string | null; + remoteWorkspaceId?: string | null; + remoteType: "openwork" | "opencode"; +}) { + if (input.remoteType === "openwork") { + const key = ["openwork", input.baseUrl, input.remoteWorkspaceId?.trim() ?? ""] + .filter(Boolean) + .join("::"); + return createStableId("ws", key); + } + + const key = ["remote", input.baseUrl, input.directory?.trim() ?? ""] + .filter(Boolean) + .join("::"); + return createStableId("ws", key); +} + +export function createInternalWorkspaceId(kind: "control" | "help") { + return createStableId("ws", `internal::${kind}`); +} + +export function slugifyWorkspaceValue(value: string, fallback: string) { + const normalized = value + .trim() + .toLowerCase() + .replace(/[^a-z0-9]+/g, "-") + .replace(/^-+|-+$/g, ""); + + return normalized || fallback; +} + +export function deriveWorkspaceSlugSource(input: { + dataDir?: string | null; + displayName: string; + fallback: string; +}) { + const baseName = input.dataDir ? 
path.basename(input.dataDir) : ""; + return slugifyWorkspaceValue(input.displayName || baseName, input.fallback); +} diff --git a/apps/server-v2/src/database/json.ts b/apps/server-v2/src/database/json.ts new file mode 100644 index 00000000..a5cc0afb --- /dev/null +++ b/apps/server-v2/src/database/json.ts @@ -0,0 +1,15 @@ +export function parseJsonValue(value: string | null | undefined, fallback: T): T { + if (!value) { + return fallback; + } + + try { + return JSON.parse(value) as T; + } catch { + return fallback; + } +} + +export function stringifyJsonValue(value: unknown): string { + return JSON.stringify(value ?? null); +} diff --git a/apps/server-v2/src/database/migrations/0001-registry-runtime.ts b/apps/server-v2/src/database/migrations/0001-registry-runtime.ts new file mode 100644 index 00000000..8b6d11dd --- /dev/null +++ b/apps/server-v2/src/database/migrations/0001-registry-runtime.ts @@ -0,0 +1,70 @@ +export const phase2RegistryRuntimeMigration = { + name: "registry-runtime", + sql: ` + CREATE TABLE IF NOT EXISTS servers ( + id TEXT PRIMARY KEY, + kind TEXT NOT NULL CHECK (kind IN ('local', 'remote')), + hosting_kind TEXT NOT NULL CHECK (hosting_kind IN ('desktop', 'self_hosted', 'cloud')), + label TEXT NOT NULL, + base_url TEXT, + auth_json TEXT, + capabilities_json TEXT NOT NULL DEFAULT '{}', + is_local INTEGER NOT NULL DEFAULT 0, + is_enabled INTEGER NOT NULL DEFAULT 1, + source TEXT NOT NULL DEFAULT 'seeded', + notes_json TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + last_seen_at TEXT + ); + + CREATE UNIQUE INDEX IF NOT EXISTS idx_servers_single_local ON servers (is_local) WHERE is_local = 1; + + CREATE TABLE IF NOT EXISTS workspaces ( + id TEXT PRIMARY KEY, + server_id TEXT NOT NULL REFERENCES servers(id) ON DELETE CASCADE, + kind TEXT NOT NULL CHECK (kind IN ('local', 'remote', 'control', 'help')), + display_name TEXT NOT NULL, + slug TEXT NOT NULL, + is_hidden INTEGER NOT NULL DEFAULT 0, + status TEXT NOT NULL CHECK (status IN 
('ready', 'imported', 'attention')), + opencode_project_id TEXT, + remote_workspace_id TEXT, + data_dir TEXT, + config_dir TEXT, + notes_json TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + + CREATE UNIQUE INDEX IF NOT EXISTS idx_workspaces_slug ON workspaces (slug); + CREATE UNIQUE INDEX IF NOT EXISTS idx_workspaces_local_data_dir ON workspaces (data_dir) WHERE data_dir IS NOT NULL; + CREATE INDEX IF NOT EXISTS idx_workspaces_server ON workspaces (server_id); + + CREATE TABLE IF NOT EXISTS server_runtime_state ( + server_id TEXT PRIMARY KEY REFERENCES servers(id) ON DELETE CASCADE, + runtime_version TEXT, + opencode_status TEXT NOT NULL DEFAULT 'unknown', + opencode_version TEXT, + opencode_base_url TEXT, + router_status TEXT NOT NULL DEFAULT 'disabled', + router_version TEXT, + restart_policy_json TEXT, + last_started_at TEXT, + last_exit_json TEXT, + health_json TEXT, + updated_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS workspace_runtime_state ( + workspace_id TEXT PRIMARY KEY REFERENCES workspaces(id) ON DELETE CASCADE, + backend_kind TEXT NOT NULL CHECK (backend_kind IN ('local_opencode', 'remote_openwork')), + last_sync_at TEXT, + last_session_refresh_at TEXT, + last_error_json TEXT, + health_json TEXT, + updated_at TEXT NOT NULL + ); + `, + version: "0001", +} as const; diff --git a/apps/server-v2/src/database/migrations/0002-managed-state.ts b/apps/server-v2/src/database/migrations/0002-managed-state.ts new file mode 100644 index 00000000..2e68e8ea --- /dev/null +++ b/apps/server-v2/src/database/migrations/0002-managed-state.ts @@ -0,0 +1,145 @@ +export const phase2ManagedStateMigration = { + name: "managed-state", + sql: ` + CREATE TABLE IF NOT EXISTS mcps ( + id TEXT PRIMARY KEY, + item_kind TEXT NOT NULL, + display_name TEXT NOT NULL, + item_key TEXT, + config_json TEXT NOT NULL DEFAULT '{}', + auth_json TEXT, + metadata_json TEXT, + source TEXT NOT NULL CHECK (source IN ('openwork_managed', 'imported', 'discovered', 
'cloud_synced')), + cloud_item_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS skills ( + id TEXT PRIMARY KEY, + item_kind TEXT NOT NULL DEFAULT 'skill', + display_name TEXT NOT NULL, + item_key TEXT, + config_json TEXT NOT NULL DEFAULT '{}', + auth_json TEXT, + metadata_json TEXT, + source TEXT NOT NULL CHECK (source IN ('openwork_managed', 'imported', 'discovered', 'cloud_synced')), + cloud_item_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS plugins ( + id TEXT PRIMARY KEY, + item_kind TEXT NOT NULL DEFAULT 'plugin', + display_name TEXT NOT NULL, + item_key TEXT, + config_json TEXT NOT NULL DEFAULT '{}', + auth_json TEXT, + metadata_json TEXT, + source TEXT NOT NULL CHECK (source IN ('openwork_managed', 'imported', 'discovered', 'cloud_synced')), + cloud_item_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS provider_configs ( + id TEXT PRIMARY KEY, + item_kind TEXT NOT NULL DEFAULT 'provider', + display_name TEXT NOT NULL, + item_key TEXT, + config_json TEXT NOT NULL DEFAULT '{}', + auth_json TEXT, + metadata_json TEXT, + source TEXT NOT NULL CHECK (source IN ('openwork_managed', 'imported', 'discovered', 'cloud_synced')), + cloud_item_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS workspace_mcps ( + workspace_id TEXT NOT NULL REFERENCES workspaces(id) ON DELETE CASCADE, + item_id TEXT NOT NULL REFERENCES mcps(id) ON DELETE CASCADE, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + PRIMARY KEY (workspace_id, item_id) + ); + + CREATE TABLE IF NOT EXISTS workspace_skills ( + workspace_id TEXT NOT NULL REFERENCES workspaces(id) ON DELETE CASCADE, + item_id TEXT NOT NULL REFERENCES skills(id) ON DELETE CASCADE, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + PRIMARY KEY (workspace_id, item_id) + ); + + CREATE TABLE IF NOT EXISTS 
workspace_plugins ( + workspace_id TEXT NOT NULL REFERENCES workspaces(id) ON DELETE CASCADE, + item_id TEXT NOT NULL REFERENCES plugins(id) ON DELETE CASCADE, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + PRIMARY KEY (workspace_id, item_id) + ); + + CREATE TABLE IF NOT EXISTS workspace_provider_configs ( + workspace_id TEXT NOT NULL REFERENCES workspaces(id) ON DELETE CASCADE, + item_id TEXT NOT NULL REFERENCES provider_configs(id) ON DELETE CASCADE, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + PRIMARY KEY (workspace_id, item_id) + ); + + CREATE TABLE IF NOT EXISTS cloud_signin ( + id TEXT PRIMARY KEY, + server_id TEXT NOT NULL UNIQUE REFERENCES servers(id) ON DELETE CASCADE, + cloud_base_url TEXT NOT NULL, + user_id TEXT, + org_id TEXT, + auth_json TEXT, + metadata_json TEXT, + last_validated_at TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS workspace_shares ( + id TEXT PRIMARY KEY, + workspace_id TEXT NOT NULL REFERENCES workspaces(id) ON DELETE CASCADE, + access_key TEXT, + status TEXT NOT NULL CHECK (status IN ('active', 'revoked', 'disabled')), + last_used_at TEXT, + audit_json TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + revoked_at TEXT + ); + + CREATE TABLE IF NOT EXISTS router_identities ( + id TEXT PRIMARY KEY, + server_id TEXT NOT NULL REFERENCES servers(id) ON DELETE CASCADE, + kind TEXT NOT NULL, + display_name TEXT NOT NULL, + config_json TEXT NOT NULL DEFAULT '{}', + auth_json TEXT, + is_enabled INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS router_bindings ( + id TEXT PRIMARY KEY, + server_id TEXT NOT NULL REFERENCES servers(id) ON DELETE CASCADE, + router_identity_id TEXT NOT NULL REFERENCES router_identities(id) ON DELETE CASCADE, + binding_key TEXT NOT NULL, + config_json TEXT NOT NULL DEFAULT '{}', + is_enabled INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + updated_at 
TEXT NOT NULL + ); + + CREATE INDEX IF NOT EXISTS idx_workspace_shares_workspace ON workspace_shares (workspace_id); + CREATE INDEX IF NOT EXISTS idx_router_identities_server ON router_identities (server_id); + CREATE INDEX IF NOT EXISTS idx_router_bindings_server ON router_bindings (server_id); + `, + version: "0002", +} as const; diff --git a/apps/server-v2/src/database/migrations/0003-files-config.ts b/apps/server-v2/src/database/migrations/0003-files-config.ts new file mode 100644 index 00000000..c8a38dfe --- /dev/null +++ b/apps/server-v2/src/database/migrations/0003-files-config.ts @@ -0,0 +1,18 @@ +export const phase7FilesConfigMigration = { + name: "files-config", + sql: ` + CREATE TABLE IF NOT EXISTS server_config_state ( + server_id TEXT PRIMARY KEY REFERENCES servers(id) ON DELETE CASCADE, + opencode_json TEXT NOT NULL DEFAULT '{}', + updated_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS workspace_config_state ( + workspace_id TEXT PRIMARY KEY REFERENCES workspaces(id) ON DELETE CASCADE, + openwork_json TEXT NOT NULL DEFAULT '{}', + opencode_json TEXT NOT NULL DEFAULT '{}', + updated_at TEXT NOT NULL + ); + `, + version: "0003", +} as const; diff --git a/apps/server-v2/src/database/migrations/index.ts b/apps/server-v2/src/database/migrations/index.ts new file mode 100644 index 00000000..b2bdd738 --- /dev/null +++ b/apps/server-v2/src/database/migrations/index.ts @@ -0,0 +1,92 @@ +import { createHash } from "node:crypto"; +import type { Database } from "bun:sqlite"; +import type { MigrationResult } from "../types.js"; +import { phase2RegistryRuntimeMigration } from "./0001-registry-runtime.js"; +import { phase2ManagedStateMigration } from "./0002-managed-state.js"; +import { phase7FilesConfigMigration } from "./0003-files-config.js"; + +const migrations = [phase2RegistryRuntimeMigration, phase2ManagedStateMigration, phase7FilesConfigMigration].map((migration) => ({ + ...migration, + checksum: 
createHash("sha256").update(migration.sql).digest("hex"), +})); + +export function runMigrations(database: Database): MigrationResult { + database.exec(` + CREATE TABLE IF NOT EXISTS schema_migrations ( + version TEXT PRIMARY KEY, + name TEXT NOT NULL, + checksum TEXT NOT NULL, + applied_at TEXT NOT NULL + ); + `); + + const existingRows = database + .query("SELECT version, checksum FROM schema_migrations ORDER BY version") + .all() as Array<{ checksum: string; version: string }>; + const existing = new Map(existingRows.map((row) => [row.version, row.checksum])); + const applied: string[] = []; + + const applyMigration = database.transaction((migration: (typeof migrations)[number]) => { + database.exec(migration.sql); + database + .query( + ` + INSERT INTO schema_migrations (version, name, checksum, applied_at) + VALUES (?1, ?2, ?3, ?4) + `, + ) + .run(migration.version, migration.name, migration.checksum, new Date().toISOString()); + }); + + for (const migration of migrations) { + const currentChecksum = existing.get(migration.version); + if (currentChecksum) { + if (currentChecksum !== migration.checksum) { + throw new Error( + `Migration checksum mismatch for ${migration.version}. Expected ${migration.checksum} but found ${currentChecksum}.`, + ); + } + continue; + } + + applyMigration(migration); + applied.push(migration.version); + } + + return { + applied, + currentVersion: migrations[migrations.length - 1]?.version ?? 
"0000", + totalApplied: existing.size + applied.length, + }; +} + +export function runSpecificMigrations(database: Database, versions: string[]) { + database.exec(` + CREATE TABLE IF NOT EXISTS schema_migrations ( + version TEXT PRIMARY KEY, + name TEXT NOT NULL, + checksum TEXT NOT NULL, + applied_at TEXT NOT NULL + ); + `); + + const applyMigration = database.transaction((migration: (typeof migrations)[number]) => { + database.exec(migration.sql); + database + .query( + ` + INSERT INTO schema_migrations (version, name, checksum, applied_at) + VALUES (?1, ?2, ?3, ?4) + `, + ) + .run(migration.version, migration.name, migration.checksum, new Date().toISOString()); + }); + + for (const version of versions) { + const migration = migrations.find((candidate) => candidate.version === version); + if (!migration) { + throw new Error(`Unknown migration version: ${version}`); + } + applyMigration(migration); + } +} diff --git a/apps/server-v2/src/database/persistence.test.ts b/apps/server-v2/src/database/persistence.test.ts new file mode 100644 index 00000000..4914a0de --- /dev/null +++ b/apps/server-v2/src/database/persistence.test.ts @@ -0,0 +1,271 @@ +import { afterEach, expect, test } from "bun:test"; +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { Database } from "bun:sqlite"; +import { createServerPersistence } from "./persistence.js"; +import { runSpecificMigrations } from "./migrations/index.js"; +import { ensureServerWorkingDirectoryLayout, resolveServerWorkingDirectory } from "./working-directory.js"; + +const cleanupPaths: string[] = []; + +afterEach(() => { + while (cleanupPaths.length > 0) { + const target = cleanupPaths.pop(); + if (!target) { + continue; + } + + fs.rmSync(target, { force: true, recursive: true }); + } +}); + +function makeTempDir(name: string) { + const directory = fs.mkdtempSync(path.join(os.tmpdir(), `${name}-`)); + cleanupPaths.push(directory); + return directory; +} + +function 
createPersistence(overrides: Partial[0]> = {}) { + return createServerPersistence({ + environment: "development", + localServer: { + baseUrl: null, + hostingKind: "self_hosted", + label: "Local OpenWork Server", + }, + version: "0.0.0-test", + ...overrides, + }); +} + +test("fresh bootstrap seeds the local server and hidden workspaces", () => { + const workingDirectory = makeTempDir("openwork-server-v2-phase2-fresh"); + const persistence = createPersistence({ workingDirectory }); + + expect(persistence.diagnostics.mode).toBe("fresh"); + expect(persistence.repositories.servers.getById(persistence.registry.localServerId)).not.toBeNull(); + + const hiddenWorkspaces = persistence.repositories.workspaces.list({ includeHidden: true }).filter((workspace) => workspace.isHidden); + expect(hiddenWorkspaces.map((workspace) => workspace.kind).sort()).toEqual(["control", "help"]); + for (const workspace of hiddenWorkspaces) { + expect(workspace.configDir).toBeTruthy(); + expect(fs.existsSync(workspace.configDir!)).toBe(true); + } + + persistence.close(); +}); + +test("migration runner upgrades an existing database from the first migration", () => { + const rootDir = makeTempDir("openwork-server-v2-phase2-upgrade"); + const workingDirectory = resolveServerWorkingDirectory({ environment: "development", explicitRootDir: rootDir }); + ensureServerWorkingDirectoryLayout(workingDirectory); + const database = new Database(workingDirectory.databasePath, { create: true }); + runSpecificMigrations(database, ["0001"]); + database.close(false); + + const persistence = createPersistence({ workingDirectory: rootDir }); + + expect(persistence.diagnostics.mode).toBe("existing"); + expect(persistence.diagnostics.migrations.applied).toEqual(["0002", "0003"]); + expect(persistence.repositories.providerConfigs.list()).toEqual([]); + + persistence.close(); +}); + +test("legacy workspace import only runs once across repeated boots", () => { + const rootDir = 
makeTempDir("openwork-server-v2-phase2-idempotent"); + const desktopDataDir = makeTempDir("openwork-server-v2-phase2-desktop"); + const orchestratorDataDir = makeTempDir("openwork-server-v2-phase2-orchestrator"); + const localWorkspaceDir = makeTempDir("openwork-server-v2-phase2-local-workspace"); + const orchestratorOnlyWorkspaceDir = makeTempDir("openwork-server-v2-phase2-orch-only-workspace"); + + fs.writeFileSync( + path.join(desktopDataDir, "openwork-workspaces.json"), + JSON.stringify( + { + selectedWorkspaceId: "ws_legacy_selected", + watchedWorkspaceId: "ws_legacy_selected", + workspaces: [ + { + id: "ws_legacy_selected", + name: "Local Test", + path: localWorkspaceDir, + preset: "starter", + workspaceType: "local", + }, + { + id: "ws_remote_legacy", + name: "Remote Test", + workspaceType: "remote", + remoteType: "openwork", + baseUrl: "https://remote.example.com/w/remote-one", + openworkHostUrl: "https://remote.example.com", + openworkWorkspaceId: "remote-one", + openworkToken: "client-token", + }, + ], + }, + null, + 2, + ), + ); + + fs.writeFileSync( + path.join(orchestratorDataDir, "openwork-orchestrator-state.json"), + JSON.stringify( + { + activeId: "orch-1", + cliVersion: "0.11.206", + daemon: { + baseUrl: "http://127.0.0.1:4321", + pid: 123, + port: 4321, + startedAt: Date.now(), + }, + opencode: { + baseUrl: "http://127.0.0.1:4322", + pid: 456, + port: 4322, + startedAt: Date.now(), + }, + workspaces: [ + { + id: "orch-1", + name: "Local Test", + path: localWorkspaceDir, + workspaceType: "local", + }, + { + id: "orch-2", + name: "Orchestrator Only", + path: orchestratorOnlyWorkspaceDir, + workspaceType: "local", + }, + ], + }, + null, + 2, + ), + ); + + const first = createPersistence({ + legacy: { + cloudSigninJson: JSON.stringify({ + authToken: "den-token", + baseUrl: "https://app.openworklabs.com", + userId: "user-1", + }), + desktopDataDir, + orchestratorDataDir, + }, + workingDirectory: rootDir, + }); + + const firstServers = 
first.repositories.servers.list(); + const firstVisibleWorkspaces = first.repositories.workspaces.list(); + expect(firstServers).toHaveLength(2); + expect(firstVisibleWorkspaces).toHaveLength(3); + expect(first.repositories.cloudSignin.getPrimary()?.cloudBaseUrl).toBe("https://app.openworklabs.com"); + first.close(); + + const second = createPersistence({ + legacy: { + desktopDataDir, + orchestratorDataDir, + }, + workingDirectory: rootDir, + }); + + expect(second.diagnostics.mode).toBe("existing"); + expect(second.repositories.servers.list()).toHaveLength(2); + expect(second.repositories.workspaces.list()).toHaveLength(3); + expect(second.repositories.workspaces.list({ includeHidden: true })).toHaveLength(5); + expect(second.diagnostics.importReports.desktopWorkspaceState.status).toBe("skipped"); + expect(second.diagnostics.importReports.orchestratorState.status).toBe("skipped"); + expect(second.diagnostics.legacyWorkspaceImport.completedAt).toBeTruthy(); + expect(second.diagnostics.legacyWorkspaceImport.skipped).toBe(true); + second.close(); +}); + +test("deleted legacy-imported workspace stays deleted after restart", () => { + const rootDir = makeTempDir("openwork-server-v2-phase2-delete-persist"); + const desktopDataDir = makeTempDir("openwork-server-v2-phase2-delete-desktop"); + const localWorkspaceDir = makeTempDir("openwork-server-v2-phase2-delete-workspace"); + + fs.writeFileSync( + path.join(desktopDataDir, "openwork-workspaces.json"), + JSON.stringify( + { + selectedWorkspaceId: "ws_legacy_selected", + workspaces: [ + { + id: "ws_legacy_selected", + name: "Local Test", + path: localWorkspaceDir, + preset: "starter", + workspaceType: "local", + }, + ], + }, + null, + 2, + ), + ); + + const first = createPersistence({ + legacy: { + desktopDataDir, + }, + workingDirectory: rootDir, + }); + + const normalizedWorkspaceDir = fs.realpathSync.native(localWorkspaceDir); + const importedWorkspace = first.repositories.workspaces + .list() + .find((workspace) => 
workspace.dataDir === normalizedWorkspaceDir); + expect(importedWorkspace).not.toBeUndefined(); + first.close(); + + const second = createPersistence({ + legacy: { + desktopDataDir, + }, + workingDirectory: rootDir, + }); + expect(second.diagnostics.importReports.desktopWorkspaceState.status).toBe("skipped"); + expect(second.repositories.workspaces.deleteById(importedWorkspace!.id)).toBe(true); + expect(second.repositories.workspaces.list().some((workspace) => workspace.id === importedWorkspace!.id)).toBe(false); + second.close(); + + const third = createPersistence({ + legacy: { + desktopDataDir, + }, + workingDirectory: rootDir, + }); + expect(third.diagnostics.importReports.desktopWorkspaceState.status).toBe("skipped"); + expect(third.repositories.workspaces.list().some((workspace) => workspace.id === importedWorkspace!.id)).toBe(false); + third.close(); +}); + +test("corrupt legacy workspace state is surfaced without blocking bootstrap", () => { + const rootDir = makeTempDir("openwork-server-v2-phase2-corrupt"); + const desktopDataDir = makeTempDir("openwork-server-v2-phase2-corrupt-desktop"); + const orchestratorDataDir = makeTempDir("openwork-server-v2-phase2-corrupt-orchestrator"); + fs.writeFileSync(path.join(desktopDataDir, "openwork-workspaces.json"), "{not-json"); + + const persistence = createPersistence({ + legacy: { + desktopDataDir, + orchestratorDataDir, + }, + workingDirectory: rootDir, + }); + + expect(persistence.diagnostics.importReports.desktopWorkspaceState.status).toBe("error"); + expect(persistence.repositories.servers.list()).toHaveLength(1); + expect(persistence.repositories.workspaces.list({ includeHidden: true })).toHaveLength(2); + + persistence.close(); +}); diff --git a/apps/server-v2/src/database/persistence.ts b/apps/server-v2/src/database/persistence.ts new file mode 100644 index 00000000..3d31f661 --- /dev/null +++ b/apps/server-v2/src/database/persistence.ts @@ -0,0 +1,728 @@ +import fs from "node:fs"; +import os from "node:os"; 
+import path from "node:path"; +import { Database } from "bun:sqlite"; +import { z } from "zod"; +import { createRepositories, type ServerRepositories } from "./repositories.js"; +import type { StartupDiagnostics, HostingKind, ImportSourceReport, JsonObject } from "./types.js"; +import { runMigrations } from "./migrations/index.js"; +import { ensureServerWorkingDirectoryLayout, resolveServerWorkingDirectory, type ServerWorkingDirectory } from "./working-directory.js"; +import { createRegistryService, type RegistryService } from "../services/registry-service.js"; + +const legacyWorkspaceSchema = z.object({ + baseUrl: z.string().optional().nullable(), + directory: z.string().optional().nullable(), + displayName: z.string().optional().nullable(), + id: z.string(), + name: z.string().optional().default(""), + openworkClientToken: z.string().optional().nullable(), + openworkHostToken: z.string().optional().nullable(), + openworkHostUrl: z.string().optional().nullable(), + openworkToken: z.string().optional().nullable(), + openworkWorkspaceId: z.string().optional().nullable(), + openworkWorkspaceName: z.string().optional().nullable(), + path: z.string().default(""), + preset: z.string().optional().nullable(), + remoteType: z.enum(["openwork", "opencode"]).optional().nullable(), + sandboxBackend: z.string().optional().nullable(), + sandboxContainerName: z.string().optional().nullable(), + sandboxRunId: z.string().optional().nullable(), + workspaceType: z.enum(["local", "remote"]), +}); + +const legacyWorkspaceStateSchema = z.object({ + activeId: z.string().optional().nullable(), + selectedWorkspaceId: z.string().optional().nullable(), + version: z.number().optional(), + watchedWorkspaceId: z.string().optional().nullable(), + workspaces: z.array(legacyWorkspaceSchema), +}); + +const orchestratorStateSchema = z.object({ + activeId: z.string().optional().nullable(), + binaries: z.object({ + opencode: z.object({ + actualVersion: z.string().optional().nullable(), + 
expectedVersion: z.string().optional().nullable(), + path: z.string().optional().nullable(), + source: z.string().optional().nullable(), + }).optional().nullable(), + }).optional().nullable(), + cliVersion: z.string().optional().nullable(), + daemon: z.object({ + baseUrl: z.string(), + pid: z.number(), + port: z.number(), + startedAt: z.number(), + }).optional().nullable(), + opencode: z.object({ + baseUrl: z.string(), + pid: z.number(), + port: z.number(), + startedAt: z.number(), + }).optional().nullable(), + workspaces: z.array(z.object({ + baseUrl: z.string().optional().nullable(), + createdAt: z.number().optional().nullable(), + directory: z.string().optional().nullable(), + id: z.string(), + lastUsedAt: z.number().optional().nullable(), + name: z.string().optional().default(""), + path: z.string(), + workspaceType: z.string(), + })).default([]), +}); + +const orchestratorAuthSchema = z.object({ + opencodePassword: z.string().optional().nullable(), + opencodeUsername: z.string().optional().nullable(), + projectDir: z.string().optional().nullable(), + updatedAt: z.number().optional().nullable(), +}); + +const cloudSigninSchema = z.object({ + activeOrgId: z.string().optional().nullable(), + activeOrgName: z.string().optional().nullable(), + activeOrgSlug: z.string().optional().nullable(), + authToken: z.string().optional().nullable(), + baseUrl: z.string().optional().nullable(), + cloudBaseUrl: z.string().optional().nullable(), + lastValidatedAt: z.string().optional().nullable(), + orgId: z.string().optional().nullable(), + userId: z.string().optional().nullable(), +}); + +type CreateServerPersistenceOptions = { + environment: string; + inMemory?: boolean; + legacy?: { + cloudSigninJson?: string; + cloudSigninPath?: string; + desktopDataDir?: string; + orchestratorDataDir?: string; + }; + localServer: { + baseUrl?: string | null; + hostingKind: HostingKind; + label: string; + }; + version: string; + workingDirectory?: string; +}; + +export type ServerPersistence 
= { + close(): void; + database: Database; + diagnostics: StartupDiagnostics; + registry: RegistryService; + repositories: ServerRepositories; + workingDirectory: ServerWorkingDirectory; +}; + +function isTruthy(value: string | undefined) { + if (!value) { + return false; + } + + return ["1", "true", "yes", "on"].includes(value.trim().toLowerCase()); +} + +function normalizeWorkspacePath(value: string) { + const trimmed = value.trim(); + if (!trimmed) { + return ""; + } + + const expanded = trimmed === "~" + ? os.homedir() + : trimmed.startsWith("~/") || trimmed.startsWith("~\\") + ? path.join(os.homedir(), trimmed.slice(2)) + : trimmed; + + try { + return fs.realpathSync.native(expanded); + } catch { + return path.resolve(expanded); + } +} + +function normalizeUrl(value: string | null | undefined) { + const trimmed = value?.trim() ?? ""; + if (!trimmed) { + return null; + } + + const withProtocol = /^https?:\/\//.test(trimmed) ? trimmed : `http://${trimmed}`; + try { + const url = new URL(withProtocol); + return url.toString().replace(/\/+$/, ""); + } catch { + return null; + } +} + +function stripWorkspaceMount(value: string | null | undefined) { + const normalized = normalizeUrl(value); + if (!normalized) { + return null; + } + + const url = new URL(normalized); + const segments = url.pathname.split("/").filter(Boolean); + const last = segments[segments.length - 1] ?? ""; + const prev = segments[segments.length - 2] ?? ""; + if (prev === "w" && last) { + url.pathname = `/${segments.slice(0, -2).join("/")}`; + } + return url.toString().replace(/\/+$/, ""); +} + +function parseWorkspaceIdFromUrl(value: string | null | undefined) { + const normalized = normalizeUrl(value); + if (!normalized) { + return null; + } + + try { + const url = new URL(normalized); + const segments = url.pathname.split("/").filter(Boolean); + const last = segments[segments.length - 1] ?? ""; + const prev = segments[segments.length - 2] ?? ""; + return prev === "w" && last ? 
decodeURIComponent(last) : null; + } catch { + return null; + } +} + +function detectRemoteHostingKind(value: string) { + const normalized = normalizeUrl(value); + if (!normalized) { + return "self_hosted" as const; + } + + const hostname = new URL(normalized).hostname.toLowerCase(); + if ( + hostname === "app.openworklabs.com" || + hostname === "app.openwork.software" || + hostname.endsWith(".openworklabs.com") || + hostname.endsWith(".openwork.software") + ) { + return "cloud" as const; + } + + return "self_hosted" as const; +} + +function legacyDesktopDataDirCandidates(explicitDir?: string) { + if (explicitDir?.trim()) { + return [path.resolve(explicitDir)]; + } + + const candidates: string[] = []; + const home = os.homedir(); + const names = ["com.differentai.openwork.dev", "com.differentai.openwork", "OpenWork Dev", "OpenWork"]; + if (process.platform === "darwin") { + for (const name of names) { + candidates.push(path.join(home, "Library", "Application Support", name)); + } + } else if (process.platform === "win32") { + const appData = process.env.APPDATA?.trim() || path.join(home, "AppData", "Roaming"); + for (const name of names) { + candidates.push(path.join(appData, name)); + } + } else { + const xdgDataHome = process.env.XDG_DATA_HOME?.trim() || path.join(home, ".local", "share"); + for (const name of names) { + candidates.push(path.join(xdgDataHome, name)); + } + } + + return Array.from(new Set(candidates)); +} + +function legacyOrchestratorDirCandidates(explicitDir?: string) { + if (explicitDir?.trim()) { + return [path.resolve(explicitDir)]; + } + + const candidates: string[] = []; + const fromEnv = process.env.OPENWORK_DATA_DIR?.trim(); + if (fromEnv) { + candidates.push(path.resolve(fromEnv)); + } + + const home = os.homedir(); + for (const name of ["openwork-orchestrator-dev-react", "openwork-orchestrator-dev", "openwork-orchestrator"]) { + candidates.push(path.join(home, ".openwork", name)); + } + + return Array.from(new Set(candidates)); +} + 
+function readTextIfExists(filePath: string | null) { + if (!filePath) { + return null; + } + + try { + return fs.readFileSync(filePath, "utf8"); + } catch { + return null; + } +} + +function resolveExistingFile(candidates: string[], fileName: string) { + for (const directory of candidates) { + const filePath = path.join(directory, fileName); + if (fs.existsSync(filePath)) { + return filePath; + } + } + + return null; +} + +function fromUnixTimestamp(value: number | null | undefined) { + if (!value) { + return null; + } + + const milliseconds = value > 10_000_000_000 ? value : value * 1000; + return new Date(milliseconds).toISOString(); +} + +function createEmptyReport(status: ImportSourceReport["status"], sourcePath: string | null, details: JsonObject = {}): ImportSourceReport { + return { + details, + sourcePath, + status, + warnings: [], + }; +} + +function mergeReportWarnings(report: ImportSourceReport, warnings: string[]) { + report.warnings.push(...warnings); + return report; +} + +function asJsonObject(value: unknown): JsonObject | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + + return value as JsonObject; +} + +function readLegacyWorkspaceImportCompletedAt(value: JsonObject | null | undefined) { + const startup = asJsonObject(value?.startup); + const legacyWorkspaceImport = asJsonObject(startup?.legacyWorkspaceImport); + const completedAt = legacyWorkspaceImport?.completedAt; + return typeof completedAt === "string" && completedAt.trim() ? completedAt.trim() : null; +} + +function summarizeMode(inMemory: boolean, databasePath: string) { + if (inMemory) { + return "fresh" as const; + } + + return fs.existsSync(databasePath) ? "existing" as const : "fresh" as const; +} + +export function createServerPersistence(options: CreateServerPersistenceOptions): ServerPersistence { + const inMemory = options.inMemory ?? 
(isTruthy(process.env.OPENWORK_SERVER_V2_IN_MEMORY) || options.environment === "test"); + const workingDirectory = resolveServerWorkingDirectory({ + environment: options.environment, + explicitRootDir: options.workingDirectory, + }); + if (!inMemory) { + ensureServerWorkingDirectoryLayout(workingDirectory); + } + + const mode = summarizeMode(inMemory, workingDirectory.databasePath); + const database = new Database(inMemory ? ":memory:" : workingDirectory.databasePath, { create: true }); + database.exec("PRAGMA foreign_keys = ON"); + database.exec("PRAGMA journal_mode = WAL"); + database.exec("PRAGMA synchronous = NORMAL"); + + const migrations = runMigrations(database); + const repositories = createRepositories(database); + const registry = createRegistryService({ + localServerCapabilities: { + configRoutes: true, + capabilitiesRoutes: true, + contractLoop: true, + fileRoutes: true, + managedConfigTables: true, + phase: 9, + remoteServerConnections: true, + remoteWorkspaceDiscovery: true, + reloadOwnership: true, + rootMounted: true, + runtimeRoutes: true, + runtimeSupervision: true, + runtimeStateTables: true, + version: options.version, + workspaceReadRoutes: true, + workspaceRegistry: true, + }, + repositories, + workingDirectory, + }); + + const localServer = registry.ensureLocalServer({ + baseUrl: options.localServer.baseUrl ?? 
null, + hostingKind: options.localServer.hostingKind, + label: options.localServer.label, + notes: { + workingDirectory: workingDirectory.rootDir, + }, + }); + + const controlWorkspace = registry.ensureHiddenWorkspace("control"); + const helpWorkspace = registry.ensureHiddenWorkspace("help"); + + const existingRuntimeState = repositories.serverRuntimeState.getByServerId(registry.localServerId); + const priorLegacyWorkspaceImportCompletedAt = readLegacyWorkspaceImportCompletedAt(existingRuntimeState?.health); + const shouldImportLegacyWorkspaceState = !priorLegacyWorkspaceImportCompletedAt; + + const desktopWorkspaceFile = resolveExistingFile( + legacyDesktopDataDirCandidates(options.legacy?.desktopDataDir), + "openwork-workspaces.json", + ); + const desktopWorkspaceReport = !shouldImportLegacyWorkspaceState + ? createEmptyReport("skipped", desktopWorkspaceFile, { + completedAt: priorLegacyWorkspaceImportCompletedAt, + reason: "Legacy workspace import already completed on an earlier Server V2 startup.", + }) + : desktopWorkspaceFile + ? createEmptyReport("imported", desktopWorkspaceFile) + : createEmptyReport("unavailable", null, { reason: "No legacy desktop workspace registry file was found." }); + + if (shouldImportLegacyWorkspaceState && desktopWorkspaceFile) { + try { + const parsed = legacyWorkspaceStateSchema.parse(JSON.parse(readTextIfExists(desktopWorkspaceFile) ?? 
"{}")); + let localImported = 0; + let remoteImported = 0; + const importedWorkspaceIds: string[] = []; + for (const workspace of parsed.workspaces) { + if (workspace.workspaceType === "local") { + const dataDir = normalizeWorkspacePath(workspace.path); + if (!dataDir) { + desktopWorkspaceReport.warnings.push(`Skipped local workspace ${workspace.id} because its path was empty.`); + continue; + } + const record = registry.importLocalWorkspace({ + dataDir, + displayName: (workspace.displayName?.trim() || workspace.name || path.basename(dataDir)).trim(), + legacyNotes: { + legacyDesktop: { + displayName: workspace.displayName ?? null, + legacyId: workspace.id, + name: workspace.name, + preset: workspace.preset ?? null, + source: "openwork-workspaces.json", + }, + }, + status: "imported", + }); + localImported += 1; + importedWorkspaceIds.push(record.id); + continue; + } + + const remoteType = workspace.remoteType === "openwork" ? "openwork" : "opencode"; + const openworkServerBaseUrl = stripWorkspaceMount(workspace.openworkHostUrl ?? workspace.baseUrl ?? ""); + const remoteServerBaseUrl = openworkServerBaseUrl ?? normalizeUrl(workspace.baseUrl) ?? ""; + if (!remoteServerBaseUrl) { + desktopWorkspaceReport.warnings.push(`Skipped remote workspace ${workspace.id} because no valid base URL was found.`); + continue; + } + + const auth: JsonObject = {}; + if (workspace.openworkToken?.trim()) auth.openworkToken = workspace.openworkToken.trim(); + if (workspace.openworkClientToken?.trim()) auth.openworkClientToken = workspace.openworkClientToken.trim(); + if (workspace.openworkHostToken?.trim()) auth.openworkHostToken = workspace.openworkHostToken.trim(); + + const record = registry.importRemoteWorkspace({ + baseUrl: normalizeUrl(workspace.baseUrl) ?? 
remoteServerBaseUrl, + directory: workspace.directory?.trim() || null, + displayName: + workspace.openworkWorkspaceName?.trim() || + workspace.displayName?.trim() || + workspace.name || + remoteServerBaseUrl, + legacyNotes: { + legacyDesktop: { + baseUrl: workspace.baseUrl ?? null, + directory: workspace.directory ?? null, + displayName: workspace.displayName ?? null, + legacyId: workspace.id, + openworkHostUrl: workspace.openworkHostUrl ?? null, + sandboxBackend: workspace.sandboxBackend ?? null, + sandboxContainerName: workspace.sandboxContainerName ?? null, + sandboxRunId: workspace.sandboxRunId ?? null, + }, + }, + remoteType, + remoteWorkspaceId: + workspace.openworkWorkspaceId?.trim() || + parseWorkspaceIdFromUrl(workspace.openworkHostUrl ?? null) || + parseWorkspaceIdFromUrl(workspace.baseUrl ?? null), + serverAuth: Object.keys(auth).length > 0 ? auth : null, + serverBaseUrl: remoteServerBaseUrl, + serverHostingKind: detectRemoteHostingKind(remoteServerBaseUrl), + serverLabel: new URL(remoteServerBaseUrl).host, + workspaceStatus: "imported", + }); + remoteImported += 1; + importedWorkspaceIds.push(record.id); + } + + desktopWorkspaceReport.details = { + importedWorkspaceIds, + localImported, + remoteImported, + selectedWorkspaceId: parsed.selectedWorkspaceId?.trim() || parsed.activeId?.trim() || null, + watchedWorkspaceId: parsed.watchedWorkspaceId?.trim() || null, + }; + } catch (error) { + desktopWorkspaceReport.status = "error"; + desktopWorkspaceReport.details = { + error: error instanceof Error ? error.message : String(error), + }; + } + } + + const orchestratorStateFile = resolveExistingFile( + legacyOrchestratorDirCandidates(options.legacy?.orchestratorDataDir), + "openwork-orchestrator-state.json", + ); + const orchestratorStateReport = !shouldImportLegacyWorkspaceState + ? 
createEmptyReport("skipped", orchestratorStateFile, { + completedAt: priorLegacyWorkspaceImportCompletedAt, + reason: "Legacy workspace import already completed on an earlier Server V2 startup.", + }) + : orchestratorStateFile + ? createEmptyReport("imported", orchestratorStateFile) + : createEmptyReport("unavailable", null, { reason: "No legacy orchestrator state snapshot was found." }); + + if (shouldImportLegacyWorkspaceState && orchestratorStateFile) { + try { + const parsed = orchestratorStateSchema.parse(JSON.parse(readTextIfExists(orchestratorStateFile) ?? "{}")); + let importedWorkspaceCount = 0; + const importedWorkspaceIds: string[] = []; + for (const workspace of parsed.workspaces) { + if (workspace.workspaceType !== "local") { + continue; + } + + const normalizedPath = normalizeWorkspacePath(workspace.path); + if (!normalizedPath) { + continue; + } + + const record = registry.importLocalWorkspace({ + dataDir: normalizedPath, + displayName: workspace.name?.trim() || path.basename(normalizedPath), + legacyNotes: { + legacyOrchestrator: { + baseUrl: workspace.baseUrl ?? null, + createdAt: fromUnixTimestamp(workspace.createdAt ?? null), + directory: workspace.directory ?? null, + lastUsedAt: fromUnixTimestamp(workspace.lastUsedAt ?? null), + legacyId: workspace.id, + }, + }, + status: "imported", + }); + importedWorkspaceCount += 1; + importedWorkspaceIds.push(record.id); + } + + const existingRuntimeState = repositories.serverRuntimeState.getByServerId(registry.localServerId); + repositories.serverRuntimeState.upsert({ + health: { + ...(existingRuntimeState?.health ?? {}), + orchestrator: { + activeLegacyWorkspaceId: parsed.activeId?.trim() || null, + daemonBaseUrl: parsed.daemon?.baseUrl ?? null, + workspaceCount: parsed.workspaces.length, + }, + }, + lastExit: existingRuntimeState?.lastExit ?? null, + lastStartedAt: fromUnixTimestamp(parsed.daemon?.startedAt ?? parsed.opencode?.startedAt ?? null), + opencodeBaseUrl: parsed.opencode?.baseUrl ?? 
existingRuntimeState?.opencodeBaseUrl ?? null, + opencodeStatus: parsed.opencode ? "detected" : existingRuntimeState?.opencodeStatus ?? "unknown", + opencodeVersion: + parsed.binaries?.opencode?.actualVersion ?? parsed.cliVersion ?? existingRuntimeState?.opencodeVersion ?? null, + restartPolicy: existingRuntimeState?.restartPolicy ?? null, + routerStatus: existingRuntimeState?.routerStatus ?? "disabled", + routerVersion: existingRuntimeState?.routerVersion ?? null, + runtimeVersion: parsed.cliVersion ?? existingRuntimeState?.runtimeVersion ?? options.version, + serverId: registry.localServerId, + }); + + orchestratorStateReport.details = { + activeLegacyWorkspaceId: parsed.activeId?.trim() || null, + importedWorkspaceCount, + importedWorkspaceIds, + opencodeBaseUrl: parsed.opencode?.baseUrl ?? null, + }; + } catch (error) { + orchestratorStateReport.status = "error"; + orchestratorStateReport.details = { + error: error instanceof Error ? error.message : String(error), + }; + } + } + + const orchestratorAuthFile = resolveExistingFile( + legacyOrchestratorDirCandidates(options.legacy?.orchestratorDataDir), + "openwork-orchestrator-auth.json", + ); + const orchestratorAuthReport = orchestratorAuthFile + ? createEmptyReport("skipped", orchestratorAuthFile) + : createEmptyReport("unavailable", null, { reason: "No legacy orchestrator auth snapshot was found." }); + + if (orchestratorAuthFile) { + try { + const parsed = orchestratorAuthSchema.parse(JSON.parse(readTextIfExists(orchestratorAuthFile) ?? "{}")); + const normalizedProjectDir = parsed.projectDir ? normalizeWorkspacePath(parsed.projectDir) : null; + const matchedWorkspace = normalizedProjectDir + ? repositories.workspaces + .list({ includeHidden: true }) + .find((workspace) => workspace.dataDir === normalizedProjectDir) + : null; + orchestratorAuthReport.details = { + credentialsDetected: Boolean(parsed.opencodeUsername?.trim() || parsed.opencodePassword?.trim()), + matchedWorkspaceId: matchedWorkspace?.id ?? 
null, + projectDir: normalizedProjectDir, + updatedAt: fromUnixTimestamp(parsed.updatedAt ?? null), + }; + orchestratorAuthReport.warnings.push( + "Legacy orchestrator OpenCode credentials were detected but were not imported because they are transitional host secrets, not durable Phase 2 registry state.", + ); + } catch (error) { + orchestratorAuthReport.status = "error"; + orchestratorAuthReport.details = { + error: error instanceof Error ? error.message : String(error), + }; + } + } + + const cloudSigninFile = + options.legacy?.cloudSigninPath?.trim() || + resolveExistingFile(legacyDesktopDataDirCandidates(options.legacy?.desktopDataDir), "openwork-cloud-signin.json"); + const cloudSigninReport = options.legacy?.cloudSigninJson?.trim() || cloudSigninFile + ? createEmptyReport("imported", cloudSigninFile ?? "env:OPENWORK_SERVER_V2_CLOUD_SIGNIN_JSON") + : createEmptyReport( + "unavailable", + null, + { + reason: + "No server-readable cloud signin snapshot was found. The current desktop app still persists cloud auth in browser localStorage, so later phases need an explicit handoff path.", + }, + ); + + const cloudSigninRaw = options.legacy?.cloudSigninJson?.trim() || readTextIfExists(cloudSigninFile ?? null); + if (cloudSigninRaw) { + try { + const parsed = cloudSigninSchema.parse(JSON.parse(cloudSigninRaw)); + const cloudBaseUrl = normalizeUrl(parsed.cloudBaseUrl ?? parsed.baseUrl ?? ""); + if (!cloudBaseUrl) { + throw new Error("Cloud signin snapshot did not include a valid base URL."); + } + + repositories.cloudSignin.upsert({ + auth: parsed.authToken?.trim() ? 
{ authToken: parsed.authToken.trim() } : null, + cloudBaseUrl, + id: "cloud_primary", + lastValidatedAt: parsed.lastValidatedAt?.trim() || null, + metadata: { + activeOrgName: parsed.activeOrgName?.trim() || null, + activeOrgSlug: parsed.activeOrgSlug?.trim() || null, + }, + orgId: parsed.orgId?.trim() || parsed.activeOrgId?.trim() || null, + serverId: registry.localServerId, + userId: parsed.userId?.trim() || null, + }); + + cloudSigninReport.details = { + cloudBaseUrl, + imported: true, + orgId: parsed.orgId?.trim() || parsed.activeOrgId?.trim() || null, + userId: parsed.userId?.trim() || null, + }; + } catch (error) { + cloudSigninReport.status = "error"; + cloudSigninReport.details = { + error: error instanceof Error ? error.message : String(error), + }; + } + } + + const legacyWorkspaceImportCompletedAt = priorLegacyWorkspaceImportCompletedAt + ?? (desktopWorkspaceReport.status !== "error" && orchestratorStateReport.status !== "error" + ? new Date().toISOString() + : null); + const diagnostics: StartupDiagnostics = { + completedAt: new Date().toISOString(), + importReports: { + cloudSignin: cloudSigninReport, + desktopWorkspaceState: desktopWorkspaceReport, + orchestratorAuth: orchestratorAuthReport, + orchestratorState: orchestratorStateReport, + }, + legacyWorkspaceImport: { + completedAt: legacyWorkspaceImportCompletedAt, + skipped: !shouldImportLegacyWorkspaceState, + }, + mode, + migrations, + registry: { + hiddenWorkspaceIds: [controlWorkspace.id, helpWorkspace.id], + localServerCreated: localServer.created, + localServerId: localServer.server.id, + totalServers: repositories.servers.count(), + totalVisibleWorkspaces: repositories.workspaces.countVisible(), + }, + warnings: [ + ...desktopWorkspaceReport.warnings, + ...orchestratorStateReport.warnings, + ...orchestratorAuthReport.warnings, + ...cloudSigninReport.warnings, + ], + workingDirectory: { + databasePath: inMemory ? 
":memory:" : workingDirectory.databasePath, + rootDir: workingDirectory.rootDir, + workspacesDir: workingDirectory.workspacesDir, + }, + }; + + repositories.serverRuntimeState.upsert({ + health: { + startup: diagnostics, + }, + lastExit: existingRuntimeState?.lastExit ?? null, + lastStartedAt: existingRuntimeState?.lastStartedAt ?? null, + opencodeBaseUrl: existingRuntimeState?.opencodeBaseUrl ?? null, + opencodeStatus: existingRuntimeState?.opencodeStatus ?? "unknown", + opencodeVersion: existingRuntimeState?.opencodeVersion ?? options.version, + restartPolicy: existingRuntimeState?.restartPolicy ?? null, + routerStatus: existingRuntimeState?.routerStatus ?? "disabled", + routerVersion: existingRuntimeState?.routerVersion ?? null, + runtimeVersion: existingRuntimeState?.runtimeVersion ?? options.version, + serverId: registry.localServerId, + }); + + return { + close() { + database.close(false); + }, + database, + diagnostics, + registry, + repositories, + workingDirectory, + }; +} diff --git a/apps/server-v2/src/database/repositories.ts b/apps/server-v2/src/database/repositories.ts new file mode 100644 index 00000000..c0910408 --- /dev/null +++ b/apps/server-v2/src/database/repositories.ts @@ -0,0 +1,1072 @@ +import type { Database } from "bun:sqlite"; +import { parseJsonValue, stringifyJsonValue } from "./json.js"; +import type { + CloudSigninRecord, + ManagedConfigRecord, + RouterBindingRecord, + RouterIdentityRecord, + ServerConfigStateRecord, + ServerRecord, + ServerRuntimeStateRecord, + WorkspaceAssignmentRecord, + WorkspaceConfigStateRecord, + WorkspaceRecord, + WorkspaceRuntimeStateRecord, + WorkspaceShareRecord, +} from "./types.js"; + +function toBoolean(value: number | boolean | null | undefined) { + return Boolean(value); +} + +function toSqlBoolean(value: boolean) { + return value ? 
1 : 0; +} + +function nowIso() { + return new Date().toISOString(); +} + +type RawServerRow = { + auth_json: string | null; + base_url: string | null; + capabilities_json: string; + created_at: string; + hosting_kind: ServerRecord["hostingKind"]; + id: string; + is_enabled: number; + is_local: number; + kind: ServerRecord["kind"]; + label: string; + last_seen_at: string | null; + notes_json: string | null; + source: string; + updated_at: string; +}; + +type RawWorkspaceRow = { + config_dir: string | null; + created_at: string; + data_dir: string | null; + display_name: string; + id: string; + is_hidden: number; + kind: WorkspaceRecord["kind"]; + notes_json: string | null; + opencode_project_id: string | null; + remote_workspace_id: string | null; + server_id: string; + slug: string; + status: WorkspaceRecord["status"]; + updated_at: string; +}; + +type RawServerRuntimeStateRow = { + health_json: string | null; + last_exit_json: string | null; + last_started_at: string | null; + opencode_base_url: string | null; + opencode_status: string; + opencode_version: string | null; + restart_policy_json: string | null; + router_status: string; + router_version: string | null; + runtime_version: string | null; + server_id: string; + updated_at: string; +}; + +type RawWorkspaceRuntimeStateRow = { + backend_kind: WorkspaceRuntimeStateRecord["backendKind"]; + health_json: string | null; + last_error_json: string | null; + last_session_refresh_at: string | null; + last_sync_at: string | null; + updated_at: string; + workspace_id: string; +}; + +type RawServerConfigStateRow = { + opencode_json: string; + server_id: string; + updated_at: string; +}; + +type RawWorkspaceConfigStateRow = { + openwork_json: string; + opencode_json: string; + updated_at: string; + workspace_id: string; +}; + +type RawManagedConfigRow = { + auth_json: string | null; + cloud_item_id: string | null; + config_json: string; + created_at: string; + display_name: string; + id: string; + item_key: string | 
null; + metadata_json: string | null; + source: ManagedConfigRecord["source"]; + updated_at: string; +}; + +type RawCloudSigninRow = { + auth_json: string | null; + cloud_base_url: string; + created_at: string; + id: string; + last_validated_at: string | null; + metadata_json: string | null; + org_id: string | null; + server_id: string; + updated_at: string; + user_id: string | null; +}; + +type RawWorkspaceShareRow = { + access_key: string | null; + audit_json: string | null; + created_at: string; + id: string; + last_used_at: string | null; + revoked_at: string | null; + status: WorkspaceShareRecord["status"]; + updated_at: string; + workspace_id: string; +}; + +type RawRouterIdentityRow = { + auth_json: string | null; + config_json: string; + created_at: string; + display_name: string; + id: string; + is_enabled: number; + kind: string; + server_id: string; + updated_at: string; +}; + +type RawRouterBindingRow = { + binding_key: string; + config_json: string; + created_at: string; + id: string; + is_enabled: number; + router_identity_id: string; + server_id: string; + updated_at: string; +}; + +function mapServer(row: RawServerRow | null | undefined): ServerRecord | null { + if (!row) { + return null; + } + + return { + auth: parseJsonValue(row.auth_json, null), + baseUrl: row.base_url, + capabilities: parseJsonValue(row.capabilities_json, {}), + createdAt: row.created_at, + hostingKind: row.hosting_kind, + id: row.id, + isEnabled: toBoolean(row.is_enabled), + isLocal: toBoolean(row.is_local), + kind: row.kind, + label: row.label, + lastSeenAt: row.last_seen_at, + notes: parseJsonValue(row.notes_json, null), + source: row.source, + updatedAt: row.updated_at, + }; +} + +function mapWorkspace(row: RawWorkspaceRow | null | undefined): WorkspaceRecord | null { + if (!row) { + return null; + } + + return { + configDir: row.config_dir, + createdAt: row.created_at, + dataDir: row.data_dir, + displayName: row.display_name, + id: row.id, + isHidden: 
toBoolean(row.is_hidden), + kind: row.kind, + notes: parseJsonValue(row.notes_json, null), + opencodeProjectId: row.opencode_project_id, + remoteWorkspaceId: row.remote_workspace_id, + serverId: row.server_id, + slug: row.slug, + status: row.status, + updatedAt: row.updated_at, + }; +} + +function mapServerRuntimeState(row: RawServerRuntimeStateRow | null | undefined): ServerRuntimeStateRecord | null { + if (!row) { + return null; + } + + return { + health: parseJsonValue(row.health_json, null), + lastExit: parseJsonValue(row.last_exit_json, null), + lastStartedAt: row.last_started_at, + opencodeBaseUrl: row.opencode_base_url, + opencodeStatus: row.opencode_status, + opencodeVersion: row.opencode_version, + restartPolicy: parseJsonValue(row.restart_policy_json, null), + routerStatus: row.router_status, + routerVersion: row.router_version, + runtimeVersion: row.runtime_version, + serverId: row.server_id, + updatedAt: row.updated_at, + }; +} + +function mapWorkspaceRuntimeState(row: RawWorkspaceRuntimeStateRow | null | undefined): WorkspaceRuntimeStateRecord | null { + if (!row) { + return null; + } + + return { + backendKind: row.backend_kind, + health: parseJsonValue(row.health_json, null), + lastError: parseJsonValue(row.last_error_json, null), + lastSessionRefreshAt: row.last_session_refresh_at, + lastSyncAt: row.last_sync_at, + updatedAt: row.updated_at, + workspaceId: row.workspace_id, + }; +} + +function mapServerConfigState(row: RawServerConfigStateRow | null | undefined): ServerConfigStateRecord | null { + if (!row) { + return null; + } + + return { + opencode: parseJsonValue(row.opencode_json, {}), + serverId: row.server_id, + updatedAt: row.updated_at, + }; +} + +function mapWorkspaceConfigState(row: RawWorkspaceConfigStateRow | null | undefined): WorkspaceConfigStateRecord | null { + if (!row) { + return null; + } + + return { + openwork: parseJsonValue(row.openwork_json, {}), + opencode: parseJsonValue(row.opencode_json, {}), + updatedAt: row.updated_at, 
+ workspaceId: row.workspace_id, + }; +} + +function mapManagedConfig(row: RawManagedConfigRow | null | undefined): ManagedConfigRecord | null { + if (!row) { + return null; + } + + return { + auth: parseJsonValue(row.auth_json, null), + cloudItemId: row.cloud_item_id, + config: parseJsonValue(row.config_json, {}), + createdAt: row.created_at, + displayName: row.display_name, + id: row.id, + key: row.item_key, + metadata: parseJsonValue(row.metadata_json, null), + source: row.source, + updatedAt: row.updated_at, + }; +} + +function mapCloudSignin(row: RawCloudSigninRow | null | undefined): CloudSigninRecord | null { + if (!row) { + return null; + } + + return { + auth: parseJsonValue(row.auth_json, null), + cloudBaseUrl: row.cloud_base_url, + createdAt: row.created_at, + id: row.id, + lastValidatedAt: row.last_validated_at, + metadata: parseJsonValue(row.metadata_json, null), + orgId: row.org_id, + serverId: row.server_id, + updatedAt: row.updated_at, + userId: row.user_id, + }; +} + +function mapWorkspaceShare(row: RawWorkspaceShareRow | null | undefined): WorkspaceShareRecord | null { + if (!row) { + return null; + } + + return { + accessKey: row.access_key, + audit: parseJsonValue(row.audit_json, null), + createdAt: row.created_at, + id: row.id, + lastUsedAt: row.last_used_at, + revokedAt: row.revoked_at, + status: row.status, + updatedAt: row.updated_at, + workspaceId: row.workspace_id, + }; +} + +function mapRouterIdentity(row: RawRouterIdentityRow | null | undefined): RouterIdentityRecord | null { + if (!row) { + return null; + } + + return { + auth: parseJsonValue(row.auth_json, null), + config: parseJsonValue(row.config_json, {}), + createdAt: row.created_at, + displayName: row.display_name, + id: row.id, + isEnabled: toBoolean(row.is_enabled), + kind: row.kind, + serverId: row.server_id, + updatedAt: row.updated_at, + }; +} + +function mapRouterBinding(row: RawRouterBindingRow | null | undefined): RouterBindingRecord | null { + if (!row) { + return null; + 
} + + return { + bindingKey: row.binding_key, + config: parseJsonValue(row.config_json, {}), + createdAt: row.created_at, + id: row.id, + isEnabled: toBoolean(row.is_enabled), + routerIdentityId: row.router_identity_id, + serverId: row.server_id, + updatedAt: row.updated_at, + }; +} + +export class ServersRepository { + constructor(private readonly database: Database) {} + + getById(id: string) { + return mapServer(this.database.query("SELECT * FROM servers WHERE id = ?1").get(id) as RawServerRow | null); + } + + list() { + return (this.database.query("SELECT * FROM servers ORDER BY is_local DESC, updated_at DESC").all() as RawServerRow[]).map(mapServer).filter(Boolean) as ServerRecord[]; + } + + count() { + const row = this.database.query("SELECT COUNT(1) AS count FROM servers").get() as { count?: number } | null; + return row?.count ?? 0; + } + + upsert(input: Omit & { createdAt?: string; updatedAt?: string }) { + const createdAt = input.createdAt ?? nowIso(); + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO servers ( + id, kind, hosting_kind, label, base_url, auth_json, capabilities_json, is_local, + is_enabled, source, notes_json, created_at, updated_at, last_seen_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14) + ON CONFLICT(id) DO UPDATE SET + kind = excluded.kind, + hosting_kind = excluded.hosting_kind, + label = excluded.label, + base_url = excluded.base_url, + auth_json = excluded.auth_json, + capabilities_json = excluded.capabilities_json, + is_local = excluded.is_local, + is_enabled = excluded.is_enabled, + source = excluded.source, + notes_json = excluded.notes_json, + updated_at = excluded.updated_at, + last_seen_at = excluded.last_seen_at + `, + ) + .run( + input.id, + input.kind, + input.hostingKind, + input.label, + input.baseUrl, + stringifyJsonValue(input.auth), + stringifyJsonValue(input.capabilities), + toSqlBoolean(input.isLocal), + toSqlBoolean(input.isEnabled), + input.source, + stringifyJsonValue(input.notes), + createdAt, + updatedAt, + input.lastSeenAt, + ); + + return this.getById(input.id)!; + } +} + +export class WorkspacesRepository { + constructor(private readonly database: Database) {} + + deleteById(id: string) { + const existing = this.getById(id); + if (!existing) { + return false; + } + this.database.query("DELETE FROM workspaces WHERE id = ?1").run(id); + return true; + } + + getById(id: string) { + return mapWorkspace(this.database.query("SELECT * FROM workspaces WHERE id = ?1").get(id) as RawWorkspaceRow | null); + } + + getBySlug(slug: string) { + return mapWorkspace(this.database.query("SELECT * FROM workspaces WHERE slug = ?1").get(slug) as RawWorkspaceRow | null); + } + + list(input?: { includeHidden?: boolean }) { + const includeHidden = input?.includeHidden ?? false; + const query = includeHidden + ? 
"SELECT * FROM workspaces ORDER BY is_hidden ASC, display_name COLLATE NOCASE ASC" + : "SELECT * FROM workspaces WHERE is_hidden = 0 ORDER BY display_name COLLATE NOCASE ASC"; + return (this.database.query(query).all() as RawWorkspaceRow[]).map(mapWorkspace).filter(Boolean) as WorkspaceRecord[]; + } + + listByServerId(serverId: string, input?: { includeHidden?: boolean }) { + const includeHidden = input?.includeHidden ?? false; + const query = includeHidden + ? "SELECT * FROM workspaces WHERE server_id = ?1 ORDER BY is_hidden ASC, display_name COLLATE NOCASE ASC" + : "SELECT * FROM workspaces WHERE server_id = ?1 AND is_hidden = 0 ORDER BY display_name COLLATE NOCASE ASC"; + return (this.database.query(query).all(serverId) as RawWorkspaceRow[]).map(mapWorkspace).filter(Boolean) as WorkspaceRecord[]; + } + + countVisible() { + const row = this.database.query("SELECT COUNT(1) AS count FROM workspaces WHERE is_hidden = 0").get() as { count?: number } | null; + return row?.count ?? 0; + } + + findSlugConflict(slug: string, excludeWorkspaceId?: string) { + const row = excludeWorkspaceId + ? (this.database + .query("SELECT * FROM workspaces WHERE slug = ?1 AND id != ?2") + .get(slug, excludeWorkspaceId) as RawWorkspaceRow | null) + : (this.database.query("SELECT * FROM workspaces WHERE slug = ?1").get(slug) as RawWorkspaceRow | null); + return mapWorkspace(row); + } + + upsert(input: Omit & { createdAt?: string; updatedAt?: string }) { + const createdAt = input.createdAt ?? nowIso(); + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO workspaces ( + id, server_id, kind, display_name, slug, is_hidden, status, opencode_project_id, + remote_workspace_id, data_dir, config_dir, notes_json, created_at, updated_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14) + ON CONFLICT(id) DO UPDATE SET + server_id = excluded.server_id, + kind = excluded.kind, + display_name = excluded.display_name, + slug = excluded.slug, + is_hidden = excluded.is_hidden, + status = excluded.status, + opencode_project_id = excluded.opencode_project_id, + remote_workspace_id = excluded.remote_workspace_id, + data_dir = excluded.data_dir, + config_dir = excluded.config_dir, + notes_json = excluded.notes_json, + updated_at = excluded.updated_at + `, + ) + .run( + input.id, + input.serverId, + input.kind, + input.displayName, + input.slug, + toSqlBoolean(input.isHidden), + input.status, + input.opencodeProjectId, + input.remoteWorkspaceId, + input.dataDir, + input.configDir, + stringifyJsonValue(input.notes), + createdAt, + updatedAt, + ); + return this.getById(input.id)!; + } +} + +export class ServerRuntimeStateRepository { + constructor(private readonly database: Database) {} + + getByServerId(serverId: string) { + return mapServerRuntimeState( + this.database.query("SELECT * FROM server_runtime_state WHERE server_id = ?1").get(serverId) as RawServerRuntimeStateRow | null, + ); + } + + upsert(input: Omit & { updatedAt?: string }) { + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO server_runtime_state ( + server_id, runtime_version, opencode_status, opencode_version, opencode_base_url, + router_status, router_version, restart_policy_json, last_started_at, last_exit_json, + health_json, updated_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12) + ON CONFLICT(server_id) DO UPDATE SET + runtime_version = excluded.runtime_version, + opencode_status = excluded.opencode_status, + opencode_version = excluded.opencode_version, + opencode_base_url = excluded.opencode_base_url, + router_status = excluded.router_status, + router_version = excluded.router_version, + restart_policy_json = excluded.restart_policy_json, + last_started_at = excluded.last_started_at, + last_exit_json = excluded.last_exit_json, + health_json = excluded.health_json, + updated_at = excluded.updated_at + `, + ) + .run( + input.serverId, + input.runtimeVersion, + input.opencodeStatus, + input.opencodeVersion, + input.opencodeBaseUrl, + input.routerStatus, + input.routerVersion, + stringifyJsonValue(input.restartPolicy), + input.lastStartedAt, + stringifyJsonValue(input.lastExit), + stringifyJsonValue(input.health), + updatedAt, + ); + return this.getByServerId(input.serverId)!; + } +} + +export class WorkspaceRuntimeStateRepository { + constructor(private readonly database: Database) {} + + getByWorkspaceId(workspaceId: string) { + return mapWorkspaceRuntimeState( + this.database.query("SELECT * FROM workspace_runtime_state WHERE workspace_id = ?1").get(workspaceId) as RawWorkspaceRuntimeStateRow | null, + ); + } + + upsert(input: Omit & { updatedAt?: string }) { + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO workspace_runtime_state ( + workspace_id, backend_kind, last_sync_at, last_session_refresh_at, last_error_json, + health_json, updated_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7) + ON CONFLICT(workspace_id) DO UPDATE SET + backend_kind = excluded.backend_kind, + last_sync_at = excluded.last_sync_at, + last_session_refresh_at = excluded.last_session_refresh_at, + last_error_json = excluded.last_error_json, + health_json = excluded.health_json, + updated_at = excluded.updated_at + `, + ) + .run( + input.workspaceId, + input.backendKind, + input.lastSyncAt, + input.lastSessionRefreshAt, + stringifyJsonValue(input.lastError), + stringifyJsonValue(input.health), + updatedAt, + ); + return this.getByWorkspaceId(input.workspaceId)!; + } +} + +export class ServerConfigStateRepository { + constructor(private readonly database: Database) {} + + getByServerId(serverId: string) { + return mapServerConfigState( + this.database.query("SELECT * FROM server_config_state WHERE server_id = ?1").get(serverId) as RawServerConfigStateRow | null, + ); + } + + upsert(input: Omit & { updatedAt?: string }) { + const updatedAt = input.updatedAt ?? nowIso(); + this.database + .query( + ` + INSERT INTO server_config_state (server_id, opencode_json, updated_at) + VALUES (?1, ?2, ?3) + ON CONFLICT(server_id) DO UPDATE SET + opencode_json = excluded.opencode_json, + updated_at = excluded.updated_at + `, + ) + .run(input.serverId, stringifyJsonValue(input.opencode), updatedAt); + return this.getByServerId(input.serverId)!; + } +} + +export class WorkspaceConfigStateRepository { + constructor(private readonly database: Database) {} + + getByWorkspaceId(workspaceId: string) { + return mapWorkspaceConfigState( + this.database.query("SELECT * FROM workspace_config_state WHERE workspace_id = ?1").get(workspaceId) as RawWorkspaceConfigStateRow | null, + ); + } + + upsert(input: Omit & { updatedAt?: string }) { + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO workspace_config_state (workspace_id, openwork_json, opencode_json, updated_at) + VALUES (?1, ?2, ?3, ?4) + ON CONFLICT(workspace_id) DO UPDATE SET + openwork_json = excluded.openwork_json, + opencode_json = excluded.opencode_json, + updated_at = excluded.updated_at + `, + ) + .run( + input.workspaceId, + stringifyJsonValue(input.openwork), + stringifyJsonValue(input.opencode), + updatedAt, + ); + return this.getByWorkspaceId(input.workspaceId)!; + } +} + +export class ManagedConfigRepository { + constructor( + private readonly database: Database, + private readonly tableName: "mcps" | "skills" | "plugins" | "provider_configs", + ) {} + + getById(id: string) { + return mapManagedConfig( + this.database.query(`SELECT * FROM ${this.tableName} WHERE id = ?1`).get(id) as RawManagedConfigRow | null, + ); + } + + findByKey(key: string) { + return (this.database + .query(`SELECT * FROM ${this.tableName} WHERE item_key = ?1 ORDER BY updated_at DESC`) + .all(key) as RawManagedConfigRow[]).map(mapManagedConfig).filter(Boolean) as ManagedConfigRecord[]; + } + + list() { + return (this.database + .query(`SELECT * FROM ${this.tableName} ORDER BY updated_at DESC`) + .all() as RawManagedConfigRow[]).map(mapManagedConfig).filter(Boolean) as ManagedConfigRecord[]; + } + + deleteById(id: string) { + const existing = this.getById(id); + if (!existing) { + return false; + } + this.database.query(`DELETE FROM ${this.tableName} WHERE id = ?1`).run(id); + return true; + } + + upsert(input: Omit & { createdAt?: string; updatedAt?: string }) { + const createdAt = input.createdAt ?? nowIso(); + const updatedAt = input.updatedAt ?? nowIso(); + const itemKind = this.tableName === "mcps" + ? "mcp" + : this.tableName === "plugins" + ? "plugin" + : this.tableName === "provider_configs" + ? 
"provider" + : "skill"; + this.database + .query( + ` + INSERT INTO ${this.tableName} ( + id, item_kind, display_name, item_key, config_json, auth_json, metadata_json, + source, cloud_item_id, created_at, updated_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11) + ON CONFLICT(id) DO UPDATE SET + item_kind = excluded.item_kind, + display_name = excluded.display_name, + item_key = excluded.item_key, + config_json = excluded.config_json, + auth_json = excluded.auth_json, + metadata_json = excluded.metadata_json, + source = excluded.source, + cloud_item_id = excluded.cloud_item_id, + updated_at = excluded.updated_at + `, + ) + .run( + input.id, + itemKind, + input.displayName, + input.key, + stringifyJsonValue(input.config), + stringifyJsonValue(input.auth), + stringifyJsonValue(input.metadata), + input.source, + input.cloudItemId, + createdAt, + updatedAt, + ); + return this.getById(input.id)!; + } +} + +export class WorkspaceAssignmentRepository { + constructor( + private readonly database: Database, + private readonly tableName: + | "workspace_mcps" + | "workspace_skills" + | "workspace_plugins" + | "workspace_provider_configs", + ) {} + + listForWorkspace(workspaceId: string) { + return (this.database + .query(`SELECT workspace_id, item_id, created_at, updated_at FROM ${this.tableName} WHERE workspace_id = ?1`) + .all(workspaceId) as Array<{ + created_at: string; + item_id: string; + updated_at: string; + workspace_id: string; + }>).map((row) => ({ + createdAt: row.created_at, + itemId: row.item_id, + updatedAt: row.updated_at, + workspaceId: row.workspace_id, + })) as WorkspaceAssignmentRecord[]; + } + + listForItem(itemId: string) { + return (this.database + .query(`SELECT workspace_id, item_id, created_at, updated_at FROM ${this.tableName} WHERE item_id = ?1`) + .all(itemId) as Array<{ + created_at: string; + item_id: string; + updated_at: string; + workspace_id: string; + }>).map((row) => ({ + createdAt: row.created_at, + itemId: row.item_id, + 
updatedAt: row.updated_at, + workspaceId: row.workspace_id, + })) as WorkspaceAssignmentRecord[]; + } + + deleteForItem(itemId: string) { + this.database.query(`DELETE FROM ${this.tableName} WHERE item_id = ?1`).run(itemId); + } + + replaceAssignments(workspaceId: string, itemIds: string[]) { + const replace = this.database.transaction((nextItemIds: string[]) => { + this.database.query(`DELETE FROM ${this.tableName} WHERE workspace_id = ?1`).run(workspaceId); + const timestamp = nowIso(); + const insert = this.database.query( + `INSERT INTO ${this.tableName} (workspace_id, item_id, created_at, updated_at) VALUES (?1, ?2, ?3, ?4)`, + ); + for (const itemId of nextItemIds) { + insert.run(workspaceId, itemId, timestamp, timestamp); + } + }); + + replace(itemIds); + return this.listForWorkspace(workspaceId); + } +} + +export class CloudSigninRepository { + constructor(private readonly database: Database) {} + + getPrimary() { + return mapCloudSignin(this.database.query("SELECT * FROM cloud_signin LIMIT 1").get() as RawCloudSigninRow | null); + } + + upsert(input: Omit & { createdAt?: string; updatedAt?: string }) { + const createdAt = input.createdAt ?? nowIso(); + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO cloud_signin ( + id, server_id, cloud_base_url, user_id, org_id, auth_json, metadata_json, + last_validated_at, created_at, updated_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) + ON CONFLICT(id) DO UPDATE SET + server_id = excluded.server_id, + cloud_base_url = excluded.cloud_base_url, + user_id = excluded.user_id, + org_id = excluded.org_id, + auth_json = excluded.auth_json, + metadata_json = excluded.metadata_json, + last_validated_at = excluded.last_validated_at, + updated_at = excluded.updated_at + `, + ) + .run( + input.id, + input.serverId, + input.cloudBaseUrl, + input.userId, + input.orgId, + stringifyJsonValue(input.auth), + stringifyJsonValue(input.metadata), + input.lastValidatedAt, + createdAt, + updatedAt, + ); + return this.getPrimary()!; + } + + deletePrimary() { + this.database.query("DELETE FROM cloud_signin").run(); + } +} + +export class WorkspaceSharesRepository { + constructor(private readonly database: Database) {} + + getById(id: string) { + return mapWorkspaceShare(this.database.query("SELECT * FROM workspace_shares WHERE id = ?1").get(id) as RawWorkspaceShareRow | null); + } + + listByWorkspace(workspaceId: string) { + return (this.database + .query("SELECT * FROM workspace_shares WHERE workspace_id = ?1 ORDER BY updated_at DESC") + .all(workspaceId) as RawWorkspaceShareRow[]).map(mapWorkspaceShare).filter(Boolean) as WorkspaceShareRecord[]; + } + + getLatestByWorkspace(workspaceId: string) { + return mapWorkspaceShare( + this.database.query("SELECT * FROM workspace_shares WHERE workspace_id = ?1 ORDER BY updated_at DESC LIMIT 1").get(workspaceId) as RawWorkspaceShareRow | null, + ); + } + + upsert(input: Omit & { createdAt?: string; updatedAt?: string }) { + const createdAt = input.createdAt ?? nowIso(); + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO workspace_shares ( + id, workspace_id, access_key, status, last_used_at, audit_json, created_at, updated_at, revoked_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9) + ON CONFLICT(id) DO UPDATE SET + workspace_id = excluded.workspace_id, + access_key = excluded.access_key, + status = excluded.status, + last_used_at = excluded.last_used_at, + audit_json = excluded.audit_json, + updated_at = excluded.updated_at, + revoked_at = excluded.revoked_at + `, + ) + .run( + input.id, + input.workspaceId, + input.accessKey, + input.status, + input.lastUsedAt, + stringifyJsonValue(input.audit), + createdAt, + updatedAt, + input.revokedAt, + ); + return this.listByWorkspace(input.workspaceId).find((item) => item.id === input.id)!; + } +} + +export class RouterIdentitiesRepository { + constructor(private readonly database: Database) {} + + getById(id: string) { + return mapRouterIdentity(this.database.query("SELECT * FROM router_identities WHERE id = ?1").get(id) as RawRouterIdentityRow | null); + } + + listByServer(serverId: string) { + return (this.database + .query("SELECT * FROM router_identities WHERE server_id = ?1 ORDER BY updated_at DESC") + .all(serverId) as RawRouterIdentityRow[]).map(mapRouterIdentity).filter(Boolean) as RouterIdentityRecord[]; + } + + deleteById(id: string) { + const existing = this.getById(id); + if (!existing) { + return false; + } + this.database.query("DELETE FROM router_identities WHERE id = ?1").run(id); + return true; + } + + upsert(input: Omit & { createdAt?: string; updatedAt?: string }) { + const createdAt = input.createdAt ?? nowIso(); + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO router_identities ( + id, server_id, kind, display_name, config_json, auth_json, is_enabled, created_at, updated_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9) + ON CONFLICT(id) DO UPDATE SET + server_id = excluded.server_id, + kind = excluded.kind, + display_name = excluded.display_name, + config_json = excluded.config_json, + auth_json = excluded.auth_json, + is_enabled = excluded.is_enabled, + updated_at = excluded.updated_at + `, + ) + .run( + input.id, + input.serverId, + input.kind, + input.displayName, + stringifyJsonValue(input.config), + stringifyJsonValue(input.auth), + toSqlBoolean(input.isEnabled), + createdAt, + updatedAt, + ); + return this.listByServer(input.serverId).find((item) => item.id === input.id)!; + } +} + +export class RouterBindingsRepository { + constructor(private readonly database: Database) {} + + getById(id: string) { + return mapRouterBinding(this.database.query("SELECT * FROM router_bindings WHERE id = ?1").get(id) as RawRouterBindingRow | null); + } + + listByServer(serverId: string) { + return (this.database + .query("SELECT * FROM router_bindings WHERE server_id = ?1 ORDER BY updated_at DESC") + .all(serverId) as RawRouterBindingRow[]).map(mapRouterBinding).filter(Boolean) as RouterBindingRecord[]; + } + + deleteById(id: string) { + const existing = this.getById(id); + if (!existing) { + return false; + } + this.database.query("DELETE FROM router_bindings WHERE id = ?1").run(id); + return true; + } + + upsert(input: Omit & { createdAt?: string; updatedAt?: string }) { + const createdAt = input.createdAt ?? nowIso(); + const updatedAt = input.updatedAt ?? 
nowIso(); + this.database + .query( + ` + INSERT INTO router_bindings ( + id, server_id, router_identity_id, binding_key, config_json, is_enabled, created_at, updated_at + ) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8) + ON CONFLICT(id) DO UPDATE SET + server_id = excluded.server_id, + router_identity_id = excluded.router_identity_id, + binding_key = excluded.binding_key, + config_json = excluded.config_json, + is_enabled = excluded.is_enabled, + updated_at = excluded.updated_at + `, + ) + .run( + input.id, + input.serverId, + input.routerIdentityId, + input.bindingKey, + stringifyJsonValue(input.config), + toSqlBoolean(input.isEnabled), + createdAt, + updatedAt, + ); + return this.listByServer(input.serverId).find((item) => item.id === input.id)!; + } +} + +export type ServerRepositories = { + cloudSignin: CloudSigninRepository; + mcps: ManagedConfigRepository; + plugins: ManagedConfigRepository; + providerConfigs: ManagedConfigRepository; + routerBindings: RouterBindingsRepository; + routerIdentities: RouterIdentitiesRepository; + serverConfigState: ServerConfigStateRepository; + serverRuntimeState: ServerRuntimeStateRepository; + servers: ServersRepository; + skills: ManagedConfigRepository; + workspaceConfigState: WorkspaceConfigStateRepository; + workspaceMcps: WorkspaceAssignmentRepository; + workspacePlugins: WorkspaceAssignmentRepository; + workspaceProviderConfigs: WorkspaceAssignmentRepository; + workspaceRuntimeState: WorkspaceRuntimeStateRepository; + workspaceShares: WorkspaceSharesRepository; + workspaceSkills: WorkspaceAssignmentRepository; + workspaces: WorkspacesRepository; +}; + +export function createRepositories(database: Database): ServerRepositories { + return { + cloudSignin: new CloudSigninRepository(database), + mcps: new ManagedConfigRepository(database, "mcps"), + plugins: new ManagedConfigRepository(database, "plugins"), + providerConfigs: new ManagedConfigRepository(database, "provider_configs"), + routerBindings: new 
RouterBindingsRepository(database), + routerIdentities: new RouterIdentitiesRepository(database), + serverConfigState: new ServerConfigStateRepository(database), + serverRuntimeState: new ServerRuntimeStateRepository(database), + servers: new ServersRepository(database), + skills: new ManagedConfigRepository(database, "skills"), + workspaceConfigState: new WorkspaceConfigStateRepository(database), + workspaceMcps: new WorkspaceAssignmentRepository(database, "workspace_mcps"), + workspacePlugins: new WorkspaceAssignmentRepository(database, "workspace_plugins"), + workspaceProviderConfigs: new WorkspaceAssignmentRepository(database, "workspace_provider_configs"), + workspaceRuntimeState: new WorkspaceRuntimeStateRepository(database), + workspaceShares: new WorkspaceSharesRepository(database), + workspaceSkills: new WorkspaceAssignmentRepository(database, "workspace_skills"), + workspaces: new WorkspacesRepository(database), + }; +} diff --git a/apps/server-v2/src/database/status-provider.ts b/apps/server-v2/src/database/status-provider.ts new file mode 100644 index 00000000..ed3ebb0d --- /dev/null +++ b/apps/server-v2/src/database/status-provider.ts @@ -0,0 +1,54 @@ +import type { StartupDiagnostics } from "./types.js"; + +export type DatabaseStatus = { + bootstrapMode: "fresh" | "existing"; + configured: true; + importWarnings: number; + kind: "sqlite"; + migrations: { + appliedThisRun: string[]; + currentVersion: string; + totalApplied: number; + }; + path: string; + phaseOwner: 2; + status: "ready" | "warning"; + summary: string; + workingDirectory: string; +}; + +export type DatabaseStatusProvider = { + getStartupDiagnostics(): StartupDiagnostics; + getStatus(): DatabaseStatus; +}; + +export function createSqliteDatabaseStatusProvider(input: { diagnostics: StartupDiagnostics }): DatabaseStatusProvider { + return { + getStartupDiagnostics() { + return input.diagnostics; + }, + + getStatus() { + const warningCount = input.diagnostics.warnings.length; + const 
appliedThisRun = input.diagnostics.migrations.applied; + const totalVisibleWorkspaces = input.diagnostics.registry.totalVisibleWorkspaces; + const totalServers = input.diagnostics.registry.totalServers; + return { + bootstrapMode: input.diagnostics.mode, + configured: true, + importWarnings: warningCount, + kind: "sqlite", + migrations: { + appliedThisRun, + currentVersion: input.diagnostics.migrations.currentVersion, + totalApplied: input.diagnostics.migrations.totalApplied, + }, + path: input.diagnostics.workingDirectory.databasePath, + phaseOwner: 2, + status: warningCount > 0 ? "warning" : "ready", + summary: `SQLite ready with ${totalServers} server record(s), ${totalVisibleWorkspaces} visible workspace(s), and ${warningCount} import warning(s).`, + workingDirectory: input.diagnostics.workingDirectory.rootDir, + }; + }, + }; +} diff --git a/apps/server-v2/src/database/types.ts b/apps/server-v2/src/database/types.ts new file mode 100644 index 00000000..c2fc9b7f --- /dev/null +++ b/apps/server-v2/src/database/types.ts @@ -0,0 +1,199 @@ +export type ServerKind = "local" | "remote"; +export type HostingKind = "desktop" | "self_hosted" | "cloud"; +export type WorkspaceKind = "local" | "remote" | "control" | "help"; +export type WorkspaceStatus = "ready" | "imported" | "attention"; +export type BackendKind = "local_opencode" | "remote_openwork"; +export type ImportStatus = "error" | "imported" | "skipped" | "unavailable"; + +export type JsonObject = Record; + +export type ServerRecord = { + auth: JsonObject | null; + baseUrl: string | null; + capabilities: JsonObject; + createdAt: string; + hostingKind: HostingKind; + id: string; + isEnabled: boolean; + isLocal: boolean; + kind: ServerKind; + label: string; + lastSeenAt: string | null; + notes: JsonObject | null; + source: string; + updatedAt: string; +}; + +export type WorkspaceRecord = { + configDir: string | null; + createdAt: string; + dataDir: string | null; + displayName: string; + id: string; + isHidden: 
boolean; + kind: WorkspaceKind; + notes: JsonObject | null; + opencodeProjectId: string | null; + remoteWorkspaceId: string | null; + serverId: string; + slug: string; + status: WorkspaceStatus; + updatedAt: string; +}; + +export type ServerRuntimeStateRecord = { + health: JsonObject | null; + lastExit: JsonObject | null; + lastStartedAt: string | null; + opencodeBaseUrl: string | null; + opencodeStatus: string; + opencodeVersion: string | null; + restartPolicy: JsonObject | null; + routerStatus: string; + routerVersion: string | null; + runtimeVersion: string | null; + serverId: string; + updatedAt: string; +}; + +export type WorkspaceRuntimeStateRecord = { + backendKind: BackendKind; + health: JsonObject | null; + lastError: JsonObject | null; + lastSessionRefreshAt: string | null; + lastSyncAt: string | null; + updatedAt: string; + workspaceId: string; +}; + +export type ServerConfigStateRecord = { + opencode: JsonObject; + serverId: string; + updatedAt: string; +}; + +export type WorkspaceConfigStateRecord = { + openwork: JsonObject; + opencode: JsonObject; + updatedAt: string; + workspaceId: string; +}; + +export type ManagedSource = "cloud_synced" | "discovered" | "imported" | "openwork_managed"; + +export type ManagedConfigRecord = { + auth: JsonObject | null; + cloudItemId: string | null; + config: JsonObject; + createdAt: string; + displayName: string; + id: string; + key: string | null; + metadata: JsonObject | null; + source: ManagedSource; + updatedAt: string; +}; + +export type WorkspaceAssignmentRecord = { + createdAt: string; + itemId: string; + updatedAt: string; + workspaceId: string; +}; + +export type CloudSigninRecord = { + auth: JsonObject | null; + cloudBaseUrl: string; + createdAt: string; + id: string; + lastValidatedAt: string | null; + metadata: JsonObject | null; + orgId: string | null; + serverId: string; + updatedAt: string; + userId: string | null; +}; + +export type WorkspaceShareRecord = { + accessKey: string | null; + audit: 
JsonObject | null; + createdAt: string; + id: string; + lastUsedAt: string | null; + revokedAt: string | null; + status: "active" | "disabled" | "revoked"; + updatedAt: string; + workspaceId: string; +}; + +export type RouterIdentityRecord = { + auth: JsonObject | null; + config: JsonObject; + createdAt: string; + displayName: string; + id: string; + isEnabled: boolean; + kind: string; + serverId: string; + updatedAt: string; +}; + +export type RouterBindingRecord = { + config: JsonObject; + createdAt: string; + bindingKey: string; + id: string; + isEnabled: boolean; + routerIdentityId: string; + serverId: string; + updatedAt: string; +}; + +export type MigrationRecord = { + appliedAt: string; + checksum: string; + name: string; + version: string; +}; + +export type MigrationResult = { + applied: string[]; + currentVersion: string; + totalApplied: number; +}; + +export type ImportSourceReport = { + details: JsonObject; + sourcePath: string | null; + status: ImportStatus; + warnings: string[]; +}; + +export type StartupDiagnostics = { + completedAt: string; + importReports: { + cloudSignin: ImportSourceReport; + desktopWorkspaceState: ImportSourceReport; + orchestratorAuth: ImportSourceReport; + orchestratorState: ImportSourceReport; + }; + legacyWorkspaceImport: { + completedAt: string | null; + skipped: boolean; + }; + mode: "fresh" | "existing"; + migrations: MigrationResult; + registry: { + hiddenWorkspaceIds: string[]; + localServerCreated: boolean; + localServerId: string; + totalServers: number; + totalVisibleWorkspaces: number; + }; + warnings: string[]; + workingDirectory: { + databasePath: string; + rootDir: string; + workspacesDir: string; + }; +}; diff --git a/apps/server-v2/src/database/working-directory.ts b/apps/server-v2/src/database/working-directory.ts new file mode 100644 index 00000000..4b66f716 --- /dev/null +++ b/apps/server-v2/src/database/working-directory.ts @@ -0,0 +1,117 @@ +import fs from "node:fs"; +import os from "node:os"; +import path 
from "node:path"; + +export type ServerWorkingDirectory = { + databaseDir: string; + databasePath: string; + importsDir: string; + managedDir: string; + managedMcpDir: string; + managedPluginDir: string; + managedProviderDir: string; + managedSkillDir: string; + rootDir: string; + runtimeDir: string; + workspacesDir: string; +}; + +type ResolveServerWorkingDirectoryOptions = { + environment: string; + explicitRootDir?: string; +}; + +function isTruthy(value: string | undefined) { + if (!value) { + return false; + } + + return ["1", "true", "yes", "on"].includes(value.trim().toLowerCase()); +} + +function resolvePlatformDataRoot() { + const home = os.homedir(); + const devMode = isTruthy(process.env.OPENWORK_DEV_MODE); + const folderName = devMode ? "com.differentai.openwork.dev" : "com.differentai.openwork"; + + if (process.platform === "darwin") { + return path.join(home, "Library", "Application Support", folderName); + } + + if (process.platform === "win32") { + const appData = process.env.APPDATA?.trim() || path.join(home, "AppData", "Roaming"); + return path.join(appData, folderName); + } + + const xdgDataHome = process.env.XDG_DATA_HOME?.trim() || path.join(home, ".local", "share"); + return path.join(xdgDataHome, folderName); +} + +function resolveRootDir(options: ResolveServerWorkingDirectoryOptions) { + if (options.explicitRootDir?.trim()) { + return path.resolve(options.explicitRootDir.trim()); + } + + const override = process.env.OPENWORK_SERVER_V2_WORKDIR?.trim(); + if (override) { + return path.resolve(override); + } + + const sharedDataDir = process.env.OPENWORK_DATA_DIR?.trim(); + if (sharedDataDir) { + return path.join(path.resolve(sharedDataDir), "server-v2"); + } + + if (options.environment === "test") { + return path.join(process.cwd(), ".openwork-server-v2-test"); + } + + return path.join(resolvePlatformDataRoot(), "server-v2"); +} + +export function resolveServerWorkingDirectory(options: ResolveServerWorkingDirectoryOptions): 
ServerWorkingDirectory { + const rootDir = resolveRootDir(options); + const databaseDir = path.join(rootDir, "state"); + const managedDir = path.join(rootDir, "managed"); + + return { + databaseDir, + databasePath: path.join(databaseDir, "openwork-server-v2.sqlite"), + importsDir: path.join(rootDir, "imports"), + managedDir, + managedMcpDir: path.join(managedDir, "mcps"), + managedPluginDir: path.join(managedDir, "plugins"), + managedProviderDir: path.join(managedDir, "providers"), + managedSkillDir: path.join(managedDir, "skills"), + rootDir, + runtimeDir: path.join(rootDir, "runtime"), + workspacesDir: path.join(rootDir, "workspaces"), + }; +} + +export function ensureServerWorkingDirectoryLayout(layout: ServerWorkingDirectory) { + for (const directory of [ + layout.rootDir, + layout.databaseDir, + layout.importsDir, + layout.managedDir, + layout.managedMcpDir, + layout.managedPluginDir, + layout.managedProviderDir, + layout.managedSkillDir, + layout.runtimeDir, + layout.workspacesDir, + ]) { + fs.mkdirSync(directory, { recursive: true }); + } +} + +export function resolveWorkspaceConfigDir(layout: ServerWorkingDirectory, workspaceId: string) { + return path.join(layout.workspacesDir, workspaceId, "config"); +} + +export function ensureWorkspaceConfigDir(layout: ServerWorkingDirectory, workspaceId: string) { + const directory = resolveWorkspaceConfigDir(layout, workspaceId); + fs.mkdirSync(directory, { recursive: true }); + return directory; +} diff --git a/apps/server-v2/src/files.test.ts b/apps/server-v2/src/files.test.ts new file mode 100644 index 00000000..2dd2a71d --- /dev/null +++ b/apps/server-v2/src/files.test.ts @@ -0,0 +1,350 @@ +import { afterEach, expect, test } from "bun:test"; +import fs from "node:fs"; +import path from "node:path"; +import os from "node:os"; +import { createApp } from "./app.js"; +import { createAppDependencies } from "./context/app-dependencies.js"; + +const tempRoots: string[] = []; + +afterEach(() => { + while 
(tempRoots.length) { + const next = tempRoots.pop(); + if (!next) continue; + fs.rmSync(next, { force: true, recursive: true }); + } +}); + +function createTempRoot(label: string) { + const root = fs.mkdtempSync(path.join(os.tmpdir(), `${label}-`)); + tempRoots.push(root); + return root; +} + +function createTestApp() { + const root = createTempRoot("openwork-server-v2-phase7"); + const dependencies = createAppDependencies({ + environment: "test", + inMemory: true, + runtime: { + bootstrapPolicy: "disabled", + }, + startedAt: new Date("2026-04-14T00:00:00.000Z"), + version: "0.0.0-test", + workingDirectory: path.join(root, "server-v2"), + }); + + return { + app: createApp({ dependencies }), + dependencies, + root, + }; +} + +test("local workspace creation and config routes use server-owned config directories", async () => { + const { app, dependencies, root } = createTestApp(); + const workspaceRoot = path.join(root, "workspace-alpha"); + + const createResponse = await app.request("http://openwork.local/workspaces/local", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ folderPath: workspaceRoot, name: "Alpha", preset: "starter" }), + }); + const created = await createResponse.json(); + const workspaceId = created.data.id as string; + + expect(createResponse.status).toBe(200); + expect(created.data.backend.local.dataDir).toBe(workspaceRoot); + expect(created.data.backend.local.configDir).toContain(`/workspaces/${workspaceId}/config`); + + const configResponse = await app.request(`http://openwork.local/workspaces/${workspaceId}/config`); + const configBody = await configResponse.json(); + expect(configResponse.status).toBe(200); + expect(configBody.data.stored.openwork.authorizedRoots).toEqual([]); + expect(configBody.data.effective.openwork.authorizedRoots).toEqual([]); + expect(configBody.data.effective.opencode.permission?.external_directory).toBeUndefined(); + + const patchResponse = await 
app.request(`http://openwork.local/workspaces/${workspaceId}/config`, { + method: "PATCH", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + openwork: { reload: { auto: true } }, + opencode: { permission: { external_directory: { [`${path.join(root, "shared-data")}/*`]: "allow" } } }, + }), + }); + const patched = await patchResponse.json(); + expect(patchResponse.status).toBe(200); + expect(patched.data.stored.openwork.reload.auto).toBe(true); + expect(patched.data.stored.openwork.authorizedRoots).toEqual([]); + expect(patched.data.effective.openwork.authorizedRoots).toEqual([path.join(root, "shared-data")]); + expect(patched.data.effective.opencode.permission.external_directory[`${path.join(root, "shared-data")}/*`]).toBe("allow"); + expect(patched.data.effective.opencode.permission.external_directory[`${workspaceRoot}/*`]).toBeUndefined(); + + const rawResponse = await app.request(`http://openwork.local/workspaces/${workspaceId}/config/opencode-raw?scope=project`); + const rawBody = await rawResponse.json(); + expect(rawResponse.status).toBe(200); + expect(rawBody.data.content).toContain("external_directory"); + expect(rawBody.data.path).toContain(`/workspaces/${workspaceId}/config/opencode.jsonc`); + + const persistedWorkspace = dependencies.persistence.repositories.workspaces.getById(workspaceId); + expect(persistedWorkspace?.configDir).toBeTruthy(); + expect(fs.existsSync(path.join(persistedWorkspace!.configDir!, "opencode.jsonc"))).toBe(true); + expect(fs.existsSync(path.join(workspaceRoot, "opencode.jsonc"))).toBe(true); +}); + +test("file routes cover simple content, file sessions, inbox, artifacts, and reload events", async () => { + const { app, root } = createTestApp(); + const workspaceRoot = path.join(root, "workspace-beta"); + + const createResponse = await app.request("http://openwork.local/workspaces/local", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ folderPath: 
workspaceRoot, name: "Beta", preset: "starter" }), + }); + const created = await createResponse.json(); + const workspaceId = created.data.id as string; + + const contentWrite = await app.request(`http://openwork.local/workspaces/${workspaceId}/files/content`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ path: "notes/today.md", content: "hello phase 7" }), + }); + const contentWriteBody = await contentWrite.json(); + expect(contentWrite.status).toBe(200); + expect(contentWriteBody.data.path).toBe("notes/today.md"); + + const contentRead = await app.request(`http://openwork.local/workspaces/${workspaceId}/files/content?path=notes/today.md`); + const contentReadBody = await contentRead.json(); + expect(contentRead.status).toBe(200); + expect(contentReadBody.data.content).toBe("hello phase 7"); + + const sessionCreate = await app.request(`http://openwork.local/workspaces/${workspaceId}/file-sessions`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ write: true }), + }); + const sessionBody = await sessionCreate.json(); + const fileSessionId = sessionBody.data.id as string; + expect(sessionCreate.status).toBe(200); + + const writeBatch = await app.request(`http://openwork.local/workspaces/${workspaceId}/file-sessions/${fileSessionId}/write-batch`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ writes: [{ path: "docs/readme.txt", contentBase64: Buffer.from("file-session").toString("base64") }] }), + }); + const writeBatchBody = await writeBatch.json(); + expect(writeBatch.status).toBe(200); + + const staleBatch = await app.request(`http://openwork.local/workspaces/${workspaceId}/file-sessions/${fileSessionId}/write-batch`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + writes: [{ + path: "docs/readme.txt", + contentBase64: Buffer.from("stale").toString("base64"), + 
ifMatchRevision: "1:1", + }], + }), + }); + const staleBatchBody = await staleBatch.json(); + expect(staleBatch.status).toBe(200); + expect(staleBatchBody.data.items[0].code).toBe("conflict"); + expect(staleBatchBody.data.items[0].currentRevision).toBe(writeBatchBody.data.items[0].revision); + + const readBatch = await app.request(`http://openwork.local/workspaces/${workspaceId}/file-sessions/${fileSessionId}/read-batch`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ paths: ["docs/readme.txt"] }), + }); + const readBatchBody = await readBatch.json(); + expect(readBatch.status).toBe(200); + expect(Buffer.from(readBatchBody.data.items[0].contentBase64, "base64").toString("utf8")).toBe("file-session"); + + const ops = await app.request(`http://openwork.local/workspaces/${workspaceId}/file-sessions/${fileSessionId}/operations`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ operations: [{ type: "rename", from: "docs/readme.txt", to: "docs/renamed.txt" }] }), + }); + expect(ops.status).toBe(200); + + const catalog = await app.request(`http://openwork.local/workspaces/${workspaceId}/file-sessions/${fileSessionId}/catalog/snapshot?prefix=docs`); + const catalogBody = await catalog.json(); + expect(catalog.status).toBe(200); + expect(catalogBody.data.items.some((item: any) => item.path === "docs/renamed.txt")).toBe(true); + + const upload = await app.request(`http://openwork.local/workspaces/${workspaceId}/inbox`, { + method: "POST", + body: (() => { + const form = new FormData(); + form.append("file", new File(["hello inbox"], "hello.txt", { type: "text/plain" })); + return form; + })(), + }); + const uploadBody = await upload.json(); + expect(upload.status).toBe(200); + expect(uploadBody.data.path).toBe("hello.txt"); + + const inboxList = await app.request(`http://openwork.local/workspaces/${workspaceId}/inbox`); + const inboxListBody = await inboxList.json(); + 
expect(inboxList.status).toBe(200); + expect(inboxListBody.data.items[0].name).toBe("hello.txt"); + + const outboxDir = path.join(workspaceRoot, ".opencode", "openwork", "outbox"); + fs.mkdirSync(outboxDir, { recursive: true }); + fs.writeFileSync(path.join(outboxDir, "artifact.bin"), "artifact", "utf8"); + + const artifacts = await app.request(`http://openwork.local/workspaces/${workspaceId}/artifacts`); + const artifactsBody = await artifacts.json(); + expect(artifacts.status).toBe(200); + expect(artifactsBody.data.items[0].path).toBe("artifact.bin"); + + const reloads = await app.request(`http://openwork.local/workspaces/${workspaceId}/reload-events`); + const reloadBody = await reloads.json(); + expect(reloads.status).toBe(200); + expect(reloadBody.data.items.length).toBeGreaterThan(0); + expect(reloadBody.data.items.some((item: any) => item.reason === "config")).toBe(true); + + const disposed = await app.request(`http://openwork.local/workspaces/${workspaceId}/dispose`, { + method: "POST", + }); + const disposedBody = await disposed.json(); + expect(disposed.status).toBe(200); + expect(disposedBody.data.disposed).toBe(true); +}); + +test("remote workspace config and file routes proxy through the local server", async () => { + const remote = Bun.serve({ + fetch(request) { + const url = new URL(request.url); + if (url.pathname === "/workspaces/remote-alpha/config" && request.method === "GET") { + return Response.json({ + ok: true, + data: { + effective: { opencode: { permission: { external_directory: { "/srv/alpha/*": "allow" } } }, openwork: {} }, + materialized: { compatibilityOpencodePath: null, compatibilityOpenworkPath: null, configDir: "/srv/config", configOpenworkPath: "/srv/config/.opencode/openwork.json", configOpencodePath: "/srv/config/opencode.jsonc" }, + stored: { openwork: { reload: { auto: true } }, opencode: {} }, + updatedAt: new Date().toISOString(), + workspaceId: "remote-alpha", + }, + meta: { requestId: "owreq_remote_cfg_1", timestamp: new 
Date().toISOString() }, + }); + } + if (url.pathname === "/workspaces/remote-alpha/config" && request.method === "PATCH") { + return Response.json({ + ok: true, + data: { + effective: { opencode: { permission: { external_directory: { "/srv/alpha/*": "allow", "/srv/shared/*": "allow" } } }, openwork: {} }, + materialized: { compatibilityOpencodePath: null, compatibilityOpenworkPath: null, configDir: "/srv/config", configOpenworkPath: "/srv/config/.opencode/openwork.json", configOpencodePath: "/srv/config/opencode.jsonc" }, + stored: { openwork: { reload: { auto: true } }, opencode: {} }, + updatedAt: new Date().toISOString(), + workspaceId: "remote-alpha", + }, + meta: { requestId: "owreq_remote_cfg_2", timestamp: new Date().toISOString() }, + }); + } + if (url.pathname === "/workspaces/remote-alpha/files/content" && request.method === "GET") { + return Response.json({ ok: true, data: { path: "notes.md", content: "remote hello", bytes: 12, updatedAt: 42 }, meta: { requestId: "owreq_remote_file_1", timestamp: new Date().toISOString() } }); + } + if (url.pathname === "/workspaces/remote-alpha/files/content" && request.method === "POST") { + return Response.json({ ok: true, data: { path: "notes.md", bytes: 12, revision: "42:12", updatedAt: 43 }, meta: { requestId: "owreq_remote_file_2", timestamp: new Date().toISOString() } }); + } + if (url.pathname === "/workspaces/remote-alpha/reload-events" && request.method === "GET") { + return Response.json({ ok: true, data: { cursor: 1, items: [{ id: "evt_remote_1", reason: "config", seq: 1, timestamp: Date.now(), workspaceId: "remote-alpha" }] }, meta: { requestId: "owreq_remote_reload_1", timestamp: new Date().toISOString() } }); + } + return new Response("not found", { status: 404 }); + }, + hostname: "127.0.0.1", + port: 0, + }); + + try { + const { app, dependencies } = createTestApp(); + const workspace = dependencies.persistence.registry.importRemoteWorkspace({ + baseUrl: `http://127.0.0.1:${remote.port}`, + displayName: 
"Remote Alpha", + legacyNotes: {}, + remoteType: "openwork", + remoteWorkspaceId: "remote-alpha", + serverAuth: { openworkToken: "remote-token" }, + serverBaseUrl: `http://127.0.0.1:${remote.port}`, + serverHostingKind: "self_hosted", + serverLabel: `127.0.0.1:${remote.port}`, + workspaceStatus: "ready", + }); + + const config = await app.request(`http://openwork.local/workspaces/${workspace.id}/config`); + const configBody = await config.json(); + expect(config.status).toBe(200); + expect(configBody.data.stored.openwork.reload.auto).toBe(true); + + const patched = await app.request(`http://openwork.local/workspaces/${workspace.id}/config`, { + method: "PATCH", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ opencode: { permission: { external_directory: { "/srv/shared/*": "allow" } } } }), + }); + expect(patched.status).toBe(200); + + const contentRead = await app.request(`http://openwork.local/workspaces/${workspace.id}/files/content?path=notes.md`); + const contentBody = await contentRead.json(); + expect(contentRead.status).toBe(200); + expect(contentBody.data.content).toBe("remote hello"); + + const contentWrite = await app.request(`http://openwork.local/workspaces/${workspace.id}/files/content`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ path: "notes.md", content: "remote hello" }), + }); + const contentWriteBody = await contentWrite.json(); + expect(contentWrite.status).toBe(200); + expect(contentWriteBody.data.revision).toBe("42:12"); + + const reloads = await app.request(`http://openwork.local/workspaces/${workspace.id}/reload-events`); + const reloadBody = await reloads.json(); + expect(reloads.status).toBe(200); + expect(reloadBody.data.items[0].workspaceId).toBe("remote-alpha"); + } finally { + remote.stop(true); + } +}); + +test("reconciliation absorbs recognized managed items from local workspace files", async () => { + const { dependencies, root } = createTestApp(); + 
const workspaceRoot = path.join(root, "workspace-gamma"); + fs.mkdirSync(path.join(workspaceRoot, ".opencode", "skills", "manual-skill"), { recursive: true }); + fs.writeFileSync(path.join(workspaceRoot, "opencode.jsonc"), JSON.stringify({ + $schema: "https://opencode.ai/config.json", + mcp: { + demo: { type: "local", command: ["demo"] }, + }, + plugin: ["demo-plugin"], + provider: { + openai: { options: { apiKey: "redacted" } }, + }, + }, null, 2), "utf8"); + fs.writeFileSync(path.join(workspaceRoot, ".opencode", "skills", "manual-skill", "SKILL.md"), "---\nname: manual-skill\ndescription: Manual skill\n---\n\nhello\n", "utf8"); + + const workspace = dependencies.persistence.registry.importLocalWorkspace({ + dataDir: workspaceRoot, + displayName: "Gamma", + status: "ready", + }); + + dependencies.services.config.reconcileAllWorkspaces(); + + const mcps = dependencies.persistence.repositories.workspaceMcps.listForWorkspace(workspace.id); + const plugins = dependencies.persistence.repositories.workspacePlugins.listForWorkspace(workspace.id); + const providers = dependencies.persistence.repositories.workspaceProviderConfigs.listForWorkspace(workspace.id); + const skills = dependencies.persistence.repositories.workspaceSkills.listForWorkspace(workspace.id); + const snapshot = await dependencies.services.config.getWorkspaceConfigSnapshot(workspace.id); + + expect(mcps).toHaveLength(1); + expect(plugins).toHaveLength(1); + expect(providers).toHaveLength(1); + expect(skills).toHaveLength(1); + expect(snapshot.stored.opencode.mcp).toBeUndefined(); + expect((snapshot.effective.opencode.mcp as any).demo.type).toBe("local"); + expect(snapshot.effective.opencode.plugin).toContain("demo-plugin"); + expect((snapshot.effective.opencode.provider as any).openai.options.apiKey).toBe("redacted"); +}); diff --git a/apps/server-v2/src/http.ts b/apps/server-v2/src/http.ts new file mode 100644 index 00000000..5315de51 --- /dev/null +++ b/apps/server-v2/src/http.ts @@ -0,0 +1,80 @@ 
+export type ResponseMeta = { + requestId: string; + timestamp: string; +}; + +export type SuccessResponse = { + ok: true; + data: TData; + meta: ResponseMeta; +}; + +export type ErrorCode = + | "bad_gateway" + | "conflict" + | "forbidden" + | "internal_error" + | "invalid_request" + | "not_found" + | "not_implemented" + | "service_unavailable" + | "unauthorized"; + +export type ErrorDetail = { + message: string; + path?: Array; +}; + +export type ErrorResponse = { + ok: false; + error: { + code: ErrorCode; + message: string; + requestId: string; + details?: Array; + }; +}; + +export class RouteError extends Error { + constructor( + readonly status: number, + readonly code: ErrorCode, + message: string, + readonly details?: Array, + ) { + super(message); + this.name = "RouteError"; + } +} + +export function createResponseMeta(requestId: string, now: Date = new Date()): ResponseMeta { + return { + requestId, + timestamp: now.toISOString(), + }; +} + +export function buildSuccessResponse(requestId: string, data: TData, now: Date = new Date()): SuccessResponse { + return { + ok: true, + data, + meta: createResponseMeta(requestId, now), + }; +} + +export function buildErrorResponse(input: { + requestId: string; + code: ErrorCode; + message: string; + details?: Array; +}): ErrorResponse { + return { + ok: false, + error: { + code: input.code, + message: input.message, + requestId: input.requestId, + details: input.details, + }, + }; +} diff --git a/apps/server-v2/src/index.ts b/apps/server-v2/src/index.ts new file mode 100644 index 00000000..e7f38dfb --- /dev/null +++ b/apps/server-v2/src/index.ts @@ -0,0 +1,4 @@ +export { app, createApp, type AppType, type CreateAppOptions } from "./app.js"; +export { startServer, type StartServerOptions, type StartedServer } from "./bootstrap/server.js"; +export { createAppDependencies, type AppDependencies } from "./context/app-dependencies.js"; +export { routeNamespaces, routePaths, workspaceResourcePattern, workspaceRoutePath } 
from "./routes/route-paths.js"; diff --git a/apps/server-v2/src/managed.test.ts b/apps/server-v2/src/managed.test.ts new file mode 100644 index 00000000..1710a85b --- /dev/null +++ b/apps/server-v2/src/managed.test.ts @@ -0,0 +1,304 @@ +import { afterEach, expect, test } from "bun:test"; +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { createApp } from "./app.js"; +import { createAppDependencies } from "./context/app-dependencies.js"; + +const tempRoots: string[] = []; +const envBackup = { + home: process.env.HOME, + publisherBaseUrl: process.env.OPENWORK_PUBLISHER_BASE_URL, + publisherOrigin: process.env.OPENWORK_PUBLISHER_REQUEST_ORIGIN, +}; +const originalFetch = globalThis.fetch; + +afterEach(() => { + while (tempRoots.length) { + const next = tempRoots.pop(); + if (!next) continue; + fs.rmSync(next, { force: true, recursive: true }); + } + process.env.OPENWORK_PUBLISHER_BASE_URL = envBackup.publisherBaseUrl; + process.env.OPENWORK_PUBLISHER_REQUEST_ORIGIN = envBackup.publisherOrigin; + process.env.HOME = envBackup.home; + globalThis.fetch = originalFetch; +}); + +function createTempRoot(label: string) { + const root = fs.mkdtempSync(path.join(os.tmpdir(), `${label}-`)); + tempRoots.push(root); + return root; +} + +function createTestApp(label: string) { + const root = createTempRoot(label); + const dependencies = createAppDependencies({ + environment: "test", + inMemory: true, + runtime: { + bootstrapPolicy: "disabled", + }, + startedAt: new Date("2026-04-15T00:00:00.000Z"), + version: "0.0.0-test", + workingDirectory: path.join(root, "server-v2"), + }); + return { + app: createApp({ dependencies }), + dependencies, + root, + }; +} + +test("managed resource routes cover MCPs, plugins, skills, shares, export/import, cloud signin, bundles, and router state", async () => { + const { app, dependencies, root } = createTestApp("openwork-server-v2-phase8-managed"); + const workspaceRoot = path.join(root, 
"workspace-managed"); + fs.mkdirSync(path.join(workspaceRoot, ".opencode", "tools"), { recursive: true }); + fs.writeFileSync(path.join(workspaceRoot, ".opencode", "tools", "demo.txt"), "tool-secret", "utf8"); + + const createResponse = await app.request("http://openwork.local/workspaces/local", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ folderPath: workspaceRoot, name: "Managed", preset: "starter" }), + }); + const created = await createResponse.json(); + const workspaceId = created.data.id as string; + + const mcpAdded = await app.request(`http://openwork.local/workspace/${workspaceId}/mcp`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ name: "demo", config: { command: ["demo"], type: "local" } }), + }); + const pluginsAdded = await app.request(`http://openwork.local/workspace/${workspaceId}/plugins`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ spec: "demo-plugin" }), + }); + const skillAdded = await app.request(`http://openwork.local/workspace/${workspaceId}/skills`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ content: "## When To Use\n- Demo\n", description: "Demo skill", name: "demo-skill" }), + }); + const systemManagedMcps = await app.request("http://openwork.local/system/managed/mcps"); + const shareExposed = await app.request(`http://openwork.local/workspaces/${workspaceId}/share`, { method: "POST" }); + const shareBody = await shareExposed.json(); + const exportConflict = await app.request(`http://openwork.local/workspaces/${workspaceId}/export?sensitive=auto`); + const exportSafe = await app.request(`http://openwork.local/workspaces/${workspaceId}/export?sensitive=exclude`); + const exportSafeBody = await exportSafe.json(); + + expect(mcpAdded.status).toBe(200); + expect((await mcpAdded.json()).items[0].name).toBe("demo"); + 
expect(pluginsAdded.status).toBe(200); + expect((await pluginsAdded.json()).items[0].spec).toBe("demo-plugin"); + expect(skillAdded.status).toBe(200); + expect((await skillAdded.json()).name).toBe("demo-skill"); + expect(systemManagedMcps.status).toBe(200); + expect((await systemManagedMcps.json()).data.items[0].workspaceIds).toContain(workspaceId); + expect(shareExposed.status).toBe(200); + expect(shareBody.data.status).toBe("active"); + expect(typeof shareBody.data.accessKey).toBe("string"); + expect(exportConflict.status).toBe(409); + expect((await exportConflict.json()).code).toBe("workspace_export_requires_decision"); + expect(exportSafe.status).toBe(200); + expect(exportSafeBody.data.skills[0].name).toBe("demo-skill"); + + const importRoot = path.join(root, "workspace-imported"); + const createImportWorkspace = await app.request("http://openwork.local/workspaces/local", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ folderPath: importRoot, name: "Imported", preset: "starter" }), + }); + const importedWorkspaceId = (await createImportWorkspace.json()).data.id as string; + const importResult = await app.request(`http://openwork.local/workspaces/${importedWorkspaceId}/import`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(exportSafeBody.data), + }); + const importedSkills = await app.request(`http://openwork.local/workspace/${importedWorkspaceId}/skills`); + expect(importResult.status).toBe(200); + expect((await importedSkills.json()).items[0].name).toBe("demo-skill"); + const importedSkillRecord = dependencies.persistence.repositories.skills.list().find((item) => item.key === "demo-skill" && item.source === "imported"); + expect(importedSkillRecord?.source).toBe("imported"); + expect((importedSkillRecord?.metadata as any)?.importedVia).toBe("portable_bundle"); + + globalThis.fetch = Object.assign( + async (input: URL | RequestInfo, init?: RequestInit | 
BunFetchRequestInit) => { + const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url; + if (url.includes("/skills/hub-skill/SKILL.md")) { + return new Response("---\nname: hub-skill\ndescription: Hub skill\ntrigger: Help with hub flows\n---\n\nUse for hub tasks\n", { + headers: { "Content-Type": "text/plain" }, + status: 200, + }); + } + return originalFetch(input, init); + }, + { preconnect: originalFetch.preconnect }, + ) as typeof fetch; + const hubInstall = await app.request(`http://openwork.local/workspace/${workspaceId}/skills/hub/hub-skill`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ overwrite: true }), + }); + expect(hubInstall.status).toBe(200); + const hubSkillRecord = dependencies.persistence.repositories.skills.list().find((item) => item.key === "hub-skill"); + expect(hubSkillRecord?.source).toBe("imported"); + expect((hubSkillRecord?.metadata as any)?.install?.kind).toBe("hub"); + globalThis.fetch = originalFetch; + + const cloudServer = Bun.serve({ + fetch(request) { + const url = new URL(request.url); + if (url.pathname === "/v1/me") { + return Response.json({ user: { id: "usr_123" } }); + } + return new Response("not found", { status: 404 }); + }, + hostname: "127.0.0.1", + port: 0, + }); + tempRoots.push(path.join(root, `cloud-server-${cloudServer.port}`)); + try { + const cloudPersist = await app.request("http://openwork.local/system/cloud-signin", { + method: "PUT", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ auth: { authToken: "token-demo" }, cloudBaseUrl: `http://127.0.0.1:${cloudServer.port}` }), + }); + const cloudValidated = await app.request("http://openwork.local/system/cloud-signin/validate", { method: "POST" }); + const cloudCleared = await app.request("http://openwork.local/system/cloud-signin", { method: "DELETE" }); + expect(cloudPersist.status).toBe(200); + expect(cloudValidated.status).toBe(200); + 
expect((await cloudValidated.json()).data.ok).toBe(true); + expect(cloudCleared.status).toBe(200); + expect((await cloudCleared.json()).data).toBeNull(); + } finally { + cloudServer.stop(true); + } + + const publisherServer = Bun.serve({ + fetch(request) { + const url = new URL(request.url); + if (url.pathname === "/v1/bundles" && request.method === "POST") { + return Response.json({ url: `${url.origin}/b/demo-bundle` }); + } + if (url.pathname === "/b/demo-bundle/data" && request.method === "GET") { + return Response.json({ schemaVersion: 1, type: "skills-set", name: "Demo", skills: [] }); + } + return new Response("not found", { status: 404 }); + }, + hostname: "127.0.0.1", + port: 0, + }); + process.env.OPENWORK_PUBLISHER_BASE_URL = `http://127.0.0.1:${publisherServer.port}`; + process.env.OPENWORK_PUBLISHER_REQUEST_ORIGIN = "http://127.0.0.1:3000"; + try { + const publish = await app.request("http://openwork.local/share/bundles/publish", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ bundleType: "skills-set", payload: { ok: true } }), + }); + const publishBody = await publish.json(); + const fetchBundle = await app.request("http://openwork.local/share/bundles/fetch", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ bundleUrl: publishBody.data.url }), + }); + expect(publish.status).toBe(200); + expect(fetchBundle.status).toBe(200); + expect((await fetchBundle.json()).data.type).toBe("skills-set"); + } finally { + publisherServer.stop(true); + } + + const routerSendServer = Bun.serve({ + fetch(request) { + const url = new URL(request.url); + if (url.pathname === "/send" && request.method === "POST") { + return Response.json({ attempted: 1, channel: "telegram", directory: workspaceRoot, ok: true, sent: 1 }); + } + return new Response("not found", { status: 404 }); + }, + hostname: "127.0.0.1", + port: 0, + }); + try { + dependencies.services.runtime.getRouterHealth = () 
=> ({ + baseUrl: `http://127.0.0.1:${routerSendServer.port}`, + binaryPath: null, + diagnostics: { combined: [], stderr: [], stdout: [], totalLines: 0, truncated: false }, + enablement: { enabled: true, enabledBindingCount: 0, enabledIdentityCount: 0, forced: false, reason: "test" }, + healthUrl: `http://127.0.0.1:${routerSendServer.port}`, + lastError: null, + lastExit: null, + lastReadyAt: null, + lastStartedAt: null, + manifest: null, + materialization: null, + pid: null, + running: true, + source: "development", + status: "running", + version: "test", + }); + dependencies.services.runtime.applyRouterConfig = async () => dependencies.services.runtime.getRouterHealth(); + const telegramIdentity = await app.request(`http://openwork.local/workspace/${workspaceId}/opencode-router/identities/telegram`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ access: "private", token: "123456:demo" }), + }); + const bindings = await app.request(`http://openwork.local/workspace/${workspaceId}/opencode-router/bindings`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ channel: "telegram", directory: workspaceRoot, peerId: "peer-1" }), + }); + const send = await app.request(`http://openwork.local/workspace/${workspaceId}/opencode-router/send`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ channel: "telegram", directory: workspaceRoot, text: "hello" }), + }); + expect(telegramIdentity.status).toBe(200); + expect((await telegramIdentity.json()).telegram.pairingCode).toBeTruthy(); + expect(bindings.status).toBe(200); + expect(send.status).toBe(200); + expect((await send.json()).sent).toBe(1); + } finally { + routerSendServer.stop(true); + } +}); + +test("scheduler routes list and delete jobs for a local workspace", async () => { + const root = createTempRoot("openwork-server-v2-scheduler"); + process.env.HOME = root; + const { app } = 
createTestApp("openwork-server-v2-phase8-scheduler"); + const workspaceRoot = path.join(root, "workspace-scheduler"); + const jobsDir = path.join(root, ".config", "opencode", "jobs"); + fs.mkdirSync(jobsDir, { recursive: true }); + fs.writeFileSync( + path.join(jobsDir, "nightly-review.json"), + JSON.stringify({ + createdAt: new Date("2026-04-16T00:00:00.000Z").toISOString(), + name: "Nightly Review", + schedule: "0 9 * * *", + slug: "nightly-review", + workdir: workspaceRoot, + }, null, 2), + "utf8", + ); + + const createResponse = await app.request("http://openwork.local/workspaces/local", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ folderPath: workspaceRoot, name: "Scheduler", preset: "starter" }), + }); + const workspaceId = (await createResponse.json()).data.id as string; + + const listResponse = await app.request(`http://openwork.local/workspaces/${workspaceId}/scheduler/jobs`); + expect(listResponse.status).toBe(200); + expect((await listResponse.json()).data.items[0].slug).toBe("nightly-review"); + + const deleteResponse = await app.request(`http://openwork.local/workspaces/${workspaceId}/scheduler/jobs/nightly-review`, { + method: "DELETE", + }); + expect(deleteResponse.status).toBe(200); + expect((await deleteResponse.json()).data.job.slug).toBe("nightly-review"); + expect(fs.existsSync(path.join(jobsDir, "nightly-review.json"))).toBe(false); +}); diff --git a/apps/server-v2/src/middleware/error-handler.ts b/apps/server-v2/src/middleware/error-handler.ts new file mode 100644 index 00000000..332a4eef --- /dev/null +++ b/apps/server-v2/src/middleware/error-handler.ts @@ -0,0 +1,94 @@ +import type { MiddlewareHandler } from "hono"; +import { HTTPException } from "hono/http-exception"; +import { ZodError } from "zod"; +import { buildErrorResponse, RouteError } from "../http.js"; +import type { AppBindings } from "../context/request-context.js"; + +export const errorHandlingMiddleware: MiddlewareHandler = 
async (c, next) => { + try { + await next(); + } catch (error) { + const requestId = c.get("requestId") ?? `owreq_${crypto.randomUUID()}`; + const routeLike = error && typeof error === "object" + ? error as { code?: unknown; details?: unknown; message?: unknown; status?: unknown } + : null; + + if (error instanceof HTTPException) { + const status = error.status; + const code = status === 401 + ? "unauthorized" + : status === 403 + ? "forbidden" + : status === 404 + ? "not_found" + : "invalid_request"; + const body = buildErrorResponse({ + requestId, + code, + message: error.message || (code === "not_found" ? "Route not found." : "Request failed."), + }); + return c.json(body, status); + } + + if (error instanceof RouteError) { + return c.json( + buildErrorResponse({ + requestId, + code: error.code, + message: error.message, + details: error.details, + }), + error.status as any, + ); + } + + if ( + routeLike + && typeof routeLike.status === "number" + && typeof routeLike.code === "string" + && typeof routeLike.message === "string" + ) { + return c.json( + buildErrorResponse({ + requestId, + code: routeLike.code as any, + message: routeLike.message, + details: Array.isArray(routeLike.details) ? routeLike.details as any : undefined, + }), + routeLike.status as any, + ); + } + + if (error instanceof ZodError) { + const body = buildErrorResponse({ + requestId, + code: "invalid_request", + message: "Request validation failed.", + details: error.issues.map((issue) => ({ + message: issue.message, + path: issue.path.filter((segment): segment is string | number => typeof segment === "string" || typeof segment === "number"), + })), + }); + return c.json(body, 400); + } + + const message = error instanceof Error ? 
error.message : "Unexpected server error."; + + console.error( + JSON.stringify({ + message, + requestId, + scope: "openwork-server-v2.error", + }), + ); + + return c.json( + buildErrorResponse({ + requestId, + code: "internal_error", + message: "Unexpected server error.", + }), + 500, + ); + } +}; diff --git a/apps/server-v2/src/middleware/request-id.ts b/apps/server-v2/src/middleware/request-id.ts new file mode 100644 index 00000000..166ea526 --- /dev/null +++ b/apps/server-v2/src/middleware/request-id.ts @@ -0,0 +1,20 @@ +import type { MiddlewareHandler } from "hono"; +import type { AppBindings } from "../context/request-context.js"; + +export const REQUEST_ID_HEADER = "X-Request-Id"; + +function normalizeIncomingRequestId(value: string | undefined) { + const trimmed = value?.trim(); + + if (!trimmed) { + return null; + } + + return trimmed.slice(0, 200); +} + +export const requestIdMiddleware: MiddlewareHandler = async (c, next) => { + const requestId = normalizeIncomingRequestId(c.req.header(REQUEST_ID_HEADER)) ?? 
`owreq_${crypto.randomUUID()}`; + c.set("requestId", requestId); + await next(); +}; diff --git a/apps/server-v2/src/middleware/request-logger.ts b/apps/server-v2/src/middleware/request-logger.ts new file mode 100644 index 00000000..424cfaba --- /dev/null +++ b/apps/server-v2/src/middleware/request-logger.ts @@ -0,0 +1,22 @@ +import type { MiddlewareHandler } from "hono"; +import type { AppBindings } from "../context/request-context.js"; + +export const requestLoggerMiddleware: MiddlewareHandler = async (c, next) => { + const startedAt = performance.now(); + + await next(); + + const durationMs = Number((performance.now() - startedAt).toFixed(1)); + const url = new URL(c.req.url); + + console.info( + JSON.stringify({ + durationMs, + method: c.req.method, + path: url.pathname, + requestId: c.get("requestId"), + scope: "openwork-server-v2.request", + status: c.res.status, + }), + ); +}; diff --git a/apps/server-v2/src/middleware/response-finalizer.ts b/apps/server-v2/src/middleware/response-finalizer.ts new file mode 100644 index 00000000..6e329ead --- /dev/null +++ b/apps/server-v2/src/middleware/response-finalizer.ts @@ -0,0 +1,16 @@ +import type { MiddlewareHandler } from "hono"; +import type { AppBindings } from "../context/request-context.js"; +import { REQUEST_ID_HEADER } from "./request-id.js"; + +export const responseFinalizerMiddleware: MiddlewareHandler = async (c, next) => { + await next(); + + const requestId = c.get("requestId"); + if (requestId) { + c.header(REQUEST_ID_HEADER, requestId); + } + + if (!c.res.headers.has("Cache-Control")) { + c.header("Cache-Control", "no-store"); + } +}; diff --git a/apps/server-v2/src/openapi.ts b/apps/server-v2/src/openapi.ts new file mode 100644 index 00000000..a469ed96 --- /dev/null +++ b/apps/server-v2/src/openapi.ts @@ -0,0 +1,91 @@ +import { resolver } from "hono-openapi"; +import type { z } from "zod"; +import { + forbiddenErrorSchema, + internalErrorSchema, + invalidRequestErrorSchema, + notFoundErrorSchema, + 
unauthorizedErrorSchema, +} from "./schemas/errors.js"; + +function toPascalCase(value: string) { + return value + .replace(/[^a-zA-Z0-9]+/g, " ") + .trim() + .split(/\s+/) + .filter(Boolean) + .map((part) => part.charAt(0).toUpperCase() + part.slice(1)) + .join(""); +} + +export function buildOperationId(method: string, path: string) { + const parts = path + .split("/") + .filter(Boolean) + .map((part) => { + if (part.startsWith(":")) { + return `by-${part.slice(1)}`; + } + + if (part === "*") { + return "wildcard"; + } + + return part; + }); + + if (parts.length === 0) { + return `${method.toLowerCase()}Root`; + } + + return [method.toLowerCase(), ...parts] + .map(toPascalCase) + .join("") + .replace(/^[A-Z]/, (char) => char.toLowerCase()); +} + +export function jsonResponse(description: string, schema: z.ZodTypeAny) { + return { + description, + content: { + "application/json": { + schema: resolver(schema), + }, + }, + }; +} + +export function withCommonErrorResponses>( + responses: TResponses, + options: { + includeForbidden?: boolean; + includeNotFound?: boolean; + includeInvalidRequest?: boolean; + includeUnauthorized?: boolean; + } = {}, +) { + return { + ...responses, + ...(options.includeInvalidRequest + ? { + 400: jsonResponse("Request validation failed.", invalidRequestErrorSchema), + } + : {}), + ...(options.includeUnauthorized + ? { + 401: jsonResponse("Authentication is required for this route.", unauthorizedErrorSchema), + } + : {}), + ...(options.includeForbidden + ? { + 403: jsonResponse("The authenticated actor does not have access to this route.", forbiddenErrorSchema), + } + : {}), + ...(options.includeNotFound + ? 
{ + 404: jsonResponse("The requested route was not found.", notFoundErrorSchema), + } + : {}), + 500: jsonResponse("The server failed to complete the request.", internalErrorSchema), + }; +} diff --git a/apps/server-v2/src/routes/files.ts b/apps/server-v2/src/routes/files.ts new file mode 100644 index 00000000..6c1138d3 --- /dev/null +++ b/apps/server-v2/src/routes/files.ts @@ -0,0 +1,640 @@ +import type { Context, Hono } from "hono"; +import { describeRoute, resolver } from "hono-openapi"; +import { HTTPException } from "hono/http-exception"; +import { getRequestContext, type AppBindings } from "../context/request-context.js"; +import { buildSuccessResponse } from "../http.js"; +import { jsonResponse, withCommonErrorResponses } from "../openapi.js"; +import { + rawOpencodeConfigQuerySchema, + rawOpencodeConfigResponseSchema, + rawOpencodeConfigWriteRequestSchema, + workspaceConfigPatchRequestSchema, + workspaceConfigResponseSchema, +} from "../schemas/config.js"; +import { + binaryListResponseSchema, + binaryUploadResponseSchema, + engineReloadResponseSchema, + fileBatchReadRequestSchema, + fileBatchReadResponseSchema, + fileBatchWriteRequestSchema, + fileCatalogSnapshotResponseSchema, + fileMutationResultSchema, + fileOperationsRequestSchema, + fileSessionCreateRequestSchema, + fileSessionIdParamsSchema, + fileSessionResponseSchema, + reloadEventsResponseSchema, + simpleContentQuerySchema, + simpleContentResponseSchema, + simpleContentWriteRequestSchema, + workspaceActivationResponseSchema, + workspaceCreateLocalRequestSchema, + workspaceDisposeResponseSchema, + workspaceDeleteResponseSchema, +} from "../schemas/files.js"; +import { workspaceDetailResponseSchema } from "../schemas/registry.js"; +import { routePaths } from "./route-paths.js"; + +function parseQuery(schema: { parse(input: unknown): T }, url: string) { + const searchParams = new URL(url).searchParams; + const query: Record = {}; + for (const [key, value] of searchParams.entries()) { + query[key] = 
value; + } + return schema.parse(query); +} + +async function parseJsonBody(schema: { parse(input: unknown): T }, request: Request) { + const contentType = request.headers.get("content-type")?.toLowerCase() ?? ""; + if (!contentType.includes("application/json")) { + return schema.parse({}); + } + return schema.parse(await request.json()); +} + +function readActorKey(c: Context) { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + const authorization = c.req.raw.headers.get("authorization")?.trim() ?? ""; + const hostToken = c.req.raw.headers.get("x-openwork-host-token")?.trim() ?? ""; + return { + actorKey: requestContext.actor.kind === "host" ? hostToken || authorization : authorization, + actorKind: requestContext.actor.kind === "host" ? "host" as const : "client" as const, + requestContext, + }; +} + +function requireWorkspaceAccess(c: Context) { + const { requestContext, actorKey, actorKind } = readActorKey(c); + const workspaceId = c.req.param("workspaceId") ?? ""; + const workspace = requestContext.services.workspaceRegistry.getById(workspaceId, { + includeHidden: requestContext.actor.kind === "host", + }); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + return { actorKey, actorKind, requestContext, workspaceId }; +} + +function createBinaryResponse(inputValue: { buffer?: Uint8Array; filePath?: string; filename: string; size: number }) { + const headers = new Headers(); + headers.set("Content-Type", "application/octet-stream"); + headers.set("Content-Disposition", `attachment; filename="${inputValue.filename}"`); + headers.set("Content-Length", String(inputValue.size)); + return new Response(inputValue.buffer ?? 
(Bun as any).file(inputValue.filePath!), { headers, status: 200 }); +} + +export function registerFileRoutes(app: Hono) { + app.post( + routePaths.workspaces.createLocal, + describeRoute({ + tags: ["Workspaces"], + summary: "Create a local workspace", + description: "Creates a local workspace, initializes starter files, creates the Server V2 config directory, and reconciles the new workspace into managed config state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Local workspace created successfully.", workspaceDetailResponseSchema), + }, { includeForbidden: true, includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireHost(requestContext.actor); + const body = await parseJsonBody(workspaceCreateLocalRequestSchema, c.req.raw); + const workspace = await requestContext.services.files.createLocalWorkspace({ + folderPath: body.folderPath, + name: body.name, + preset: body.preset ?? "starter", + }); + const detail = requestContext.services.workspaceRegistry.getById(workspace.id, { includeHidden: true }); + return c.json(buildSuccessResponse(requestContext.requestId, detail)); + }, + ); + + app.post( + routePaths.workspaces.activate(), + describeRoute({ + tags: ["Workspaces"], + summary: "Activate a workspace", + description: "Marks a workspace as the active local workspace for migration-era host flows that still expect an active workspace concept.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace activated successfully.", workspaceActivationResponseSchema), + }, { includeForbidden: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + const workspaceId = c.req.param("workspaceId") ?? 
""; + const activeWorkspaceId = requestContext.services.files.activateWorkspace(workspaceId); + return c.json(buildSuccessResponse(requestContext.requestId, { activeWorkspaceId })); + }, + ); + + app.patch( + routePaths.workspaces.displayName(), + describeRoute({ + tags: ["Workspaces"], + summary: "Update workspace display name", + description: "Updates the persisted display name for a workspace record during migration.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace detail returned successfully.", workspaceDetailResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext } = readActorKey(c); + const workspaceId = c.req.param("workspaceId") ?? ""; + const body = await c.req.json(); + const displayName = typeof body?.displayName === "string" ? body.displayName : null; + requestContext.services.auth.requireVisibleRead(requestContext.actor); + requestContext.services.files.updateWorkspaceDisplayName(workspaceId, displayName); + const detail = requestContext.services.workspaceRegistry.getById(workspaceId, { includeHidden: requestContext.actor.kind === "host" }); + return c.json(buildSuccessResponse(requestContext.requestId, detail)); + }, + ); + + app.delete( + routePaths.workspaces.byId(), + describeRoute({ + tags: ["Workspaces"], + summary: "Delete workspace", + description: "Deletes a workspace record from the local Server V2 registry during migration-era host flows.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace deleted successfully.", workspaceDeleteResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext } = readActorKey(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + const workspaceId = c.req.param("workspaceId") ?? 
""; + const result = requestContext.services.files.deleteWorkspace(workspaceId); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.dispose(), + describeRoute({ + tags: ["Workspaces"], + summary: "Dispose workspace runtime instance", + description: "Disposes the runtime instance associated with the workspace through Server V2 and refreshes managed runtime supervision where required.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace runtime instance disposed successfully.", workspaceDisposeResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext } = readActorKey(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + const workspaceId = c.req.param("workspaceId") ?? ""; + const result = await requestContext.services.files.disposeWorkspaceInstance(workspaceId); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.get( + routePaths.workspaces.config(), + describeRoute({ + tags: ["Config"], + summary: "Read workspace config", + description: "Returns stored and effective workspace config along with the materialized config paths managed by Server V2.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace config returned successfully.", workspaceConfigResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const snapshot = await requestContext.services.config.getWorkspaceConfigSnapshot(workspaceId); + return c.json(buildSuccessResponse(requestContext.requestId, snapshot)); + }, + ); + + app.patch( + routePaths.workspaces.config(), + describeRoute({ + tags: ["Config"], + summary: "Patch workspace config", + description: "Updates stored workspace OpenWork/OpenCode config, absorbs recognized managed sections into the database, and rematerializes the effective config 
files.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace config updated successfully.", workspaceConfigResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const body = await parseJsonBody(workspaceConfigPatchRequestSchema, c.req.raw); + const snapshot = await requestContext.services.config.patchWorkspaceConfig(workspaceId, body); + if (body.opencode) { + requestContext.services.files.emitReloadEvent(workspaceId, "config", { + action: "updated", + name: "opencode.jsonc", + path: snapshot.materialized.configOpencodePath ?? undefined, + type: "config", + }); + } + if (body.openwork) { + requestContext.services.files.emitReloadEvent(workspaceId, "config", { + action: "updated", + name: "openwork.json", + path: snapshot.materialized.configOpenworkPath ?? undefined, + type: "config", + }); + } + await requestContext.services.files.recordWorkspaceAudit( + workspaceId, + "config.patch", + snapshot.materialized.configOpencodePath ?? snapshot.materialized.configOpenworkPath ?? 
workspaceId, + "Patched workspace config through Server V2.", + ); + return c.json(buildSuccessResponse(requestContext.requestId, snapshot)); + }, + ); + + app.get( + routePaths.workspaces.rawOpencodeConfig(), + describeRoute({ + tags: ["Config"], + summary: "Read raw OpenCode config text", + description: "Returns the editable raw OpenCode config text for project or global scope, generated from the server-owned config state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Raw OpenCode config returned successfully.", rawOpencodeConfigResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const query = parseQuery(rawOpencodeConfigQuerySchema, c.req.url); + const result = await requestContext.services.config.readRawOpencodeConfig(workspaceId, query.scope ?? "project"); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.rawOpencodeConfig(), + describeRoute({ + tags: ["Config"], + summary: "Write raw OpenCode config text", + description: "Parses raw OpenCode config text, absorbs recognized managed sections into the database, and rematerializes the effective config files.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Raw OpenCode config written successfully.", rawOpencodeConfigResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const body = await parseJsonBody(rawOpencodeConfigWriteRequestSchema, c.req.raw); + const result = body.scope === "global" + ? 
requestContext.services.config.writeGlobalOpencodeConfig(body.content) + : await requestContext.services.config.writeWorkspaceRawOpencodeConfig(workspaceId, body.content); + if (body.scope !== "global") { + requestContext.services.files.emitReloadEvent(workspaceId, "config", { + action: "updated", + name: "opencode.jsonc", + path: result.path ?? undefined, + type: "config", + }); + await requestContext.services.files.recordWorkspaceAudit( + workspaceId, + "config.raw.write", + result.path ?? workspaceId, + "Updated raw OpenCode config through Server V2.", + ); + } + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.get( + routePaths.workspaces.reloadEvents(), + describeRoute({ + tags: ["Reload"], + summary: "List reload events", + description: "Returns workspace-scoped reload events emitted by Server V2 after config/file mutations, watched changes, or reconciliation work.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Reload events returned successfully.", reloadEventsResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const since = Number(new URL(c.req.url).searchParams.get("since") ?? "0"); + const result = await requestContext.services.files.getReloadEvents(workspaceId, Number.isFinite(since) ? 
since : 0); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.engineReload(), + describeRoute({ + tags: ["Reload"], + summary: "Reload the local engine", + description: "Restarts the local OpenCode runtime through the Server V2 runtime supervisor for the selected local workspace.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace engine reloaded successfully.", engineReloadResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const result = await requestContext.services.files.reloadWorkspaceEngine(workspaceId); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.fileSessions.base(), + describeRoute({ + tags: ["Files"], + summary: "Create a workspace file session", + description: "Creates a server-owned file session for a local workspace and returns the session metadata used for file catalog and mutation routes.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace file session created successfully.", fileSessionResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { actorKey, actorKind, requestContext, workspaceId } = requireWorkspaceAccess(c); + const body = await parseJsonBody(fileSessionCreateRequestSchema, c.req.raw); + const session = await requestContext.services.files.createWorkspaceFileSession(workspaceId, { actorKey, actorKind, ...body }); + return c.json(buildSuccessResponse(requestContext.requestId, session)); + }, + ); + + app.post( + routePaths.workspaces.fileSessions.renew(), + describeRoute({ + tags: ["Files"], + summary: "Renew a workspace file session", + description: "Extends the lifetime of an existing workspace file session.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace file session renewed 
successfully.", fileSessionResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { actorKey, actorKind, requestContext, workspaceId } = requireWorkspaceAccess(c); + const params = fileSessionIdParamsSchema.parse(c.req.param()); + const body = await parseJsonBody(fileSessionCreateRequestSchema, c.req.raw); + const session = await requestContext.services.files.renewWorkspaceFileSession(workspaceId, params.fileSessionId, actorKey, actorKind, body.ttlSeconds); + return c.json(buildSuccessResponse(requestContext.requestId, session)); + }, + ); + + app.delete( + routePaths.workspaces.fileSessions.byId(), + describeRoute({ + tags: ["Files"], + summary: "Close a workspace file session", + description: "Closes a workspace file session and releases its temporary server-side catalog state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace file session closed successfully.", workspaceActivationResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { actorKey, actorKind, requestContext, workspaceId } = requireWorkspaceAccess(c); + const params = fileSessionIdParamsSchema.parse(c.req.param()); + await requestContext.services.files.closeWorkspaceFileSession(workspaceId, params.fileSessionId, actorKey, actorKind); + return c.json(buildSuccessResponse(requestContext.requestId, { activeWorkspaceId: workspaceId })); + }, + ); + + app.get( + routePaths.workspaces.fileSessions.catalogSnapshot(), + describeRoute({ + tags: ["Files"], + summary: "Get a file catalog snapshot", + description: "Returns the file catalog snapshot for a workspace file session.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace file catalog returned successfully.", fileCatalogSnapshotResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { actorKey, actorKind, requestContext, workspaceId } = requireWorkspaceAccess(c); + const params = 
fileSessionIdParamsSchema.parse(c.req.param()); + const query = new URL(c.req.url).searchParams; + const result = await requestContext.services.files.listFileSessionCatalogSnapshot(workspaceId, params.fileSessionId, actorKey, actorKind, { + after: query.get("after"), + includeDirs: query.get("includeDirs") !== "false", + limit: query.get("limit"), + prefix: query.get("prefix"), + }); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.get( + routePaths.workspaces.fileSessions.catalogEvents(), + describeRoute({ + tags: ["Files"], + summary: "List file session catalog events", + description: "Returns file mutation events recorded for a workspace file session since the requested cursor.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace file session events returned successfully.", fileMutationResultSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const { actorKey, actorKind, requestContext, workspaceId } = requireWorkspaceAccess(c); + const params = fileSessionIdParamsSchema.parse(c.req.param()); + const result = requestContext.services.files.listFileSessionEvents(workspaceId, params.fileSessionId, actorKey, actorKind, new URL(c.req.url).searchParams.get("since")); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.fileSessions.readBatch(), + describeRoute({ + tags: ["Files"], + summary: "Read a batch of files", + description: "Reads a batch of files through the server-owned file session model.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace files read successfully.", fileBatchReadResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { actorKey, actorKind, requestContext, workspaceId } = requireWorkspaceAccess(c); + const params = fileSessionIdParamsSchema.parse(c.req.param()); + const body = await parseJsonBody(fileBatchReadRequestSchema, 
c.req.raw); + const result = await requestContext.services.files.readWorkspaceFiles(workspaceId, params.fileSessionId, actorKey, actorKind, body.paths); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.fileSessions.writeBatch(), + describeRoute({ + tags: ["Files"], + summary: "Write a batch of files", + description: "Writes a batch of files with revision-aware conflict handling through the server-owned file session model.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace files written successfully.", fileMutationResultSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { actorKey, actorKind, requestContext, workspaceId } = requireWorkspaceAccess(c); + const params = fileSessionIdParamsSchema.parse(c.req.param()); + const body = await parseJsonBody(fileBatchWriteRequestSchema, c.req.raw); + const result = await requestContext.services.files.writeWorkspaceFiles(workspaceId, params.fileSessionId, actorKey, actorKind, body.writes); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.fileSessions.operations(), + describeRoute({ + tags: ["Files"], + summary: "Run file operations", + description: "Runs mkdir, rename, and delete operations through the server-owned file session model.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace file operations applied successfully.", fileMutationResultSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { actorKey, actorKind, requestContext, workspaceId } = requireWorkspaceAccess(c); + const params = fileSessionIdParamsSchema.parse(c.req.param()); + const body = await parseJsonBody(fileOperationsRequestSchema, c.req.raw); + const result = await requestContext.services.files.workspaceFileOperations(workspaceId, params.fileSessionId, actorKey, 
actorKind, body.operations); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.get( + routePaths.workspaces.simpleContent(), + describeRoute({ + tags: ["Files"], + summary: "Read simple content", + description: "Reads markdown-oriented content for lighter file flows without using the full file session model.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace content returned successfully.", simpleContentResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const query = parseQuery(simpleContentQuerySchema, c.req.url); + const result = await requestContext.services.files.readSimpleContent(workspaceId, query.path); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.simpleContent(), + describeRoute({ + tags: ["Files"], + summary: "Write simple content", + description: "Writes markdown-oriented content with basic conflict handling for lighter file flows.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace content written successfully.", simpleContentResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const body = await parseJsonBody(simpleContentWriteRequestSchema, c.req.raw); + const result = await requestContext.services.files.writeSimpleContent(workspaceId, body); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.get( + routePaths.workspaces.inbox.base(), + describeRoute({ + tags: ["Files"], + summary: "List inbox items", + description: "Returns uploadable inbox items for the selected workspace.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace inbox returned successfully.", binaryListResponseSchema), + }, { 
includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const result = await requestContext.services.files.listInbox(workspaceId); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.get( + routePaths.workspaces.inbox.byId(), + describeRoute({ + tags: ["Files"], + summary: "Download inbox item", + description: "Downloads one inbox file for the selected workspace.", + responses: withCommonErrorResponses({ + 200: { + description: "Inbox item downloaded successfully.", + content: { + "application/octet-stream": { + schema: resolver(simpleContentResponseSchema), + }, + }, + }, + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const result = await requestContext.services.files.downloadInboxItem(workspaceId, c.req.param("inboxId") ?? ""); + return createBinaryResponse({ + buffer: (result as { buffer?: Uint8Array }).buffer, + filePath: (result as { absolutePath?: string }).absolutePath, + filename: result.filename, + size: result.size, + }); + }, + ); + + app.post( + routePaths.workspaces.inbox.base(), + describeRoute({ + tags: ["Files"], + summary: "Upload inbox item", + description: "Uploads one file into the managed inbox area for the selected workspace.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace inbox item uploaded successfully.", binaryUploadResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const form = await c.req.raw.formData(); + const file = form.get("file"); + if (!(file instanceof File)) { + throw new HTTPException(400, { message: "Form field 'file' is required." }); + } + const requestedPath = (new URL(c.req.url).searchParams.get("path") ?? String(form.get("path") ?? 
"")).trim(); + const result = await requestContext.services.files.uploadInboxItem(workspaceId, requestedPath, file); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.get( + routePaths.workspaces.artifacts.base(), + describeRoute({ + tags: ["Files"], + summary: "List artifacts", + description: "Returns downloadable artifact items for the selected workspace.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace artifacts returned successfully.", binaryListResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const result = await requestContext.services.files.listArtifacts(workspaceId); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.get( + routePaths.workspaces.artifacts.byId(), + describeRoute({ + tags: ["Files"], + summary: "Download artifact", + description: "Downloads one artifact file for the selected workspace.", + responses: withCommonErrorResponses({ + 200: { + description: "Artifact downloaded successfully.", + content: { + "application/octet-stream": { + schema: resolver(simpleContentResponseSchema), + }, + }, + }, + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspaceAccess(c); + const result = await requestContext.services.files.downloadArtifact(workspaceId, c.req.param("artifactId") ?? 
""); + return createBinaryResponse({ + buffer: (result as { buffer?: Uint8Array }).buffer, + filePath: (result as { absolutePath?: string }).absolutePath, + filename: result.filename, + size: result.size, + }); + }, + ); +} diff --git a/apps/server-v2/src/routes/index.ts b/apps/server-v2/src/routes/index.ts new file mode 100644 index 00000000..9bfc6bca --- /dev/null +++ b/apps/server-v2/src/routes/index.ts @@ -0,0 +1,18 @@ +import type { Hono } from "hono"; +import type { AppDependencies } from "../context/app-dependencies.js"; +import type { AppBindings } from "../context/request-context.js"; +import { registerFileRoutes } from "./files.js"; +import { registerManagedRoutes } from "./managed.js"; +import { registerRuntimeRoutes } from "./runtime.js"; +import { registerSessionRoutes } from "./sessions.js"; +import { registerSystemRoutes } from "./system.js"; +import { registerWorkspaceRoutes } from "./workspaces.js"; + +export function registerRoutes(app: Hono, dependencies: AppDependencies) { + registerSystemRoutes(app, dependencies); + registerRuntimeRoutes(app); + registerWorkspaceRoutes(app); + registerFileRoutes(app); + registerManagedRoutes(app); + registerSessionRoutes(app); +} diff --git a/apps/server-v2/src/routes/managed.ts b/apps/server-v2/src/routes/managed.ts new file mode 100644 index 00000000..d9d9f7ab --- /dev/null +++ b/apps/server-v2/src/routes/managed.ts @@ -0,0 +1,806 @@ +import type { Context, Hono } from "hono"; +import { describeRoute } from "hono-openapi"; +import { getRequestContext, type AppBindings } from "../context/request-context.js"; +import { buildSuccessResponse, RouteError } from "../http.js"; +import { jsonResponse, withCommonErrorResponses } from "../openapi.js"; +import { + cloudSigninResponseSchema, + cloudSigninValidationResponseSchema, + cloudSigninWriteSchema, + hubSkillInstallResponseSchema, + hubSkillInstallWriteSchema, + hubSkillListResponseSchema, + managedAssignmentWriteSchema, + managedDeleteResponseSchema, + 
managedItemListResponseSchema, + managedItemResponseSchema, + managedItemWriteSchema, + routerBindingListResponseSchema, + routerBindingWriteSchema, + routerHealthResponseSchemaCompat, + routerIdentityListResponseSchema, + routerMutationResponseSchema, + routerSendWriteSchema, + routerSlackWriteSchema, + routerTelegramInfoResponseSchema, + routerTelegramWriteSchema, + scheduledJobDeleteResponseSchema, + scheduledJobListResponseSchema, + sharedBundleFetchResponseSchema, + sharedBundleFetchWriteSchema, + sharedBundlePublishResponseSchema, + sharedBundlePublishWriteSchema, + workspaceExportResponseSchema, + workspaceImportResponseSchema, + workspaceImportWriteSchema, + workspaceMcpListResponseSchema, + workspaceMcpWriteSchema, + workspacePluginListResponseSchema, + workspacePluginWriteSchema, + workspaceShareResponseSchema, + workspaceSkillDeleteResponseSchema, + workspaceSkillListResponseSchema, + workspaceSkillResponseSchema, + workspaceSkillWriteSchema, +} from "../schemas/managed.js"; +import { routePaths } from "./route-paths.js"; + +function parseJsonBody<T>(schema: { parse(input: unknown): T }, request: Request) { + return request.json().then((body) => schema.parse(body)); +} + +function requireVisible(c: Context<AppBindings>) { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + return requestContext; +} + +function requireWorkspace(c: Context<AppBindings>) { + const requestContext = requireVisible(c); + const workspaceId = c.req.param("workspaceId") ?? 
""; + return { requestContext, workspaceId }; +} + +function addCompatibilityRoute( + app: Hono, + method: "DELETE" | "GET" | "PATCH" | "POST" | "PUT", + path: string, + handler: (c: Context) => Promise | Response, +) { + if (method === "GET") app.get(path, handler); + if (method === "POST") app.post(path, handler); + if (method === "PUT") app.put(path, handler); + if (method === "PATCH") app.patch(path, handler); + if (method === "DELETE") app.delete(path, handler); +} + +export function registerManagedRoutes(app: Hono) { + for (const kind of ["mcps", "plugins", "providerConfigs", "skills"] as const) { + app.get( + routePaths.system.managed.list(kind), + describeRoute({ + tags: ["Managed"], + summary: `List managed ${kind}`, + description: `Returns the server-owned ${kind} records and explicit workspace assignments.`, + responses: withCommonErrorResponses({ + 200: jsonResponse(`Managed ${kind} returned successfully.`, managedItemListResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, { items: requestContext.services.managed.listManaged(kind) })); + }, + ); + + app.post( + routePaths.system.managed.list(kind), + describeRoute({ + tags: ["Managed"], + summary: `Create managed ${kind.slice(0, -1)}`, + description: `Creates a server-owned ${kind.slice(0, -1)} record and optionally assigns it to workspaces.`, + responses: withCommonErrorResponses({ + 200: jsonResponse(`Managed ${kind.slice(0, -1)} created successfully.`, managedItemResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const body = await parseJsonBody(managedItemWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.createManaged(kind, body))); + }, + ); + + app.put( + routePaths.system.managed.item(kind), + 
describeRoute({ + tags: ["Managed"], + summary: `Update managed ${kind.slice(0, -1)}`, + description: `Updates a server-owned ${kind.slice(0, -1)} record.`, + responses: withCommonErrorResponses({ + 200: jsonResponse(`Managed ${kind.slice(0, -1)} updated successfully.`, managedItemResponseSchema), + }, { includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const itemId = c.req.param("itemId") ?? ""; + const body = await parseJsonBody(managedItemWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.updateManaged(kind, itemId, body))); + }, + ); + + app.put( + routePaths.system.managed.assignments(kind), + describeRoute({ + tags: ["Managed"], + summary: `Assign managed ${kind.slice(0, -1)} to workspaces`, + description: `Replaces the workspace assignments for a server-owned managed item.`, + responses: withCommonErrorResponses({ + 200: jsonResponse(`Managed ${kind.slice(0, -1)} assignments updated successfully.`, managedItemResponseSchema), + }, { includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const itemId = c.req.param("itemId") ?? 
""; + const body = await parseJsonBody(managedAssignmentWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.updateAssignments(kind, itemId, body.workspaceIds))); + }, + ); + + app.delete( + routePaths.system.managed.item(kind), + describeRoute({ + tags: ["Managed"], + summary: `Delete managed ${kind.slice(0, -1)}`, + description: `Deletes a server-owned managed item and removes its workspace assignments.`, + responses: withCommonErrorResponses({ + 200: jsonResponse(`Managed ${kind.slice(0, -1)} deleted successfully.`, managedDeleteResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const itemId = c.req.param("itemId") ?? ""; + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.deleteManaged(kind, itemId))); + }, + ); + } + + app.get( + routePaths.system.cloudSignin, + describeRoute({ + tags: ["Cloud"], + summary: "Read cloud signin state", + description: "Returns the server-owned cloud signin record when one is configured.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Cloud signin returned successfully.", cloudSigninResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.getCloudSignin())); + }, + ); + + app.put( + routePaths.system.cloudSignin, + describeRoute({ + tags: ["Cloud"], + summary: "Persist cloud signin state", + description: "Stores cloud signin metadata in the server-owned database.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Cloud signin persisted successfully.", cloudSigninResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const body = await 
parseJsonBody(cloudSigninWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.upsertCloudSignin(body))); + }, + ); + + app.post( + "/system/cloud-signin/validate", + describeRoute({ + tags: ["Cloud"], + summary: "Validate cloud signin state", + description: "Validates the stored cloud signin token against the configured cloud base URL.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Cloud signin validated successfully.", cloudSigninValidationResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.validateCloudSignin())); + }, + ); + + app.delete( + routePaths.system.cloudSignin, + describeRoute({ + tags: ["Cloud"], + summary: "Clear cloud signin state", + description: "Removes the server-owned cloud signin record for the current OpenWork server.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Cloud signin cleared successfully.", cloudSigninResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.clearCloudSignin())); + }, + ); + + app.get( + routePaths.system.router.health, + describeRoute({ + tags: ["Router"], + summary: "Read router product health", + description: "Returns the product-facing router health snapshot built from server-owned router state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Router product health returned successfully.", routerHealthResponseSchemaCompat), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.router.getHealth())); + }, + ); + + app.post( + 
routePaths.system.router.apply, + describeRoute({ + tags: ["Router"], + summary: "Apply router state", + description: "Rematerializes the effective router config and reconciles the supervised router process.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Router state applied successfully.", routerMutationResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.router.apply())); + }, + ); + + app.get( + routePaths.system.router.identities("telegram"), + describeRoute({ + tags: ["Router"], + summary: "List Telegram identities", + description: "Returns the server-owned Telegram router identities.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Telegram identities returned successfully.", routerIdentityListResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.router.listTelegramIdentities())); + }, + ); + + app.post( + routePaths.system.router.identities("telegram"), + describeRoute({ + tags: ["Router"], + summary: "Upsert Telegram identity", + description: "Creates or updates a server-owned Telegram router identity.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Telegram identity upserted successfully.", routerMutationResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const body = await parseJsonBody(routerTelegramWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.router.upsertTelegramIdentity(body))); + }, + ); + + app.get( + routePaths.system.router.identities("slack"), + describeRoute({ + tags: ["Router"], + summary: "List Slack identities", + 
description: "Returns the server-owned Slack router identities.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Slack identities returned successfully.", routerIdentityListResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.router.listSlackIdentities())); + }, + ); + + app.post( + routePaths.system.router.identities("slack"), + describeRoute({ + tags: ["Router"], + summary: "Upsert Slack identity", + description: "Creates or updates a server-owned Slack router identity.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Slack identity upserted successfully.", routerMutationResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const body = await parseJsonBody(routerSlackWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.router.upsertSlackIdentity(body))); + }, + ); + + app.get( + routePaths.system.router.telegram, + describeRoute({ + tags: ["Router"], + summary: "Read Telegram router info", + description: "Returns the current Telegram identity readiness summary for the router product surface.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Telegram router info returned successfully.", routerTelegramInfoResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.router.getTelegramInfo())); + }, + ); + + app.get( + routePaths.system.router.bindings, + describeRoute({ + tags: ["Router"], + summary: "List router bindings", + description: "Returns the effective server-owned router bindings.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Router bindings 
returned successfully.", routerBindingListResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = requireVisible(c); + const url = new URL(c.req.url); + const channel = url.searchParams.get("channel")?.trim() || undefined; + const identityId = url.searchParams.get("identityId")?.trim() || undefined; + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.router.listBindings({ channel, identityId }))); + }, + ); + + app.post( + routePaths.system.router.bindings, + describeRoute({ + tags: ["Router"], + summary: "Set router binding", + description: "Creates or updates a server-owned router binding.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Router binding updated successfully.", routerMutationResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const body = await parseJsonBody(routerBindingWriteSchema, c.req.raw); + if (!body.directory?.trim()) { + throw new RouteError(400, "invalid_request", "System router binding writes require a directory."); + } + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.router.setBinding({ channel: body.channel, directory: body.directory, identityId: body.identityId, peerId: body.peerId }))); + }, + ); + + app.post( + routePaths.system.router.send, + describeRoute({ + tags: ["Router"], + summary: "Send router message", + description: "Sends an outbound router message through the supervised router runtime.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Router message delivered successfully.", routerMutationResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const body = await parseJsonBody(routerSendWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await 
requestContext.services.router.sendMessage(body))); + }, + ); + + app.get( + routePaths.workspaces.share(), + describeRoute({ + tags: ["Shares"], + summary: "Read workspace share", + description: "Returns the current workspace-scoped share record for a local workspace.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace share returned successfully.", workspaceShareResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.getWorkspaceShare(workspaceId))); + }, + ); + + app.post( + routePaths.workspaces.share(), + describeRoute({ + tags: ["Shares"], + summary: "Expose workspace share", + description: "Creates or rotates a workspace-scoped share access key for a local workspace.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace share exposed successfully.", workspaceShareResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.exposeWorkspaceShare(workspaceId))); + }, + ); + + app.delete( + routePaths.workspaces.share(), + describeRoute({ + tags: ["Shares"], + summary: "Revoke workspace share", + description: "Revokes the current workspace-scoped share access key for a local workspace.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace share revoked successfully.", workspaceShareResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.revokeWorkspaceShare(workspaceId))); + }, + ); + + app.get( + routePaths.workspaces.export(), + describeRoute({ + tags: ["Bundles"], + summary: 
"Export workspace", + description: "Builds a portable workspace export from the server-owned config and managed-resource state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace exported successfully.", workspaceExportResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const sensitiveMode = (new URL(c.req.url).searchParams.get("sensitive")?.trim() as "auto" | "exclude" | "include" | null) ?? "auto"; + const result = await requestContext.services.managed.exportWorkspace(workspaceId, { sensitiveMode: sensitiveMode === "exclude" || sensitiveMode === "include" || sensitiveMode === "auto" ? sensitiveMode : "auto" }); + if ("conflict" in result) { + return c.json({ code: "workspace_export_requires_decision", details: { warnings: result.warnings }, message: "This workspace includes sensitive config. Choose whether to exclude it or include it before exporting." }, 409); + } + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); + + app.post( + routePaths.workspaces.import(), + describeRoute({ + tags: ["Bundles"], + summary: "Import workspace", + description: "Applies a portable workspace import through the server-owned config and managed-resource model.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace imported successfully.", workspaceImportResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(workspaceImportWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.importWorkspace(workspaceId, body))); + }, + ); + + app.post( + "/share/bundles/publish", + describeRoute({ + tags: ["Bundles"], + summary: "Publish shared bundle", + description: "Publishes a trusted shared bundle through the 
configured OpenWork bundle publisher.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Shared bundle published successfully.", sharedBundlePublishResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const body = await parseJsonBody(sharedBundlePublishWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.publishSharedBundle(body))); + }, + ); + + app.post( + "/share/bundles/fetch", + describeRoute({ + tags: ["Bundles"], + summary: "Fetch shared bundle", + description: "Fetches a trusted shared bundle through the configured OpenWork bundle publisher.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Shared bundle fetched successfully.", sharedBundleFetchResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const body = await parseJsonBody(sharedBundleFetchWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.fetchSharedBundle(body.bundleUrl, { timeoutMs: body.timeoutMs }))); + }, + ); + + app.get( + routePaths.workspaces.mcp(), + describeRoute({ + tags: ["Managed"], + summary: "List workspace MCPs", + description: "Returns the effective workspace MCP records backed by server-owned managed state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace MCPs returned successfully.", workspaceMcpListResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(buildSuccessResponse(requestContext.requestId, { items: requestContext.services.managed.listWorkspaceMcp(workspaceId) })); + }, + ); + + app.post( + routePaths.workspaces.mcp(), + describeRoute({ + tags: ["Managed"], + summary: "Add workspace MCP", + 
description: "Creates or updates a workspace-scoped MCP through server-owned managed state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace MCP updated successfully.", workspaceMcpListResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(workspaceMcpWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.addWorkspaceMcp(workspaceId, body))); + }, + ); + + app.get( + routePaths.workspaces.plugins(), + describeRoute({ + tags: ["Managed"], + summary: "List workspace plugins", + description: "Returns the effective workspace plugins backed by server-owned managed state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace plugins returned successfully.", workspacePluginListResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.managed.listWorkspacePlugins(workspaceId))); + }, + ); + + app.post( + routePaths.workspaces.plugins(), + describeRoute({ + tags: ["Managed"], + summary: "Add workspace plugin", + description: "Creates or updates a workspace-scoped plugin through server-owned managed state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace plugins updated successfully.", workspacePluginListResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(workspacePluginWriteSchema, c.req.raw); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.addWorkspacePlugin(workspaceId, body.spec))); + }, + ); + + app.get( + 
routePaths.workspaces.scheduler.base(), + describeRoute({ + tags: ["Managed"], + summary: "List scheduled jobs", + description: "Returns the scheduled jobs for a local workspace via the desktop scheduler store.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Scheduled jobs returned successfully.", scheduledJobListResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.scheduler.listWorkspaceJobs(workspaceId))); + }, + ); + + app.delete( + routePaths.workspaces.scheduler.byName(), + describeRoute({ + tags: ["Managed"], + summary: "Delete scheduled job", + description: "Deletes a scheduled job for a local workspace via the desktop scheduler store.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Scheduled job deleted successfully.", scheduledJobDeleteResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.scheduler.deleteWorkspaceJob(workspaceId, c.req.param("name") ?? 
""))); + }, + ); + + app.get( + routePaths.workspaces.skills(), + describeRoute({ + tags: ["Managed"], + summary: "List workspace skills", + description: "Returns the effective workspace skills backed by server-owned managed state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace skills returned successfully.", workspaceSkillListResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(buildSuccessResponse(requestContext.requestId, { items: requestContext.services.managed.listWorkspaceSkills(workspaceId) })); + }, + ); + + app.post( + routePaths.workspaces.skills(), + describeRoute({ + tags: ["Managed"], + summary: "Upsert workspace skill", + description: "Creates or updates a workspace-scoped skill through server-owned managed state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace skill updated successfully.", workspaceSkillResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(workspaceSkillWriteSchema, c.req.raw); + const item = await requestContext.services.managed.upsertWorkspaceSkill(workspaceId, body); + return c.json(buildSuccessResponse(requestContext.requestId, { content: requestContext.services.managed.getWorkspaceSkill(workspaceId, item.name).content, item })); + }, + ); + + app.get( + routePaths.workspaces.hubSkills, + describeRoute({ + tags: ["Managed"], + summary: "List hub skills", + description: "Returns the available Skill Hub catalog backed by trusted GitHub sources.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Hub skills returned successfully.", hubSkillListResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const requestContext = requireVisible(c); + const url = new URL(c.req.url); + return 
c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.listHubSkills({ owner: url.searchParams.get("owner") ?? undefined, ref: url.searchParams.get("ref") ?? undefined, repo: url.searchParams.get("repo") ?? undefined }))); + }, + ); + + app.post( + `${routePaths.workspaces.skills()}/hub/:name`, + describeRoute({ + tags: ["Managed"], + summary: "Install hub skill", + description: "Installs a trusted Skill Hub skill into server-owned managed state for a workspace.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Hub skill installed successfully.", hubSkillInstallResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(hubSkillInstallWriteSchema, c.req.raw).catch(() => ({} as any)); + return c.json(buildSuccessResponse(requestContext.requestId, await requestContext.services.managed.installHubSkill(workspaceId, { name: c.req.param("name") ?? "", overwrite: body.overwrite, repo: body.repo }))); + }, + ); + + addCompatibilityRoute(app, "GET", "/workspace/:workspaceId/mcp", (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json({ items: requestContext.services.managed.listWorkspaceMcp(workspaceId) }); + }); + addCompatibilityRoute(app, "POST", "/workspace/:workspaceId/mcp", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(workspaceMcpWriteSchema, c.req.raw); + return c.json(await requestContext.services.managed.addWorkspaceMcp(workspaceId, body)); + }); + addCompatibilityRoute(app, "DELETE", "/workspace/:workspaceId/mcp/:name", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(await requestContext.services.managed.removeWorkspaceMcp(workspaceId, c.req.param("name") ?? 
"")); + }); + addCompatibilityRoute(app, "DELETE", "/workspace/:workspaceId/mcp/:name/auth", (c) => c.json({ ok: true })); + + addCompatibilityRoute(app, "GET", "/workspace/:workspaceId/plugins", (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(requestContext.services.managed.listWorkspacePlugins(workspaceId)); + }); + addCompatibilityRoute(app, "POST", "/workspace/:workspaceId/plugins", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(workspacePluginWriteSchema, c.req.raw); + return c.json(await requestContext.services.managed.addWorkspacePlugin(workspaceId, body.spec)); + }); + addCompatibilityRoute(app, "DELETE", "/workspace/:workspaceId/plugins/:name", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(await requestContext.services.managed.removeWorkspacePlugin(workspaceId, c.req.param("name") ?? "")); + }); + + addCompatibilityRoute(app, "GET", "/workspace/:workspaceId/skills", (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json({ items: requestContext.services.managed.listWorkspaceSkills(workspaceId) }); + }); + addCompatibilityRoute(app, "GET", "/workspace/:workspaceId/scheduler/jobs", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(await requestContext.services.scheduler.listWorkspaceJobs(workspaceId)); + }); + addCompatibilityRoute(app, "DELETE", "/workspace/:workspaceId/scheduler/jobs/:name", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(await requestContext.services.scheduler.deleteWorkspaceJob(workspaceId, c.req.param("name") ?? 
"")); + }); + addCompatibilityRoute(app, "GET", "/workspace/:workspaceId/skills/:name", (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(requestContext.services.managed.getWorkspaceSkill(workspaceId, c.req.param("name") ?? "")); + }); + addCompatibilityRoute(app, "POST", "/workspace/:workspaceId/skills", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(workspaceSkillWriteSchema, c.req.raw); + return c.json(await requestContext.services.managed.upsertWorkspaceSkill(workspaceId, body)); + }); + addCompatibilityRoute(app, "DELETE", "/workspace/:workspaceId/skills/:name", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(await requestContext.services.managed.deleteWorkspaceSkill(workspaceId, c.req.param("name") ?? "")); + }); + addCompatibilityRoute(app, "GET", "/hub/skills", async (c) => { + const requestContext = requireVisible(c); + const url = new URL(c.req.url); + return c.json(await requestContext.services.managed.listHubSkills({ owner: url.searchParams.get("owner") ?? undefined, ref: url.searchParams.get("ref") ?? undefined, repo: url.searchParams.get("repo") ?? undefined })); + }); + addCompatibilityRoute(app, "POST", "/workspace/:workspaceId/skills/hub/:name", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const body = await parseJsonBody(hubSkillInstallWriteSchema, c.req.raw).catch(() => ({} as any)); + return c.json({ ok: true, ...(await requestContext.services.managed.installHubSkill(workspaceId, { name: c.req.param("name") ?? 
"", overwrite: body.overwrite, repo: body.repo })) }); + }); + + const workspaceRouterPaths = [ + "/workspace/:workspaceId/opencode-router", + routePaths.workspaces.router.base(), + ]; + for (const basePath of workspaceRouterPaths) { + addCompatibilityRoute(app, "GET", `${basePath}/health`, (c) => { + requireWorkspace(c); + return c.json(getRequestContext(c).services.router.getHealth()); + }); + addCompatibilityRoute(app, "POST", `${basePath}/telegram-token`, async (c) => c.json(await getRequestContext(c).services.router.setTelegramToken((await parseJsonBody(routerTelegramWriteSchema, c.req.raw)).token))); + addCompatibilityRoute(app, "GET", `${basePath}/telegram`, async (c) => c.json(await getRequestContext(c).services.router.getTelegramInfo())); + addCompatibilityRoute(app, "POST", `${basePath}/telegram-enabled`, async (c) => { + const body = await c.req.json(); + return c.json(await getRequestContext(c).services.router.setTelegramEnabled(body.enabled === true, { clearToken: body.clearToken === true })); + }); + addCompatibilityRoute(app, "GET", `${basePath}/identities/telegram`, (c) => c.json(getRequestContext(c).services.router.listTelegramIdentities())); + addCompatibilityRoute(app, "POST", `${basePath}/identities/telegram`, async (c) => c.json(await getRequestContext(c).services.router.upsertTelegramIdentity(await parseJsonBody(routerTelegramWriteSchema, c.req.raw)))); + addCompatibilityRoute(app, "DELETE", `${basePath}/identities/telegram/:identityId`, async (c) => c.json(await getRequestContext(c).services.router.deleteTelegramIdentity(c.req.param("identityId") ?? 
""))); + addCompatibilityRoute(app, "GET", `${basePath}/identities/slack`, (c) => c.json(getRequestContext(c).services.router.listSlackIdentities())); + addCompatibilityRoute(app, "POST", `${basePath}/identities/slack`, async (c) => c.json(await getRequestContext(c).services.router.upsertSlackIdentity(await parseJsonBody(routerSlackWriteSchema, c.req.raw)))); + addCompatibilityRoute(app, "DELETE", `${basePath}/identities/slack/:identityId`, async (c) => c.json(await getRequestContext(c).services.router.deleteSlackIdentity(c.req.param("identityId") ?? ""))); + addCompatibilityRoute(app, "POST", `${basePath}/slack-tokens`, async (c) => { + const body = await parseJsonBody(routerSlackWriteSchema, c.req.raw); + return c.json(await getRequestContext(c).services.router.setSlackTokens(body.botToken, body.appToken)); + }); + addCompatibilityRoute(app, "GET", `${basePath}/bindings`, (c) => { + const requestContext = getRequestContext(c); + const url = new URL(c.req.url); + return c.json(requestContext.services.router.listBindings({ channel: url.searchParams.get("channel") ?? undefined, identityId: url.searchParams.get("identityId") ?? undefined })); + }); + addCompatibilityRoute(app, "POST", `${basePath}/bindings`, async (c) => { + const requestContext = getRequestContext(c); + const body = await parseJsonBody(routerBindingWriteSchema, c.req.raw); + const workspaceId = c.req.param("workspaceId") ?? 
""; + const workspace = requestContext.services.workspaceRegistry.getById(workspaceId, { includeHidden: true }); + const directory = body.directory?.trim() || workspace?.backend.local?.dataDir || ""; + return c.json(await requestContext.services.router.setBinding({ channel: body.channel, directory, identityId: body.identityId, peerId: body.peerId })); + }); + addCompatibilityRoute(app, "POST", `${basePath}/send`, async (c) => c.json(await getRequestContext(c).services.router.sendMessage(await parseJsonBody(routerSendWriteSchema, c.req.raw)))); + } + + addCompatibilityRoute(app, "GET", "/workspace/:workspaceId/export", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + const sensitiveMode = (new URL(c.req.url).searchParams.get("sensitive")?.trim() as "auto" | "exclude" | "include" | null) ?? "auto"; + const result = await requestContext.services.managed.exportWorkspace(workspaceId, { sensitiveMode: sensitiveMode === "exclude" || sensitiveMode === "include" || sensitiveMode === "auto" ? sensitiveMode : "auto" }); + if ("conflict" in result) { + return c.json({ code: "workspace_export_requires_decision", details: { warnings: result.warnings }, message: "This workspace includes sensitive config. Choose whether to exclude it or include it before exporting." 
}, 409); + } + return c.json(result); + }); + addCompatibilityRoute(app, "POST", "/workspace/:workspaceId/import", async (c) => { + const { requestContext, workspaceId } = requireWorkspace(c); + return c.json(await requestContext.services.managed.importWorkspace(workspaceId, await c.req.json())); + }); +} diff --git a/apps/server-v2/src/routes/route-paths.ts b/apps/server-v2/src/routes/route-paths.ts new file mode 100644 index 00000000..9ad267c4 --- /dev/null +++ b/apps/server-v2/src/routes/route-paths.ts @@ -0,0 +1,187 @@ +const WORKSPACE_ID_PARAMETER = ":workspaceId"; + +export const routeNamespaces = { + root: "/", + openapi: "/openapi.json", + system: "/system", + workspaces: "/workspaces", +} as const; + +export function workspaceRoutePath(workspaceId: string = WORKSPACE_ID_PARAMETER) { + return `${routeNamespaces.workspaces}/${workspaceId}`; +} + +function workspaceSessionsBasePath(workspaceId: string = WORKSPACE_ID_PARAMETER) { + return `${workspaceRoutePath(workspaceId)}/sessions`; +} + +function workspaceSessionPath(sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) { + return `${workspaceSessionsBasePath(workspaceId)}/${sessionId}`; +} + +function workspaceSessionMessagesPath(sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) { + return `${workspaceSessionPath(sessionId, workspaceId)}/messages`; +} + +function workspaceFileSessionsBasePath(workspaceId: string = WORKSPACE_ID_PARAMETER) { + return `${workspaceRoutePath(workspaceId)}/file-sessions`; +} + +function workspaceFileSessionPath(fileSessionId: string = ":fileSessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) { + return `${workspaceFileSessionsBasePath(workspaceId)}/${fileSessionId}`; +} + +function workspaceSessionMessagePath( + messageId: string = ":messageId", + sessionId: string = ":sessionId", + workspaceId: string = WORKSPACE_ID_PARAMETER, +) { + return `${workspaceSessionMessagesPath(sessionId, workspaceId)}/${messageId}`; +} + 
+export const workspaceResourcePattern = workspaceRoutePath(); + +export const routePaths = { + root: routeNamespaces.root, + openapiDocument: routeNamespaces.openapi, + system: { + base: routeNamespaces.system, + capabilities: `${routeNamespaces.system}/capabilities`, + cloudSignin: `${routeNamespaces.system}/cloud-signin`, + health: `${routeNamespaces.system}/health`, + managed: { + item: (kind: string, itemId: string = ":itemId") => `${routeNamespaces.system}/managed/${kind}/${itemId}`, + list: (kind: string) => `${routeNamespaces.system}/managed/${kind}`, + assignments: (kind: string, itemId: string = ":itemId") => `${routeNamespaces.system}/managed/${kind}/${itemId}/assignments`, + }, + meta: `${routeNamespaces.system}/meta`, + opencodeHealth: `${routeNamespaces.system}/opencode/health`, + router: { + apply: `${routeNamespaces.system}/router/apply`, + bindings: `${routeNamespaces.system}/router/bindings`, + health: `${routeNamespaces.system}/router/product-health`, + identities: (kind: string) => `${routeNamespaces.system}/router/identities/${kind}`, + telegram: `${routeNamespaces.system}/router/telegram`, + send: `${routeNamespaces.system}/router/send`, + }, + routerHealth: `${routeNamespaces.system}/router/health`, + servers: `${routeNamespaces.system}/servers`, + serverById: (serverId: string = ":serverId") => `${routeNamespaces.system}/servers/${serverId}`, + serverConnect: `${routeNamespaces.system}/servers/connect`, + serverSync: (serverId: string = ":serverId") => `${routeNamespaces.system}/servers/${serverId}/sync`, + status: `${routeNamespaces.system}/status`, + runtime: { + upgrade: `${routeNamespaces.system}/runtime/upgrade`, + summary: `${routeNamespaces.system}/runtime/summary`, + versions: `${routeNamespaces.system}/runtime/versions`, + }, + }, + workspaces: { + base: routeNamespaces.workspaces, + createLocal: `${routeNamespaces.workspaces}/local`, + byId: workspaceRoutePath, + dispose: (workspaceId: string = WORKSPACE_ID_PARAMETER) => 
`${workspaceRoutePath(workspaceId)}/dispose`, + events: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/events`, + activate: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/activate`, + displayName: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/display-name`, + artifacts: { + base: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/artifacts`, + byId: (artifactId: string = ":artifactId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceRoutePath(workspaceId)}/artifacts/${artifactId}`, + }, + config: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/config`, + engineReload: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/engine/reload`, + export: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/export`, + fileSessions: { + base: workspaceFileSessionsBasePath, + byId: workspaceFileSessionPath, + renew: (fileSessionId: string = ":fileSessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceFileSessionPath(fileSessionId, workspaceId)}/renew`, + catalogSnapshot: (fileSessionId: string = ":fileSessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceFileSessionPath(fileSessionId, workspaceId)}/catalog/snapshot`, + catalogEvents: (fileSessionId: string = ":fileSessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceFileSessionPath(fileSessionId, workspaceId)}/catalog/events`, + readBatch: (fileSessionId: string = ":fileSessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceFileSessionPath(fileSessionId, workspaceId)}/read-batch`, + writeBatch: (fileSessionId: string = ":fileSessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceFileSessionPath(fileSessionId, workspaceId)}/write-batch`, + operations: 
(fileSessionId: string = ":fileSessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceFileSessionPath(fileSessionId, workspaceId)}/operations`, + }, + inbox: { + base: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/inbox`, + byId: (inboxId: string = ":inboxId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceRoutePath(workspaceId)}/inbox/${inboxId}`, + }, + import: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/import`, + mcp: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/mcp`, + plugins: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/plugins`, + rawOpencodeConfig: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/config/opencode-raw`, + reloadEvents: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/reload-events`, + router: { + base: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router`, + bindings: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router/bindings`, + health: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router/health`, + identities: { + slack: (workspaceId: string = WORKSPACE_ID_PARAMETER, identityId: string = ":identityId") => `${workspaceRoutePath(workspaceId)}/opencode-router/identities/slack/${identityId}`, + slackBase: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router/identities/slack`, + telegram: (workspaceId: string = WORKSPACE_ID_PARAMETER, identityId: string = ":identityId") => `${workspaceRoutePath(workspaceId)}/opencode-router/identities/telegram/${identityId}`, + telegramBase: (workspaceId: string = WORKSPACE_ID_PARAMETER) => 
`${workspaceRoutePath(workspaceId)}/opencode-router/identities/telegram`, + }, + send: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router/send`, + slackTokens: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router/slack-tokens`, + telegram: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router/telegram`, + telegramEnabled: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router/telegram-enabled`, + telegramToken: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/opencode-router/telegram-token`, + }, + share: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/share`, + sessions: { + base: workspaceSessionsBasePath, + byId: workspaceSessionPath, + statuses: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceSessionsBasePath(workspaceId)}/status`, + messages: { + base: workspaceSessionMessagesPath, + byId: workspaceSessionMessagePath, + partById: ( + partId: string = ":partId", + messageId: string = ":messageId", + sessionId: string = ":sessionId", + workspaceId: string = WORKSPACE_ID_PARAMETER, + ) => `${workspaceSessionMessagePath(messageId, sessionId, workspaceId)}/parts/${partId}`, + }, + promptAsync: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/prompt_async`, + command: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/command`, + shell: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/shell`, + todo: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, 
workspaceId)}/todo`, + status: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/status`, + snapshot: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/snapshot`, + init: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/init`, + fork: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/fork`, + abort: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/abort`, + share: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/share`, + summarize: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/summarize`, + revert: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/revert`, + unrevert: (sessionId: string = ":sessionId", workspaceId: string = WORKSPACE_ID_PARAMETER) => + `${workspaceSessionPath(sessionId, workspaceId)}/unrevert`, + }, + scheduler: { + base: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/scheduler/jobs`, + byName: (name: string = ":name", workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/scheduler/jobs/${name}`, + }, + skills: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/skills`, + hubSkills: "/hub/skills", + simpleContent: (workspaceId: string = WORKSPACE_ID_PARAMETER) => `${workspaceRoutePath(workspaceId)}/files/content`, + }, +} as const; diff --git 
a/apps/server-v2/src/routes/runtime.ts b/apps/server-v2/src/routes/runtime.ts new file mode 100644 index 00000000..ddaa55d0 --- /dev/null +++ b/apps/server-v2/src/routes/runtime.ts @@ -0,0 +1,101 @@ +import type { Hono } from "hono"; +import { describeRoute } from "hono-openapi"; +import { getRequestContext, type AppBindings } from "../context/request-context.js"; +import { buildSuccessResponse } from "../http.js"; +import { jsonResponse, withCommonErrorResponses } from "../openapi.js"; +import { + opencodeHealthResponseSchema, + routerHealthResponseSchema, + runtimeSummaryResponseSchema, + runtimeUpgradeResponseSchema, + runtimeVersionsResponseSchema, +} from "../schemas/runtime.js"; +import { routePaths } from "./route-paths.js"; + +export function registerRuntimeRoutes(app: Hono) { + app.get( + routePaths.system.opencodeHealth, + describeRoute({ + tags: ["Runtime"], + summary: "Get OpenCode health", + description: "Returns the server-owned OpenCode runtime health, version, URL, and recent diagnostics.", + responses: withCommonErrorResponses({ + 200: jsonResponse("OpenCode runtime health returned successfully.", opencodeHealthResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.runtime.getOpencodeHealth())); + }, + ); + + app.get( + routePaths.system.routerHealth, + describeRoute({ + tags: ["Runtime"], + summary: "Get router health", + description: "Returns the server-owned opencode-router health, enablement decision, and recent diagnostics.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Router runtime health returned successfully.", routerHealthResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + 
requestContext.services.auth.requireVisibleRead(requestContext.actor); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.runtime.getRouterHealth())); + }, + ); + + app.get( + routePaths.system.runtime.summary, + describeRoute({ + tags: ["Runtime"], + summary: "Get runtime summary", + description: "Returns the current runtime supervision summary, manifest, restart policy, and child process state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Runtime summary returned successfully.", runtimeSummaryResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.runtime.getRuntimeSummary())); + }, + ); + + app.get( + routePaths.system.runtime.versions, + describeRoute({ + tags: ["Runtime"], + summary: "Get runtime versions", + description: "Returns the active and pinned runtime versions that Server V2 resolved for OpenCode and opencode-router.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Runtime versions returned successfully.", runtimeVersionsResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.runtime.getRuntimeVersions())); + }, + ); + + app.post( + routePaths.system.runtime.upgrade, + describeRoute({ + tags: ["Runtime"], + summary: "Upgrade runtime assets", + description: "Re-resolves the pinned runtime bundle through Server V2, restarts managed children, and returns the resulting runtime summary plus upgrade state.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Runtime upgraded successfully.", runtimeUpgradeResponseSchema), + }, { includeForbidden: 
true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireHost(requestContext.actor); + const result = await requestContext.services.runtime.upgradeRuntime(); + return c.json(buildSuccessResponse(requestContext.requestId, result)); + }, + ); +} diff --git a/apps/server-v2/src/routes/sessions.ts b/apps/server-v2/src/routes/sessions.ts new file mode 100644 index 00000000..688d7313 --- /dev/null +++ b/apps/server-v2/src/routes/sessions.ts @@ -0,0 +1,531 @@ +import type { Context, Hono } from "hono"; +import { describeRoute, resolver } from "hono-openapi"; +import { HTTPException } from "hono/http-exception"; +import { TextEncoder } from "node:util"; +import { getRequestContext, type AppBindings } from "../context/request-context.js"; +import { buildSuccessResponse } from "../http.js"; +import { jsonResponse, withCommonErrorResponses } from "../openapi.js"; +import { + acceptedActionResponseSchema, + commandRequestSchema, + deletedActionResponseSchema, + messageIdParamsSchema, + messageListResponseSchema, + messagePartParamsSchema, + messagePartUpdateRequestSchema, + messageResponseSchema, + messageSendRequestSchema, + promptAsyncRequestSchema, + revertRequestSchema, + sessionCreateRequestSchema, + sessionForkRequestSchema, + sessionIdParamsSchema, + sessionListQuerySchema, + sessionListResponseSchema, + sessionMessagesQuerySchema, + sessionResponseSchema, + sessionSnapshotResponseSchema, + sessionStatusResponseSchema, + sessionStatusesResponseSchema, + sessionSummarizeRequestSchema, + sessionTodoListResponseSchema, + sessionUpdateRequestSchema, + shellRequestSchema, + workspaceEventSchema, +} from "../schemas/sessions.js"; +import { routePaths } from "./route-paths.js"; + +function parseQuery(schema: { parse(input: unknown): T }, url: string) { + const searchParams = new URL(url).searchParams; + const query: Record = {}; + for (const [key, value] of searchParams.entries()) { + 
query[key] = value; + } + return schema.parse(query); +} + +async function parseBody(schema: { parse(input: unknown): T }, request: Request) { + const contentType = request.headers.get("content-type")?.toLowerCase() ?? ""; + if (!contentType.includes("application/json")) { + return schema.parse({}); + } + return schema.parse(await request.json()); +} + +function requireReadableWorkspace(c: Context) { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + const workspaceId = c.req.param("workspaceId") ?? ""; + const workspace = requestContext.services.workspaceRegistry.getById( + workspaceId, + { includeHidden: requestContext.actor.kind === "host" }, + ); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + return { requestContext, workspaceId }; +} + +function createSseResponse(stream: AsyncIterable, signal?: AbortSignal) { + const encoder = new TextEncoder(); + let eventId = 0; + const iterator = stream[Symbol.asyncIterator](); + return new Response(new ReadableStream({ + async start(controller) { + try { + while (true) { + const next = await iterator.next(); + if (next.done) { + controller.close(); + return; + } + eventId += 1; + controller.enqueue(encoder.encode(`id: ${eventId}\ndata: ${JSON.stringify(next.value)}\n\n`)); + } + } catch (error) { + controller.error(error); + } + }, + async cancel() { + if (typeof iterator.return === "function") { + await iterator.return(); + } + }, + }), { + headers: { + "Cache-Control": "no-cache, no-transform", + Connection: "keep-alive", + "Content-Type": "text/event-stream", + }, + }); +} + +export function registerSessionRoutes(app: Hono) { + app.get( + routePaths.workspaces.sessions.base(), + describeRoute({ + tags: ["Sessions"], + summary: "List workspace sessions", + description: "Returns the normalized session inventory for the resolved local OpenCode or remote OpenWork workspace backend.", + 
responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace sessions returned successfully.", sessionListResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const query = parseQuery(sessionListQuerySchema, c.req.url); + const items = await requestContext.services.sessions.listSessions(workspaceId, query); + return c.json(buildSuccessResponse(requestContext.requestId, { items })); + }, + ); + + app.get( + routePaths.workspaces.sessions.statuses(), + describeRoute({ + tags: ["Sessions"], + summary: "List workspace session statuses", + description: "Returns the latest normalized session status map for the resolved workspace backend.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session statuses returned successfully.", sessionStatusesResponseSchema), + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const items = await requestContext.services.sessions.listSessionStatuses(workspaceId); + return c.json(buildSuccessResponse(requestContext.requestId, { items })); + }, + ); + + app.post( + routePaths.workspaces.sessions.base(), + describeRoute({ + tags: ["Sessions"], + summary: "Create a workspace session", + description: "Creates a new session inside the resolved workspace backend.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session created successfully.", sessionResponseSchema), + }, { includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const body = await parseBody(sessionCreateRequestSchema, c.req.raw); + const session = await requestContext.services.sessions.createSession(workspaceId, body as Record); + return c.json(buildSuccessResponse(requestContext.requestId, session)); + }, + ); + + app.get( + 
routePaths.workspaces.sessions.byId(), + describeRoute({ + tags: ["Sessions"], + summary: "Get workspace session detail", + description: "Returns one normalized session by workspace and session identifier.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session returned successfully.", sessionResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + const session = await requestContext.services.sessions.getSession(workspaceId, params.sessionId); + return c.json(buildSuccessResponse(requestContext.requestId, session)); + }, + ); + + app.patch( + routePaths.workspaces.sessions.byId(), + describeRoute({ + tags: ["Sessions"], + summary: "Update a workspace session", + description: "Updates a normalized session inside the resolved workspace backend.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session updated successfully.", sessionResponseSchema), + }, { includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + const body = await parseBody(sessionUpdateRequestSchema, c.req.raw); + const session = await requestContext.services.sessions.updateSession(workspaceId, params.sessionId, body as Record); + return c.json(buildSuccessResponse(requestContext.requestId, session)); + }, + ); + + app.delete( + routePaths.workspaces.sessions.byId(), + describeRoute({ + tags: ["Sessions"], + summary: "Delete a workspace session", + description: "Deletes a session inside the resolved workspace backend.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session deleted successfully.", deletedActionResponseSchema), + }, { includeNotFound: true, includeUnauthorized: 
true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + await requestContext.services.sessions.deleteSession(workspaceId, params.sessionId); + return c.json(buildSuccessResponse(requestContext.requestId, { deleted: true })); + }, + ); + + app.get( + routePaths.workspaces.sessions.status(), + describeRoute({ + tags: ["Sessions"], + summary: "Get one session status", + description: "Returns the normalized status for a single session.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session status returned successfully.", sessionStatusResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + const status = await requestContext.services.sessions.getSessionStatus(workspaceId, params.sessionId); + return c.json(buildSuccessResponse(requestContext.requestId, status)); + }, + ); + + app.get( + routePaths.workspaces.sessions.todo(), + describeRoute({ + tags: ["Sessions"], + summary: "List one session todos", + description: "Returns the normalized todo list for a single session.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session todos returned successfully.", sessionTodoListResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + const items = await requestContext.services.sessions.listTodos(workspaceId, params.sessionId); + return c.json(buildSuccessResponse(requestContext.requestId, { items })); + }, + ); + + app.get( + routePaths.workspaces.sessions.snapshot(), + describeRoute({ + tags: ["Sessions"], + summary: "Get one session snapshot", + description: "Returns 
session detail, messages, todos, and status in one normalized payload for detail surfaces.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session snapshot returned successfully.", sessionSnapshotResponseSchema), + }, { includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + const query = parseQuery(sessionMessagesQuerySchema, c.req.url); + const snapshot = await requestContext.services.sessions.getSessionSnapshot(workspaceId, params.sessionId, query); + return c.json(buildSuccessResponse(requestContext.requestId, snapshot)); + }, + ); + + app.get( + routePaths.workspaces.sessions.messages.base(), + describeRoute({ + tags: ["Messages"], + summary: "List session messages", + description: "Returns the normalized message list for a single session.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session messages returned successfully.", messageListResponseSchema), + }, { includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + const query = parseQuery(sessionMessagesQuerySchema, c.req.url); + const items = await requestContext.services.sessions.listMessages(workspaceId, params.sessionId, query); + return c.json(buildSuccessResponse(requestContext.requestId, { items })); + }, + ); + + app.get( + routePaths.workspaces.sessions.messages.byId(), + describeRoute({ + tags: ["Messages"], + summary: "Get one session message", + description: "Returns one normalized message by workspace, session, and message identifier.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session message returned successfully.", messageResponseSchema), + }, { 
includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = messageIdParamsSchema.parse(c.req.param()); + const message = await requestContext.services.sessions.getMessage(workspaceId, params.sessionId, params.messageId); + return c.json(buildSuccessResponse(requestContext.requestId, message)); + }, + ); + + app.post( + routePaths.workspaces.sessions.messages.base(), + describeRoute({ + tags: ["Messages"], + summary: "Send a session message", + description: "Sends a normalized message payload to the resolved workspace backend.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session message accepted successfully.", acceptedActionResponseSchema), + }, { includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + const body = await parseBody(messageSendRequestSchema, c.req.raw); + await requestContext.services.sessions.sendMessage(workspaceId, params.sessionId, body as Record); + return c.json(buildSuccessResponse(requestContext.requestId, { accepted: true })); + }, + ); + + app.delete( + routePaths.workspaces.sessions.messages.byId(), + describeRoute({ + tags: ["Messages"], + summary: "Delete a session message", + description: "Deletes one message inside the resolved session backend.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session message deleted successfully.", deletedActionResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = messageIdParamsSchema.parse(c.req.param()); + await requestContext.services.sessions.deleteMessage(workspaceId, params.sessionId, params.messageId); + return 
c.json(buildSuccessResponse(requestContext.requestId, { deleted: true })); + }, + ); + + app.patch( + routePaths.workspaces.sessions.messages.partById(), + describeRoute({ + tags: ["Messages"], + summary: "Update a session message part", + description: "Updates one message part inside the resolved session backend where the upstream backend supports it.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session message part updated successfully.", acceptedActionResponseSchema), + }, { includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c as never); + const params = messagePartParamsSchema.parse(c.req.param()); + const body = await parseBody(messagePartUpdateRequestSchema, c.req.raw); + await requestContext.services.sessions.updateMessagePart( + workspaceId, + params.sessionId, + params.messageId, + params.partId, + body as Record, + ); + return c.json(buildSuccessResponse(requestContext.requestId, { accepted: true })); + }, + ); + + app.delete( + routePaths.workspaces.sessions.messages.partById(), + describeRoute({ + tags: ["Messages"], + summary: "Delete a session message part", + description: "Deletes one message part inside the resolved session backend where the upstream backend supports it.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace session message part deleted successfully.", deletedActionResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c as never); + const params = messagePartParamsSchema.parse(c.req.param()); + await requestContext.services.sessions.deleteMessagePart(workspaceId, params.sessionId, params.messageId, params.partId); + return c.json(buildSuccessResponse(requestContext.requestId, { deleted: true })); + }, + ); + + const actionRoute = ( + path: string, + summary: 
string, + description: string, + handler: (input: { + body: Record; + requestContext: ReturnType; + sessionId: string; + workspaceId: string; + }) => Promise, + bodySchema?: { parse(input: unknown): Record }, + responseSchema: any = acceptedActionResponseSchema, + ) => { + app.post( + path, + describeRoute({ + tags: ["Sessions"], + summary, + description, + responses: withCommonErrorResponses({ + 200: jsonResponse(`${summary} completed successfully.`, responseSchema), + }, { includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + const body = bodySchema ? await parseBody(bodySchema, c.req.raw) : {}; + const result = await handler({ body, requestContext, sessionId: params.sessionId, workspaceId }); + return c.json(buildSuccessResponse(requestContext.requestId, result ?? { accepted: true })); + }, + ); + }; + + actionRoute( + routePaths.workspaces.sessions.init(), + "Initialize a session", + "Runs the upstream session init primitive through the workspace-first API.", + ({ body, requestContext, sessionId, workspaceId }) => requestContext.services.sessions.initSession(workspaceId, sessionId, body), + ); + actionRoute( + routePaths.workspaces.sessions.fork(), + "Fork a session", + "Forks a session inside the resolved workspace backend.", + ({ body, requestContext, sessionId, workspaceId }) => requestContext.services.sessions.forkSession(workspaceId, sessionId, body), + sessionForkRequestSchema as never, + sessionResponseSchema, + ); + actionRoute( + routePaths.workspaces.sessions.abort(), + "Abort a session", + "Aborts an in-flight session run through the workspace-first API.", + ({ requestContext, sessionId, workspaceId }) => requestContext.services.sessions.abortSession(workspaceId, sessionId), + ); + actionRoute( + routePaths.workspaces.sessions.share(), + "Share a session", + "Calls the 
upstream share primitive when the resolved backend supports it.", + ({ requestContext, sessionId, workspaceId }) => requestContext.services.sessions.shareSession(workspaceId, sessionId), + ); + app.delete( + routePaths.workspaces.sessions.share(), + describeRoute({ + tags: ["Sessions"], + summary: "Unshare a session", + description: "Calls the upstream unshare primitive when the resolved backend supports it.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Session unshared successfully.", acceptedActionResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const params = sessionIdParamsSchema.parse(c.req.param()); + await requestContext.services.sessions.unshareSession(workspaceId, params.sessionId); + return c.json(buildSuccessResponse(requestContext.requestId, { accepted: true })); + }, + ); + actionRoute( + routePaths.workspaces.sessions.summarize(), + "Summarize a session", + "Runs the upstream summarize or compact primitive for the selected session.", + ({ body, requestContext, sessionId, workspaceId }) => requestContext.services.sessions.summarizeSession(workspaceId, sessionId, body), + sessionSummarizeRequestSchema as never, + ); + actionRoute( + routePaths.workspaces.sessions.promptAsync(), + "Send an async prompt", + "Sends a prompt_async request to the resolved session backend for composer flows.", + ({ body, requestContext, sessionId, workspaceId }) => requestContext.services.sessions.promptAsync(workspaceId, sessionId, body), + promptAsyncRequestSchema as never, + ); + actionRoute( + routePaths.workspaces.sessions.command(), + "Run a session command", + "Runs a slash-command style session command through the workspace-first API.", + ({ body, requestContext, sessionId, workspaceId }) => requestContext.services.sessions.command(workspaceId, sessionId, body), + commandRequestSchema as never, + ); + actionRoute( + 
routePaths.workspaces.sessions.shell(), + "Run a session shell command", + "Runs a shell command inside the resolved session backend.", + ({ body, requestContext, sessionId, workspaceId }) => requestContext.services.sessions.shell(workspaceId, sessionId, body), + shellRequestSchema as never, + ); + actionRoute( + routePaths.workspaces.sessions.revert(), + "Revert session history", + "Reverts a session to the requested message boundary.", + ({ body, requestContext, sessionId, workspaceId }) => + requestContext.services.sessions.revert(workspaceId, sessionId, body as { messageID: string }), + revertRequestSchema as never, + sessionResponseSchema, + ); + actionRoute( + routePaths.workspaces.sessions.unrevert(), + "Restore reverted session history", + "Restores previously reverted session history.", + ({ requestContext, sessionId, workspaceId }) => requestContext.services.sessions.unrevert(workspaceId, sessionId), + undefined, + sessionResponseSchema, + ); + + app.get( + routePaths.workspaces.events(), + describeRoute({ + tags: ["Sessions"], + summary: "Stream workspace events", + description: "Streams normalized session and message events for one workspace over Server-Sent Events.", + responses: withCommonErrorResponses({ + 200: { + description: "Workspace events streamed successfully.", + content: { + "text/event-stream": { + schema: resolver(workspaceEventSchema), + }, + }, + }, + }, { includeUnauthorized: true }), + }), + async (c) => { + const { requestContext, workspaceId } = requireReadableWorkspace(c); + const abort = new AbortController(); + c.req.raw.signal.addEventListener("abort", () => abort.abort(), { once: true }); + const stream = await requestContext.services.sessions.streamWorkspaceEvents(workspaceId, abort.signal); + return createSseResponse(stream, abort.signal); + }, + ); +} diff --git a/apps/server-v2/src/routes/system.ts b/apps/server-v2/src/routes/system.ts new file mode 100644 index 00000000..d70a8ab4 --- /dev/null +++ 
b/apps/server-v2/src/routes/system.ts @@ -0,0 +1,319 @@ +import type { Hono } from "hono"; +import { describeRoute, openAPIRouteHandler } from "hono-openapi"; +import { HTTPException } from "hono/http-exception"; +import type { AppDependencies } from "../context/app-dependencies.js"; +import { getRequestContext, type AppBindings } from "../context/request-context.js"; +import { buildErrorResponse, buildSuccessResponse, RouteError } from "../http.js"; +import { buildOperationId, jsonResponse, withCommonErrorResponses } from "../openapi.js"; +import { + capabilitiesResponseSchema, + remoteServerConnectRequestSchema, + remoteServerConnectResponseSchema, + remoteServerSyncRequestSchema, + serverInventoryListResponseSchema, + systemStatusResponseSchema, +} from "../schemas/registry.js"; +import { healthResponseSchema, metadataResponseSchema, openApiDocumentSchema, rootInfoResponseSchema } from "../schemas/system.js"; +import { routePaths } from "./route-paths.js"; + +type ServerV2App = Hono; + +function toWorkspaceSummary(workspace: ReturnType) { + const { notes: _notes, ...summary } = workspace; + return summary; +} + +async function parseJsonBody(schema: { parse(input: unknown): T }, request: Request) { + const contentType = request.headers.get("content-type")?.toLowerCase() ?? ""; + if (!contentType.includes("application/json")) { + return schema.parse({}); + } + return schema.parse(await request.json()); +} + +function buildRouteErrorJson(requestId: string, error: unknown) { + if (error instanceof HTTPException) { + const status = error.status; + const code = status === 401 + ? "unauthorized" + : status === 403 + ? "forbidden" + : status === 404 + ? "not_found" + : "invalid_request"; + return { + body: buildErrorResponse({ + code, + message: error.message || (code === "not_found" ? "Route not found." 
: "Request failed."), + requestId, + }), + status, + }; + } + if (error instanceof RouteError) { + return { + body: buildErrorResponse({ + code: error.code, + details: error.details, + message: error.message, + requestId, + }), + status: error.status, + }; + } + const routeLike = error && typeof error === "object" + ? error as { code?: unknown; details?: unknown; message?: unknown; status?: unknown } + : null; + if (routeLike && typeof routeLike.status === "number" && typeof routeLike.code === "string" && typeof routeLike.message === "string") { + return { + body: buildErrorResponse({ + code: routeLike.code as any, + details: Array.isArray(routeLike.details) ? routeLike.details as any : undefined, + message: routeLike.message, + requestId, + }), + status: routeLike.status, + }; + } + return null; +} + +function createOpenApiDocumentation(version: string) { + return { + openapi: "3.1.0", + info: { + title: "OpenWork Server V2", + version, + description: [ + "OpenAPI contract for the standalone OpenWork Server V2 runtime and durable registry state.", + "", + "Phase 10 makes Server V2 the default runtime, keeps release/runtime assets in a managed extracted directory, and closes the remaining cutover tooling around the standalone contract.", + ].join("\n"), + }, + servers: [{ url: "/" }], + tags: [ + { + name: "System", + description: "Server-level operational routes and contract metadata.", + }, + { + name: "Workspaces", + description: "Workspace-first resources will live under /workspaces/:workspaceId.", + }, + { + name: "Runtime", + description: "Server-owned runtime supervision, versions, and child process health.", + }, + { + name: "Sessions", + description: "Workspace-first session and streaming primitives backed by OpenCode or remote OpenWork servers.", + }, + { + name: "Messages", + description: "Workspace-first message history and mutation primitives nested under sessions.", + }, + { + name: "Config", + description: "Workspace-scoped config projection, raw 
config editing, and materialization owned by Server V2.", + }, + { + name: "Files", + description: "Workspace-scoped file sessions, simple content routes, inbox, and artifact surfaces owned by Server V2.", + }, + { + name: "Reload", + description: "Workspace-scoped reload events, reconciliation, and explicit runtime reload controls.", + }, + ], + }; +} + +export function registerSystemRoutes(app: ServerV2App, dependencies: AppDependencies) { + app.get( + routePaths.root, + describeRoute({ + tags: ["System"], + summary: "Get server root information", + description: "Returns the root metadata for the standalone Server V2 process and its route conventions.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Server root information returned successfully.", rootInfoResponseSchema), + }), + }), + (c) => { + const requestContext = getRequestContext(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.system.getRootInfo())); + }, + ); + + app.get( + routePaths.system.health, + describeRoute({ + tags: ["System"], + summary: "Check Server V2 health", + description: "Returns a lightweight health response for the standalone Server V2 process.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Server health returned successfully.", healthResponseSchema), + }), + }), + (c) => { + const requestContext = getRequestContext(c); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.system.getHealth())); + }, + ); + + app.get( + routePaths.system.meta, + describeRoute({ + tags: ["System"], + summary: "Get foundation metadata", + description: "Returns middleware ordering, route namespace conventions, sqlite bootstrap status, and startup import diagnostics.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Server metadata returned successfully.", metadataResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + 
requestContext.services.auth.requireVisibleRead(requestContext.actor); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.system.getMetadata(requestContext.actor))); + }, + ); + + app.get( + routePaths.system.capabilities, + describeRoute({ + tags: ["System"], + summary: "Get server capabilities", + description: "Returns the typed Server V2 capability model, including auth requirements and migrated registry/runtime read slices.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Server capabilities returned successfully.", capabilitiesResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.system.getCapabilities(requestContext.actor))); + }, + ); + + app.get( + routePaths.system.status, + describeRoute({ + tags: ["System"], + summary: "Get normalized system status", + description: "Returns normalized status, registry summary, auth requirements, runtime summary, and capabilities for app startup and settings surfaces.", + responses: withCommonErrorResponses({ + 200: jsonResponse("System status returned successfully.", systemStatusResponseSchema), + }, { includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.system.getStatus(requestContext.actor))); + }, + ); + + app.get( + routePaths.system.servers, + describeRoute({ + tags: ["System"], + summary: "List known server targets", + description: "Returns the local server registry inventory. 
This is host-scoped because it can reveal internal server connection metadata.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Server inventory returned successfully.", serverInventoryListResponseSchema), + }, { includeForbidden: true, includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireHost(requestContext.actor); + return c.json(buildSuccessResponse(requestContext.requestId, requestContext.services.system.listServers())); + }, + ); + + app.post( + routePaths.system.serverConnect, + describeRoute({ + tags: ["System"], + summary: "Connect a remote OpenWork server", + description: "Validates a remote OpenWork server through the local Server V2 process, stores the remote connection metadata, and syncs the discovered remote workspaces into the local canonical registry.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Remote OpenWork server connected successfully.", remoteServerConnectResponseSchema), + }, { includeForbidden: true, includeInvalidRequest: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireHost(requestContext.actor); + const body = await parseJsonBody(remoteServerConnectRequestSchema, c.req.raw); + let result; + try { + result = await requestContext.services.remoteServers.connect(body); + } catch (error) { + const resolved = buildRouteErrorJson(requestContext.requestId, error); + if (resolved) { + return c.json(resolved.body, resolved.status as any); + } + throw error; + } + return c.json(buildSuccessResponse(requestContext.requestId, { + selectedWorkspaceId: result.selectedWorkspaceId, + server: requestContext.services.serverRegistry.serialize(result.server, { includeBaseUrl: true }), + workspaces: result.workspaces.map((workspace) => toWorkspaceSummary(requestContext.services.workspaceRegistry.serializeWorkspace(workspace))), + })); + }, + ); + + 
app.post( + routePaths.system.serverSync(), + describeRoute({ + tags: ["System"], + summary: "Sync a remote OpenWork server", + description: "Refreshes the remote workspace inventory for a stored remote OpenWork server and updates the local canonical registry mapping.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Remote OpenWork server synced successfully.", remoteServerConnectResponseSchema), + }, { includeForbidden: true, includeInvalidRequest: true, includeNotFound: true, includeUnauthorized: true }), + }), + async (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireHost(requestContext.actor); + const body = await parseJsonBody(remoteServerSyncRequestSchema, c.req.raw); + const serverId = c.req.param("serverId") ?? ""; + let result; + try { + result = await requestContext.services.remoteServers.sync(serverId, body); + } catch (error) { + const resolved = buildRouteErrorJson(requestContext.requestId, error); + if (resolved) { + return c.json(resolved.body, resolved.status as any); + } + throw error; + } + return c.json(buildSuccessResponse(requestContext.requestId, { + selectedWorkspaceId: result.selectedWorkspaceId, + server: requestContext.services.serverRegistry.serialize(result.server, { includeBaseUrl: true }), + workspaces: result.workspaces.map((workspace) => toWorkspaceSummary(requestContext.services.workspaceRegistry.serializeWorkspace(workspace))), + })); + }, + ); + + app.get( + routePaths.openapiDocument, + describeRoute({ + tags: ["System"], + summary: "Get the OpenAPI document", + description: "Returns the machine-readable OpenAPI 3.1 document generated from the Hono route definitions.", + responses: withCommonErrorResponses({ + 200: jsonResponse("OpenAPI document returned successfully.", openApiDocumentSchema), + }), + }), + openAPIRouteHandler(app, { + documentation: createOpenApiDocumentation(dependencies.version), + includeEmptyPaths: true, + exclude: [routePaths.openapiDocument], + 
excludeMethods: ["OPTIONS"], + defaultOptions: { + ALL: { + operationId: (route) => buildOperationId(route.method, route.path), + }, + }, + }), + ); +} diff --git a/apps/server-v2/src/routes/workspaces.ts b/apps/server-v2/src/routes/workspaces.ts new file mode 100644 index 00000000..bb77b5e2 --- /dev/null +++ b/apps/server-v2/src/routes/workspaces.ts @@ -0,0 +1,73 @@ +import type { Hono } from "hono"; +import { describeRoute } from "hono-openapi"; +import { HTTPException } from "hono/http-exception"; +import { getRequestContext, type AppBindings } from "../context/request-context.js"; +import { buildSuccessResponse } from "../http.js"; +import { jsonResponse, withCommonErrorResponses } from "../openapi.js"; +import { workspaceDetailResponseSchema, workspaceListResponseSchema } from "../schemas/registry.js"; +import { routePaths } from "./route-paths.js"; + +function readIncludeHidden(url: string) { + const value = new URL(url).searchParams.get("includeHidden")?.trim().toLowerCase(); + return value === "1" || value === "true" || value === "yes"; +} + +export function registerWorkspaceRoutes(app: Hono) { + app.get( + routePaths.workspaces.base, + describeRoute({ + tags: ["Workspaces"], + summary: "List workspaces", + description: "Returns the canonical workspace inventory from the server-owned registry. 
Hidden control/help workspaces are excluded unless the caller asks for them with host scope.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace inventory returned successfully.", workspaceListResponseSchema), + }, { includeForbidden: true, includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + + const includeHidden = readIncludeHidden(c.req.url); + if (includeHidden) { + requestContext.services.auth.requireHost(requestContext.actor); + } + + return c.json( + buildSuccessResponse( + requestContext.requestId, + requestContext.services.system.listWorkspaces({ includeHidden }), + ), + ); + }, + ); + + app.get( + routePaths.workspaces.byId(), + describeRoute({ + tags: ["Workspaces"], + summary: "Get workspace detail", + description: "Returns the canonical workspace detail shape for a single workspace, including backend resolution and runtime summary fields.", + responses: withCommonErrorResponses({ + 200: jsonResponse("Workspace detail returned successfully.", workspaceDetailResponseSchema), + }, { includeNotFound: true, includeUnauthorized: true }), + }), + (c) => { + const requestContext = getRequestContext(c); + requestContext.services.auth.requireVisibleRead(requestContext.actor); + const workspaceId = c.req.param("workspaceId") ?? 
""; + + const workspace = requestContext.services.system.getWorkspace( + workspaceId, + { includeHidden: requestContext.actor.kind === "host" }, + ); + + if (!workspace) { + throw new HTTPException(404, { + message: `Workspace not found: ${workspaceId}`, + }); + } + + return c.json(buildSuccessResponse(requestContext.requestId, workspace)); + }, + ); +} diff --git a/apps/server-v2/src/runtime/assets.test.ts b/apps/server-v2/src/runtime/assets.test.ts new file mode 100644 index 00000000..c1045c0e --- /dev/null +++ b/apps/server-v2/src/runtime/assets.test.ts @@ -0,0 +1,275 @@ +import { afterEach, expect, test } from "bun:test"; +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { createHash } from "node:crypto"; +import { createRuntimeAssetService } from "./assets.js"; +import { registerEmbeddedRuntimeBundle } from "./embedded.js"; +import { resolveRuntimeTarget, type RuntimeManifest } from "./manifest.js"; + +const cleanupPaths: string[] = []; +const ENV_KEYS = [ + "OPENWORK_SERVER_V2_RUNTIME_BUNDLE_DIR", + "OPENWORK_SERVER_V2_RUNTIME_SOURCE", + "OPENWORK_SERVER_V2_RUNTIME_RELEASE_DIR", + "OPENWORK_SERVER_V2_RUNTIME_MANIFEST_PATH", +]; +const originalEnv = new Map(ENV_KEYS.map((key) => [key, process.env[key]])); + +afterEach(() => { + for (const [key, value] of originalEnv.entries()) { + if (typeof value === "string") { + process.env[key] = value; + } else { + delete process.env[key]; + } + } + while (cleanupPaths.length > 0) { + const target = cleanupPaths.pop(); + if (target) { + fs.rmSync(target, { force: true, recursive: true }); + } + } + registerEmbeddedRuntimeBundle(undefined); +}); + +function makeTempDir(name: string) { + const directory = fs.mkdtempSync(path.join(os.tmpdir(), `${name}-`)); + cleanupPaths.push(directory); + return directory; +} + +async function sha256(filePath: string) { + const contents = await Bun.file(filePath).arrayBuffer(); + return 
createHash("sha256").update(Buffer.from(contents)).digest("hex"); +} + +function writeVersionedBinary(filePath: string, version: string) { + const script = [ + "#!/bin/sh", + 'if [ "$1" = "--version" ]; then', + ` echo ${JSON.stringify(version)}`, + " exit 0", + "fi", + "exit 0", + "", + ].join("\n"); + fs.writeFileSync(filePath, script, "utf8"); + fs.chmodSync(filePath, 0o755); +} + +test("release runtime assets use manifest versions without reading repo metadata", async () => { + const target = resolveRuntimeTarget(); + if (!target) { + throw new Error("Unsupported test target."); + } + + const releaseRoot = makeTempDir("openwork-server-v2-release-assets"); + const opencodePath = path.join(releaseRoot, process.platform === "win32" ? "opencode.exe" : "opencode"); + const routerPath = path.join(releaseRoot, process.platform === "win32" ? "opencode-router.exe" : "opencode-router"); + writeVersionedBinary(opencodePath, "1.2.27"); + writeVersionedBinary(routerPath, "0.11.206"); + + const manifest: RuntimeManifest = { + files: { + opencode: { + path: path.basename(opencodePath), + sha256: await sha256(opencodePath), + size: fs.statSync(opencodePath).size, + }, + "opencode-router": { + path: path.basename(routerPath), + sha256: await sha256(routerPath), + size: fs.statSync(routerPath).size, + }, + }, + generatedAt: new Date().toISOString(), + manifestVersion: 1, + opencodeVersion: "1.2.27", + rootDir: releaseRoot, + routerVersion: "0.11.206", + serverVersion: "0.0.0-test", + source: "release", + target, + }; + const manifestPath = path.join(releaseRoot, "manifest.json"); + fs.writeFileSync(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); + + process.env.OPENWORK_SERVER_V2_RUNTIME_SOURCE = "release"; + process.env.OPENWORK_SERVER_V2_RUNTIME_RELEASE_DIR = releaseRoot; + process.env.OPENWORK_SERVER_V2_RUNTIME_MANIFEST_PATH = manifestPath; + + const service = createRuntimeAssetService({ + environment: "test", + serverVersion: "0.0.0-test", + 
workingDirectory: { + databaseDir: releaseRoot, + databasePath: path.join(releaseRoot, "db.sqlite"), + importsDir: path.join(releaseRoot, "imports"), + managedDir: path.join(releaseRoot, "managed"), + managedMcpDir: path.join(releaseRoot, "managed", "mcps"), + managedPluginDir: path.join(releaseRoot, "managed", "plugins"), + managedProviderDir: path.join(releaseRoot, "managed", "providers"), + managedSkillDir: path.join(releaseRoot, "managed", "skills"), + rootDir: releaseRoot, + runtimeDir: releaseRoot, + workspacesDir: path.join(releaseRoot, "workspaces"), + }, + }); + + const bundle = await service.resolveRuntimeBundle(); + expect(bundle.opencode.version).toBe("1.2.27"); + expect(bundle.router.version).toBe("0.11.206"); + expect(bundle.manifest.source).toBe("release"); +}); + +test("release runtime assets extract into the managed runtime directory and survive source bundle removal", async () => { + const target = resolveRuntimeTarget(); + if (!target) { + throw new Error("Unsupported test target."); + } + + const bundleRoot = makeTempDir("openwork-server-v2-release-bundle"); + const runtimeRoot = makeTempDir("openwork-server-v2-runtime-root"); + const runtimeDir = path.join(runtimeRoot, "runtime"); + fs.mkdirSync(runtimeDir, { recursive: true }); + + const opencodePath = path.join(bundleRoot, process.platform === "win32" ? "opencode.exe" : "opencode"); + const routerPath = path.join(bundleRoot, process.platform === "win32" ? 
"opencode-router.exe" : "opencode-router"); + writeVersionedBinary(opencodePath, "1.2.27"); + writeVersionedBinary(routerPath, "0.11.206"); + + const manifest: RuntimeManifest = { + files: { + opencode: { + path: path.basename(opencodePath), + sha256: await sha256(opencodePath), + size: fs.statSync(opencodePath).size, + }, + "opencode-router": { + path: path.basename(routerPath), + sha256: await sha256(routerPath), + size: fs.statSync(routerPath).size, + }, + }, + generatedAt: new Date().toISOString(), + manifestVersion: 1, + opencodeVersion: "1.2.27", + rootDir: bundleRoot, + routerVersion: "0.11.206", + serverVersion: "0.0.0-test", + source: "release", + target, + }; + fs.writeFileSync(path.join(bundleRoot, "manifest.json"), `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); + + process.env.OPENWORK_SERVER_V2_RUNTIME_SOURCE = "release"; + process.env.OPENWORK_SERVER_V2_RUNTIME_BUNDLE_DIR = bundleRoot; + + const workingDirectory = { + databaseDir: runtimeRoot, + databasePath: path.join(runtimeRoot, "db.sqlite"), + importsDir: path.join(runtimeRoot, "imports"), + managedDir: path.join(runtimeRoot, "managed"), + managedMcpDir: path.join(runtimeRoot, "managed", "mcps"), + managedPluginDir: path.join(runtimeRoot, "managed", "plugins"), + managedProviderDir: path.join(runtimeRoot, "managed", "providers"), + managedSkillDir: path.join(runtimeRoot, "managed", "skills"), + rootDir: runtimeRoot, + runtimeDir, + workspacesDir: path.join(runtimeRoot, "workspaces"), + }; + + const service = createRuntimeAssetService({ + environment: "test", + serverVersion: "0.0.0-test", + workingDirectory, + }); + + const firstBundle = await service.resolveRuntimeBundle(); + const extractedRoot = path.join(runtimeDir, "0.0.0-test"); + expect(firstBundle.opencode.absolutePath).toBe(path.join(extractedRoot, path.basename(opencodePath))); + expect(firstBundle.router.absolutePath).toBe(path.join(extractedRoot, path.basename(routerPath))); + expect(fs.existsSync(path.join(extractedRoot, 
"manifest.json"))).toBe(true); + + fs.rmSync(bundleRoot, { recursive: true, force: true }); + + const secondBundle = await service.resolveRuntimeBundle(); + expect(secondBundle.opencode.absolutePath).toBe(firstBundle.opencode.absolutePath); + expect(secondBundle.router.absolutePath).toBe(firstBundle.router.absolutePath); +}); + +test("release runtime assets can extract from an embedded runtime bundle", async () => { + const target = resolveRuntimeTarget(); + if (!target) { + throw new Error("Unsupported test target."); + } + + const bundleRoot = makeTempDir("openwork-server-v2-embedded-bundle"); + const runtimeRoot = makeTempDir("openwork-server-v2-embedded-runtime-root"); + const runtimeDir = path.join(runtimeRoot, "runtime"); + fs.mkdirSync(runtimeDir, { recursive: true }); + + const opencodePath = path.join(bundleRoot, process.platform === "win32" ? "opencode.exe" : "opencode"); + const routerPath = path.join(bundleRoot, process.platform === "win32" ? "opencode-router.exe" : "opencode-router"); + const manifestPath = path.join(bundleRoot, "manifest.json"); + writeVersionedBinary(opencodePath, "1.2.27"); + writeVersionedBinary(routerPath, "0.11.206"); + + const manifest: RuntimeManifest = { + files: { + opencode: { + path: path.basename(opencodePath), + sha256: await sha256(opencodePath), + size: fs.statSync(opencodePath).size, + }, + "opencode-router": { + path: path.basename(routerPath), + sha256: await sha256(routerPath), + size: fs.statSync(routerPath).size, + }, + }, + generatedAt: new Date().toISOString(), + manifestVersion: 1, + opencodeVersion: "1.2.27", + rootDir: bundleRoot, + routerVersion: "0.11.206", + serverVersion: "0.0.0-test", + source: "release", + target, + }; + fs.writeFileSync(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); + + process.env.OPENWORK_SERVER_V2_RUNTIME_SOURCE = "release"; + delete process.env.OPENWORK_SERVER_V2_RUNTIME_BUNDLE_DIR; + + registerEmbeddedRuntimeBundle({ + manifestPath, + opencodePath, + routerPath, 
+ }); + + const service = createRuntimeAssetService({ + environment: "test", + serverVersion: "0.0.0-test", + workingDirectory: { + databaseDir: runtimeRoot, + databasePath: path.join(runtimeRoot, "db.sqlite"), + importsDir: path.join(runtimeRoot, "imports"), + managedDir: path.join(runtimeRoot, "managed"), + managedMcpDir: path.join(runtimeRoot, "managed", "mcps"), + managedPluginDir: path.join(runtimeRoot, "managed", "plugins"), + managedProviderDir: path.join(runtimeRoot, "managed", "providers"), + managedSkillDir: path.join(runtimeRoot, "managed", "skills"), + rootDir: runtimeRoot, + runtimeDir, + workspacesDir: path.join(runtimeRoot, "workspaces"), + }, + }); + + const bundle = await service.resolveRuntimeBundle(); + const extractedRoot = path.join(runtimeDir, "0.0.0-test"); + expect(bundle.opencode.absolutePath).toBe(path.join(extractedRoot, path.basename(opencodePath))); + expect(bundle.router.absolutePath).toBe(path.join(extractedRoot, path.basename(routerPath))); + expect(fs.existsSync(path.join(extractedRoot, "manifest.json"))).toBe(true); +}); diff --git a/apps/server-v2/src/runtime/assets.ts b/apps/server-v2/src/runtime/assets.ts new file mode 100644 index 00000000..010e363c --- /dev/null +++ b/apps/server-v2/src/runtime/assets.ts @@ -0,0 +1,867 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { createHash } from "node:crypto"; +import { chmod, copyFile, mkdir, mkdtemp, readFile, readdir, rename, rm, stat, writeFile } from "node:fs/promises"; +import { fileURLToPath } from "node:url"; +import type { ServerWorkingDirectory } from "../database/working-directory.js"; +import { getEmbeddedRuntimeBundle, type EmbeddedRuntimeBundle } from "./embedded.js"; +import { + resolveBunTarget, + resolveRuntimeTarget, + runtimeBinaryFilename, + type ResolvedRuntimeBinary, + type ResolvedRuntimeBundle, + type RuntimeAssetName, + type RuntimeAssetSource, + type RuntimeManifest, + type RuntimeTarget, +} from "./manifest.js"; 
+ +type RuntimeAssetServiceOptions = { + environment: string; + serverVersion: string; + workingDirectory: ServerWorkingDirectory; +}; + +type ReleaseBundleSource = + | { kind: "directory"; rootDir: string } + | { bundle: EmbeddedRuntimeBundle; kind: "embedded" }; + +function isTruthy(value: string | undefined) { + if (!value) { + return false; + } + + return ["1", "true", "yes", "on"].includes(value.trim().toLowerCase()); +} + +function normalizeVersion(value: string) { + const trimmed = value.trim(); + return trimmed.startsWith("v") ? trimmed.slice(1) : trimmed; +} + +function dirnameFromMetaUrl(metaUrl: string) { + return path.dirname(fileURLToPath(metaUrl)); +} + +function findRepoRoot(startDir: string) { + let current = startDir; + while (true) { + const constantsPath = path.join(current, "constants.json"); + const serverV2PackagePath = path.join(current, "apps", "server-v2", "package.json"); + if (fs.existsSync(constantsPath) && fs.existsSync(serverV2PackagePath)) { + return current; + } + + const parent = path.dirname(current); + if (parent === current) { + return null; + } + current = parent; + } +} + +async function readJson(filePath: string): Promise { + return JSON.parse(await readFile(filePath, "utf8")) as T; +} + +async function sha256File(filePath: string) { + const contents = await readFile(filePath); + return createHash("sha256").update(contents).digest("hex"); +} + +async function ensureExecutable(filePath: string) { + if (process.platform === "win32") { + return; + } + + await chmod(filePath, 0o755); +} + +async function sleep(ms: number) { + await new Promise((resolve) => setTimeout(resolve, ms)); +} + +async function fileExists(filePath: string) { + try { + await stat(filePath); + return true; + } catch { + return false; + } +} + +async function directoryExists(directoryPath: string) { + try { + const details = await stat(directoryPath); + return details.isDirectory(); + } catch { + return false; + } +} + +async function readJsonOrNull(filePath: 
string): Promise { + try { + return JSON.parse(await readFile(filePath, "utf8")) as T; + } catch { + return null; + } +} + +function isProcessAlive(pid: number | null | undefined) { + if (!pid || !Number.isInteger(pid) || pid <= 0) { + return false; + } + + try { + process.kill(pid, 0); + return true; + } catch (error) { + return error instanceof Error && "code" in error && (error as NodeJS.ErrnoException).code === "EPERM"; + } +} + +async function captureProcess(command: string[], options: { cwd?: string; env?: Record; timeoutMs?: number } = {}) { + const child = Bun.spawn(command, { + cwd: options.cwd, + env: { + ...process.env, + ...options.env, + }, + stderr: "pipe", + stdout: "pipe", + }); + + const stdoutPromise = new Response(child.stdout).text(); + const stderrPromise = new Response(child.stderr).text(); + const timeoutMs = options.timeoutMs ?? 120_000; + + const timeout = setTimeout(() => { + child.kill(); + }, timeoutMs); + + try { + const exitCode = await child.exited; + const [stdout, stderr] = await Promise.all([stdoutPromise, stderrPromise]); + if (exitCode !== 0) { + const message = [stdout.trim(), stderr.trim()].filter(Boolean).join("\n"); + throw new Error(message || `Command failed with exit code ${exitCode}: ${command.join(" ")}`); + } + return { stderr, stdout }; + } finally { + clearTimeout(timeout); + } +} + +function parseVersion(output: string) { + const match = output.match(/\d+\.\d+\.\d+(?:-[\w.-]+)?/); + return match?.[0] ?? 
null; +} + +async function readBinaryVersion(binaryPath: string) { + try { + const result = await captureProcess([binaryPath, "--version"], { cwd: os.tmpdir(), timeoutMs: 4_000 }); + return parseVersion(`${result.stdout}\n${result.stderr}`); + } catch { + return null; + } +} + +function resolveOpencodeAsset(target: RuntimeTarget) { + const assets: Record = { + "darwin-arm64": "opencode-darwin-arm64.zip", + "darwin-x64": "opencode-darwin-x64-baseline.zip", + "linux-arm64": "opencode-linux-arm64.tar.gz", + "linux-x64": "opencode-linux-x64-baseline.tar.gz", + "windows-arm64": "opencode-windows-arm64.zip", + "windows-x64": "opencode-windows-x64-baseline.zip", + }; + return assets[target]; +} + +async function downloadToPath(url: string, destinationPath: string) { + const response = await fetch(url); + if (!response.ok) { + throw new Error(`Failed to download ${url} (HTTP ${response.status}).`); + } + + const contents = Buffer.from(await response.arrayBuffer()); + await mkdir(path.dirname(destinationPath), { recursive: true }); + const temporaryPath = `${destinationPath}.tmp-${Date.now()}`; + await writeFile(temporaryPath, contents); + await rename(temporaryPath, destinationPath); +} + +async function extractOpencodeArchive(archivePath: string, extractDir: string) { + if (process.platform === "win32") { + const quotedArchive = `'${archivePath.replace(/'/g, "''")}'`; + const quotedExtract = `'${extractDir.replace(/'/g, "''")}'`; + await captureProcess([ + "powershell", + "-NoProfile", + "-Command", + `$ErrorActionPreference = 'Stop'; Expand-Archive -Path ${quotedArchive} -DestinationPath ${quotedExtract} -Force`, + ]); + return; + } + + if (archivePath.endsWith(".zip")) { + await captureProcess(["unzip", "-q", archivePath, "-d", extractDir]); + return; + } + + if (archivePath.endsWith(".tar.gz")) { + await captureProcess(["tar", "-xzf", archivePath, "-C", extractDir]); + return; + } + + throw new Error(`Unsupported OpenCode archive format: ${archivePath}`); +} + +async 
function findFileRecursively(rootDir: string, matcher: (fileName: string) => boolean): Promise { + const queue = [rootDir]; + while (queue.length > 0) { + const current = queue.shift(); + if (!current) { + continue; + } + + const entries = await fs.promises.readdir(current, { withFileTypes: true }); + for (const entry of entries) { + const absolutePath = path.join(current, entry.name); + if (entry.isDirectory()) { + queue.push(absolutePath); + continue; + } + + if (matcher(entry.name)) { + return absolutePath; + } + } + } + + return null; +} + +async function writeIfChanged(filePath: string, contents: string) { + const existing = await readFile(filePath, "utf8").catch(() => null); + if (existing === contents) { + return; + } + + await mkdir(path.dirname(filePath), { recursive: true }); + await writeFile(filePath, contents); +} + +export type RuntimeAssetService = ReturnType; + +export function createRuntimeAssetService(options: RuntimeAssetServiceOptions) { + const runtimeTarget = resolveRuntimeTarget(); + if (!runtimeTarget) { + throw new Error(`Unsupported runtime target ${process.platform}/${process.arch} for Server V2 runtime assets.`); + } + + const serverVersion = options.serverVersion; + const repoRoot = findRepoRoot(path.resolve(dirnameFromMetaUrl(import.meta.url), "..", "..", "..", "..")); + const runtimeSourcePreference = process.env.OPENWORK_SERVER_V2_RUNTIME_SOURCE?.trim().toLowerCase(); + const bundleRootOverride = process.env.OPENWORK_SERVER_V2_RUNTIME_BUNDLE_DIR?.trim(); + const releaseRootOverride = process.env.OPENWORK_SERVER_V2_RUNTIME_RELEASE_DIR?.trim(); + const manifestPathOverride = process.env.OPENWORK_SERVER_V2_RUNTIME_MANIFEST_PATH?.trim(); + + const developmentRoot = repoRoot ? path.join(repoRoot, ".local", "runtime-assets") : null; + const releaseRoot = releaseRootOverride?.trim() + ? 
path.resolve(releaseRootOverride) + : path.join(options.workingDirectory.runtimeDir, serverVersion); + let registeredLeaseCleanup = false; + let releaseRuntimeRootPromise: Promise<{ manifest: RuntimeManifest; rootDir: string }> | null = null; + let cachedReleaseBundleSource: ReleaseBundleSource | null | undefined; + + const resolveAdjacentBundleRoot = () => { + const candidates = [path.dirname(process.execPath)]; + for (const candidate of candidates) { + const manifestPath = manifestPathOverride?.trim() + ? path.resolve(manifestPathOverride) + : path.join(candidate, "manifest.json"); + const manifestExists = fs.existsSync(manifestPath); + if (!manifestExists) { + continue; + } + return candidate; + } + return null; + }; + + const resolveReleaseBundleRoot = (): ReleaseBundleSource | null => { + if (cachedReleaseBundleSource !== undefined) { + return cachedReleaseBundleSource; + } + + if (bundleRootOverride?.trim()) { + cachedReleaseBundleSource = { + kind: "directory", + rootDir: path.resolve(bundleRootOverride), + }; + return cachedReleaseBundleSource; + } + + const adjacentBundleRoot = resolveAdjacentBundleRoot(); + if (adjacentBundleRoot) { + cachedReleaseBundleSource = { + kind: "directory", + rootDir: adjacentBundleRoot, + }; + return cachedReleaseBundleSource; + } + + const embeddedBundle = getEmbeddedRuntimeBundle(); + if (embeddedBundle) { + cachedReleaseBundleSource = { + bundle: embeddedBundle, + kind: "embedded", + }; + return cachedReleaseBundleSource; + } + + cachedReleaseBundleSource = null; + return cachedReleaseBundleSource; + }; + + const resolveSource = (): RuntimeAssetSource => { + if (runtimeSourcePreference === "development") { + if (!developmentRoot) { + throw new Error("Development runtime assets requested, but the repo root could not be resolved."); + } + return "development"; + } + + if (runtimeSourcePreference === "release") { + return "release"; + } + + if (resolveReleaseBundleRoot()) { + return "release"; + } + + if (developmentRoot) { + 
return "development"; + } + + return "release"; + }; + + const resolveRootDir = (source: RuntimeAssetSource) => (source === "development" ? developmentRoot! : releaseRoot); + + const readPinnedOpencodeVersion = async () => { + const candidates = [ + repoRoot ? path.join(repoRoot, "constants.json") : null, + path.resolve(dirnameFromMetaUrl(import.meta.url), "..", "..", "..", "..", "constants.json"), + ].filter(Boolean) as string[]; + + for (const candidate of candidates) { + if (!fs.existsSync(candidate)) { + continue; + } + + const parsed = await readJson<{ opencodeVersion?: string }>(candidate); + const value = parsed.opencodeVersion?.trim() ?? ""; + if (value) { + return normalizeVersion(value); + } + } + + throw new Error("Unable to resolve the pinned OpenCode version from constants.json."); + }; + + const readRouterVersion = async () => { + const candidates = [ + repoRoot ? path.join(repoRoot, "apps", "opencode-router", "package.json") : null, + path.resolve(dirnameFromMetaUrl(import.meta.url), "..", "..", "..", "opencode-router", "package.json"), + ].filter(Boolean) as string[]; + + for (const candidate of candidates) { + if (!fs.existsSync(candidate)) { + continue; + } + + const parsed = await readJson<{ version?: string }>(candidate); + const value = parsed.version?.trim() ?? 
""; + if (value) { + return normalizeVersion(value); + } + } + + throw new Error("Unable to resolve the local opencode-router version."); + }; + + const materializeManifest = async (source: RuntimeAssetSource, opencode: ResolvedRuntimeBinary, router: ResolvedRuntimeBinary) => { + const rootDir = resolveRootDir(source); + const manifest: RuntimeManifest = { + files: { + opencode: { + path: path.relative(rootDir, opencode.absolutePath), + sha256: opencode.sha256, + size: opencode.size, + }, + "opencode-router": { + path: path.relative(rootDir, router.absolutePath), + sha256: router.sha256, + size: router.size, + }, + }, + generatedAt: new Date().toISOString(), + manifestVersion: 1, + opencodeVersion: opencode.version, + rootDir, + routerVersion: router.version, + serverVersion, + source, + target: runtimeTarget, + }; + + const manifestPath = + source === "release" + ? path.join(rootDir, "manifest.json") + : path.join(rootDir, "manifests", runtimeTarget, `openwork-server-v2-${serverVersion}.json`); + await writeIfChanged(`${manifestPath}`, `${JSON.stringify(manifest, null, 2)}\n`); + return manifest; + }; + + const releaseManifestPath = (rootDir: string) => path.join(rootDir, "manifest.json"); + const sourceManifestPath = (rootDir: string) => + manifestPathOverride?.trim() ? 
path.resolve(manifestPathOverride) : releaseManifestPath(rootDir); + + const validateManifestRoot = async (rootDir: string, manifest: RuntimeManifest) => { + for (const name of ["opencode", "opencode-router"] as const) { + const entry = manifest.files[name]; + if (!entry) { + return false; + } + + const binaryPath = path.resolve(rootDir, entry.path); + if (!(await fileExists(binaryPath))) { + return false; + } + + const checksum = await sha256File(binaryPath); + if (checksum !== entry.sha256) { + return false; + } + } + + return true; + }; + + const leasePathForRoot = (rootDir: string) => path.join(rootDir, ".runtime-lease.json"); + + const cleanupLease = async (rootDir: string) => { + await rm(leasePathForRoot(rootDir), { force: true }); + }; + + const markRuntimeLease = async (rootDir: string) => { + await writeFile( + leasePathForRoot(rootDir), + `${JSON.stringify({ pid: process.pid, serverVersion, updatedAt: new Date().toISOString() }, null, 2)}\n`, + "utf8", + ); + + if (!registeredLeaseCleanup) { + registeredLeaseCleanup = true; + for (const signal of ["SIGINT", "SIGTERM", "beforeExit", "exit"] as const) { + process.once(signal, () => { + void cleanupLease(rootDir); + }); + } + } + }; + + const isLiveLease = async (rootDir: string) => { + const lease = await readJsonOrNull<{ pid?: number }>(leasePathForRoot(rootDir)); + return isProcessAlive(typeof lease?.pid === "number" ? 
lease.pid : null); + }; + + const cleanupReleaseArtifacts = async (currentRoot: string) => { + const parentDir = path.dirname(currentRoot); + if (!(await directoryExists(parentDir))) { + return; + } + + const entries = await readdir(parentDir, { withFileTypes: true }); + const runtimeRoots: Array<{ absolutePath: string; mtimeMs: number }> = []; + for (const entry of entries) { + if (!entry.isDirectory()) { + continue; + } + + const absolutePath = path.join(parentDir, entry.name); + if (absolutePath === currentRoot) { + continue; + } + + if (entry.name.startsWith(`${path.basename(currentRoot)}.extract-`) || entry.name.startsWith(`${path.basename(currentRoot)}.replace-`)) { + await rm(absolutePath, { force: true, recursive: true }); + continue; + } + + const manifest = await readJsonOrNull(path.join(absolutePath, "manifest.json")); + if (!manifest) { + continue; + } + + const details = await stat(absolutePath).catch(() => null); + runtimeRoots.push({ + absolutePath, + mtimeMs: details?.mtimeMs ?? 0, + }); + } + + runtimeRoots.sort((left, right) => right.mtimeMs - left.mtimeMs); + const keep = new Set(runtimeRoots.slice(0, 2).map((item) => item.absolutePath)); + for (const candidate of runtimeRoots) { + if (keep.has(candidate.absolutePath)) { + continue; + } + if (await isLiveLease(candidate.absolutePath)) { + continue; + } + await rm(candidate.absolutePath, { force: true, recursive: true }); + } + }; + + const readReleaseManifest = async (source: ReleaseBundleSource | { kind: "directory"; rootDir: string }) => { + const manifestPath = source.kind === "embedded" + ? 
source.bundle.manifestPath + : sourceManifestPath(source.rootDir); + if (!fs.existsSync(manifestPath)) { + throw new Error(`Release runtime manifest not found at ${manifestPath}.`); + } + + return readJson(manifestPath); + }; + + const resolveReleaseSourceBinary = (source: ReleaseBundleSource, name: RuntimeAssetName, relativePath: string) => { + if (source.kind === "embedded") { + return name === "opencode" ? source.bundle.opencodePath : source.bundle.routerPath; + } + + return path.resolve(source.rootDir, relativePath); + }; + + const copyReleaseSourceBinary = async (sourcePath: string, targetPath: string) => { + const contents = await readFile(sourcePath); + await mkdir(path.dirname(targetPath), { recursive: true }); + await writeFile(targetPath, contents); + }; + + const acquireExtractionLock = async (rootDir: string) => { + const lockDir = `${rootDir}.lock`; + const ownerPath = path.join(lockDir, "owner.json"); + const startedAt = Date.now(); + await mkdir(path.dirname(lockDir), { recursive: true }); + + while (Date.now() - startedAt < 15_000) { + try { + await mkdir(lockDir); + await writeFile( + ownerPath, + `${JSON.stringify({ pid: process.pid, createdAt: new Date().toISOString() }, null, 2)}\n`, + "utf8", + ); + return async () => { + await rm(lockDir, { force: true, recursive: true }); + }; + } catch (error) { + const code = error instanceof Error && "code" in error ? (error as NodeJS.ErrnoException).code : null; + if (code !== "EEXIST") { + throw error; + } + + const owner = await readJsonOrNull<{ createdAt?: string; pid?: number }>(ownerPath); + const ownerAgeMs = owner?.createdAt ? Date.now() - Date.parse(owner.createdAt) : Number.POSITIVE_INFINITY; + if (!isProcessAlive(typeof owner?.pid === "number" ? 
owner.pid : null) && ownerAgeMs > 5_000) { + await rm(lockDir, { force: true, recursive: true }); + continue; + } + + await sleep(100); + } + } + + throw new Error(`Timed out waiting for the runtime extraction lock at ${lockDir}.`); + }; + + const ensureReleaseRuntimeRoot = async () => { + if (releaseRuntimeRootPromise) { + return releaseRuntimeRootPromise; + } + + releaseRuntimeRootPromise = (async () => { + const rootDir = resolveRootDir("release"); + const bundleSource = resolveReleaseBundleRoot(); + + if (!bundleSource || (bundleSource.kind === "directory" && path.resolve(bundleSource.rootDir) === path.resolve(rootDir))) { + const manifest = await readReleaseManifest({ kind: "directory", rootDir }); + if (!(await validateManifestRoot(rootDir, manifest))) { + throw new Error(`Release runtime manifest at ${releaseManifestPath(rootDir)} does not match the extracted runtime contents.`); + } + await markRuntimeLease(rootDir); + await cleanupReleaseArtifacts(rootDir); + return { manifest, rootDir }; + } + + const unlock = await acquireExtractionLock(rootDir); + try { + const existingManifest = await readJsonOrNull(releaseManifestPath(rootDir)); + if (existingManifest && (await validateManifestRoot(rootDir, existingManifest))) { + await markRuntimeLease(rootDir); + await cleanupReleaseArtifacts(rootDir); + return { manifest: existingManifest, rootDir }; + } + + const sourceManifest = await readReleaseManifest(bundleSource); + const bundleLabel = bundleSource.kind === "embedded" ? 
"embedded runtime bundle" : bundleSource.rootDir; + const tempRoot = `${rootDir}.extract-${process.pid}-${Date.now()}`; + const backupRoot = `${rootDir}.replace-${Date.now()}`; + await rm(tempRoot, { force: true, recursive: true }); + await mkdir(tempRoot, { recursive: true }); + + for (const name of ["opencode", "opencode-router"] as const) { + const entry = sourceManifest.files[name]; + if (!entry) { + throw new Error(`Release runtime manifest in ${bundleLabel} is missing the ${name} entry.`); + } + const sourcePath = resolveReleaseSourceBinary(bundleSource, name, entry.path); + if (!(await fileExists(sourcePath))) { + throw new Error(`Release runtime source binary for ${name} was expected at ${sourcePath}, but it was not found.`); + } + const targetPath = path.resolve(tempRoot, entry.path); + await copyReleaseSourceBinary(sourcePath, targetPath); + await ensureExecutable(targetPath); + } + + const extractedManifest: RuntimeManifest = { + ...sourceManifest, + generatedAt: new Date().toISOString(), + rootDir, + }; + await writeFile(releaseManifestPath(tempRoot), `${JSON.stringify(extractedManifest, null, 2)}\n`, "utf8"); + + if (await directoryExists(rootDir)) { + await rm(backupRoot, { force: true, recursive: true }); + await rename(rootDir, backupRoot); + } + + await rename(tempRoot, rootDir); + await rm(backupRoot, { force: true, recursive: true }); + + await markRuntimeLease(rootDir); + await cleanupReleaseArtifacts(rootDir); + return { manifest: extractedManifest, rootDir }; + } finally { + await unlock(); + } + })(); + + try { + return await releaseRuntimeRootPromise; + } catch (error) { + releaseRuntimeRootPromise = null; + throw error; + } + }; + + const buildResolvedBinary = async ( + source: RuntimeAssetSource, + name: RuntimeAssetName, + absolutePath: string, + version: string, + ): Promise => { + const details = await stat(absolutePath); + return { + absolutePath, + name, + sha256: await sha256File(absolutePath), + size: details.size, + source, + 
stagedRoot: resolveRootDir(source), + target: runtimeTarget, + version, + }; + }; + + const ensureDevelopmentOpencodeBinary = async (version: string) => { + const rootDir = resolveRootDir("development"); + const targetDir = path.join(rootDir, "opencode", runtimeTarget, `v${version}`); + const targetPath = path.join(targetDir, runtimeBinaryFilename("opencode", runtimeTarget)); + if (await fileExists(targetPath)) { + const actualVersion = await readBinaryVersion(targetPath); + if (!actualVersion || actualVersion === version) { + await ensureExecutable(targetPath); + return targetPath; + } + await rm(targetPath, { force: true }); + } + + const asset = resolveOpencodeAsset(runtimeTarget); + const archivePath = path.join(os.tmpdir(), `openwork-server-v2-opencode-${Date.now()}-${asset}`); + const extractDir = await mkdtemp(path.join(os.tmpdir(), "openwork-server-v2-opencode-")); + const downloadUrl = `https://github.com/anomalyco/opencode/releases/download/v${version}/${asset}`; + + try { + await downloadToPath(downloadUrl, archivePath); + await extractOpencodeArchive(archivePath, extractDir); + const extractedBinary = await findFileRecursively(extractDir, (fileName) => fileName === "opencode" || fileName === "opencode.exe"); + if (!extractedBinary) { + throw new Error(`Downloaded OpenCode archive did not contain an opencode binary for ${runtimeTarget}.`); + } + + await mkdir(targetDir, { recursive: true }); + await copyFile(extractedBinary, targetPath); + await ensureExecutable(targetPath); + return targetPath; + } catch (error) { + throw new Error( + `Failed to download the pinned OpenCode ${version} artifact for ${runtimeTarget}: ${error instanceof Error ? 
error.message : String(error)}`, + ); + } finally { + await rm(extractDir, { force: true, recursive: true }); + await rm(archivePath, { force: true }); + } + }; + + const ensureDevelopmentRouterBinary = async (version: string) => { + if (!repoRoot) { + throw new Error("Cannot build opencode-router in development mode because the repo root could not be resolved."); + } + + const rootDir = resolveRootDir("development"); + const targetDir = path.join(rootDir, "opencode-router", runtimeTarget, `v${version}`); + const targetPath = path.join(targetDir, runtimeBinaryFilename("opencode-router", runtimeTarget)); + if (await fileExists(targetPath)) { + const actualVersion = await readBinaryVersion(targetPath); + if (!actualVersion || actualVersion === version) { + await ensureExecutable(targetPath); + return targetPath; + } + await rm(targetPath, { force: true }); + } + + await mkdir(targetDir, { recursive: true }); + const packageDir = path.join(repoRoot, "apps", "opencode-router"); + const entrypoint = path.join(packageDir, "src", "cli.ts"); + const outfile = targetPath; + const bunCommand = [ + process.execPath, + "build", + entrypoint, + "--compile", + "--outfile", + outfile, + "--target", + resolveBunTarget(runtimeTarget), + "--define", + `__OPENCODE_ROUTER_VERSION__=\"${version}\"`, + ]; + + try { + await captureProcess(bunCommand, { cwd: packageDir, timeoutMs: 300_000 }); + await ensureExecutable(outfile); + return outfile; + } catch (error) { + throw new Error( + `Failed to build the local opencode-router ${version} binary for ${runtimeTarget}: ${error instanceof Error ? 
error.message : String(error)}`, + ); + } + }; + + const ensureReleaseBinary = async (name: RuntimeAssetName, version: string) => { + const { manifest, rootDir } = await ensureReleaseRuntimeRoot(); + const entry = manifest.files[name]; + if (!entry) { + throw new Error(`Release runtime manifest is missing the ${name} entry.`); + } + + const binaryPath = path.resolve(rootDir, entry.path); + if (!(await fileExists(binaryPath))) { + throw new Error(`Release runtime binary for ${name} was expected at ${binaryPath}, but it was not found.`); + } + + await ensureExecutable(binaryPath); + const checksum = await sha256File(binaryPath); + if (checksum !== entry.sha256) { + throw new Error(`Release runtime binary checksum mismatch for ${name} at ${binaryPath}.`); + } + + const actualVersion = await readBinaryVersion(binaryPath); + if (actualVersion && actualVersion !== version) { + throw new Error(`Release runtime ${name} version mismatch: expected ${version}, got ${actualVersion}.`); + } + + return binaryPath; + }; + + const readReleaseManifestVersion = async (name: RuntimeAssetName) => { + const { manifest } = await ensureReleaseRuntimeRoot(); + return name === "opencode" ? manifest.opencodeVersion : manifest.routerVersion; + }; + + const ensureBinary = async (name: RuntimeAssetName) => { + const source = resolveSource(); + const version = source === "release" + ? await readReleaseManifestVersion(name) + : name === "opencode" + ? await readPinnedOpencodeVersion() + : await readRouterVersion(); + + const absolutePath = source === "development" + ? name === "opencode" + ? 
await ensureDevelopmentOpencodeBinary(version) + : await ensureDevelopmentRouterBinary(version) + : await ensureReleaseBinary(name, version); + return buildResolvedBinary(source, name, absolutePath, version); + }; + + return { + async ensureOpencodeBinary() { + return ensureBinary("opencode"); + }, + + async ensureRouterBinary() { + return ensureBinary("opencode-router"); + }, + + async getPinnedOpencodeVersion() { + return readPinnedOpencodeVersion(); + }, + + async getRouterVersion() { + return readRouterVersion(); + }, + + getSource() { + return resolveSource(); + }, + + getTarget() { + return runtimeTarget; + }, + + getDevelopmentRoot() { + return developmentRoot; + }, + + getReleaseRoot() { + return releaseRoot; + }, + + async resolveRuntimeBundle(): Promise { + const [opencode, router] = await Promise.all([this.ensureOpencodeBinary(), this.ensureRouterBinary()]); + const manifest = await materializeManifest(opencode.source, opencode, router); + return { + manifest, + opencode, + router, + }; + }, + }; +} diff --git a/apps/server-v2/src/runtime/embedded.ts b/apps/server-v2/src/runtime/embedded.ts new file mode 100644 index 00000000..b1ce2e68 --- /dev/null +++ b/apps/server-v2/src/runtime/embedded.ts @@ -0,0 +1,19 @@ +export type EmbeddedRuntimeBundle = { + manifestPath: string; + opencodePath: string; + routerPath: string; +}; + +declare global { + var __OPENWORK_SERVER_V2_EMBEDDED_RUNTIME__: + | EmbeddedRuntimeBundle + | undefined; +} + +export function registerEmbeddedRuntimeBundle(bundle: EmbeddedRuntimeBundle | undefined) { + globalThis.__OPENWORK_SERVER_V2_EMBEDDED_RUNTIME__ = bundle; +} + +export function getEmbeddedRuntimeBundle() { + return globalThis.__OPENWORK_SERVER_V2_EMBEDDED_RUNTIME__ ?? 
null; +} diff --git a/apps/server-v2/src/runtime/manifest.ts b/apps/server-v2/src/runtime/manifest.ts new file mode 100644 index 00000000..28e341bb --- /dev/null +++ b/apps/server-v2/src/runtime/manifest.ts @@ -0,0 +1,88 @@ +export type RuntimeTarget = + | "darwin-arm64" + | "darwin-x64" + | "linux-arm64" + | "linux-x64" + | "windows-arm64" + | "windows-x64"; + +export type RuntimeAssetName = "opencode" | "opencode-router"; +export type RuntimeAssetSource = "development" | "release"; + +export type RuntimeManifestFile = { + path: string; + sha256: string; + size: number; +}; + +export type RuntimeManifest = { + files: Record; + generatedAt: string; + manifestVersion: 1; + opencodeVersion: string; + rootDir: string; + routerVersion: string; + serverVersion: string; + source: RuntimeAssetSource; + target: RuntimeTarget; +}; + +export type ResolvedRuntimeBinary = { + absolutePath: string; + name: RuntimeAssetName; + sha256: string; + size: number; + source: RuntimeAssetSource; + stagedRoot: string; + target: RuntimeTarget; + version: string; +}; + +export type ResolvedRuntimeBundle = { + manifest: RuntimeManifest; + opencode: ResolvedRuntimeBinary; + router: ResolvedRuntimeBinary; +}; + +export function resolveRuntimeTarget(): RuntimeTarget | null { + if (process.platform === "darwin") { + if (process.arch === "arm64") { + return "darwin-arm64"; + } + if (process.arch === "x64") { + return "darwin-x64"; + } + return null; + } + + if (process.platform === "linux") { + if (process.arch === "arm64") { + return "linux-arm64"; + } + if (process.arch === "x64") { + return "linux-x64"; + } + return null; + } + + if (process.platform === "win32") { + if (process.arch === "arm64") { + return "windows-arm64"; + } + if (process.arch === "x64") { + return "windows-x64"; + } + return null; + } + + return null; +} + +export function runtimeBinaryFilename(name: RuntimeAssetName, target: RuntimeTarget) { + const base = name === "opencode" ? 
"opencode" : "opencode-router"; + return target.startsWith("windows") ? `${base}.exe` : base; +} + +export function resolveBunTarget(target: RuntimeTarget) { + return `bun-${target}`; +} diff --git a/apps/server-v2/src/runtime/output-buffer.ts b/apps/server-v2/src/runtime/output-buffer.ts new file mode 100644 index 00000000..b8703461 --- /dev/null +++ b/apps/server-v2/src/runtime/output-buffer.ts @@ -0,0 +1,127 @@ +export type RuntimeOutputStream = "stdout" | "stderr"; + +export type RuntimeOutputLine = { + at: string; + stream: RuntimeOutputStream; + text: string; +}; + +export type RuntimeOutputSnapshot = { + combined: RuntimeOutputLine[]; + stderr: string[]; + stdout: string[]; + totalLines: number; + truncated: boolean; +}; + +type CreateBoundedOutputCollectorOptions = { + maxBytes?: number; + maxLines?: number; + onLine?: (line: RuntimeOutputLine) => void; +}; + +function byteLength(value: string) { + return Buffer.byteLength(value, "utf8"); +} + +export function createBoundedOutputCollector(options: CreateBoundedOutputCollectorOptions = {}) { + const maxLines = Math.max(1, options.maxLines ?? 200); + const maxBytes = Math.max(256, options.maxBytes ?? 
16_384); + const combined: RuntimeOutputLine[] = []; + const partials: Record = { + stderr: "", + stdout: "", + }; + + let totalBytes = 0; + let truncated = false; + + const appendLine = (stream: RuntimeOutputStream, text: string) => { + const line: RuntimeOutputLine = { + at: new Date().toISOString(), + stream, + text, + }; + + combined.push(line); + totalBytes += byteLength(text); + options.onLine?.(line); + + while (combined.length > maxLines || totalBytes > maxBytes) { + const removed = combined.shift(); + if (!removed) { + break; + } + totalBytes -= byteLength(removed.text); + truncated = true; + } + }; + + const flushPartial = (stream: RuntimeOutputStream) => { + const partial = partials[stream]; + if (!partial) { + return; + } + partials[stream] = ""; + appendLine(stream, partial); + }; + + return { + finish(stream: RuntimeOutputStream) { + flushPartial(stream); + }, + + pushChunk(stream: RuntimeOutputStream, chunk: string) { + if (!chunk) { + return; + } + + let buffer = partials[stream] + chunk; + while (true) { + const newlineIndex = buffer.search(/\r?\n/); + if (newlineIndex < 0) { + break; + } + + const newlineWidth = buffer[newlineIndex] === "\r" && buffer[newlineIndex + 1] === "\n" ? 
2 : 1; + const line = buffer.slice(0, newlineIndex); + appendLine(stream, line); + buffer = buffer.slice(newlineIndex + newlineWidth); + } + + partials[stream] = buffer; + }, + + snapshot(): RuntimeOutputSnapshot { + const stdout: string[] = []; + const stderr: string[] = []; + for (const line of combined) { + if (line.stream === "stdout") { + stdout.push(line.text); + } else { + stderr.push(line.text); + } + } + + return { + combined: combined.map((line) => ({ ...line })), + stderr, + stdout, + totalLines: combined.length, + truncated, + }; + }, + }; +} + +export function formatRuntimeOutput(snapshot: RuntimeOutputSnapshot) { + if (snapshot.combined.length === 0) { + return "(no child output captured)"; + } + + const lines = snapshot.combined.map((line) => `${line.stream}: ${line.text}`); + if (snapshot.truncated) { + lines.unshift("(bounded output buffer truncated older lines)"); + } + return lines.join("\n"); +} diff --git a/apps/server-v2/src/schemas/common.ts b/apps/server-v2/src/schemas/common.ts new file mode 100644 index 00000000..77874629 --- /dev/null +++ b/apps/server-v2/src/schemas/common.ts @@ -0,0 +1,29 @@ +import { z } from "zod"; + +export const requestIdSchema = z.string().min(1).meta({ ref: "OpenWorkServerV2RequestId" }); + +export const identifierSchema = z.string().min(1).max(200).meta({ ref: "OpenWorkServerV2Identifier" }); + +export const isoTimestampSchema = z.string().datetime({ offset: true }).meta({ ref: "OpenWorkServerV2IsoTimestamp" }); + +export const responseMetaSchema = z.object({ + requestId: requestIdSchema, + timestamp: isoTimestampSchema, +}).meta({ ref: "OpenWorkServerV2ResponseMeta" }); + +export const workspaceIdParamsSchema = z.object({ + workspaceId: identifierSchema.describe("Stable OpenWork workspace identifier."), +}).meta({ ref: "OpenWorkServerV2WorkspaceIdParams" }); + +export const paginationQuerySchema = z.object({ + cursor: z.string().min(1).optional(), + limit: z.coerce.number().int().min(1).max(100).optional(), 
+}).meta({ ref: "OpenWorkServerV2PaginationQuery" }); + +export function successResponseSchema(ref: string, data: TSchema) { + return z.object({ + ok: z.literal(true), + data, + meta: responseMetaSchema, + }).meta({ ref }); +} diff --git a/apps/server-v2/src/schemas/config.ts b/apps/server-v2/src/schemas/config.ts new file mode 100644 index 00000000..9c8bba71 --- /dev/null +++ b/apps/server-v2/src/schemas/config.ts @@ -0,0 +1,57 @@ +import { z } from "zod"; +import { identifierSchema, successResponseSchema, workspaceIdParamsSchema } from "./common.js"; + +const jsonRecordSchema = z.record(z.string(), z.unknown()); + +export const workspaceConfigSnapshotSchema = z.object({ + effective: z.object({ + opencode: jsonRecordSchema, + openwork: jsonRecordSchema, + }), + materialized: z.object({ + compatibilityOpencodePath: z.string().nullable(), + compatibilityOpenworkPath: z.string().nullable(), + configDir: z.string().nullable(), + configOpencodePath: z.string().nullable(), + configOpenworkPath: z.string().nullable(), + }), + stored: z.object({ + opencode: jsonRecordSchema, + openwork: jsonRecordSchema, + }), + updatedAt: z.string(), + workspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceConfigSnapshot" }); + +export const workspaceConfigPatchRequestSchema = z.object({ + opencode: jsonRecordSchema.optional(), + openwork: jsonRecordSchema.optional(), +}).meta({ ref: "OpenWorkServerV2WorkspaceConfigPatchRequest" }); + +export const rawOpencodeConfigQuerySchema = z.object({ + scope: z.enum(["global", "project"]).optional(), +}).meta({ ref: "OpenWorkServerV2RawOpencodeConfigQuery" }); + +export const rawOpencodeConfigWriteRequestSchema = z.object({ + content: z.string(), + scope: z.enum(["global", "project"]).optional(), +}).meta({ ref: "OpenWorkServerV2RawOpencodeConfigWriteRequest" }); + +export const rawOpencodeConfigDataSchema = z.object({ + content: z.string(), + exists: z.boolean(), + path: z.string().nullable(), + updatedAt: z.string(), +}).meta({ 
ref: "OpenWorkServerV2RawOpencodeConfigData" }); + +export const workspaceConfigResponseSchema = successResponseSchema( + "OpenWorkServerV2WorkspaceConfigResponse", + workspaceConfigSnapshotSchema, +); + +export const rawOpencodeConfigResponseSchema = successResponseSchema( + "OpenWorkServerV2RawOpencodeConfigResponse", + rawOpencodeConfigDataSchema, +); + +export const rawOpencodeConfigParamsSchema = workspaceIdParamsSchema.meta({ ref: "OpenWorkServerV2RawOpencodeConfigParams" }); diff --git a/apps/server-v2/src/schemas/errors.ts b/apps/server-v2/src/schemas/errors.ts new file mode 100644 index 00000000..2feb06b7 --- /dev/null +++ b/apps/server-v2/src/schemas/errors.ts @@ -0,0 +1,48 @@ +import { z } from "zod"; +import { requestIdSchema } from "./common.js"; + +export const errorDetailSchema = z.object({ + message: z.string(), + path: z.array(z.union([z.string(), z.number()])).optional(), +}).meta({ ref: "OpenWorkServerV2ErrorDetail" }); + +const baseErrorSchema = z.object({ + message: z.string(), + requestId: requestIdSchema, + details: z.array(errorDetailSchema).optional(), +}); + +export const invalidRequestErrorSchema = z.object({ + ok: z.literal(false), + error: baseErrorSchema.extend({ + code: z.literal("invalid_request"), + }), +}).meta({ ref: "OpenWorkServerV2InvalidRequestError" }); + +export const unauthorizedErrorSchema = z.object({ + ok: z.literal(false), + error: baseErrorSchema.extend({ + code: z.literal("unauthorized"), + }), +}).meta({ ref: "OpenWorkServerV2UnauthorizedError" }); + +export const forbiddenErrorSchema = z.object({ + ok: z.literal(false), + error: baseErrorSchema.extend({ + code: z.literal("forbidden"), + }), +}).meta({ ref: "OpenWorkServerV2ForbiddenError" }); + +export const notFoundErrorSchema = z.object({ + ok: z.literal(false), + error: baseErrorSchema.extend({ + code: z.literal("not_found"), + }), +}).meta({ ref: "OpenWorkServerV2NotFoundError" }); + +export const internalErrorSchema = z.object({ + ok: z.literal(false), + error: 
baseErrorSchema.extend({ + code: z.literal("internal_error"), + }), +}).meta({ ref: "OpenWorkServerV2InternalError" }); diff --git a/apps/server-v2/src/schemas/files.ts b/apps/server-v2/src/schemas/files.ts new file mode 100644 index 00000000..51854953 --- /dev/null +++ b/apps/server-v2/src/schemas/files.ts @@ -0,0 +1,191 @@ +import { z } from "zod"; +import { identifierSchema, successResponseSchema, workspaceIdParamsSchema } from "./common.js"; + +const fileSessionIdParamsSchema = workspaceIdParamsSchema.extend({ + fileSessionId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2FileSessionIdParams" }); + +const jsonRecordSchema = z.record(z.string(), z.unknown()); + +export const workspaceActivationDataSchema = z.object({ + activeWorkspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceActivationData" }); + +export const engineReloadDataSchema = z.object({ + reloadedAt: z.number().int().nonnegative(), +}).meta({ ref: "OpenWorkServerV2EngineReloadData" }); + +export const workspaceDeleteDataSchema = z.object({ + deleted: z.boolean(), + workspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceDeleteData" }); + +export const workspaceDisposeDataSchema = z.object({ + disposed: z.boolean(), + workspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceDisposeData" }); + +export const workspaceCreateLocalRequestSchema = z.object({ + folderPath: z.string().min(1), + name: z.string().min(1), + preset: z.string().min(1).optional(), +}).meta({ ref: "OpenWorkServerV2WorkspaceCreateLocalRequest" }); + +export const reloadEventSchema = z.object({ + id: identifierSchema, + reason: z.enum(["agents", "commands", "config", "mcp", "plugins", "skills"]), + seq: z.number().int().nonnegative(), + timestamp: z.number().int().nonnegative(), + trigger: z.object({ + action: z.enum(["added", "removed", "updated"]).optional(), + name: z.string().optional(), + path: z.string().optional(), + type: z.enum(["agent", "command", "config", "mcp", 
"plugin", "skill"]), + }).optional(), + workspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2ReloadEvent" }); + +export const reloadEventsDataSchema = z.object({ + cursor: z.number().int().nonnegative(), + items: z.array(reloadEventSchema), +}).meta({ ref: "OpenWorkServerV2ReloadEventsData" }); + +export const fileSessionCreateRequestSchema = z.object({ + ttlSeconds: z.number().positive().optional(), + write: z.boolean().optional(), +}).meta({ ref: "OpenWorkServerV2FileSessionCreateRequest" }); + +export const fileSessionDataSchema = z.object({ + canWrite: z.boolean(), + createdAt: z.number().int().nonnegative(), + expiresAt: z.number().int().nonnegative(), + id: identifierSchema, + ttlMs: z.number().int().nonnegative(), + workspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2FileSessionData" }); + +export const fileCatalogSnapshotSchema = z.object({ + cursor: z.number().int().nonnegative(), + generatedAt: z.number().int().nonnegative(), + items: z.array(z.object({ + kind: z.enum(["dir", "file"]), + mtimeMs: z.number(), + path: z.string(), + revision: z.string(), + size: z.number().int().nonnegative(), + })), + nextAfter: z.string().optional(), + sessionId: identifierSchema, + total: z.number().int().nonnegative(), + truncated: z.boolean(), + workspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2FileCatalogSnapshot" }); + +export const fileBatchReadRequestSchema = z.object({ + paths: z.array(z.string()).min(1), +}).meta({ ref: "OpenWorkServerV2FileBatchReadRequest" }); + +export const fileBatchReadResponseSchema = successResponseSchema( + "OpenWorkServerV2FileBatchReadResponse", + z.object({ items: z.array(jsonRecordSchema) }), +); + +export const fileBatchWriteRequestSchema = z.object({ + writes: z.array(jsonRecordSchema).min(1), +}).meta({ ref: "OpenWorkServerV2FileBatchWriteRequest" }); + +export const fileOperationsRequestSchema = z.object({ + operations: z.array(jsonRecordSchema).min(1), +}).meta({ ref: 
"OpenWorkServerV2FileOperationsRequest" }); + +export const fileMutationResultSchema = successResponseSchema( + "OpenWorkServerV2FileMutationResult", + z.object({ + cursor: z.number().int().nonnegative(), + items: z.array(jsonRecordSchema), + }), +); + +export const simpleContentQuerySchema = z.object({ + path: z.string().min(1), +}).meta({ ref: "OpenWorkServerV2SimpleContentQuery" }); + +export const simpleContentWriteRequestSchema = z.object({ + baseUpdatedAt: z.number().nullable().optional(), + content: z.string(), + force: z.boolean().optional(), + path: z.string().min(1), +}).meta({ ref: "OpenWorkServerV2SimpleContentWriteRequest" }); + +export const simpleContentDataSchema = z.object({ + bytes: z.number().int().nonnegative(), + content: z.string(), + path: z.string(), + revision: z.string().optional(), + updatedAt: z.number(), +}).meta({ ref: "OpenWorkServerV2SimpleContentData" }); + +export const binaryItemSchema = z.object({ + id: z.string(), + name: z.string().optional(), + path: z.string(), + size: z.number().int().nonnegative(), + updatedAt: z.number(), +}).meta({ ref: "OpenWorkServerV2BinaryItem" }); + +export const binaryListResponseSchema = successResponseSchema( + "OpenWorkServerV2BinaryListResponse", + z.object({ items: z.array(binaryItemSchema) }), +); + +export const binaryUploadDataSchema = z.object({ + bytes: z.number().int().nonnegative(), + path: z.string(), +}).meta({ ref: "OpenWorkServerV2BinaryUploadData" }); + +export const workspaceActivationResponseSchema = successResponseSchema( + "OpenWorkServerV2WorkspaceActivationResponse", + workspaceActivationDataSchema, +); + +export const engineReloadResponseSchema = successResponseSchema( + "OpenWorkServerV2EngineReloadResponse", + engineReloadDataSchema, +); + +export const workspaceDeleteResponseSchema = successResponseSchema( + "OpenWorkServerV2WorkspaceDeleteResponse", + workspaceDeleteDataSchema, +); + +export const workspaceDisposeResponseSchema = successResponseSchema( + 
"OpenWorkServerV2WorkspaceDisposeResponse", + workspaceDisposeDataSchema, +); + +export const reloadEventsResponseSchema = successResponseSchema( + "OpenWorkServerV2ReloadEventsResponse", + reloadEventsDataSchema, +); + +export const fileSessionResponseSchema = successResponseSchema( + "OpenWorkServerV2FileSessionResponse", + fileSessionDataSchema, +); + +export const fileCatalogSnapshotResponseSchema = successResponseSchema( + "OpenWorkServerV2FileCatalogSnapshotResponse", + fileCatalogSnapshotSchema, +); + +export const simpleContentResponseSchema = successResponseSchema( + "OpenWorkServerV2SimpleContentResponse", + simpleContentDataSchema, +); + +export const binaryUploadResponseSchema = successResponseSchema( + "OpenWorkServerV2BinaryUploadResponse", + binaryUploadDataSchema, +); + +export { fileSessionIdParamsSchema }; diff --git a/apps/server-v2/src/schemas/managed.ts b/apps/server-v2/src/schemas/managed.ts new file mode 100644 index 00000000..68e92337 --- /dev/null +++ b/apps/server-v2/src/schemas/managed.ts @@ -0,0 +1,300 @@ +import { z } from "zod"; +import { identifierSchema, isoTimestampSchema, successResponseSchema, workspaceIdParamsSchema } from "./common.js"; + +const jsonObjectSchema = z.record(z.string(), z.unknown()); + +export const managedKindSchema = z.enum(["mcps", "plugins", "providerConfigs", "skills"]); + +export const managedItemSchema = z.object({ + auth: jsonObjectSchema.nullable(), + cloudItemId: z.string().nullable(), + config: jsonObjectSchema, + createdAt: isoTimestampSchema, + displayName: z.string(), + id: identifierSchema, + key: z.string().nullable(), + metadata: jsonObjectSchema.nullable(), + source: z.enum(["cloud_synced", "discovered", "imported", "openwork_managed"]), + updatedAt: isoTimestampSchema, + workspaceIds: z.array(identifierSchema), +}).meta({ ref: "OpenWorkServerV2ManagedItem" }); + +export const managedItemWriteSchema = z.object({ + auth: jsonObjectSchema.nullable().optional(), + cloudItemId: 
z.string().nullable().optional(), + config: jsonObjectSchema.optional(), + displayName: z.string(), + key: z.string().nullable().optional(), + metadata: jsonObjectSchema.nullable().optional(), + source: z.enum(["cloud_synced", "discovered", "imported", "openwork_managed"]).optional(), + workspaceIds: z.array(identifierSchema).optional(), +}).meta({ ref: "OpenWorkServerV2ManagedItemWrite" }); + +export const managedAssignmentWriteSchema = z.object({ + workspaceIds: z.array(identifierSchema), +}).meta({ ref: "OpenWorkServerV2ManagedAssignmentWrite" }); + +export const managedItemListResponseSchema = successResponseSchema( + "OpenWorkServerV2ManagedItemListResponse", + z.object({ items: z.array(managedItemSchema) }), +); +export const managedItemResponseSchema = successResponseSchema("OpenWorkServerV2ManagedItemResponse", managedItemSchema); +export const managedDeleteResponseSchema = successResponseSchema( + "OpenWorkServerV2ManagedDeleteResponse", + z.object({ deleted: z.boolean(), id: identifierSchema }), +); + +export const workspaceMcpItemSchema = z.object({ + config: jsonObjectSchema, + disabledByTools: z.boolean().optional(), + name: z.string(), + source: z.enum(["config.global", "config.project", "config.remote"]), +}).meta({ ref: "OpenWorkServerV2WorkspaceMcpItem" }); +export const workspaceMcpListResponseSchema = successResponseSchema( + "OpenWorkServerV2WorkspaceMcpListResponse", + z.object({ items: z.array(workspaceMcpItemSchema) }), +); +export const workspaceMcpWriteSchema = z.object({ + config: jsonObjectSchema, + name: z.string(), +}).meta({ ref: "OpenWorkServerV2WorkspaceMcpWrite" }); + +export const workspacePluginItemSchema = z.object({ + path: z.string().optional(), + scope: z.enum(["global", "project"]), + source: z.enum(["config", "dir.project", "dir.global"]), + spec: z.string(), +}).meta({ ref: "OpenWorkServerV2WorkspacePluginItem" }); +export const workspacePluginListResponseSchema = successResponseSchema( + 
"OpenWorkServerV2WorkspacePluginListResponse", + z.object({ items: z.array(workspacePluginItemSchema), loadOrder: z.array(z.string()) }), +); +export const workspacePluginWriteSchema = z.object({ spec: z.string() }).meta({ ref: "OpenWorkServerV2WorkspacePluginWrite" }); + +export const scheduledJobRunSchema = z.object({ + agent: z.string().optional(), + arguments: z.string().optional(), + attachUrl: z.string().optional(), + command: z.string().optional(), + continue: z.boolean().optional(), + files: z.array(z.string()).optional(), + model: z.string().optional(), + port: z.number().int().optional(), + prompt: z.string().optional(), + runFormat: z.string().optional(), + session: z.string().optional(), + share: z.boolean().optional(), + timeoutSeconds: z.number().int().optional(), + title: z.string().optional(), + variant: z.string().optional(), +}).meta({ ref: "OpenWorkServerV2ScheduledJobRun" }); + +export const scheduledJobSchema = z.object({ + attachUrl: z.string().optional(), + createdAt: isoTimestampSchema, + invocation: z.object({ args: z.array(z.string()), command: z.string() }).optional(), + lastRunAt: isoTimestampSchema.optional(), + lastRunError: z.string().optional(), + lastRunExitCode: z.number().int().optional(), + lastRunSource: z.string().optional(), + lastRunStatus: z.string().optional(), + name: z.string(), + prompt: z.string().optional(), + run: scheduledJobRunSchema.optional(), + schedule: z.string(), + scopeId: z.string().optional(), + slug: z.string(), + source: z.string().optional(), + timeoutSeconds: z.number().int().optional(), + updatedAt: isoTimestampSchema.optional(), + workdir: z.string().optional(), +}).meta({ ref: "OpenWorkServerV2ScheduledJob" }); + +export const scheduledJobListResponseSchema = successResponseSchema( + "OpenWorkServerV2ScheduledJobListResponse", + z.object({ items: z.array(scheduledJobSchema) }), +); + +export const scheduledJobDeleteResponseSchema = successResponseSchema( + 
"OpenWorkServerV2ScheduledJobDeleteResponse", + z.object({ job: scheduledJobSchema }), +); + +export const workspaceSkillItemSchema = z.object({ + description: z.string(), + name: z.string(), + path: z.string(), + scope: z.enum(["global", "project"]), + trigger: z.string().optional(), +}).meta({ ref: "OpenWorkServerV2WorkspaceSkillItem" }); +export const workspaceSkillContentSchema = z.object({ + content: z.string(), + item: workspaceSkillItemSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceSkillContent" }); +export const workspaceSkillListResponseSchema = successResponseSchema( + "OpenWorkServerV2WorkspaceSkillListResponse", + z.object({ items: z.array(workspaceSkillItemSchema) }), +); +export const workspaceSkillResponseSchema = successResponseSchema("OpenWorkServerV2WorkspaceSkillResponse", workspaceSkillContentSchema); +export const workspaceSkillWriteSchema = z.object({ + content: z.string(), + description: z.string().optional(), + name: z.string(), + trigger: z.string().optional(), +}).meta({ ref: "OpenWorkServerV2WorkspaceSkillWrite" }); +export const workspaceSkillDeleteResponseSchema = successResponseSchema( + "OpenWorkServerV2WorkspaceSkillDeleteResponse", + z.object({ path: z.string() }), +); + +export const hubRepoSchema = z.object({ + owner: z.string().optional(), + ref: z.string().optional(), + repo: z.string().optional(), +}).meta({ ref: "OpenWorkServerV2HubRepo" }); +export const hubSkillItemSchema = z.object({ + description: z.string(), + name: z.string(), + source: z.object({ owner: z.string(), path: z.string(), ref: z.string(), repo: z.string() }), + trigger: z.string().optional(), +}).meta({ ref: "OpenWorkServerV2HubSkillItem" }); +export const hubSkillListResponseSchema = successResponseSchema( + "OpenWorkServerV2HubSkillListResponse", + z.object({ items: z.array(hubSkillItemSchema) }), +); +export const hubSkillInstallWriteSchema = z.object({ + overwrite: z.boolean().optional(), + repo: hubRepoSchema.optional(), +}).meta({ ref: 
"OpenWorkServerV2HubSkillInstallWrite" }); +export const hubSkillInstallResponseSchema = successResponseSchema( + "OpenWorkServerV2HubSkillInstallResponse", + z.object({ + action: z.enum(["added", "updated"]), + name: z.string(), + path: z.string(), + skipped: z.number().int().nonnegative(), + written: z.number().int().nonnegative(), + }), +); + +export const cloudSigninSchema = z.object({ + auth: jsonObjectSchema.nullable(), + cloudBaseUrl: z.string(), + createdAt: isoTimestampSchema, + id: identifierSchema, + lastValidatedAt: isoTimestampSchema.nullable(), + metadata: jsonObjectSchema.nullable(), + orgId: z.string().nullable(), + serverId: identifierSchema, + updatedAt: isoTimestampSchema, + userId: z.string().nullable(), +}).meta({ ref: "OpenWorkServerV2CloudSignin" }); +export const cloudSigninWriteSchema = z.object({ + auth: jsonObjectSchema.nullable().optional(), + cloudBaseUrl: z.string(), + metadata: jsonObjectSchema.nullable().optional(), + orgId: z.string().nullable().optional(), + userId: z.string().nullable().optional(), +}).meta({ ref: "OpenWorkServerV2CloudSigninWrite" }); +export const cloudSigninResponseSchema = successResponseSchema("OpenWorkServerV2CloudSigninResponse", cloudSigninSchema.nullable()); +export const cloudSigninValidationResponseSchema = successResponseSchema( + "OpenWorkServerV2CloudSigninValidationResponse", + z.object({ lastValidatedAt: isoTimestampSchema.nullable(), ok: z.boolean(), record: cloudSigninSchema }), +); + +export const workspaceShareSchema = z.object({ + accessKey: z.string().nullable(), + audit: jsonObjectSchema.nullable(), + createdAt: isoTimestampSchema, + id: identifierSchema, + lastUsedAt: isoTimestampSchema.nullable(), + revokedAt: isoTimestampSchema.nullable(), + status: z.enum(["active", "disabled", "revoked"]), + updatedAt: isoTimestampSchema, + workspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceShare" }); +export const workspaceShareResponseSchema = 
successResponseSchema("OpenWorkServerV2WorkspaceShareResponse", workspaceShareSchema.nullable()); + +export const workspaceExportWarningSchema = z.object({ + detail: z.string(), + id: z.string(), + label: z.string(), +}).meta({ ref: "OpenWorkServerV2WorkspaceExportWarning" }); +export const workspaceExportDataSchema = z.object({ + commands: z.array(z.object({ description: z.string().optional(), name: z.string(), template: z.string() })), + exportedAt: z.number().int().nonnegative(), + files: z.array(z.object({ content: z.string(), path: z.string() })).optional(), + openwork: jsonObjectSchema, + opencode: jsonObjectSchema, + skills: z.array(z.object({ content: z.string(), description: z.string().optional(), name: z.string(), trigger: z.string().optional() })), + workspaceId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceExportData" }); +export const workspaceExportResponseSchema = successResponseSchema("OpenWorkServerV2WorkspaceExportResponse", workspaceExportDataSchema); +export const workspaceImportWriteSchema = z.record(z.string(), z.unknown()).meta({ ref: "OpenWorkServerV2WorkspaceImportWrite" }); +export const workspaceImportResponseSchema = successResponseSchema("OpenWorkServerV2WorkspaceImportResponse", z.object({ ok: z.boolean() })); + +export const sharedBundlePublishWriteSchema = z.object({ + bundleType: z.string(), + name: z.string().optional(), + payload: z.unknown(), + timeoutMs: z.number().int().positive().optional(), +}).meta({ ref: "OpenWorkServerV2SharedBundlePublishWrite" }); +export const sharedBundleFetchWriteSchema = z.object({ + bundleUrl: z.string(), + timeoutMs: z.number().int().positive().optional(), +}).meta({ ref: "OpenWorkServerV2SharedBundleFetchWrite" }); +export const sharedBundlePublishResponseSchema = successResponseSchema( + "OpenWorkServerV2SharedBundlePublishResponse", + z.object({ url: z.string() }), +); +export const sharedBundleFetchResponseSchema = successResponseSchema( + 
"OpenWorkServerV2SharedBundleFetchResponse", + z.record(z.string(), z.unknown()), +); + +export const routerIdentityItemSchema = z.object({ + access: z.enum(["private", "public"]).optional(), + enabled: z.boolean(), + id: z.string(), + pairingRequired: z.boolean().optional(), + running: z.boolean(), +}).meta({ ref: "OpenWorkServerV2RouterIdentityItem" }); +export const routerHealthSnapshotSchema = z.object({ + config: z.object({ groupsEnabled: z.boolean() }), + channels: z.object({ slack: z.boolean(), telegram: z.boolean(), whatsapp: z.boolean() }), + ok: z.boolean(), + opencode: z.object({ healthy: z.boolean(), url: z.string(), version: z.string().optional() }), +}).meta({ ref: "OpenWorkServerV2RouterHealthSnapshot" }); +export const routerIdentityListResponseSchema = successResponseSchema( + "OpenWorkServerV2RouterIdentityListResponse", + z.object({ items: z.array(routerIdentityItemSchema), ok: z.boolean() }), +); +export const routerTelegramInfoResponseSchema = successResponseSchema( + "OpenWorkServerV2RouterTelegramInfoResponse", + z.object({ + bot: z.object({ id: z.number().int(), name: z.string().optional(), username: z.string().optional() }).nullable(), + configured: z.boolean(), + enabled: z.boolean(), + ok: z.boolean(), + }), +); +export const routerHealthResponseSchemaCompat = successResponseSchema("OpenWorkServerV2RouterHealthCompatResponse", routerHealthSnapshotSchema); +export const routerTelegramWriteSchema = z.object({ access: z.enum(["private", "public"]).optional(), enabled: z.boolean().optional(), id: z.string().optional(), token: z.string() }).meta({ ref: "OpenWorkServerV2RouterTelegramWrite" }); +export const routerSlackWriteSchema = z.object({ appToken: z.string(), botToken: z.string(), enabled: z.boolean().optional(), id: z.string().optional() }).meta({ ref: "OpenWorkServerV2RouterSlackWrite" }); +export const routerBindingWriteSchema = z.object({ channel: z.enum(["slack", "telegram"]), directory: z.string().optional(), identityId: 
z.string().optional(), peerId: z.string() }).meta({ ref: "OpenWorkServerV2RouterBindingWrite" }); +export const routerBindingListResponseSchema = successResponseSchema( + "OpenWorkServerV2RouterBindingListResponse", + z.object({ + items: z.array(z.object({ channel: z.string(), directory: z.string(), identityId: z.string(), peerId: z.string(), updatedAt: z.number().int().optional() })), + ok: z.boolean(), + }), +); +export const routerSendWriteSchema = z.object({ autoBind: z.boolean().optional(), channel: z.enum(["slack", "telegram"]), directory: z.string().optional(), identityId: z.string().optional(), peerId: z.string().optional(), text: z.string() }).meta({ ref: "OpenWorkServerV2RouterSendWrite" }); +export const routerMutationResponseSchema = successResponseSchema( + "OpenWorkServerV2RouterMutationResponse", + z.record(z.string(), z.unknown()), +); + +export const managedItemIdParamsSchema = z.object({ itemId: identifierSchema }).meta({ ref: "OpenWorkServerV2ManagedItemIdParams" }); +export const workspaceNamedItemParamsSchema = workspaceIdParamsSchema.extend({ name: z.string() }).meta({ ref: "OpenWorkServerV2WorkspaceNamedItemParams" }); +export const workspaceIdentityParamsSchema = workspaceIdParamsSchema.extend({ identityId: identifierSchema }).meta({ ref: "OpenWorkServerV2WorkspaceIdentityParams" }); diff --git a/apps/server-v2/src/schemas/registry.ts b/apps/server-v2/src/schemas/registry.ts new file mode 100644 index 00000000..22b769e3 --- /dev/null +++ b/apps/server-v2/src/schemas/registry.ts @@ -0,0 +1,261 @@ +import { z } from "zod"; +import { identifierSchema, isoTimestampSchema, successResponseSchema } from "./common.js"; + +const jsonObjectSchema = z.record(z.string(), z.unknown()); + +export const authSummarySchema = z.object({ + actorKind: z.enum(["anonymous", "client", "host"]), + configured: z.object({ + clientToken: z.boolean(), + hostToken: z.boolean(), + }), + headers: z.object({ + authorization: z.literal("Authorization"), + hostToken: 
z.literal("X-OpenWork-Host-Token"), + }), + required: z.boolean(), + scopes: z.object({ + hiddenWorkspaceReads: z.literal("host"), + serverInventory: z.literal("host"), + visibleRead: z.literal("client_or_host"), + }), +}).meta({ ref: "OpenWorkServerV2AuthSummary" }); + +export const serverInventoryItemSchema = z.object({ + auth: z.object({ + configured: z.boolean(), + scheme: z.enum(["bearer", "none"]), + }), + baseUrl: z.string().nullable(), + capabilities: jsonObjectSchema, + hostingKind: z.enum(["desktop", "self_hosted", "cloud"]), + id: identifierSchema, + isEnabled: z.boolean(), + isLocal: z.boolean(), + kind: z.enum(["local", "remote"]), + label: z.string(), + lastSeenAt: isoTimestampSchema.nullable(), + source: z.string(), + updatedAt: isoTimestampSchema, +}).meta({ ref: "OpenWorkServerV2ServerInventoryItem" }); + +export const registrySummarySchema = z.object({ + hiddenWorkspaceCount: z.number().int().nonnegative(), + localServerId: identifierSchema, + remoteServerCount: z.number().int().nonnegative(), + totalServers: z.number().int().nonnegative(), + visibleWorkspaceCount: z.number().int().nonnegative(), +}).meta({ ref: "OpenWorkServerV2RegistrySummary" }); + +export const capabilitiesDataSchema = z.object({ + auth: authSummarySchema, + bundles: z.object({ + fetch: z.literal(true), + publish: z.literal(true), + workspaceExport: z.literal(true), + workspaceImport: z.literal(true), + }), + cloud: z.object({ + persistence: z.literal(true), + validation: z.literal(true), + }), + config: z.object({ + projection: z.literal(true), + rawRead: z.literal(true), + rawWrite: z.literal(true), + read: z.literal(true), + write: z.literal(true), + }), + files: z.object({ + artifacts: z.literal(true), + contentRoutes: z.literal(true), + fileSessions: z.literal(true), + inbox: z.literal(true), + mutations: z.literal(true), + }), + managed: z.object({ + assignments: z.literal(true), + mcps: z.literal(true), + plugins: z.literal(true), + providerConfigs: z.literal(true), + 
skills: z.literal(true), + }), + reload: z.object({ + manualEngineReload: z.literal(true), + reconciliation: z.literal(true), + watch: z.literal(true), + workspaceEvents: z.literal(true), + }), + registry: z.object({ + backendResolution: z.literal(true), + remoteServerConnections: z.literal(true), + remoteWorkspaceSync: z.literal(true), + hiddenWorkspaceFiltering: z.literal(true), + serverInventory: z.literal(true), + workspaceDetail: z.literal(true), + workspaceList: z.literal(true), + }), + sessions: z.object({ + events: z.literal(true), + list: z.literal(true), + messages: z.literal(true), + mutations: z.literal(true), + promptAsync: z.literal(true), + revertHistory: z.literal(true), + }), + runtime: z.object({ + opencodeHealth: z.literal(true), + routerHealth: z.literal(true), + runtimeSummary: z.literal(true), + runtimeUpgrade: z.literal(true), + runtimeVersions: z.literal(true), + }), + router: z.object({ + bindings: z.literal(true), + identities: z.literal(true), + outboundSend: z.literal(true), + productRoutes: z.literal(true), + }), + shares: z.object({ + workspaceScoped: z.literal(true), + }), + workspaces: z.object({ + activate: z.literal(true), + createLocal: z.literal(true), + }), + transport: z.object({ + rootMounted: z.literal(true), + sdkPackage: z.literal("@openwork/server-sdk"), + v2: z.literal(true), + }), +}).meta({ ref: "OpenWorkServerV2CapabilitiesData" }); + +const workspaceBackendSchema = z.object({ + kind: z.enum(["local_opencode", "remote_openwork"]), + local: z.object({ + configDir: z.string().nullable(), + dataDir: z.string().nullable(), + opencodeProjectId: z.string().nullable(), + }).nullable(), + remote: z.object({ + directory: z.string().nullable(), + hostUrl: z.string().nullable(), + remoteType: z.enum(["openwork", "opencode"]), + remoteWorkspaceId: z.string().nullable(), + workspaceName: z.string().nullable(), + }).nullable(), + serverId: identifierSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceBackend" }); + +const 
workspaceRuntimeSummarySchema = z.object({ + backendKind: z.enum(["local_opencode", "remote_openwork"]), + health: jsonObjectSchema.nullable(), + lastError: jsonObjectSchema.nullable(), + lastSessionRefreshAt: isoTimestampSchema.nullable(), + lastSyncAt: isoTimestampSchema.nullable(), + updatedAt: isoTimestampSchema.nullable(), +}).meta({ ref: "OpenWorkServerV2WorkspaceRuntimeSummary" }); + +export const workspaceSummaryDataSchema = z.object({ + backend: workspaceBackendSchema, + createdAt: isoTimestampSchema, + displayName: z.string(), + hidden: z.boolean(), + id: identifierSchema, + kind: z.enum(["local", "remote", "control", "help"]), + preset: z.enum(["minimal", "remote", "starter"]), + runtime: workspaceRuntimeSummarySchema, + server: serverInventoryItemSchema, + slug: z.string(), + status: z.enum(["ready", "imported", "attention"]), + updatedAt: isoTimestampSchema, +}).meta({ ref: "OpenWorkServerV2WorkspaceSummaryData" }); + +export const workspaceDetailDataSchema = workspaceSummaryDataSchema.extend({ + notes: jsonObjectSchema.nullable(), +}).meta({ ref: "OpenWorkServerV2WorkspaceDetailData" }); + +export const workspaceListDataSchema = z.object({ + items: z.array(workspaceSummaryDataSchema), +}).meta({ ref: "OpenWorkServerV2WorkspaceListData" }); + +export const serverInventoryListDataSchema = z.object({ + items: z.array(serverInventoryItemSchema), +}).meta({ ref: "OpenWorkServerV2ServerInventoryListData" }); + +export const remoteServerConnectRequestSchema = z.object({ + baseUrl: z.string().min(1), + directory: z.string().nullable().optional(), + hostToken: z.string().nullable().optional(), + label: z.string().nullable().optional(), + token: z.string().nullable().optional(), + workspaceId: z.string().nullable().optional(), +}).meta({ ref: "OpenWorkServerV2RemoteServerConnectRequest" }); + +export const remoteServerSyncRequestSchema = z.object({ + directory: z.string().nullable().optional(), + workspaceId: z.string().nullable().optional(), +}).meta({ ref: 
"OpenWorkServerV2RemoteServerSyncRequest" }); + +export const remoteServerConnectDataSchema = z.object({ + selectedWorkspaceId: identifierSchema.nullable(), + server: serverInventoryItemSchema, + workspaces: z.array(workspaceSummaryDataSchema), +}).meta({ ref: "OpenWorkServerV2RemoteServerConnectData" }); + +export const systemStatusDataSchema = z.object({ + auth: authSummarySchema, + capabilities: capabilitiesDataSchema, + database: z.object({ + bootstrapMode: z.enum(["fresh", "existing"]), + configured: z.literal(true), + importWarnings: z.number().int().nonnegative(), + kind: z.literal("sqlite"), + migrations: z.object({ + appliedThisRun: z.array(z.string()), + currentVersion: z.string(), + totalApplied: z.number().int().nonnegative(), + }), + path: z.string(), + phaseOwner: z.literal(2), + status: z.enum(["ready", "warning"]), + summary: z.string(), + workingDirectory: z.string(), + }), + environment: z.string(), + registry: registrySummarySchema, + runtime: z.object({ + opencode: z.object({ + baseUrl: z.string().nullable(), + running: z.boolean(), + status: z.enum(["crashed", "disabled", "error", "restart_scheduled", "running", "starting", "stopped"]), + version: z.string().nullable(), + }), + router: z.object({ + baseUrl: z.string().nullable(), + running: z.boolean(), + status: z.enum(["crashed", "disabled", "error", "restart_scheduled", "running", "starting", "stopped"]), + version: z.string().nullable(), + }), + source: z.enum(["development", "release"]), + target: z.enum(["darwin-arm64", "darwin-x64", "linux-arm64", "linux-x64", "windows-arm64", "windows-x64"]), + }), + service: z.literal("openwork-server-v2"), + startedAt: isoTimestampSchema, + status: z.literal("ok"), + uptimeMs: z.number().int().nonnegative(), + version: z.string(), +}).meta({ ref: "OpenWorkServerV2SystemStatusData" }); + +export const capabilitiesResponseSchema = successResponseSchema("OpenWorkServerV2CapabilitiesResponse", capabilitiesDataSchema); +export const 
serverInventoryListResponseSchema = successResponseSchema( + "OpenWorkServerV2ServerInventoryListResponse", + serverInventoryListDataSchema, +); +export const remoteServerConnectResponseSchema = successResponseSchema( + "OpenWorkServerV2RemoteServerConnectResponse", + remoteServerConnectDataSchema, +); +export const systemStatusResponseSchema = successResponseSchema("OpenWorkServerV2SystemStatusResponse", systemStatusDataSchema); +export const workspaceDetailResponseSchema = successResponseSchema("OpenWorkServerV2WorkspaceDetailResponse", workspaceDetailDataSchema); +export const workspaceListResponseSchema = successResponseSchema("OpenWorkServerV2WorkspaceListResponse", workspaceListDataSchema); diff --git a/apps/server-v2/src/schemas/runtime.ts b/apps/server-v2/src/schemas/runtime.ts new file mode 100644 index 00000000..147ab88c --- /dev/null +++ b/apps/server-v2/src/schemas/runtime.ts @@ -0,0 +1,156 @@ +import { z } from "zod"; +import { isoTimestampSchema, successResponseSchema } from "./common.js"; + +const runtimeOutputSnapshotSchema = z.object({ + combined: z.array(z.object({ + at: isoTimestampSchema, + stream: z.enum(["stdout", "stderr"]), + text: z.string(), + })), + stderr: z.array(z.string()), + stdout: z.array(z.string()), + totalLines: z.number().int().nonnegative(), + truncated: z.boolean(), +}).meta({ ref: "OpenWorkServerV2RuntimeOutputSnapshot" }); + +const runtimeTargetSchema = z.enum([ + "darwin-arm64", + "darwin-x64", + "linux-arm64", + "linux-x64", + "windows-arm64", + "windows-x64", +]).meta({ ref: "OpenWorkServerV2RuntimeTarget" }); + +const runtimeManifestSchema = z.object({ + files: z.object({ + opencode: z.object({ + path: z.string(), + sha256: z.string(), + size: z.number().int().nonnegative(), + }), + "opencode-router": z.object({ + path: z.string(), + sha256: z.string(), + size: z.number().int().nonnegative(), + }), + }), + generatedAt: isoTimestampSchema, + manifestVersion: z.literal(1), + opencodeVersion: z.string(), + rootDir: 
z.string(), + routerVersion: z.string(), + serverVersion: z.string(), + source: z.enum(["development", "release"]), + target: runtimeTargetSchema, +}).meta({ ref: "OpenWorkServerV2RuntimeManifest" }); + +const lastExitSchema = z.object({ + at: isoTimestampSchema, + code: z.number().int().nullable(), + output: runtimeOutputSnapshotSchema, + reason: z.string(), + signal: z.string().nullable(), +}).meta({ ref: "OpenWorkServerV2RuntimeLastExit" }); + +const routerEnablementSchema = z.object({ + enabled: z.boolean(), + enabledBindingCount: z.number().int().nonnegative(), + enabledIdentityCount: z.number().int().nonnegative(), + forced: z.boolean(), + reason: z.string(), +}).meta({ ref: "OpenWorkServerV2RouterEnablement" }); + +const routerMaterializationSchema = z.object({ + bindingCount: z.number().int().nonnegative(), + configPath: z.string(), + dataDir: z.string(), + dbPath: z.string(), + identityCount: z.number().int().nonnegative(), + logFile: z.string(), +}).meta({ ref: "OpenWorkServerV2RouterMaterialization" }); + +const runtimeChildStatusSchema = z.enum(["crashed", "disabled", "error", "restart_scheduled", "running", "starting", "stopped"]); + +const runtimeUpgradeStateSchema = z.object({ + error: z.string().nullable(), + finishedAt: isoTimestampSchema.nullable(), + startedAt: isoTimestampSchema.nullable(), + status: z.enum(["completed", "failed", "idle", "running"]), +}).meta({ ref: "OpenWorkServerV2RuntimeUpgradeState" }); + +export const opencodeHealthDataSchema = z.object({ + baseUrl: z.string().nullable(), + binaryPath: z.string().nullable(), + diagnostics: runtimeOutputSnapshotSchema, + lastError: z.string().nullable(), + lastExit: lastExitSchema.nullable(), + lastReadyAt: isoTimestampSchema.nullable(), + lastStartedAt: isoTimestampSchema.nullable(), + manifest: runtimeManifestSchema.nullable(), + pid: z.number().int().nullable(), + running: z.boolean(), + source: z.enum(["development", "release"]), + status: runtimeChildStatusSchema, + version: 
z.string().nullable(), +}).meta({ ref: "OpenWorkServerV2OpencodeHealthData" }); + +export const routerHealthDataSchema = z.object({ + baseUrl: z.string().nullable(), + binaryPath: z.string().nullable(), + diagnostics: runtimeOutputSnapshotSchema, + enablement: routerEnablementSchema, + healthUrl: z.string().nullable(), + lastError: z.string().nullable(), + lastExit: lastExitSchema.nullable(), + lastReadyAt: isoTimestampSchema.nullable(), + lastStartedAt: isoTimestampSchema.nullable(), + manifest: runtimeManifestSchema.nullable(), + materialization: routerMaterializationSchema.nullable(), + pid: z.number().int().nullable(), + running: z.boolean(), + source: z.enum(["development", "release"]), + status: runtimeChildStatusSchema, + version: z.string().nullable(), +}).meta({ ref: "OpenWorkServerV2RouterHealthData" }); + +export const runtimeSummaryDataSchema = z.object({ + bootstrapPolicy: z.enum(["disabled", "eager", "manual"]), + manifest: runtimeManifestSchema.nullable(), + opencode: opencodeHealthDataSchema, + restartPolicy: z.object({ + backoffMs: z.number().int().nonnegative(), + maxAttempts: z.number().int().nonnegative(), + windowMs: z.number().int().nonnegative(), + }), + router: routerHealthDataSchema, + upgrade: runtimeUpgradeStateSchema, + source: z.enum(["development", "release"]), + target: runtimeTargetSchema, +}).meta({ ref: "OpenWorkServerV2RuntimeSummaryData" }); + +export const runtimeUpgradeDataSchema = z.object({ + state: runtimeUpgradeStateSchema, + summary: runtimeSummaryDataSchema, +}).meta({ ref: "OpenWorkServerV2RuntimeUpgradeData" }); + +export const runtimeVersionsDataSchema = z.object({ + active: z.object({ + opencodeVersion: z.string().nullable(), + routerVersion: z.string().nullable(), + serverVersion: z.string(), + }), + manifest: runtimeManifestSchema.nullable(), + pinned: z.object({ + opencodeVersion: z.string().nullable(), + routerVersion: z.string().nullable(), + serverVersion: z.string(), + }), + target: runtimeTargetSchema, 
+}).meta({ ref: "OpenWorkServerV2RuntimeVersionsData" }); + +export const opencodeHealthResponseSchema = successResponseSchema("OpenWorkServerV2OpencodeHealthResponse", opencodeHealthDataSchema); +export const routerHealthResponseSchema = successResponseSchema("OpenWorkServerV2RouterHealthResponse", routerHealthDataSchema); +export const runtimeSummaryResponseSchema = successResponseSchema("OpenWorkServerV2RuntimeSummaryResponse", runtimeSummaryDataSchema); +export const runtimeVersionsResponseSchema = successResponseSchema("OpenWorkServerV2RuntimeVersionsResponse", runtimeVersionsDataSchema); +export const runtimeUpgradeResponseSchema = successResponseSchema("OpenWorkServerV2RuntimeUpgradeResponse", runtimeUpgradeDataSchema); diff --git a/apps/server-v2/src/schemas/sessions.ts b/apps/server-v2/src/schemas/sessions.ts new file mode 100644 index 00000000..9f8bfa29 --- /dev/null +++ b/apps/server-v2/src/schemas/sessions.ts @@ -0,0 +1,249 @@ +import { z } from "zod"; +import { identifierSchema, successResponseSchema, workspaceIdParamsSchema } from "./common.js"; + +const jsonRecordSchema = z.record(z.string(), z.unknown()); + +export const sessionStatusSchema = z.discriminatedUnion("type", [ + z.object({ type: z.literal("idle") }), + z.object({ type: z.literal("busy") }), + z.object({ + type: z.literal("retry"), + attempt: z.number(), + message: z.string(), + next: z.number(), + }), +]).meta({ ref: "OpenWorkServerV2SessionStatus" }); + +const sessionTimeSchema = z.object({ + archived: z.number().optional(), + completed: z.number().optional(), + created: z.number().optional(), + updated: z.number().optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionTime" }); + +const sessionSummarySchema = z.object({ + additions: z.number().optional(), + deletions: z.number().optional(), + files: z.number().optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionSummary" }); + +export const sessionSchema = z.object({ + directory: z.string().nullish(), + id: 
identifierSchema, + parentID: z.string().nullish(), + revert: z.object({ + messageID: identifierSchema, + }).partial().nullish(), + slug: z.string().nullish(), + summary: sessionSummarySchema.optional(), + time: sessionTimeSchema.optional(), + title: z.string().nullish(), +}).passthrough().meta({ ref: "OpenWorkServerV2Session" }); + +const sessionMessageInfoSchema = z.object({ + id: identifierSchema, + parentID: z.string().nullish(), + role: z.string(), + sessionID: identifierSchema, + time: sessionTimeSchema.optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionMessageInfo" }); + +export const sessionMessagePartSchema = z.object({ + id: identifierSchema, + messageID: identifierSchema, + sessionID: identifierSchema, + type: z.string().optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionMessagePart" }); + +export const sessionMessageSchema = z.object({ + info: sessionMessageInfoSchema, + parts: z.array(sessionMessagePartSchema), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionMessage" }); + +export const sessionTodoSchema = z.object({ + content: z.string(), + priority: z.string(), + status: z.string(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionTodo" }); + +export const sessionSnapshotSchema = z.object({ + messages: z.array(sessionMessageSchema), + session: sessionSchema, + status: sessionStatusSchema, + todos: z.array(sessionTodoSchema), +}).meta({ ref: "OpenWorkServerV2SessionSnapshot" }); + +export const workspaceEventSchema = z.object({ + properties: z.unknown().optional(), + type: z.string(), +}).meta({ ref: "OpenWorkServerV2WorkspaceEvent" }); + +export const sessionListQuerySchema = z.object({ + limit: z.coerce.number().int().positive().max(500).optional(), + roots: z.coerce.boolean().optional(), + search: z.string().trim().min(1).optional(), + start: z.coerce.number().int().nonnegative().optional(), +}).meta({ ref: "OpenWorkServerV2SessionListQuery" }); + +export const sessionMessagesQuerySchema = z.object({ + 
limit: z.coerce.number().int().positive().max(500).optional(), +}).meta({ ref: "OpenWorkServerV2SessionMessagesQuery" }); + +export const sessionIdParamsSchema = workspaceIdParamsSchema.extend({ + sessionId: identifierSchema.describe("Stable session identifier within the resolved workspace backend."), +}).meta({ ref: "OpenWorkServerV2SessionIdParams" }); + +export const messageIdParamsSchema = sessionIdParamsSchema.extend({ + messageId: identifierSchema.describe("Stable message identifier within the resolved session."), +}).meta({ ref: "OpenWorkServerV2MessageIdParams" }); + +export const messagePartParamsSchema = messageIdParamsSchema.extend({ + partId: identifierSchema.describe("Stable message part identifier within the resolved message."), +}).meta({ ref: "OpenWorkServerV2MessagePartParams" }); + +export const sessionCreateRequestSchema = z.object({ + parentSessionId: identifierSchema.optional(), + title: z.string().trim().min(1).max(300).optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionCreateRequest" }); + +export const sessionUpdateRequestSchema = z.object({ + archived: z.boolean().optional(), + title: z.string().trim().min(1).max(300).optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionUpdateRequest" }); + +export const sessionForkRequestSchema = z.object({ + title: z.string().trim().min(1).max(300).optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionForkRequest" }); + +export const sessionSummarizeRequestSchema = z.object({ + modelID: z.string().trim().min(1).optional(), + providerID: z.string().trim().min(1).optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2SessionSummarizeRequest" }); + +export const messageSendRequestSchema = z.object({ + parts: z.array(z.unknown()).optional(), + role: z.string().optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2MessageSendRequest" }); + +export const promptAsyncRequestSchema = z.object({ + agent: z.string().optional(), + messageID: 
identifierSchema.optional(), + model: z.object({ + modelID: z.string(), + providerID: z.string(), + }).optional(), + noReply: z.boolean().optional(), + parts: z.array(z.unknown()).optional(), + reasoning_effort: z.string().optional(), + system: z.string().optional(), + tools: z.record(z.string(), z.boolean()).optional(), + variant: z.string().optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2PromptAsyncRequest" }); + +export const commandRequestSchema = z.object({ + agent: z.string().optional(), + arguments: z.string().optional(), + command: z.string().min(1), + messageID: identifierSchema.optional(), + model: z.string().optional(), + parts: z.array(z.unknown()).optional(), + reasoning_effort: z.string().optional(), + variant: z.string().optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2CommandRequest" }); + +export const shellRequestSchema = z.object({ + command: z.string().min(1), +}).passthrough().meta({ ref: "OpenWorkServerV2ShellRequest" }); + +export const revertRequestSchema = z.object({ + messageID: identifierSchema, +}).meta({ ref: "OpenWorkServerV2RevertRequest" }); + +export const messagePartUpdateRequestSchema = z.object({ + text: z.string().optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2MessagePartUpdateRequest" }); + +export const sessionListDataSchema = z.object({ + items: z.array(sessionSchema), +}).meta({ ref: "OpenWorkServerV2SessionListData" }); + +export const sessionStatusesDataSchema = z.object({ + items: z.record(z.string(), sessionStatusSchema), +}).meta({ ref: "OpenWorkServerV2SessionStatusesData" }); + +export const sessionTodoListDataSchema = z.object({ + items: z.array(sessionTodoSchema), +}).meta({ ref: "OpenWorkServerV2SessionTodoListData" }); + +export const messageListDataSchema = z.object({ + items: z.array(sessionMessageSchema), +}).meta({ ref: "OpenWorkServerV2MessageListData" }); + +export const acceptedActionDataSchema = z.object({ + accepted: z.literal(true), +}).meta({ ref: 
"OpenWorkServerV2AcceptedActionData" }); + +export const deletedActionDataSchema = z.object({ + deleted: z.literal(true), +}).meta({ ref: "OpenWorkServerV2DeletedActionData" }); + +export const sessionResponseSchema = successResponseSchema("OpenWorkServerV2SessionResponse", sessionSchema); +export const sessionListResponseSchema = successResponseSchema("OpenWorkServerV2SessionListResponse", sessionListDataSchema); +export const sessionStatusesResponseSchema = successResponseSchema( + "OpenWorkServerV2SessionStatusesResponse", + sessionStatusesDataSchema, +); +export const sessionStatusResponseSchema = successResponseSchema("OpenWorkServerV2SessionStatusResponse", sessionStatusSchema); +export const sessionTodoListResponseSchema = successResponseSchema( + "OpenWorkServerV2SessionTodoListResponse", + sessionTodoListDataSchema, +); +export const sessionSnapshotResponseSchema = successResponseSchema( + "OpenWorkServerV2SessionSnapshotResponse", + sessionSnapshotSchema, +); +export const messageResponseSchema = successResponseSchema("OpenWorkServerV2MessageResponse", sessionMessageSchema); +export const messageListResponseSchema = successResponseSchema("OpenWorkServerV2MessageListResponse", messageListDataSchema); +export const acceptedActionResponseSchema = successResponseSchema( + "OpenWorkServerV2AcceptedActionResponse", + acceptedActionDataSchema, +); +export const deletedActionResponseSchema = successResponseSchema( + "OpenWorkServerV2DeletedActionResponse", + deletedActionDataSchema, +); + +export type SessionRecord = z.infer; +export type SessionMessageRecord = z.infer; +export type SessionSnapshotRecord = z.infer; +export type SessionStatusRecord = z.infer; +export type SessionTodoRecord = z.infer; +export type WorkspaceEventRecord = z.infer; + +export function parseSessionData(value: unknown) { + return sessionSchema.parse(value); +} + +export function parseSessionListData(value: unknown) { + return z.array(sessionSchema).parse(value); +} + +export function 
parseSessionMessageData(value: unknown) { + return sessionMessageSchema.parse(value); +} + +export function parseSessionMessagesData(value: unknown) { + return z.array(sessionMessageSchema).parse(value); +} + +export function parseSessionStatusesData(value: unknown) { + return z.record(z.string(), sessionStatusSchema).parse(value); +} + +export function parseSessionTodosData(value: unknown) { + return z.array(sessionTodoSchema).parse(value); +} + +export function parseWorkspaceEventData(value: unknown) { + return workspaceEventSchema.parse(value); +} diff --git a/apps/server-v2/src/schemas/system.ts b/apps/server-v2/src/schemas/system.ts new file mode 100644 index 00000000..16620ee0 --- /dev/null +++ b/apps/server-v2/src/schemas/system.ts @@ -0,0 +1,133 @@ +import { z } from "zod"; +import { identifierSchema, isoTimestampSchema, successResponseSchema } from "./common.js"; +import { runtimeSummaryDataSchema } from "./runtime.js"; + +const jsonObjectSchema = z.record(z.string(), z.unknown()); + +export const routeNamespacesSchema = z.object({ + root: z.literal("/"), + openapi: z.literal("/openapi.json"), + system: z.literal("/system"), + workspaces: z.literal("/workspaces"), + workspaceResource: z.string().startsWith("/workspaces/"), +}).meta({ ref: "OpenWorkServerV2RouteNamespaces" }); + +export const contractMetadataSchema = z.object({ + source: z.literal("hono-openapi"), + openapiPath: z.literal("/openapi.json"), + sdkPackage: z.literal("@openwork/server-sdk"), +}).meta({ ref: "OpenWorkServerV2ContractMetadata" }); + +export const databaseStatusSchema = z.object({ + bootstrapMode: z.enum(["fresh", "existing"]), + configured: z.literal(true), + importWarnings: z.number().int().nonnegative(), + kind: z.literal("sqlite"), + migrations: z.object({ + appliedThisRun: z.array(z.string()), + currentVersion: z.string(), + totalApplied: z.number().int().nonnegative(), + }).meta({ ref: "OpenWorkServerV2MigrationStatus" }), + path: z.string(), + phaseOwner: z.literal(2), + 
status: z.enum(["ready", "warning"]), + summary: z.string(), + workingDirectory: z.string(), +}).meta({ ref: "OpenWorkServerV2DatabaseStatus" }); + +export const importSourceReportSchema = z.object({ + details: jsonObjectSchema, + sourcePath: z.string().nullable(), + status: z.enum(["error", "imported", "skipped", "unavailable"]), + warnings: z.array(z.string()), +}).meta({ ref: "OpenWorkServerV2ImportSourceReport" }); + +export const startupDiagnosticsSchema = z.object({ + completedAt: isoTimestampSchema, + importReports: z.object({ + cloudSignin: importSourceReportSchema, + desktopWorkspaceState: importSourceReportSchema, + orchestratorAuth: importSourceReportSchema, + orchestratorState: importSourceReportSchema, + }).meta({ ref: "OpenWorkServerV2ImportReports" }), + legacyWorkspaceImport: z.object({ + completedAt: isoTimestampSchema.nullable(), + skipped: z.boolean(), + }).meta({ ref: "OpenWorkServerV2LegacyWorkspaceImportState" }), + mode: z.enum(["fresh", "existing"]), + migrations: z.object({ + applied: z.array(z.string()), + currentVersion: z.string(), + totalApplied: z.number().int().nonnegative(), + }).meta({ ref: "OpenWorkServerV2StartupMigrationSummary" }), + registry: z.object({ + hiddenWorkspaceIds: z.array(identifierSchema), + localServerCreated: z.boolean(), + localServerId: identifierSchema, + totalServers: z.number().int().nonnegative(), + totalVisibleWorkspaces: z.number().int().nonnegative(), + }).meta({ ref: "OpenWorkServerV2StartupRegistrySummary" }), + warnings: z.array(z.string()), + workingDirectory: z.object({ + databasePath: z.string(), + rootDir: z.string(), + workspacesDir: z.string(), + }).meta({ ref: "OpenWorkServerV2WorkingDirectory" }), +}).meta({ ref: "OpenWorkServerV2StartupDiagnostics" }); + +export const rootInfoDataSchema = z.object({ + service: z.literal("openwork-server-v2"), + packageName: z.literal("openwork-server-v2"), + version: z.string(), + environment: z.string(), + routes: routeNamespacesSchema, + contract: 
contractMetadataSchema, +}).meta({ ref: "OpenWorkServerV2RootInfoData" }); + +export const healthDataSchema = z.object({ + service: z.literal("openwork-server-v2"), + status: z.literal("ok"), + startedAt: isoTimestampSchema, + uptimeMs: z.number().int().nonnegative(), + database: databaseStatusSchema, +}).meta({ ref: "OpenWorkServerV2HealthData" }); + +export const runtimeInfoSchema = z.object({ + environment: z.string(), + hostname: z.string(), + pid: z.number().int().nonnegative(), + platform: z.string(), + runtime: z.literal("bun"), + runtimeVersion: z.string().nullable(), +}).meta({ ref: "OpenWorkServerV2RuntimeInfo" }); + +export const metadataDataSchema = z.object({ + foundation: z.object({ + phase: z.literal(8), + middlewareOrder: z.array(identifierSchema).min(1), + routeNamespaces: routeNamespacesSchema, + database: databaseStatusSchema, + startup: startupDiagnosticsSchema, + }).meta({ ref: "OpenWorkServerV2FoundationInfo" }), + requestContext: z.object({ + actorKind: z.enum(["anonymous", "client", "host"]), + requestIdHeader: z.literal("X-Request-Id"), + }).meta({ ref: "OpenWorkServerV2RequestContextInfo" }), + runtime: runtimeInfoSchema, + runtimeSupervisor: runtimeSummaryDataSchema, + contract: contractMetadataSchema, +}).meta({ ref: "OpenWorkServerV2MetadataData" }); + +export const rootInfoResponseSchema = successResponseSchema("OpenWorkServerV2RootInfoResponse", rootInfoDataSchema); +export const healthResponseSchema = successResponseSchema("OpenWorkServerV2HealthResponse", healthDataSchema); +export const metadataResponseSchema = successResponseSchema("OpenWorkServerV2MetadataResponse", metadataDataSchema); + +export const openApiDocumentSchema = z.object({ + openapi: z.string(), + info: z.object({ + title: z.string(), + version: z.string(), + }).passthrough(), + paths: z.record(z.string(), z.unknown()), + components: z.object({}).passthrough().optional(), +}).passthrough().meta({ ref: "OpenWorkServerV2OpenApiDocument" }); diff --git 
a/apps/server-v2/src/services/auth-service.ts b/apps/server-v2/src/services/auth-service.ts new file mode 100644 index 00000000..53cbd136 --- /dev/null +++ b/apps/server-v2/src/services/auth-service.ts @@ -0,0 +1,124 @@ +import { HTTPException } from "hono/http-exception"; + +export type RequestActor = { + kind: "anonymous" | "client" | "host"; +}; + +export type AuthSummary = { + actorKind: RequestActor["kind"]; + configured: { + clientToken: boolean; + hostToken: boolean; + }; + headers: { + authorization: "Authorization"; + hostToken: "X-OpenWork-Host-Token"; + }; + required: boolean; + scopes: { + hiddenWorkspaceReads: "host"; + serverInventory: "host"; + visibleRead: "client_or_host"; + }; +}; + +function readBearer(headers: Headers) { + const raw = headers.get("authorization")?.trim() ?? ""; + const match = raw.match(/^Bearer\s+(.+)$/i); + return match?.[1]?.trim() ?? ""; +} + +function trimToken(value: string | undefined) { + const trimmed = value?.trim() ?? ""; + return trimmed || null; +} + +export type AuthService = ReturnType; + +export function createAuthService() { + const clientToken = trimToken( + process.env.OPENWORK_SERVER_V2_CLIENT_TOKEN + ?? process.env.OPENWORK_CLIENT_TOKEN + ?? process.env.OPENWORK_TOKEN, + ); + const hostToken = trimToken( + process.env.OPENWORK_SERVER_V2_HOST_TOKEN + ?? process.env.OPENWORK_HOST_TOKEN, + ); + const required = Boolean(clientToken || hostToken); + + function resolveActor(headers: Headers): RequestActor { + const hostHeader = headers.get("x-openwork-host-token")?.trim() ?? 
""; + if (hostToken && hostHeader && hostHeader === hostToken) { + return { kind: "host" }; + } + + const bearer = readBearer(headers); + if (hostToken && bearer && bearer === hostToken) { + return { kind: "host" }; + } + + if (clientToken && bearer && bearer === clientToken) { + return { kind: "client" }; + } + + return { kind: "anonymous" }; + } + + function getSummary(actor: RequestActor): AuthSummary { + return { + actorKind: actor.kind, + configured: { + clientToken: Boolean(clientToken), + hostToken: Boolean(hostToken), + }, + headers: { + authorization: "Authorization", + hostToken: "X-OpenWork-Host-Token", + }, + required, + scopes: { + hiddenWorkspaceReads: "host", + serverInventory: "host", + visibleRead: "client_or_host", + }, + }; + } + + function requireVisibleRead(actor: RequestActor) { + if (!required) { + return; + } + + if (actor.kind === "anonymous") { + throw new HTTPException(401, { + message: "A client or host token is required for this route.", + }); + } + } + + function requireHost(actor: RequestActor) { + if (!required) { + return; + } + + if (actor.kind === "anonymous") { + throw new HTTPException(401, { + message: "A host token is required for this route.", + }); + } + + if (actor.kind !== "host") { + throw new HTTPException(403, { + message: "Host scope is required for this route.", + }); + } + } + + return { + getSummary, + requireHost, + requireVisibleRead, + resolveActor, + }; +} diff --git a/apps/server-v2/src/services/capabilities-service.ts b/apps/server-v2/src/services/capabilities-service.ts new file mode 100644 index 00000000..ae784b1a --- /dev/null +++ b/apps/server-v2/src/services/capabilities-service.ts @@ -0,0 +1,182 @@ +import type { RequestActor } from "./auth-service.js"; +import type { RuntimeService } from "./runtime-service.js"; +import type { AuthService } from "./auth-service.js"; + +export type CapabilitiesData = { + auth: ReturnType; + bundles: { + fetch: true; + publish: true; + workspaceExport: true; + 
workspaceImport: true; + }; + cloud: { + persistence: true; + validation: true; + }; + config: { + projection: true; + rawRead: true; + rawWrite: true; + read: true; + write: true; + }; + files: { + artifacts: true; + contentRoutes: true; + fileSessions: true; + inbox: true; + mutations: true; + }; + managed: { + assignments: true; + mcps: true; + plugins: true; + providerConfigs: true; + skills: true; + }; + reload: { + manualEngineReload: true; + reconciliation: true; + watch: true; + workspaceEvents: true; + }; + registry: { + backendResolution: true; + hiddenWorkspaceFiltering: true; + remoteServerConnections: true; + remoteWorkspaceSync: true; + serverInventory: true; + workspaceDetail: true; + workspaceList: true; + }; + sessions: { + events: true; + list: true; + messages: true; + mutations: true; + promptAsync: true; + revertHistory: true; + }; + runtime: { + opencodeHealth: true; + routerHealth: true; + runtimeSummary: true; + runtimeUpgrade: true; + runtimeVersions: true; + }; + router: { + bindings: true; + identities: true; + outboundSend: true; + productRoutes: true; + }; + shares: { + workspaceScoped: true; + }; + workspaces: { + activate: true; + createLocal: true; + }; + transport: { + rootMounted: true; + sdkPackage: "@openwork/server-sdk"; + v2: true; + }; +}; + +export type CapabilitiesService = ReturnType; + +export function createCapabilitiesService(input: { + auth: AuthService; + runtime: RuntimeService; +}) { + return { + getCapabilities(actor: RequestActor): CapabilitiesData { + const runtimeSummary = input.runtime.getRuntimeSummary(); + void runtimeSummary; + return { + auth: input.auth.getSummary(actor), + bundles: { + fetch: true, + publish: true, + workspaceExport: true, + workspaceImport: true, + }, + cloud: { + persistence: true, + validation: true, + }, + config: { + projection: true, + rawRead: true, + rawWrite: true, + read: true, + write: true, + }, + files: { + artifacts: true, + contentRoutes: true, + fileSessions: true, + inbox: 
true, + mutations: true, + }, + managed: { + assignments: true, + mcps: true, + plugins: true, + providerConfigs: true, + skills: true, + }, + reload: { + manualEngineReload: true, + reconciliation: true, + watch: true, + workspaceEvents: true, + }, + registry: { + backendResolution: true, + hiddenWorkspaceFiltering: true, + remoteServerConnections: true, + remoteWorkspaceSync: true, + serverInventory: true, + workspaceDetail: true, + workspaceList: true, + }, + sessions: { + events: true, + list: true, + messages: true, + mutations: true, + promptAsync: true, + revertHistory: true, + }, + runtime: { + opencodeHealth: true, + routerHealth: true, + runtimeSummary: true, + runtimeUpgrade: true, + runtimeVersions: true, + }, + router: { + bindings: true, + identities: true, + outboundSend: true, + productRoutes: true, + }, + shares: { + workspaceScoped: true, + }, + workspaces: { + activate: true, + createLocal: true, + }, + transport: { + rootMounted: true, + sdkPackage: "@openwork/server-sdk", + v2: true, + }, + }; + }, + }; +} diff --git a/apps/server-v2/src/services/config-materialization-service.ts b/apps/server-v2/src/services/config-materialization-service.ts new file mode 100644 index 00000000..124c0f4e --- /dev/null +++ b/apps/server-v2/src/services/config-materialization-service.ts @@ -0,0 +1,832 @@ +import fs from "node:fs"; +import path from "node:path"; +import { HTTPException } from "hono/http-exception"; +import type { ServerRepositories } from "../database/repositories.js"; +import type { JsonObject, ManagedConfigRecord, WorkspaceRecord } from "../database/types.js"; +import type { ServerWorkingDirectory } from "../database/working-directory.js"; +import { ensureWorkspaceConfigDir } from "../database/working-directory.js"; +import { RouteError } from "../http.js"; +import { requestRemoteOpenwork, resolveRemoteWorkspaceTarget } from "../adapters/remote-openwork.js"; + +const MANAGED_SKILL_DOMAIN = "openwork-managed"; +const OPENWORK_CONFIG_VERSION = 1; 
+ +type WorkspaceConfigSnapshot = { + effective: { + opencode: JsonObject; + openwork: JsonObject; + }; + materialized: { + compatibilityOpencodePath: string | null; + compatibilityOpenworkPath: string | null; + configDir: string | null; + configOpencodePath: string | null; + configOpenworkPath: string | null; + }; + stored: { + opencode: JsonObject; + openwork: JsonObject; + }; + updatedAt: string; + workspaceId: string; +}; + +function asObject(value: unknown): JsonObject { + return value && typeof value === "object" && !Array.isArray(value) ? { ...(value as JsonObject) } : {}; +} + +function normalizeStringArray(value: unknown) { + if (!Array.isArray(value)) { + return [] as string[]; + } + return Array.from(new Set(value.filter((item): item is string => typeof item === "string").map((item) => item.trim()).filter(Boolean))); +} + +function normalizeAuthorizedFolderPath(input: string | null | undefined) { + const trimmed = (input ?? "").trim(); + if (!trimmed) return ""; + return trimmed.replace(/[\\/]\*+$/, ""); +} + +function authorizedFolderToExternalDirectoryKey(folder: string) { + const normalized = normalizeAuthorizedFolderPath(folder); + if (!normalized) return ""; + return normalized === "/" ? 
"/*" : `${normalized}/*`; +} + +function externalDirectoryKeyToAuthorizedFolder(key: string, value: unknown) { + if (value !== "allow") return null; + const trimmed = key.trim(); + if (!trimmed) return null; + if (trimmed === "/*") return "/"; + if (!trimmed.endsWith("/*")) return null; + return normalizeAuthorizedFolderPath(trimmed.slice(0, -2)); +} + +function normalizeExternalDirectory(value: unknown) { + const folders = new Set(); + const hiddenEntries: JsonObject = {}; + + for (const folder of normalizeStringArray(value)) { + const normalized = normalizeAuthorizedFolderPath(folder); + if (normalized) { + folders.add(normalized); + } + } + + if (value && typeof value === "object" && !Array.isArray(value)) { + for (const [key, entryValue] of Object.entries(value as JsonObject)) { + const folder = externalDirectoryKeyToAuthorizedFolder(key, entryValue); + if (folder) { + folders.add(folder); + } else { + hiddenEntries[key] = entryValue; + } + } + } + + return { + folders: Array.from(folders), + hiddenEntries, + }; +} + +function buildExternalDirectory(folders: string[], hiddenEntries: JsonObject) { + const next: JsonObject = { ...hiddenEntries }; + for (const folder of folders) { + const key = authorizedFolderToExternalDirectoryKey(folder); + if (!key) continue; + next[key] = "allow"; + } + return Object.keys(next).length ? 
next : undefined; +} + +function withoutWorkspaceRoot(folders: string[], workspace: WorkspaceRecord) { + const workspaceRoot = normalizeAuthorizedFolderPath(workspace.dataDir); + if (!workspaceRoot) { + return folders; + } + return folders.filter((folder) => normalizeAuthorizedFolderPath(folder) !== workspaceRoot); +} + +function canonicalizeWorkspaceConfigState(workspace: WorkspaceRecord, config: { openwork: JsonObject; opencode: JsonObject }) { + const nextOpenwork = asObject(config.openwork); + nextOpenwork.authorizedRoots = withoutWorkspaceRoot(normalizeStringArray(nextOpenwork.authorizedRoots), workspace); + + const nextOpencode = asObject(config.opencode); + const permission = asObject(nextOpencode.permission); + const externalDirectory = normalizeExternalDirectory(permission.external_directory); + const nextExternalDirectory = buildExternalDirectory(withoutWorkspaceRoot(externalDirectory.folders, workspace), externalDirectory.hiddenEntries); + if (nextExternalDirectory) { + permission.external_directory = nextExternalDirectory; + } else { + delete permission.external_directory; + } + if (Object.keys(permission).length) { + nextOpencode.permission = permission; + } else { + delete nextOpencode.permission; + } + + return { + openwork: nextOpenwork, + opencode: nextOpencode, + }; +} + +function mergeObjects(base: JsonObject, patch: JsonObject): JsonObject { + const next: JsonObject = { ...base }; + for (const [key, value] of Object.entries(patch)) { + if ( + value && + typeof value === "object" && + !Array.isArray(value) && + base[key] && + typeof base[key] === "object" && + !Array.isArray(base[key]) + ) { + next[key] = mergeObjects(asObject(base[key]), asObject(value)); + continue; + } + next[key] = value; + } + return next; +} + +function writeJsonFile(filePath: string, value: unknown) { + fs.mkdirSync(path.dirname(filePath), { recursive: true }); + fs.writeFileSync(filePath, `${JSON.stringify(value, null, 2)}\n`, "utf8"); +} + +function 
readJsonFile(filePath: string) { + if (!fs.existsSync(filePath)) { + return null; + } + try { + return JSON.parse(fs.readFileSync(filePath, "utf8")) as JsonObject; + } catch { + return null; + } +} + +function readJsoncFile(filePath: string) { + if (!fs.existsSync(filePath)) { + return null; + } + try { + return asObject(parseJsoncText(fs.readFileSync(filePath, "utf8"))); + } catch { + return null; + } +} + +function parseJsoncText(content: string) { + const withoutLineComments = content.replace(/^\s*\/\/.*$/gm, ""); + const withoutBlockComments = withoutLineComments.replace(/\/\*[\s\S]*?\*\//g, ""); + const withoutTrailingCommas = withoutBlockComments.replace(/,\s*([}\]])/g, "$1"); + return JSON.parse(withoutTrailingCommas); +} + +function normalizePluginKey(spec: string) { + const trimmed = spec.trim(); + if (!trimmed) { + return ""; + } + if (trimmed.startsWith("@")) { + const atIndex = trimmed.indexOf("@", 1); + return atIndex > 0 ? trimmed.slice(0, atIndex) : trimmed; + } + const atIndex = trimmed.indexOf("@"); + return atIndex > 0 ? 
trimmed.slice(0, atIndex) : trimmed; +} + +function nowIso() { + return new Date().toISOString(); +} + +function parseManagedSkillMetadata(content: string, fallbackName: string) { + const frontmatter = content.match(/^---\r?\n([\s\S]*?)\r?\n---\r?\n?/); + const nameMatch = frontmatter?.[1]?.match(/^name:\s*(.+)$/m); + const descriptionMatch = frontmatter?.[1]?.match(/^description:\s*(.+)$/m); + const displayName = nameMatch?.[1]?.trim() || fallbackName; + const key = displayName.toLowerCase().replace(/[^a-z0-9-]+/g, "-").replace(/^-+|-+$/g, "") || fallbackName; + return { + description: descriptionMatch?.[1]?.trim() || displayName, + displayName, + key, + }; +} + +function readManagedSkillFiles(rootDir: string | null) { + if (!rootDir || !fs.existsSync(rootDir)) { + return [] as Array<{ content: string; key: string; path: string }>; + } + + const items: Array<{ content: string; key: string; path: string }> = []; + const visit = (directory: string, depth: number) => { + if (depth > 2) { + return; + } + for (const entry of fs.readdirSync(directory, { withFileTypes: true })) { + if (!entry.isDirectory()) { + continue; + } + const direct = path.join(directory, entry.name, "SKILL.md"); + if (fs.existsSync(direct) && fs.statSync(direct).isFile()) { + items.push({ content: fs.readFileSync(direct, "utf8"), key: entry.name, path: direct }); + continue; + } + visit(path.join(directory, entry.name), depth + 1); + } + }; + + visit(rootDir, 0); + return items; +} + +function extractRecognizedOpencodeSections(opencode: JsonObject) { + const base = { ...opencode }; + const plugins = Array.isArray(base.plugin) + ? base.plugin.filter((value): value is string => typeof value === "string" && Boolean(value.trim())).map((value) => value.trim()) + : typeof base.plugin === "string" && base.plugin.trim() + ? 
[base.plugin.trim()] + : []; + const mcps = asObject(base.mcp); + const providers = asObject((base as Record).provider); + delete base.plugin; + delete base.mcp; + delete (base as Record).provider; + + return { + base, + mcps: Object.entries(mcps).map(([key, value]) => ({ config: asObject(value), displayName: key, key })), + plugins: plugins.map((spec) => ({ + config: { spec }, + displayName: normalizePluginKey(spec) || spec, + key: normalizePluginKey(spec) || spec, + })), + providers: Object.entries(providers).map(([key, value]) => ({ config: asObject(value), displayName: key, key })), + }; +} + +function dedupeAssignments(items: string[]) { + return Array.from(new Set(items.filter(Boolean))); +} + +export type ConfigMaterializationService = ReturnType; +export type { WorkspaceConfigSnapshot }; + +export function createConfigMaterializationService(input: { + repositories: ServerRepositories; + serverId: string; + workingDirectory: ServerWorkingDirectory; +}) { + function getWorkspaceOrThrow(workspaceId: string) { + const workspace = input.repositories.workspaces.getById(workspaceId); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + return workspace; + } + + function getRemoteServerOrThrow(workspace: WorkspaceRecord) { + const server = input.repositories.servers.getById(workspace.serverId); + if (!server) { + throw new RouteError(502, "bad_gateway", `Workspace ${workspace.id} points at missing remote server ${workspace.serverId}.`); + } + return server; + } + + function ensureWorkspaceLocal(workspace: WorkspaceRecord) { + if (workspace.kind === "remote") { + throw new RouteError( + 501, + "not_implemented", + "Phase 7 local file/config ownership currently supports local, control, and help workspaces only. 
Remote config and file mutation stay on the direct remote path during migration.", + ); + } + } + + function workspaceOpencodeConfigPath(workspace: WorkspaceRecord) { + const configDir = workspace.configDir?.trim(); + if (!configDir) { + throw new RouteError(500, "internal_error", `Workspace ${workspace.id} is missing its config directory.`); + } + return path.join(configDir, "opencode.jsonc"); + } + + function workspaceOpenworkConfigPath(workspace: WorkspaceRecord) { + const configDir = workspace.configDir?.trim(); + if (!configDir) { + throw new RouteError(500, "internal_error", `Workspace ${workspace.id} is missing its config directory.`); + } + return path.join(configDir, ".opencode", "openwork.json"); + } + + function compatibilityOpencodeConfigPath(workspace: WorkspaceRecord) { + const dataDir = workspace.dataDir?.trim(); + return dataDir ? path.join(dataDir, "opencode.jsonc") : null; + } + + function compatibilityOpenworkConfigPath(workspace: WorkspaceRecord) { + const dataDir = workspace.dataDir?.trim(); + return dataDir ? path.join(dataDir, ".opencode", "openwork.json") : null; + } + + function workspaceSkillRoots(workspace: WorkspaceRecord) { + const configDir = workspace.configDir?.trim(); + const dataDir = workspace.dataDir?.trim(); + return { + compatibility: dataDir ? path.join(dataDir, ".opencode", "skills", MANAGED_SKILL_DOMAIN) : null, + managedConfig: configDir ? path.join(configDir, ".opencode", "skills", MANAGED_SKILL_DOMAIN) : null, + sourceConfig: configDir ? path.join(configDir, ".opencode", "skills") : null, + sourceData: dataDir ? path.join(dataDir, ".opencode", "skills") : null, + }; + } + + function derivePreset(workspace: WorkspaceRecord) { + const notes = asObject(workspace.notes); + const legacyDesktop = asObject(notes.legacyDesktop); + const preset = typeof legacyDesktop.preset === "string" ? legacyDesktop.preset.trim() : ""; + if (preset) { + return preset; + } + return workspace.kind === "local" ? 
"starter" : "remote"; + } + + function buildDefaultOpenwork(workspace: WorkspaceRecord) { + return { + authorizedRoots: [], + blueprint: null, + reload: null, + version: OPENWORK_CONFIG_VERSION, + workspace: { + configDir: workspace.configDir, + createdAt: Date.parse(workspace.createdAt) || Date.now(), + dataDir: workspace.dataDir, + name: workspace.displayName, + preset: derivePreset(workspace), + }, + } satisfies JsonObject; + } + + function buildDefaultOpencode() { + return { + $schema: "https://opencode.ai/config.json", + } satisfies JsonObject; + } + + function ensureServerConfigState() { + const existing = input.repositories.serverConfigState.getByServerId(input.serverId); + if (existing) { + return existing; + } + return input.repositories.serverConfigState.upsert({ + opencode: buildDefaultOpencode(), + serverId: input.serverId, + }); + } + + function readLegacyWorkspaceState(workspace: WorkspaceRecord) { + const openwork = + readJsonFile(workspaceOpenworkConfigPath(workspace)) + ?? (compatibilityOpenworkConfigPath(workspace) ? readJsonFile(compatibilityOpenworkConfigPath(workspace)!) : null) + ?? buildDefaultOpenwork(workspace); + const opencode = + readJsoncFile(workspaceOpencodeConfigPath(workspace)) + ?? (compatibilityOpencodeConfigPath(workspace) ? readJsoncFile(compatibilityOpencodeConfigPath(workspace)!) : null) + ?? 
buildDefaultOpencode(); + return { + openwork: asObject(openwork), + opencode: asObject(opencode), + }; + } + + function ensureWorkspaceConfigState(workspace: WorkspaceRecord) { + ensureWorkspaceLocal(workspace); + ensureWorkspaceConfigDir(input.workingDirectory, workspace.id); + const existing = input.repositories.workspaceConfigState.getByWorkspaceId(workspace.id); + if (existing) { + return existing; + } + const legacy = readLegacyWorkspaceState(workspace); + const canonical = canonicalizeWorkspaceConfigState(workspace, legacy); + return input.repositories.workspaceConfigState.upsert({ + openwork: canonical.openwork, + opencode: canonical.opencode, + workspaceId: workspace.id, + }); + } + + function upsertManagedRecords( + workspaceId: string, + kind: "mcps" | "plugins" | "providerConfigs", + items: Array<{ config: JsonObject; displayName: string; key: string }>, + ) { + if (kind === "mcps") { + const ids = items.map((item) => input.repositories.mcps.upsert({ + auth: null, + cloudItemId: null, + config: item.config, + displayName: item.displayName, + id: `mcp_${workspaceId}_${item.key}`, + key: item.key, + metadata: { absorbed: true, workspaceId }, + source: "imported", + }).id); + input.repositories.workspaceMcps.replaceAssignments(workspaceId, dedupeAssignments(ids)); + return; + } + + if (kind === "plugins") { + const ids = items.map((item) => input.repositories.plugins.upsert({ + auth: null, + cloudItemId: null, + config: item.config, + displayName: item.displayName, + id: `plugin_${workspaceId}_${item.key}`, + key: item.key, + metadata: { absorbed: true, workspaceId }, + source: "imported", + }).id); + input.repositories.workspacePlugins.replaceAssignments(workspaceId, dedupeAssignments(ids)); + return; + } + + const ids = items.map((item) => input.repositories.providerConfigs.upsert({ + auth: null, + cloudItemId: null, + config: item.config, + displayName: item.displayName, + id: `provider_${workspaceId}_${item.key}`, + key: item.key, + metadata: { 
absorbed: true, workspaceId }, + source: "imported", + }).id); + input.repositories.workspaceProviderConfigs.replaceAssignments(workspaceId, dedupeAssignments(ids)); + } + + function absorbManagedSkills(workspace: WorkspaceRecord) { + const items = [ + ...readManagedSkillFiles(workspaceSkillRoots(workspace).sourceConfig), + ...readManagedSkillFiles(workspaceSkillRoots(workspace).sourceData), + ]; + const seen = new Set(); + const ids: string[] = []; + for (const item of items) { + const meta = parseManagedSkillMetadata(item.content, item.key); + if (!meta.key || seen.has(meta.key)) { + continue; + } + seen.add(meta.key); + ids.push(input.repositories.skills.upsert({ + auth: null, + cloudItemId: null, + config: { content: item.content }, + displayName: meta.displayName, + id: `skill_${workspace.id}_${meta.key}`, + key: meta.key, + metadata: { + absorbed: true, + description: meta.description, + originPath: item.path, + workspaceId: workspace.id, + }, + source: "imported", + }).id); + } + input.repositories.workspaceSkills.replaceAssignments(workspace.id, dedupeAssignments(ids)); + } + + function absorbWorkspaceConfigState(workspace: WorkspaceRecord) { + ensureWorkspaceConfigState(workspace); + const legacy = readLegacyWorkspaceState(workspace); + const recognized = extractRecognizedOpencodeSections(legacy.opencode); + upsertManagedRecords(workspace.id, "mcps", recognized.mcps); + upsertManagedRecords(workspace.id, "plugins", recognized.plugins); + upsertManagedRecords(workspace.id, "providerConfigs", recognized.providers); + absorbManagedSkills(workspace); + const canonical = canonicalizeWorkspaceConfigState(workspace, { + openwork: mergeObjects(buildDefaultOpenwork(workspace), legacy.openwork), + opencode: mergeObjects(buildDefaultOpencode(), recognized.base), + }); + return input.repositories.workspaceConfigState.upsert({ + openwork: canonical.openwork, + opencode: canonical.opencode, + workspaceId: workspace.id, + }); + } + + function listAssignedRecords( + 
workspaceId: string, + assignmentTable: "workspaceMcps" | "workspacePlugins" | "workspaceProviderConfigs", + repo: "mcps" | "plugins" | "providerConfigs", + ) { + return input.repositories[assignmentTable] + .listForWorkspace(workspaceId) + .map((assignment) => input.repositories[repo].getById(assignment.itemId)) + .filter(Boolean) as ManagedConfigRecord[]; + } + + function listAssignedSkills(workspaceId: string) { + return input.repositories.workspaceSkills + .listForWorkspace(workspaceId) + .map((assignment) => input.repositories.skills.getById(assignment.itemId)) + .filter(Boolean) as ManagedConfigRecord[]; + } + + function computeSnapshot(workspace: WorkspaceRecord): WorkspaceConfigSnapshot { + const workspaceState = ensureWorkspaceConfigState(workspace); + const serverState = ensureServerConfigState(); + const canonicalState = canonicalizeWorkspaceConfigState(workspace, { + openwork: workspaceState.openwork, + opencode: workspaceState.opencode, + }); + const storedOpenwork = mergeObjects(buildDefaultOpenwork(workspace), canonicalState.openwork); + const storedOpencode = mergeObjects(buildDefaultOpencode(), canonicalState.opencode); + const effectiveOpenwork = mergeObjects(buildDefaultOpenwork(workspace), storedOpenwork); + const effectiveOpencode = mergeObjects(asObject(serverState.opencode), storedOpencode); + + const mcps = listAssignedRecords(workspace.id, "workspaceMcps", "mcps"); + if (mcps.length) { + effectiveOpencode.mcp = Object.fromEntries(mcps.map((item) => [item.key ?? item.displayName, item.config])); + } + + const plugins = listAssignedRecords(workspace.id, "workspacePlugins", "plugins"); + if (plugins.length) { + effectiveOpencode.plugin = plugins.map((item) => { + const config = asObject(item.config); + return typeof config.spec === "string" && config.spec.trim() ? config.spec.trim() : item.key ?? 
item.displayName; + }).filter(Boolean); + } + + const providers = listAssignedRecords(workspace.id, "workspaceProviderConfigs", "providerConfigs"); + if (providers.length) { + (effectiveOpencode as Record).provider = Object.fromEntries( + providers.map((item) => [item.key ?? item.displayName, item.config]), + ); + } + + const permission = asObject(effectiveOpencode.permission); + const externalDirectory = normalizeExternalDirectory(permission.external_directory); + const authorizedRoots = withoutWorkspaceRoot(normalizeStringArray([ + ...normalizeStringArray(effectiveOpenwork.authorizedRoots), + ...externalDirectory.folders, + ]), workspace); + const nextExternalDirectory = buildExternalDirectory(authorizedRoots, externalDirectory.hiddenEntries); + if (nextExternalDirectory) { + permission.external_directory = nextExternalDirectory; + } else { + delete permission.external_directory; + } + effectiveOpencode.permission = permission; + effectiveOpenwork.authorizedRoots = authorizedRoots; + + return { + effective: { + opencode: effectiveOpencode, + openwork: effectiveOpenwork, + }, + materialized: { + compatibilityOpencodePath: compatibilityOpencodeConfigPath(workspace), + compatibilityOpenworkPath: compatibilityOpenworkConfigPath(workspace), + configDir: workspace.configDir, + configOpencodePath: workspaceOpencodeConfigPath(workspace), + configOpenworkPath: workspaceOpenworkConfigPath(workspace), + }, + stored: { + opencode: storedOpencode, + openwork: storedOpenwork, + }, + updatedAt: workspaceState.updatedAt, + workspaceId: workspace.id, + }; + } + + function materializeSkills(workspace: WorkspaceRecord) { + const skills = listAssignedSkills(workspace.id); + const roots = workspaceSkillRoots(workspace); + if (roots.managedConfig) { + fs.rmSync(roots.managedConfig, { force: true, recursive: true }); + fs.mkdirSync(roots.managedConfig, { recursive: true }); + } + if (roots.compatibility) { + fs.rmSync(roots.compatibility, { force: true, recursive: true }); + 
fs.mkdirSync(roots.compatibility, { recursive: true }); + } + for (const skill of skills) { + const content = typeof asObject(skill.config).content === "string" ? String(asObject(skill.config).content) : ""; + if (!content) { + continue; + } + const skillKey = skill.key?.trim() || skill.id; + if (roots.managedConfig) { + const destination = path.join(roots.managedConfig, skillKey, "SKILL.md"); + fs.mkdirSync(path.dirname(destination), { recursive: true }); + fs.writeFileSync(destination, content.endsWith("\n") ? content : `${content}\n`, "utf8"); + } + if (roots.compatibility) { + const meta = asObject(skill.metadata); + const originPath = typeof meta.originPath === "string" ? meta.originPath : ""; + if (originPath && workspace.dataDir && originPath.startsWith(workspace.dataDir)) { + continue; + } + const destination = path.join(roots.compatibility, skillKey, "SKILL.md"); + fs.mkdirSync(path.dirname(destination), { recursive: true }); + fs.writeFileSync(destination, content.endsWith("\n") ? 
content : `${content}\n`, "utf8"); + } + } + } + + function materializeWorkspaceSnapshot(workspaceId: string) { + const workspace = getWorkspaceOrThrow(workspaceId); + ensureWorkspaceLocal(workspace); + const snapshot = computeSnapshot(workspace); + writeJsonFile(snapshot.materialized.configOpencodePath!, snapshot.effective.opencode); + writeJsonFile(snapshot.materialized.configOpenworkPath!, snapshot.effective.openwork); + if (snapshot.materialized.compatibilityOpencodePath) { + writeJsonFile(snapshot.materialized.compatibilityOpencodePath, snapshot.effective.opencode); + } + if (snapshot.materialized.compatibilityOpenworkPath) { + writeJsonFile(snapshot.materialized.compatibilityOpenworkPath, snapshot.effective.openwork); + } + materializeSkills(workspace); + return snapshot; + } + + function readRawProjectOpencodeConfig(workspaceId: string) { + const snapshot = materializeWorkspaceSnapshot(workspaceId); + return { + content: `${JSON.stringify(snapshot.effective.opencode, null, 2)}\n`, + exists: true, + path: snapshot.materialized.configOpencodePath, + updatedAt: snapshot.updatedAt, + }; + } + + function readRawGlobalOpencodeConfig() { + const state = ensureServerConfigState(); + const opencode = mergeObjects(buildDefaultOpencode(), state.opencode); + const filePath = path.join(input.workingDirectory.managedDir, "opencode.global.jsonc"); + writeJsonFile(filePath, opencode); + return { + content: `${JSON.stringify(opencode, null, 2)}\n`, + exists: true, + path: filePath, + updatedAt: state.updatedAt, + }; + } + + return { + absorbWorkspaceConfig(workspaceId: string) { + const workspace = getWorkspaceOrThrow(workspaceId); + ensureWorkspaceLocal(workspace); + absorbWorkspaceConfigState(workspace); + return materializeWorkspaceSnapshot(workspaceId); + }, + + ensureWorkspaceConfig(workspaceId: string) { + const workspace = getWorkspaceOrThrow(workspaceId); + ensureWorkspaceLocal(workspace); + ensureWorkspaceConfigState(workspace); + return 
materializeWorkspaceSnapshot(workspaceId); + }, + + async getWorkspaceConfigSnapshot(workspaceId: string) { + const workspace = getWorkspaceOrThrow(workspaceId); + if (workspace.kind === "remote") { + const server = getRemoteServerOrThrow(workspace); + const target = resolveRemoteWorkspaceTarget(server, workspace); + return requestRemoteOpenwork({ + path: `/workspaces/${encodeURIComponent(target.remoteWorkspaceId)}/config`, + server, + timeoutMs: 10_000, + }); + } + ensureWorkspaceLocal(workspace); + return computeSnapshot(workspace); + }, + + listWatchRoots(workspaceId: string) { + const workspace = getWorkspaceOrThrow(workspaceId); + ensureWorkspaceLocal(workspace); + return [ + workspace.configDir, + workspace.dataDir, + workspace.dataDir ? path.join(workspace.dataDir, ".opencode") : null, + ].filter((value): value is string => Boolean(value)); + }, + + async patchWorkspaceConfig(workspaceId: string, patch: { openwork?: JsonObject; opencode?: JsonObject }) { + const workspace = getWorkspaceOrThrow(workspaceId); + if (workspace.kind === "remote") { + const server = getRemoteServerOrThrow(workspace); + const target = resolveRemoteWorkspaceTarget(server, workspace); + return requestRemoteOpenwork({ + body: patch, + method: "PATCH", + path: `/workspaces/${encodeURIComponent(target.remoteWorkspaceId)}/config`, + server, + timeoutMs: 15_000, + }); + } + ensureWorkspaceLocal(workspace); + const current = ensureWorkspaceConfigState(workspace); + const nextOpenwork = patch.openwork ? 
mergeObjects(current.openwork, asObject(patch.openwork)) : current.openwork; + let nextOpencode = current.opencode; + if (patch.opencode) { + const merged = mergeObjects(current.opencode, asObject(patch.opencode)); + const recognized = extractRecognizedOpencodeSections(merged); + upsertManagedRecords(workspace.id, "mcps", recognized.mcps); + upsertManagedRecords(workspace.id, "plugins", recognized.plugins); + upsertManagedRecords(workspace.id, "providerConfigs", recognized.providers); + nextOpencode = recognized.base; + } + const canonical = canonicalizeWorkspaceConfigState(workspace, { + openwork: nextOpenwork, + opencode: nextOpencode, + }); + input.repositories.workspaceConfigState.upsert({ + openwork: canonical.openwork, + opencode: canonical.opencode, + workspaceId: workspace.id, + }); + return materializeWorkspaceSnapshot(workspaceId); + }, + + async readRawOpencodeConfig(workspaceId: string, scope: "global" | "project") { + const workspace = getWorkspaceOrThrow(workspaceId); + if (workspace.kind === "remote") { + const server = getRemoteServerOrThrow(workspace); + const target = resolveRemoteWorkspaceTarget(server, workspace); + const query = `?scope=${encodeURIComponent(scope)}`; + return requestRemoteOpenwork<{ content: string; exists: boolean; path: string | null; updatedAt: string }>({ + path: `/workspaces/${encodeURIComponent(target.remoteWorkspaceId)}/config/opencode-raw${query}`, + server, + timeoutMs: 10_000, + }); + } + return scope === "global" ? 
readRawGlobalOpencodeConfig() : readRawProjectOpencodeConfig(workspaceId); + }, + + reconcileAllWorkspaces() { + const workspaces = input.repositories.workspaces.list({ includeHidden: true }).filter((workspace) => workspace.kind !== "remote"); + for (const workspace of workspaces) { + absorbWorkspaceConfigState(workspace); + materializeWorkspaceSnapshot(workspace.id); + } + return { + reconciledAt: nowIso(), + workspaceIds: workspaces.map((workspace) => workspace.id), + }; + }, + + writeGlobalOpencodeConfig(content: string) { + const parsed = asObject(parseJsoncText(content)); + const recognized = extractRecognizedOpencodeSections(parsed); + if (recognized.mcps.length || recognized.plugins.length || recognized.providers.length) { + throw new RouteError( + 400, + "invalid_request", + "Global raw OpenCode config writes cannot include workspace-managed MCP, plugin, or provider sections during Phase 7.", + ); + } + input.repositories.serverConfigState.upsert({ + opencode: recognized.base, + serverId: input.serverId, + }); + return readRawGlobalOpencodeConfig(); + }, + + async writeWorkspaceRawOpencodeConfig(workspaceId: string, content: string) { + const workspace = getWorkspaceOrThrow(workspaceId); + if (workspace.kind === "remote") { + const server = getRemoteServerOrThrow(workspace); + const target = resolveRemoteWorkspaceTarget(server, workspace); + return requestRemoteOpenwork<{ content: string; exists: boolean; path: string | null; updatedAt: string }>({ + body: { content, scope: "project" }, + method: "POST", + path: `/workspaces/${encodeURIComponent(target.remoteWorkspaceId)}/config/opencode-raw`, + server, + timeoutMs: 15_000, + }); + } + ensureWorkspaceLocal(workspace); + const parsed = asObject(parseJsoncText(content)); + const recognized = extractRecognizedOpencodeSections(parsed); + upsertManagedRecords(workspace.id, "mcps", recognized.mcps); + upsertManagedRecords(workspace.id, "plugins", recognized.plugins); + upsertManagedRecords(workspace.id, 
"providerConfigs", recognized.providers); + const canonical = canonicalizeWorkspaceConfigState(workspace, { + openwork: ensureWorkspaceConfigState(workspace).openwork, + opencode: recognized.base, + }); + input.repositories.workspaceConfigState.upsert({ + openwork: canonical.openwork, + opencode: canonical.opencode, + workspaceId: workspace.id, + }); + return readRawProjectOpencodeConfig(workspaceId); + }, + }; +} diff --git a/apps/server-v2/src/services/managed-resource-service.ts b/apps/server-v2/src/services/managed-resource-service.ts new file mode 100644 index 00000000..14c67575 --- /dev/null +++ b/apps/server-v2/src/services/managed-resource-service.ts @@ -0,0 +1,1442 @@ +import fs from "node:fs"; +import path from "node:path"; +import { createHash, randomBytes, randomUUID } from "node:crypto"; +import { HTTPException } from "hono/http-exception"; +import type { ServerRepositories } from "../database/repositories.js"; +import type { CloudSigninRecord, JsonObject, ManagedConfigRecord, WorkspaceRecord, WorkspaceShareRecord } from "../database/types.js"; +import type { ServerWorkingDirectory } from "../database/working-directory.js"; +import type { ConfigMaterializationService } from "./config-materialization-service.js"; +import type { WorkspaceFileService } from "./workspace-file-service.js"; +import { RouteError } from "../http.js"; + +const DEFAULT_HUB_REPO = { + owner: "different-ai", + repo: "openwork-hub", + ref: "main", +} as const; + +const ALLOWED_BUNDLE_TYPES = new Set(["skill", "skills-set", "workspace-profile"]); +const ALLOWED_PORTABLE_PREFIXES = [".opencode/agents/", ".opencode/plugins/", ".opencode/tools/"]; +const RESERVED_PORTABLE_SEGMENTS = new Set([".DS_Store", "Thumbs.db", "node_modules"]); +const SKILL_NAME_REGEX = /^[a-z0-9]+(-[a-z0-9]+)*$/; +const COMMAND_NAME_REGEX = /^[A-Za-z0-9_-]+$/; +const MCP_NAME_REGEX = /^[A-Za-z0-9_-]+$/; + +type ManagedKind = "mcps" | "plugins" | "providerConfigs" | "skills"; +type ManagedSummary = 
ManagedConfigRecord & { workspaceIds: string[] }; +type WorkspaceExportSensitiveMode = "auto" | "include" | "exclude"; +type WorkspaceExportWarning = { detail: string; id: string; label: string }; +type HubRepo = { owner: string; repo: string; ref: string }; +type PortableFile = { content: string; path: string }; + +function nowIso() { + return new Date().toISOString(); +} + +function asObject(value: unknown): JsonObject { + return value && typeof value === "object" && !Array.isArray(value) ? { ...(value as JsonObject) } : {}; +} + +function cloneJson(value: T): T { + return JSON.parse(JSON.stringify(value)); +} + +function normalizeString(value: unknown) { + return typeof value === "string" ? value.trim() : ""; +} + +function normalizeUrl(value: unknown) { + const trimmed = normalizeString(value).replace(/\/+$/, ""); + if (!trimmed) { + return ""; + } + try { + const url = new URL(trimmed); + if (url.protocol !== "http:" && url.protocol !== "https:") { + return ""; + } + return url.toString().replace(/\/+$/, ""); + } catch { + return ""; + } +} + +function readRecord(value: unknown): Record | null { + return value && typeof value === "object" && !Array.isArray(value) ? { ...(value as Record) } : null; +} + +function parseFrontmatter(content: string) { + const match = content.match(/^---\r?\n([\s\S]*?)\r?\n---\r?\n?/); + if (!match) { + return { body: content, data: {} as Record }; + } + const raw = match[1] ?? 
""; + const data: Record = {}; + for (const line of raw.split(/\r?\n/)) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith("#")) { + continue; + } + const separatorIndex = trimmed.indexOf(":"); + if (separatorIndex <= 0) { + continue; + } + const key = trimmed.slice(0, separatorIndex).trim(); + const value = trimmed.slice(separatorIndex + 1).trim(); + if (!key) { + continue; + } + if (value === "true") { + data[key] = true; + continue; + } + if (value === "false") { + data[key] = false; + continue; + } + if (/^-?\d+(?:\.\d+)?$/.test(value)) { + data[key] = Number(value); + continue; + } + data[key] = value.replace(/^['"]|['"]$/g, ""); + } + return { + body: content.slice(match[0].length), + data, + }; +} + +function buildFrontmatter(data: Record) { + const yaml = Object.entries(data) + .filter(([, value]) => value !== undefined && value !== null && value !== "") + .map(([key, value]) => `${key}: ${typeof value === "string" ? String(value).replace(/\n/g, " ") : String(value)}`) + .join("\n"); + return `---\n${yaml}\n---\n`; +} + +function validateSkillName(name: string) { + if (!name || name.length < 1 || name.length > 64 || !SKILL_NAME_REGEX.test(name)) { + throw new RouteError(400, "invalid_request", "Skill name must be kebab-case (1-64 chars)."); + } +} + +function validateCommandName(name: string) { + if (!name || !COMMAND_NAME_REGEX.test(name)) { + throw new RouteError(400, "invalid_request", "Command name must be alphanumeric with _ or -."); + } +} + +function validateMcpName(name: string) { + if (!name || name.startsWith("-") || !MCP_NAME_REGEX.test(name)) { + throw new RouteError(400, "invalid_request", "MCP name must be alphanumeric and not start with -."); + } +} + +function validateMcpConfig(config: Record) { + const type = config.type; + if (type !== "local" && type !== "remote") { + throw new RouteError(400, "invalid_request", "MCP config type must be local or remote."); + } + if (type === "local") { + const command = config.command; 
+ if (!Array.isArray(command) || command.length === 0) { + throw new RouteError(400, "invalid_request", "Local MCP requires command array."); + } + } + if (type === "remote") { + const url = config.url; + if (!url || typeof url !== "string") { + throw new RouteError(400, "invalid_request", "Remote MCP requires url."); + } + } +} + +function normalizeManagedKey(value: string, fallback: string) { + const trimmed = value.trim().toLowerCase().replace(/[^a-z0-9_-]+/g, "-").replace(/^-+|-+$/g, ""); + return trimmed || fallback; +} + +function normalizePortablePath(input: unknown) { + const normalized = String(input ?? "") + .replaceAll("\\", "/") + .replace(/\/+/g, "/") + .replace(/^\.\//, "") + .replace(/^\/+/, "") + .trim(); + if (!normalized) { + throw new RouteError(400, "invalid_request", "Portable file path is required."); + } + if (normalized.includes("\0")) { + throw new RouteError(400, "invalid_request", `Portable file path contains an invalid byte: ${normalized}`); + } + const segments = normalized.split("/"); + if (segments.some((segment) => !segment || segment === "." 
|| segment === "..")) { + throw new RouteError(400, "invalid_request", `Portable file path is invalid: ${normalized}`); + } + return normalized; +} + +function isAllowedPortableFilePath(input: unknown) { + const filePath = normalizePortablePath(input); + if (!ALLOWED_PORTABLE_PREFIXES.some((prefix) => filePath.startsWith(prefix))) { + return false; + } + if (filePath.split("/").some((segment) => /^\.env(?:\..+)?$/i.test(segment) || RESERVED_PORTABLE_SEGMENTS.has(segment))) { + return false; + } + return true; +} + +function resolveSafeChild(baseDir: string, child: string) { + const base = path.resolve(baseDir); + const target = path.resolve(baseDir, child); + if (target !== base && !target.startsWith(base + path.sep)) { + throw new RouteError(400, "invalid_request", "Invalid file path."); + } + return target; +} + +function listPortableFiles(workspaceRoot: string): PortableFile[] { + const root = path.resolve(workspaceRoot); + const portableRoot = path.join(root, ".opencode"); + if (!fs.existsSync(portableRoot)) { + return []; + } + + const output: PortableFile[] = []; + const walk = (currentPath: string) => { + for (const entry of fs.readdirSync(currentPath, { withFileTypes: true })) { + const absolutePath = path.join(currentPath, entry.name); + if (entry.isDirectory()) { + walk(absolutePath); + continue; + } + if (!entry.isFile()) { + continue; + } + const relativePath = normalizePortablePath(absolutePath.slice(root.length + 1)); + if (!isAllowedPortableFilePath(relativePath)) { + continue; + } + output.push({ content: fs.readFileSync(absolutePath, "utf8"), path: relativePath }); + } + }; + + walk(portableRoot); + output.sort((left, right) => left.path.localeCompare(right.path)); + return output; +} + +function writePortableFiles(workspaceRoot: string, files: unknown, options?: { replace?: boolean }) { + if (!Array.isArray(files) || files.length === 0) { + return [] as PortableFile[]; + } + const root = path.resolve(workspaceRoot); + const planned = 
files.map((entry) => { + if (!entry || typeof entry !== "object" || Array.isArray(entry)) { + throw new RouteError(400, "invalid_request", "Portable files must be objects with path and content."); + } + const record = entry as Record; + const filePath = normalizePortablePath(record.path); + if (!isAllowedPortableFilePath(filePath)) { + throw new RouteError(400, "invalid_request", `Portable file path is not allowed: ${filePath}`); + } + return { + absolutePath: resolveSafeChild(root, filePath), + content: typeof record.content === "string" ? record.content : String(record.content ?? ""), + path: filePath, + }; + }); + + if (options?.replace) { + for (const existing of listPortableFiles(workspaceRoot)) { + fs.rmSync(path.join(root, existing.path), { force: true }); + } + } + + for (const file of planned) { + fs.mkdirSync(path.dirname(file.absolutePath), { recursive: true }); + fs.writeFileSync(file.absolutePath, file.content, "utf8"); + } + + return planned; +} + +function sanitizePortableOpencodeConfig(opencode: Record | null | undefined) { + const source = opencode && typeof opencode === "object" && !Array.isArray(opencode) ? opencode : {}; + const next: Record = {}; + for (const key of ["agent", "command", "instructions", "mcp", "permission", "plugin", "share", "tools", "watcher"] as const) { + if (key in source) { + next[key] = cloneJson(source[key]); + } + } + return next; +} + +function sanitizeOpenworkTemplateConfig(openwork: Record | null | undefined) { + const next = cloneJson(openwork ?? 
{}); + const blueprint = readRecord(next.blueprint); + if (!blueprint) { + return next; + } + const materialized = readRecord(blueprint.materialized); + if (materialized) { + delete materialized.sessions; + if (Object.keys(materialized).length === 0) { + delete blueprint.materialized; + } else { + blueprint.materialized = materialized; + } + } + next.blueprint = blueprint; + return next; +} + +function hasWordPair(tokens: string[], left: string, right: string) { + return tokens.includes(left) && tokens.includes(right); +} + +function splitNameIntoTokens(value: string) { + return value + .replace(/([a-z0-9])([A-Z])/g, "$1 $2") + .split(/[^A-Za-z0-9]+/) + .map((token) => token.trim().toLowerCase()) + .filter(Boolean); +} + +function detectSensitiveStringSignals(value: string) { + const trimmed = value.trim(); + if (!trimmed) { + return [] as string[]; + } + const matches = new Set(); + for (const pattern of [ + { id: "Bearer", test: /\bBearer\s+[A-Za-z0-9._~+\/-]+=*/ }, + { id: "token", test: /\b(?:ghp|gho|github_pat|xox[baprs]|sk|rk|AKIA|ASIA|AIza)[-_A-Za-z0-9]{8,}\b/ }, + { id: "JWT", test: /\beyJ[A-Za-z0-9_-]+\.[A-Za-z0-9._-]+\.[A-Za-z0-9._-]+\b/ }, + { id: "apiKey", test: /\bapi[_-]?key\b/i }, + { id: "token", test: /\b(?:access[_-]?token|refresh[_-]?token|auth[_-]?token|token)\b/i }, + { id: "secret", test: /\b(?:client[_-]?secret|secret)\b/i }, + { id: "password", test: /\b(?:password|passwd)\b/i }, + { id: "credentials", test: /\bcredentials?\b/i }, + { id: "privateKey", test: /\bprivate[_-]?key\b/i }, + ]) { + if (pattern.test.test(trimmed)) { + matches.add(pattern.id); + } + } + if (/https?:\/\//i.test(trimmed) && trimmed.length > 32) { + matches.add("long URL"); + } + return Array.from(matches); +} + +function collectSignals(value: unknown, keyHint?: string): string[] { + const matches = new Set(); + if (keyHint) { + const tokens = splitNameIntoTokens(keyHint); + const normalized = tokens.join(""); + for (const pattern of [ + { id: "apiKey", test: () => 
normalized.includes("apikey") || hasWordPair(tokens, "api", "key") }, + { id: "token", test: () => tokens.includes("token") || normalized.includes("authtoken") }, + { id: "secret", test: () => tokens.includes("secret") || hasWordPair(tokens, "client", "secret") }, + { id: "password", test: () => tokens.includes("password") }, + { id: "credentials", test: () => tokens.includes("credential") || tokens.includes("credentials") }, + { id: "privateKey", test: () => hasWordPair(tokens, "private", "key") }, + ]) { + if (pattern.test()) { + matches.add(pattern.id); + } + } + } + + if (typeof value === "string") { + for (const match of detectSensitiveStringSignals(value)) { + matches.add(match); + } + return Array.from(matches); + } + if (Array.isArray(value)) { + for (const item of value) { + for (const match of collectSignals(item)) { + matches.add(match); + } + } + return Array.from(matches); + } + if (value && typeof value === "object") { + for (const [childKey, childValue] of Object.entries(value as Record)) { + for (const match of collectSignals(childValue, childKey)) { + matches.add(match); + } + } + } + return Array.from(matches); +} + +function describeSignals(intro: string, signals: string[]) { + const unique = Array.from(new Set(signals)); + if (!unique.length) { + return `${intro}.`; + } + return `${intro}: ${unique.slice(0, 4).join(", ")}${unique.length > 4 ? ", ..." : ""}.`; +} + +function sanitizeValue(value: unknown): unknown { + if (typeof value === "string") { + return detectSensitiveStringSignals(value).length ? 
undefined : value; + } + if (Array.isArray(value)) { + return value.map((item) => sanitizeValue(item)).filter((item) => item !== undefined); + } + if (value && typeof value === "object") { + const next: Record = {}; + for (const [key, child] of Object.entries(value as Record)) { + const directSignals = collectSignals(child, key); + if (directSignals.length) { + continue; + } + const sanitized = sanitizeValue(child); + if (sanitized === undefined) { + continue; + } + if (Array.isArray(sanitized) && sanitized.length === 0) { + continue; + } + if (sanitized && typeof sanitized === "object" && !Array.isArray(sanitized) && Object.keys(sanitized as Record).length === 0) { + continue; + } + next[key] = sanitized; + } + return next; + } + return value; +} + +function collectWorkspaceExportWarnings(input: { files: PortableFile[]; opencode: Record | null | undefined }) { + const warnings = new Map(); + const opencode = input.opencode ?? {}; + for (const [sectionKey, sectionValue] of Object.entries(opencode)) { + const signals = collectSignals(sectionValue); + if (!signals.length) { + continue; + } + const label = sectionKey.charAt(0).toUpperCase() + sectionKey.slice(1); + warnings.set(sectionKey, { + detail: describeSignals(`Contains secret-like ${sectionKey} config`, signals), + id: `${sectionKey}-config`, + label, + }); + } + for (const file of input.files) { + if (!file.path.startsWith(".opencode/plugins/") && !file.path.startsWith(".opencode/tools/")) { + continue; + } + const signals = collectSignals(file.content); + if (!signals.length) { + continue; + } + warnings.set(`portable-file:${file.path}`, { + detail: describeSignals("Contains secret-like file content", signals), + id: `portable-file:${file.path}`, + label: file.path, + }); + } + return Array.from(warnings.values()); +} + +function stripSensitiveWorkspaceExportData(input: { files: PortableFile[]; opencode: Record | null | undefined }) { + const opencode = cloneJson(input.opencode ?? 
{}); + for (const [sectionKey, sectionValue] of Object.entries(opencode)) { + const sanitized = sanitizeValue(sectionValue); + if (sanitized === undefined) { + delete opencode[sectionKey]; + continue; + } + if (Array.isArray(sanitized) && sanitized.length === 0) { + delete opencode[sectionKey]; + continue; + } + if (sanitized && typeof sanitized === "object" && !Array.isArray(sanitized) && Object.keys(sanitized as Record).length === 0) { + delete opencode[sectionKey]; + continue; + } + opencode[sectionKey] = sanitized; + } + const files = input.files.filter((file) => collectSignals(file.content).length === 0).map((file) => ({ ...file })); + return { files, opencode }; +} + +function normalizeBundleFetchUrl(bundleUrl: unknown) { + let inputUrl: URL; + try { + inputUrl = new URL(String(bundleUrl ?? "").trim()); + } catch { + throw new RouteError(400, "invalid_request", "Invalid shared bundle URL."); + } + if (inputUrl.protocol !== "https:" && inputUrl.protocol !== "http:") { + throw new RouteError(400, "invalid_request", "Shared bundle URL must use http(s)."); + } + const trustedBaseUrl = new URL(resolvePublisherBaseUrl().replace(/\/+$/, "")); + if (inputUrl.origin !== trustedBaseUrl.origin) { + throw new RouteError(400, "invalid_request", `Shared bundle URLs must use the configured OpenWork publisher (${trustedBaseUrl.origin}).`); + } + const segments = inputUrl.pathname.split("/").filter(Boolean); + if (segments[0] !== "b" || !segments[1]) { + throw new RouteError(400, "invalid_request", "Shared bundle URL must point to a bundle id."); + } + trustedBaseUrl.pathname = `/b/${segments[1]}/data`; + trustedBaseUrl.search = ""; + return trustedBaseUrl; +} + +function readErrorMessage(text: string) { + const trimmed = text.trim(); + if (!trimmed) { + return ""; + } + try { + const json = JSON.parse(trimmed) as Record; + return typeof json.message === "string" ? 
json.message.trim() : trimmed; + } catch { + return trimmed; + } +} + +function resolvePublisherBaseUrl() { + return String(process.env.OPENWORK_PUBLISHER_BASE_URL ?? "").trim() || "https://share.openworklabs.com"; +} + +function resolvePublisherOrigin() { + return String(process.env.OPENWORK_PUBLISHER_REQUEST_ORIGIN ?? "").trim() || "https://app.openwork.software"; +} + +function fetchTelegramBotInfo(token: string) { + const trimmed = token.trim(); + if (!trimmed) { + return Promise.resolve(null as { id: number; name?: string; username?: string } | null); + } + return fetch(`https://api.telegram.org/bot${trimmed}/getMe`, { + headers: { Accept: "application/json" }, + }).then(async (response) => { + if (!response.ok) { + return null; + } + const json = await response.json().catch(() => null) as Record | null; + const result = readRecord(json?.result); + if (!result) { + return null; + } + const id = Number(result.id); + return Number.isFinite(id) + ? { + id, + name: typeof result.first_name === "string" ? result.first_name : undefined, + username: typeof result.username === "string" ? 
result.username : undefined, + } + : null; + }).catch(() => null); +} + +function createPairingCode() { + return randomBytes(4).toString("hex").toUpperCase(); +} + +function pairingCodeHash(value: string) { + return createHash("sha256").update(value).digest("hex"); +} + +function extractTriggerFromBody(body: string) { + const lines = body.split(/\r?\n/); + let inWhenSection = false; + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed) { + continue; + } + if (/^#{1,6}\s+/.test(trimmed)) { + const heading = trimmed.replace(/^#{1,6}\s+/, "").trim(); + inWhenSection = /^when to use$/i.test(heading); + continue; + } + if (!inWhenSection) { + continue; + } + const cleaned = trimmed.replace(/^[-*+]\s+/, "").replace(/^\d+[.)]\s+/, "").trim(); + if (cleaned) { + return cleaned; + } + } + return ""; +} + +export type ManagedResourceService = ReturnType; + +export function createManagedResourceService(input: { + config: ConfigMaterializationService; + files: WorkspaceFileService; + repositories: ServerRepositories; + serverId: string; + workingDirectory: ServerWorkingDirectory; +}) { + const kindConfig = { + mcps: { + assignmentRepo: input.repositories.workspaceMcps, + itemRepo: input.repositories.mcps, + reloadReason: "mcp" as const, + triggerType: "mcp" as const, + }, + plugins: { + assignmentRepo: input.repositories.workspacePlugins, + itemRepo: input.repositories.plugins, + reloadReason: "plugins" as const, + triggerType: "plugin" as const, + }, + providerConfigs: { + assignmentRepo: input.repositories.workspaceProviderConfigs, + itemRepo: input.repositories.providerConfigs, + reloadReason: "config" as const, + triggerType: "config" as const, + }, + skills: { + assignmentRepo: input.repositories.workspaceSkills, + itemRepo: input.repositories.skills, + reloadReason: "skills" as const, + triggerType: "skill" as const, + }, + }; + + function getWorkspaceOrThrow(workspaceId: string) { + const workspace = 
input.repositories.workspaces.getById(workspaceId); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + return workspace; + } + + function ensureWorkspaceMutable(workspace: WorkspaceRecord) { + if (workspace.kind === "remote") { + throw new RouteError( + 501, + "not_implemented", + "Phase 8 managed-resource mutation currently supports local, control, and help workspaces only. Remote managed-resource mutation stays on the compatibility path until remote credentials and projection ownership fully migrate.", + ); + } + if (!workspace.dataDir?.trim()) { + throw new RouteError(400, "invalid_request", `Workspace ${workspace.id} does not have a local data directory.`); + } + return workspace; + } + + function workspaceSkillPath(workspace: WorkspaceRecord, key: string) { + const baseDir = workspace.configDir?.trim() || workspace.dataDir?.trim() || ""; + return path.join(baseDir, ".opencode", "skills", "openwork-managed", key, "SKILL.md"); + } + + function workspaceCommandDir(workspace: WorkspaceRecord) { + return path.join(workspace.dataDir!.trim(), ".opencode", "commands"); + } + + function listWorkspaceCommands(workspace: WorkspaceRecord) { + ensureWorkspaceMutable(workspace); + const directory = workspaceCommandDir(workspace); + if (!fs.existsSync(directory)) { + return [] as Array<{ description?: string; name: string; template: string }>; + } + const items: Array<{ description?: string; name: string; template: string }> = []; + for (const entry of fs.readdirSync(directory, { withFileTypes: true })) { + if (!entry.isFile() || !entry.name.endsWith(".md")) { + continue; + } + const filePath = path.join(directory, entry.name); + const content = fs.readFileSync(filePath, "utf8"); + const parsed = parseFrontmatter(content); + const name = typeof parsed.data.name === "string" ? 
parsed.data.name : entry.name.replace(/\.md$/, ""); + if (!COMMAND_NAME_REGEX.test(name)) { + continue; + } + items.push({ + description: typeof parsed.data.description === "string" ? parsed.data.description : undefined, + name, + template: parsed.body.trim(), + }); + } + items.sort((left, right) => left.name.localeCompare(right.name)); + return items; + } + + function upsertWorkspaceCommand(workspace: WorkspaceRecord, payload: { description?: string; name: string; template: string }) { + ensureWorkspaceMutable(workspace); + validateCommandName(payload.name); + if (!payload.template.trim()) { + throw new RouteError(400, "invalid_request", "Command template is required."); + } + const directory = workspaceCommandDir(workspace); + fs.mkdirSync(directory, { recursive: true }); + const filePath = path.join(directory, `${payload.name}.md`); + const content = `${buildFrontmatter({ description: payload.description, name: payload.name })}\n${payload.template.trim()}\n`; + fs.writeFileSync(filePath, content, "utf8"); + return filePath; + } + + function clearWorkspaceCommands(workspace: WorkspaceRecord) { + ensureWorkspaceMutable(workspace); + fs.rmSync(workspaceCommandDir(workspace), { force: true, recursive: true }); + } + + function summaryForKind(kind: ManagedKind, item: ManagedConfigRecord): ManagedSummary { + return { + ...item, + workspaceIds: kindConfig[kind].assignmentRepo.listForItem(item.id).map((assignment) => assignment.workspaceId), + }; + } + + async function materializeAssignments(kind: ManagedKind, workspaceIds: string[], action: "added" | "removed" | "updated", name: string) { + for (const workspaceId of Array.from(new Set(workspaceIds.filter(Boolean)))) { + const workspace = input.repositories.workspaces.getById(workspaceId); + if (!workspace || workspace.kind === "remote") { + continue; + } + input.config.ensureWorkspaceConfig(workspaceId); + input.files.emitReloadEvent(workspaceId, kindConfig[kind].reloadReason, { + action, + name, + path: kind === 
"skills" ? workspaceSkillPath(workspace, normalizeManagedKey(name, workspaceId)) : undefined, + type: kindConfig[kind].triggerType, + }); + await input.files.recordWorkspaceAudit( + workspaceId, + `${kind}.${action}`, + workspace.dataDir ?? workspaceId, + `${action === "removed" ? "Removed" : action === "updated" ? "Updated" : "Added"} ${kind} item ${name} through Server V2.`, + ); + } + } + + function upsertManaged(kind: ManagedKind, payload: { + auth?: JsonObject | null; + cloudItemId?: string | null; + config?: JsonObject; + displayName: string; + id?: string; + key?: string | null; + metadata?: JsonObject | null; + source?: ManagedConfigRecord["source"]; + workspaceIds?: string[]; + }) { + const displayName = payload.displayName.trim(); + if (!displayName) { + throw new RouteError(400, "invalid_request", "displayName is required."); + } + const key = normalizeManagedKey(payload.key?.trim() || displayName, kind.slice(0, -1)); + const id = payload.id?.trim() || `${kind.slice(0, -1)}_${randomUUID()}`; + const workspaceIds = Array.from(new Set((payload.workspaceIds ?? []).map((value) => value.trim()).filter(Boolean))); + for (const workspaceId of workspaceIds) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + } + const item = kindConfig[kind].itemRepo.upsert({ + auth: payload.auth ?? null, + cloudItemId: payload.cloudItemId ?? null, + config: payload.config ?? {}, + displayName, + id, + key, + metadata: payload.metadata ?? null, + source: payload.source ?? 
"openwork_managed", + }); + if (payload.workspaceIds) { + const currentAssignments = kindConfig[kind].assignmentRepo.listForItem(item.id).map((assignment) => assignment.workspaceId); + for (const workspace of input.repositories.workspaces.list({ includeHidden: true })) { + if (workspace.kind === "remote") { + continue; + } + const nextAssigned = workspaceIds.includes(workspace.id); + const currentlyAssigned = currentAssignments.includes(workspace.id); + if (nextAssigned === currentlyAssigned) { + continue; + } + const currentForWorkspace = kindConfig[kind].assignmentRepo.listForWorkspace(workspace.id).map((assignment) => assignment.itemId); + const nextForWorkspace = nextAssigned + ? Array.from(new Set([...currentForWorkspace, item.id])) + : currentForWorkspace.filter((candidate) => candidate !== item.id); + kindConfig[kind].assignmentRepo.replaceAssignments(workspace.id, nextForWorkspace); + } + } + return summaryForKind(kind, item); + } + + async function updateAssignments(kind: ManagedKind, itemId: string, workspaceIds: string[]) { + const item = kindConfig[kind].itemRepo.getById(itemId); + if (!item) { + throw new HTTPException(404, { message: `${kind} item not found: ${itemId}` }); + } + const normalizedWorkspaceIds = Array.from(new Set(workspaceIds.map((value) => value.trim()).filter(Boolean))); + for (const workspaceId of normalizedWorkspaceIds) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + } + const changedWorkspaceIds = new Set(); + for (const workspace of input.repositories.workspaces.list({ includeHidden: true })) { + if (workspace.kind === "remote") { + continue; + } + const currentForWorkspace = kindConfig[kind].assignmentRepo.listForWorkspace(workspace.id).map((assignment) => assignment.itemId); + const currentlyAssigned = currentForWorkspace.includes(itemId); + const nextAssigned = normalizedWorkspaceIds.includes(workspace.id); + if (currentlyAssigned === nextAssigned) { + continue; + } + const nextItemIds = nextAssigned + ? 
Array.from(new Set([...currentForWorkspace, itemId])) + : currentForWorkspace.filter((candidate) => candidate !== itemId); + kindConfig[kind].assignmentRepo.replaceAssignments(workspace.id, nextItemIds); + changedWorkspaceIds.add(workspace.id); + } + await materializeAssignments(kind, Array.from(changedWorkspaceIds), "updated", item.displayName); + return summaryForKind(kind, item); + } + + return { + listManaged(kind: ManagedKind) { + return kindConfig[kind].itemRepo.list().map((item) => summaryForKind(kind, item)); + }, + + createManaged(kind: ManagedKind, payload: Parameters[1]) { + return upsertManaged(kind, payload); + }, + + async deleteManaged(kind: ManagedKind, itemId: string) { + const item = kindConfig[kind].itemRepo.getById(itemId); + if (!item) { + throw new HTTPException(404, { message: `${kind} item not found: ${itemId}` }); + } + const workspaceIds = kindConfig[kind].assignmentRepo.listForItem(itemId).map((assignment) => assignment.workspaceId); + kindConfig[kind].assignmentRepo.deleteForItem(itemId); + kindConfig[kind].itemRepo.deleteById(itemId); + await materializeAssignments(kind, workspaceIds, "removed", item.displayName); + return { deleted: true, id: itemId }; + }, + + updateManaged(kind: ManagedKind, itemId: string, payload: Omit[1], "id">) { + const existing = kindConfig[kind].itemRepo.getById(itemId); + if (!existing) { + throw new HTTPException(404, { message: `${kind} item not found: ${itemId}` }); + } + return upsertManaged(kind, { + auth: payload.auth ?? existing.auth, + cloudItemId: payload.cloudItemId ?? existing.cloudItemId, + config: payload.config ?? existing.config, + displayName: payload.displayName || existing.displayName, + id: itemId, + key: payload.key ?? existing.key, + metadata: payload.metadata ?? existing.metadata, + source: payload.source ?? 
existing.source, + workspaceIds: payload.workspaceIds, + }); + }, + + updateAssignments, + + listWorkspaceMcp(workspaceId: string) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + return kindConfig.mcps.assignmentRepo.listForWorkspace(workspaceId) + .map((assignment) => input.repositories.mcps.getById(assignment.itemId)) + .filter(Boolean) + .map((item) => ({ + config: item!.config, + name: item!.key ?? item!.displayName, + source: "config.project" as const, + })); + }, + + async addWorkspaceMcp(workspaceId: string, payload: { config: Record; name: string }) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + validateMcpName(payload.name); + validateMcpConfig(payload.config); + const key = normalizeManagedKey(payload.name, "mcp"); + const existing = this.listManaged("mcps").find((item) => item.key === key && item.workspaceIds.includes(workspaceId)) ?? null; + const item = upsertManaged("mcps", { + config: payload.config, + displayName: payload.name, + id: existing?.id, + key, + metadata: { workspaceId }, + workspaceIds: [workspace.id], + }); + await materializeAssignments("mcps", [workspaceId], existing ? "updated" : "added", item.displayName); + return { items: this.listWorkspaceMcp(workspaceId) }; + }, + + async removeWorkspaceMcp(workspaceId: string, name: string) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const key = normalizeManagedKey(name, "mcp"); + const assignment = this.listManaged("mcps").find((item) => item.key === key && item.workspaceIds.includes(workspaceId)) ?? 
null; + if (!assignment) { + return { items: this.listWorkspaceMcp(workspaceId) }; + } + const nextWorkspaceIds = assignment.workspaceIds.filter((candidate) => candidate !== workspaceId); + if (nextWorkspaceIds.length === 0) { + await this.deleteManaged("mcps", assignment.id); + } else { + await updateAssignments("mcps", assignment.id, nextWorkspaceIds); + } + return { items: this.listWorkspaceMcp(workspaceId) }; + }, + + listWorkspacePlugins(workspaceId: string) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const items = kindConfig.plugins.assignmentRepo.listForWorkspace(workspaceId) + .map((assignment) => input.repositories.plugins.getById(assignment.itemId)) + .filter(Boolean) + .map((item) => ({ + scope: "project" as const, + source: "config" as const, + spec: typeof asObject(item!.config).spec === "string" ? String(asObject(item!.config).spec) : item!.displayName, + })); + return { items, loadOrder: ["config.global", "config.project", "dir.global", "dir.project"] }; + }, + + async addWorkspacePlugin(workspaceId: string, spec: string) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const normalizedSpec = spec.trim(); + if (!normalizedSpec) { + throw new RouteError(400, "invalid_request", "Plugin spec is required."); + } + const key = normalizeManagedKey(normalizedSpec.replace(/^file:/, ""), "plugin"); + const existing = this.listManaged("plugins").find((item) => item.key === key && item.workspaceIds.includes(workspaceId)) ?? null; + const item = upsertManaged("plugins", { + config: { spec: normalizedSpec }, + displayName: normalizedSpec, + id: existing?.id, + key, + metadata: { workspaceId }, + workspaceIds: [workspace.id], + }); + await materializeAssignments("plugins", [workspaceId], existing ? 
"updated" : "added", item.displayName); + return this.listWorkspacePlugins(workspaceId); + }, + + async removeWorkspacePlugin(workspaceId: string, spec: string) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const key = normalizeManagedKey(spec.replace(/^file:/, ""), "plugin"); + const assignment = this.listManaged("plugins").find((item) => item.key === key && item.workspaceIds.includes(workspaceId)) ?? null; + if (!assignment) { + return this.listWorkspacePlugins(workspaceId); + } + const nextWorkspaceIds = assignment.workspaceIds.filter((candidate) => candidate !== workspaceId); + if (nextWorkspaceIds.length === 0) { + await this.deleteManaged("plugins", assignment.id); + } else { + await updateAssignments("plugins", assignment.id, nextWorkspaceIds); + } + return this.listWorkspacePlugins(workspaceId); + }, + + listWorkspaceSkills(workspaceId: string) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + return kindConfig.skills.assignmentRepo.listForWorkspace(workspaceId) + .map((assignment) => input.repositories.skills.getById(assignment.itemId)) + .filter(Boolean) + .map((item) => ({ + description: typeof asObject(item!.metadata).description === "string" ? String(asObject(item!.metadata).description) : item!.displayName, + name: item!.key ?? item!.displayName, + path: workspaceSkillPath(workspace, item!.key ?? item!.id), + scope: "project" as const, + trigger: typeof asObject(item!.metadata).trigger === "string" ? String(asObject(item!.metadata).trigger) : undefined, + })); + }, + + getWorkspaceSkill(workspaceId: string, name: string) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const key = normalizeManagedKey(name, "skill"); + const skill = this.listManaged("skills").find((item) => item.key === key && item.workspaceIds.includes(workspaceId)) ?? 
null; + if (!skill) { + throw new HTTPException(404, { message: `Skill not found: ${name}` }); + } + const content = typeof asObject(skill.config).content === "string" ? String(asObject(skill.config).content) : ""; + return { + content, + item: { + description: typeof asObject(skill.metadata).description === "string" ? String(asObject(skill.metadata).description) : skill.displayName, + name: skill.key ?? skill.displayName, + path: workspaceSkillPath(workspace, skill.key ?? skill.id), + scope: "project" as const, + trigger: typeof asObject(skill.metadata).trigger === "string" ? String(asObject(skill.metadata).trigger) : undefined, + }, + }; + }, + + async upsertWorkspaceSkill(workspaceId: string, payload: { + cloudItemId?: string | null; + content: string; + description?: string; + metadata?: JsonObject | null; + name: string; + source?: ManagedConfigRecord["source"]; + trigger?: string; + }) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + validateSkillName(payload.name); + if (!payload.content.trim()) { + throw new RouteError(400, "invalid_request", "Skill content is required."); + } + const parsed = parseFrontmatter(payload.content); + const frontmatterName = typeof parsed.data.name === "string" ? parsed.data.name.trim() : ""; + if (frontmatterName && frontmatterName !== payload.name) { + throw new RouteError(400, "invalid_request", "Skill frontmatter name must match payload name."); + } + const nextDescription = normalizeString(parsed.data.description) || normalizeString(payload.description) || payload.name; + const trigger = normalizeString(parsed.data.trigger) || normalizeString(parsed.data.when) || normalizeString(payload.trigger) || extractTriggerFromBody(parsed.body); + const content = Object.keys(parsed.data).length > 0 + ? 
`${buildFrontmatter({ ...parsed.data, description: nextDescription, name: payload.name })}${parsed.body.replace(/^\n/, "")}` + : `${buildFrontmatter({ description: nextDescription, name: payload.name, ...(trigger ? { trigger } : {}) })}${payload.content.replace(/^\n/, "")}`; + const existing = this.listManaged("skills").find((item) => item.key === payload.name && item.workspaceIds.includes(workspaceId)) ?? null; + const nextMetadata = { + ...(existing?.metadata ?? {}), + ...(payload.metadata ?? {}), + description: nextDescription, + trigger, + workspaceId, + } satisfies JsonObject; + const item = upsertManaged("skills", { + cloudItemId: payload.cloudItemId ?? existing?.cloudItemId ?? null, + config: { content: content.endsWith("\n") ? content : `${content}\n` }, + displayName: payload.name, + id: existing?.id, + key: payload.name, + metadata: nextMetadata, + source: payload.source ?? existing?.source ?? "openwork_managed", + workspaceIds: [workspace.id], + }); + await materializeAssignments("skills", [workspaceId], existing ? "updated" : "added", item.displayName); + return this.getWorkspaceSkill(workspaceId, payload.name).item; + }, + + async deleteWorkspaceSkill(workspaceId: string, name: string) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const key = normalizeManagedKey(name, "skill"); + const assignment = this.listManaged("skills").find((item) => item.key === key && item.workspaceIds.includes(workspaceId)) ?? 
null; + if (!assignment) { + throw new HTTPException(404, { message: `Skill not found: ${name}` }); + } + const nextWorkspaceIds = assignment.workspaceIds.filter((candidate) => candidate !== workspaceId); + if (nextWorkspaceIds.length === 0) { + await this.deleteManaged("skills", assignment.id); + } else { + await updateAssignments("skills", assignment.id, nextWorkspaceIds); + } + return { path: workspaceSkillPath(workspace, key).replace(/[/\\]SKILL\.md$/, "") }; + }, + + async listHubSkills(repo?: Partial) { + const resolvedRepo: HubRepo = { + owner: normalizeString(repo?.owner) || DEFAULT_HUB_REPO.owner, + repo: normalizeString(repo?.repo) || DEFAULT_HUB_REPO.repo, + ref: normalizeString(repo?.ref) || DEFAULT_HUB_REPO.ref, + }; + const listing = await fetch(`https://api.github.com/repos/${encodeURIComponent(resolvedRepo.owner)}/${encodeURIComponent(resolvedRepo.repo)}/contents/skills?ref=${encodeURIComponent(resolvedRepo.ref)}`, { + headers: { Accept: "application/vnd.github+json", "User-Agent": "openwork-server-v2" }, + }); + if (!listing.ok) { + throw new RouteError(502, "bad_gateway", `Failed to fetch hub catalog (${listing.status}).`); + } + const items = await listing.json() as Array>; + const rawBase = `https://raw.githubusercontent.com/${encodeURIComponent(resolvedRepo.owner)}/${encodeURIComponent(resolvedRepo.repo)}/${encodeURIComponent(resolvedRepo.ref)}`; + const result: Array<{ description: string; name: string; source: { owner: string; path: string; ref: string; repo: string }; trigger?: string }> = []; + for (const entry of Array.isArray(items) ? items : []) { + const name = typeof entry?.name === "string" ? entry.name.trim() : ""; + const type = typeof entry?.type === "string" ? entry.type : ""; + if (!name || type !== "dir") { + continue; + } + try { + const content = await fetch(`${rawBase}/skills/${encodeURIComponent(name)}/SKILL.md`, { + headers: { Accept: "text/plain", "User-Agent": "openwork-server-v2" }, + }).then((response) => response.ok ? 
response.text() : ""); + if (!content) { + continue; + } + const parsed = parseFrontmatter(content); + const description = typeof parsed.data.description === "string" ? parsed.data.description : ""; + const trigger = typeof parsed.data.trigger === "string" ? parsed.data.trigger : extractTriggerFromBody(parsed.body); + result.push({ + description, + name, + source: { owner: resolvedRepo.owner, path: `skills/${name}`, ref: resolvedRepo.ref, repo: resolvedRepo.repo }, + ...(trigger ? { trigger } : {}), + }); + } catch { + // ignore individual skill failures + } + } + result.sort((left, right) => left.name.localeCompare(right.name)); + return { items: result }; + }, + + async installHubSkill(workspaceId: string, inputValue: { name: string; overwrite?: boolean; repo?: Partial }) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + validateSkillName(inputValue.name); + const repo: HubRepo = { + owner: normalizeString(inputValue.repo?.owner) || DEFAULT_HUB_REPO.owner, + repo: normalizeString(inputValue.repo?.repo) || DEFAULT_HUB_REPO.repo, + ref: normalizeString(inputValue.repo?.ref) || DEFAULT_HUB_REPO.ref, + }; + const rawUrl = `https://raw.githubusercontent.com/${encodeURIComponent(repo.owner)}/${encodeURIComponent(repo.repo)}/${encodeURIComponent(repo.ref)}/skills/${encodeURIComponent(inputValue.name)}/SKILL.md`; + const response = await fetch(rawUrl, { + headers: { Accept: "text/plain", "User-Agent": "openwork-server-v2" }, + }); + if (!response.ok) { + throw new RouteError(404, "not_found", `Hub skill not found: ${inputValue.name}`); + } + const content = await response.text(); + const existing = this.listManaged("skills").find((item) => item.key === inputValue.name && item.workspaceIds.includes(workspaceId)) ?? 
null; + if (existing && inputValue.overwrite !== true) { + return { action: "updated" as const, name: inputValue.name, path: workspaceSkillPath(workspace, inputValue.name).replace(/[/\\]SKILL\.md$/, ""), skipped: 1, written: 0 }; + } + const parsed = parseFrontmatter(content); + const description = typeof parsed.data.description === "string" ? parsed.data.description : inputValue.name; + const trigger = typeof parsed.data.trigger === "string" ? parsed.data.trigger : extractTriggerFromBody(parsed.body); + await this.upsertWorkspaceSkill(workspaceId, { + content, + description, + metadata: { + description, + install: { + kind: "hub", + owner: repo.owner, + path: `skills/${inputValue.name}`, + ref: repo.ref, + repo: repo.repo, + url: rawUrl, + }, + trigger, + workspaceId, + }, + name: inputValue.name, + source: "imported", + trigger, + }); + return { action: existing ? "updated" as const : "added" as const, name: inputValue.name, path: workspaceSkillPath(workspace, inputValue.name).replace(/[/\\]SKILL\.md$/, ""), skipped: 0, written: 1 }; + }, + + getCloudSignin() { + return input.repositories.cloudSignin.getPrimary(); + }, + + clearCloudSignin() { + input.repositories.cloudSignin.deletePrimary(); + return null; + }, + + upsertCloudSignin(payload: { + auth?: JsonObject | null; + cloudBaseUrl: string; + metadata?: JsonObject | null; + orgId?: string | null; + userId?: string | null; + }) { + const cloudBaseUrl = normalizeUrl(payload.cloudBaseUrl); + if (!cloudBaseUrl) { + throw new RouteError(400, "invalid_request", "cloudBaseUrl must be a valid http(s) URL."); + } + return input.repositories.cloudSignin.upsert({ + auth: payload.auth ?? null, + cloudBaseUrl, + id: input.repositories.cloudSignin.getPrimary()?.id ?? `cloud_${input.serverId}`, + lastValidatedAt: null, + metadata: payload.metadata ?? null, + orgId: payload.orgId ?? null, + serverId: input.serverId, + userId: payload.userId ?? 
null, + }); + }, + + async validateCloudSignin() { + const current = input.repositories.cloudSignin.getPrimary(); + if (!current) { + throw new RouteError(404, "not_found", "Cloud signin is not configured."); + } + const auth = asObject(current.auth); + const token = typeof auth.authToken === "string" ? auth.authToken.trim() : typeof auth.token === "string" ? auth.token.trim() : ""; + if (!token) { + throw new RouteError(400, "invalid_request", "Cloud signin does not include an auth token."); + } + const response = await fetch(`${current.cloudBaseUrl.replace(/\/+$/, "")}/v1/me`, { + headers: { + Accept: "application/json", + Authorization: `Bearer ${token}`, + }, + }); + if (!response.ok) { + throw new RouteError(502, "bad_gateway", `Cloud validation failed (${response.status}).`); + } + const payload = await response.json().catch(() => ({})) as Record; + const validatedUser = readRecord(payload.user) ?? readRecord(payload.me) ?? null; + const validated = input.repositories.cloudSignin.upsert({ + ...current, + lastValidatedAt: nowIso(), + metadata: { + ...(current.metadata ?? {}), + validatedUser, + }, + userId: typeof validatedUser?.id === "string" && validatedUser.id.trim() ? validatedUser.id.trim() : current.userId, + }); + return { + lastValidatedAt: validated.lastValidatedAt, + ok: true, + record: validated, + }; + }, + + getWorkspaceShare(workspaceId: string) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + return input.repositories.workspaceShares.getLatestByWorkspace(workspaceId); + }, + + exposeWorkspaceShare(workspaceId: string) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const existing = input.repositories.workspaceShares.getLatestByWorkspace(workspaceId); + const record = input.repositories.workspaceShares.upsert({ + accessKey: randomBytes(24).toString("base64url"), + audit: { + exposedAt: nowIso(), + previousShareId: existing?.id ?? null, + }, + id: existing?.id ?? 
`share_${workspaceId}`, + lastUsedAt: existing?.lastUsedAt ?? null, + revokedAt: null, + status: "active", + workspaceId, + }); + return record; + }, + + revokeWorkspaceShare(workspaceId: string) { + ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const existing = input.repositories.workspaceShares.getLatestByWorkspace(workspaceId); + if (!existing) { + throw new HTTPException(404, { message: `Workspace share not found: ${workspaceId}` }); + } + return input.repositories.workspaceShares.upsert({ + ...existing, + accessKey: null, + audit: { + ...(existing.audit ?? {}), + revokedAt: nowIso(), + }, + revokedAt: nowIso(), + status: "revoked", + }); + }, + + async exportWorkspace(workspaceId: string, options?: { sensitiveMode?: WorkspaceExportSensitiveMode }) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const sensitiveMode = options?.sensitiveMode ?? "auto"; + const snapshot = await input.config.getWorkspaceConfigSnapshot(workspaceId); + let opencode = sanitizePortableOpencodeConfig(snapshot.effective.opencode); + const openwork = sanitizeOpenworkTemplateConfig(snapshot.stored.openwork); + const skills = kindConfig.skills.assignmentRepo.listForWorkspace(workspaceId) + .map((assignment) => input.repositories.skills.getById(assignment.itemId)) + .filter(Boolean) + .map((item) => ({ + content: typeof asObject(item!.config).content === "string" ? String(asObject(item!.config).content) : "", + description: typeof asObject(item!.metadata).description === "string" ? String(asObject(item!.metadata).description) : undefined, + name: item!.key ?? item!.displayName, + trigger: typeof asObject(item!.metadata).trigger === "string" ? 
String(asObject(item!.metadata).trigger) : undefined, + })) + .filter((item) => item.content); + const commands = listWorkspaceCommands(workspace); + let files = listPortableFiles(workspace.dataDir!); + const warnings = collectWorkspaceExportWarnings({ files, opencode: snapshot.effective.opencode }); + if (warnings.length && sensitiveMode === "auto") { + return { conflict: true as const, warnings }; + } + if (sensitiveMode === "exclude") { + const sanitized = stripSensitiveWorkspaceExportData({ files, opencode }); + files = sanitized.files; + opencode = sanitized.opencode; + } + return { + commands, + exportedAt: Date.now(), + ...(files.length ? { files } : {}), + openwork, + opencode, + skills, + workspaceId, + }; + }, + + async importWorkspace(workspaceId: string, payload: Record) { + const workspace = ensureWorkspaceMutable(getWorkspaceOrThrow(workspaceId)); + const modes = asObject(payload.mode); + const opencode = readRecord(payload.opencode); + const openwork = readRecord(payload.openwork); + const skills = Array.isArray(payload.skills) ? payload.skills : []; + const commands = Array.isArray(payload.commands) ? payload.commands : []; + const files = Array.isArray(payload.files) ? payload.files : []; + + if (opencode) { + const sanitizedOpencode = sanitizePortableOpencodeConfig(opencode); + await input.config.patchWorkspaceConfig(workspaceId, { + opencode: modes.opencode === "replace" ? 
sanitizedOpencode : sanitizedOpencode, + }); + } + + if (openwork) { + await input.config.patchWorkspaceConfig(workspaceId, { + openwork: sanitizeOpenworkTemplateConfig(openwork), + }); + } + + if (skills.length > 0 && modes.skills === "replace") { + for (const item of this.listManaged("skills").filter((skill) => skill.workspaceIds.includes(workspaceId))) { + const nextWorkspaceIds = item.workspaceIds.filter((candidate) => candidate !== workspaceId); + if (nextWorkspaceIds.length === 0) { + await this.deleteManaged("skills", item.id); + } else { + await updateAssignments("skills", item.id, nextWorkspaceIds); + } + } + } + + for (const item of skills) { + const record = item && typeof item === "object" ? item as Record : null; + if (!record) { + continue; + } + const name = normalizeString(record.name); + const content = typeof record.content === "string" ? record.content : ""; + const description = normalizeString(record.description) || undefined; + const trigger = normalizeString(record.trigger) || undefined; + if (name && content) { + await this.upsertWorkspaceSkill(workspaceId, { + content, + description, + metadata: { + description, + importedVia: "portable_bundle", + sourceBundleWorkspaceId: normalizeString(payload.workspaceId) || null, + trigger, + workspaceId, + }, + name, + source: "imported", + trigger, + }); + } + } + + if (commands.length > 0) { + if (modes.commands === "replace") { + clearWorkspaceCommands(workspace); + } + for (const item of commands) { + const record = item && typeof item === "object" ? item as Record : null; + if (!record) { + continue; + } + const parsedContent = typeof record.content === "string" ? parseFrontmatter(record.content) : null; + const name = normalizeString(record.name) || normalizeString(parsedContent?.data.name); + const description = normalizeString(record.description) || normalizeString(parsedContent?.data.description) || undefined; + const template = typeof record.template === "string" + ? 
record.template + : parsedContent + ? parsedContent.body.trim() + : ""; + if (name && template) { + upsertWorkspaceCommand(workspace, { description, name, template }); + } + } + } + + if (files.length > 0) { + writePortableFiles(workspace.dataDir!, files, { replace: modes.files === "replace" }); + } + + input.config.ensureWorkspaceConfig(workspaceId); + input.files.emitReloadEvent(workspaceId, "config", { + action: "updated", + name: "workspace-import", + type: "config", + }); + await input.files.recordWorkspaceAudit(workspaceId, "workspace.import", workspace.dataDir ?? workspaceId, "Imported portable workspace data through Server V2."); + return { ok: true }; + }, + + async publishSharedBundle(inputValue: { bundleType: string; name?: string; payload: unknown; timeoutMs?: number }) { + const bundleType = normalizeString(inputValue.bundleType); + if (!ALLOWED_BUNDLE_TYPES.has(bundleType)) { + throw new RouteError(400, "invalid_request", `Unsupported bundle type: ${bundleType || "unknown"}`); + } + const timeoutMs = typeof inputValue.timeoutMs === "number" && Number.isFinite(inputValue.timeoutMs) ? inputValue.timeoutMs : 15_000; + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), Math.max(1_000, timeoutMs)); + try { + const response = await fetch(`${resolvePublisherBaseUrl().replace(/\/+$/, "")}/v1/bundles`, { + body: JSON.stringify(inputValue.payload), + headers: { + Accept: "application/json", + "Content-Type": "application/json", + Origin: resolvePublisherOrigin(), + ...(normalizeString(inputValue.name) ? 
{ "X-OpenWork-Name": normalizeString(inputValue.name) } : {}), + "X-OpenWork-Bundle-Type": bundleType, + "X-OpenWork-Schema-Version": "v1", + }, + method: "POST", + redirect: "manual", + signal: controller.signal, + }); + if (response.status >= 300 && response.status < 400) { + throw new RouteError(502, "bad_gateway", "Publisher redirects are not allowed."); + } + if (!response.ok) { + const details = readErrorMessage(await response.text()); + throw new RouteError(502, "bad_gateway", `Publish failed (${response.status})${details ? `: ${details}` : ""}`); + } + const json = await response.json() as Record; + const url = normalizeString(json.url); + if (!url) { + throw new RouteError(502, "bad_gateway", "Publisher response missing url."); + } + return { url }; + } catch (error) { + if (error instanceof RouteError) { + throw error; + } + throw new RouteError(502, "bad_gateway", `Failed to publish bundle: ${error instanceof Error ? error.message : String(error)}`); + } finally { + clearTimeout(timer); + } + }, + + async fetchSharedBundle(bundleUrl: unknown, options?: { timeoutMs?: number }) { + const url = normalizeBundleFetchUrl(bundleUrl); + const timeoutMs = typeof options?.timeoutMs === "number" && Number.isFinite(options.timeoutMs) ? options.timeoutMs : 15_000; + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), Math.max(1_000, timeoutMs)); + try { + const response = await fetch(url.toString(), { + headers: { Accept: "application/json" }, + method: "GET", + redirect: "manual", + signal: controller.signal, + }); + if (response.status >= 300 && response.status < 400) { + throw new RouteError(502, "bad_gateway", "Shared bundle redirects are not allowed."); + } + if (!response.ok) { + const details = readErrorMessage(await response.text()); + throw new RouteError(502, "bad_gateway", `Failed to fetch bundle (${response.status})${details ? 
`: ${details}` : ""}`); + } + return await response.json(); + } catch (error) { + if (error instanceof RouteError) { + throw error; + } + throw new RouteError(502, "bad_gateway", `Failed to fetch bundle: ${error instanceof Error ? error.message : String(error)}`); + } finally { + clearTimeout(timer); + } + }, + }; +} diff --git a/apps/server-v2/src/services/registry-service.ts b/apps/server-v2/src/services/registry-service.ts new file mode 100644 index 00000000..6cb9f58b --- /dev/null +++ b/apps/server-v2/src/services/registry-service.ts @@ -0,0 +1,283 @@ +import path from "node:path"; +import { + createInternalWorkspaceId, + createLocalWorkspaceId, + createRemoteWorkspaceId, + createServerId, + deriveWorkspaceSlugSource, +} from "../database/identifiers.js"; +import type { ServerRepositories } from "../database/repositories.js"; +import { ensureWorkspaceConfigDir, type ServerWorkingDirectory } from "../database/working-directory.js"; +import type { + BackendKind, + HostingKind, + JsonObject, + ServerRecord, + WorkspaceKind, + WorkspaceRecord, +} from "../database/types.js"; + +export type LegacyRemoteWorkspaceInput = { + baseUrl: string; + displayName: string; + directory?: string | null; + legacyNotes: JsonObject; + remoteType: "openwork" | "opencode"; + remoteWorkspaceId?: string | null; + serverAuth?: JsonObject | null; + serverBaseUrl: string; + serverHostingKind: HostingKind; + serverLabel: string; + workspaceStatus?: WorkspaceRecord["status"]; +}; + +export type LegacyLocalWorkspaceInput = { + dataDir: string; + displayName: string; + kind?: Extract; + legacyNotes?: JsonObject | null; + opencodeProjectId?: string | null; + status?: WorkspaceRecord["status"]; +}; + +type EnsureLocalServerInput = { + baseUrl?: string | null; + capabilities?: JsonObject; + hostingKind: HostingKind; + label: string; + notes?: JsonObject | null; +}; + +function mergeJson(base: JsonObject | null | undefined, next: JsonObject | null | undefined) { + if (!base && !next) { + return 
null; + } + + return { + ...(base ?? {}), + ...(next ?? {}), + }; +} + +function resolveSlug(repositories: ServerRepositories, workspaceId: string, baseSlug: string) { + let suffix = 1; + let candidate = baseSlug; + while (repositories.workspaces.findSlugConflict(candidate, workspaceId)) { + suffix += 1; + candidate = `${baseSlug}-${suffix}`; + } + return candidate; +} + +export function createRegistryService(input: { + localServerCapabilities: JsonObject; + repositories: ServerRepositories; + workingDirectory: ServerWorkingDirectory; +}) { + const localServerId = createServerId("local", "primary"); + const { repositories } = input; + + function upsertWorkspace(inputWorkspace: Omit) { + const slugBase = deriveWorkspaceSlugSource({ + dataDir: inputWorkspace.dataDir, + displayName: inputWorkspace.displayName, + fallback: inputWorkspace.kind, + }); + const slug = resolveSlug(repositories, inputWorkspace.id, slugBase); + return repositories.workspaces.upsert({ + ...inputWorkspace, + slug, + }); + } + + return { + attachLocalServerBaseUrl(baseUrl: string) { + const existing = input.repositories.servers.getById(localServerId); + if (!existing) { + return this.ensureLocalServer({ + baseUrl, + hostingKind: "self_hosted", + label: "Local OpenWork Server", + }); + } + + return input.repositories.servers.upsert({ + ...existing, + baseUrl, + capabilities: existing.capabilities, + }); + }, + + ensureLocalServer(server: EnsureLocalServerInput): { created: boolean; server: ServerRecord } { + const existing = input.repositories.servers.getById(localServerId); + const next = input.repositories.servers.upsert({ + auth: existing?.auth ?? null, + baseUrl: server.baseUrl ?? existing?.baseUrl ?? null, + capabilities: { + ...input.localServerCapabilities, + ...(existing?.capabilities ?? {}), + ...(server.capabilities ?? 
{}), + }, + hostingKind: server.hostingKind, + id: localServerId, + isEnabled: true, + isLocal: true, + kind: "local", + label: server.label, + lastSeenAt: new Date().toISOString(), + notes: mergeJson(existing?.notes, server.notes), + source: existing?.source ?? "seeded", + }); + + return { + created: !existing, + server: next, + }; + }, + + ensureHiddenWorkspace(kind: "control" | "help") { + const workspaceId = createInternalWorkspaceId(kind); + const displayName = kind === "control" ? "Control Workspace" : "Help Workspace"; + const workspace = upsertWorkspace({ + configDir: ensureWorkspaceConfigDir(input.workingDirectory, workspaceId), + dataDir: null, + displayName, + id: workspaceId, + isHidden: true, + kind, + notes: { + internal: true, + seededBy: "server-v2-phase-2", + }, + opencodeProjectId: null, + remoteWorkspaceId: null, + serverId: localServerId, + status: "ready", + }); + + const backendKind: BackendKind = "local_opencode"; + input.repositories.workspaceRuntimeState.upsert({ + backendKind, + health: { + hidden: true, + internalWorkspace: kind, + }, + lastError: null, + lastSessionRefreshAt: null, + lastSyncAt: null, + workspaceId: workspace.id, + }); + + return workspace; + }, + + importLocalWorkspace(workspace: LegacyLocalWorkspaceInput) { + const workspaceKind = workspace.kind ?? "local"; + const workspaceId = workspaceKind === "local" ? createLocalWorkspaceId(workspace.dataDir) : createInternalWorkspaceId(workspaceKind); + const configDir = ensureWorkspaceConfigDir(input.workingDirectory, workspaceId); + const record = upsertWorkspace({ + configDir, + dataDir: workspace.dataDir, + displayName: workspace.displayName || path.basename(workspace.dataDir), + id: workspaceId, + isHidden: workspaceKind !== "local", + kind: workspaceKind, + notes: mergeJson(workspace.legacyNotes ?? null, { + importSource: "desktop_or_orchestrator", + workspaceKind, + }), + opencodeProjectId: workspace.opencodeProjectId ?? 
null, + remoteWorkspaceId: null, + serverId: localServerId, + status: workspace.status ?? "imported", + }); + + input.repositories.workspaceRuntimeState.upsert({ + backendKind: "local_opencode", + health: { + configDir, + imported: true, + }, + lastError: null, + lastSessionRefreshAt: null, + lastSyncAt: null, + workspaceId: record.id, + }); + + return record; + }, + + importRemoteWorkspace(workspace: LegacyRemoteWorkspaceInput) { + const serverId = createServerId("remote", workspace.serverBaseUrl); + const existingServer = input.repositories.servers.getById(serverId); + input.repositories.servers.upsert({ + auth: workspace.serverAuth ?? existingServer?.auth ?? null, + baseUrl: workspace.serverBaseUrl, + capabilities: mergeJson(existingServer?.capabilities ?? {}, { + legacyRemoteType: workspace.remoteType, + phase: 2, + source: "desktop-import", + }) ?? {}, + hostingKind: workspace.serverHostingKind, + id: serverId, + isEnabled: true, + isLocal: false, + kind: "remote", + label: workspace.serverLabel, + lastSeenAt: existingServer?.lastSeenAt ?? null, + notes: mergeJson(existingServer?.notes, workspace.legacyNotes), + source: existingServer?.source ?? "imported", + }); + + const workspaceId = createRemoteWorkspaceId({ + baseUrl: workspace.serverBaseUrl, + directory: workspace.directory, + remoteType: workspace.remoteType, + remoteWorkspaceId: workspace.remoteWorkspaceId, + }); + const record = upsertWorkspace({ + configDir: null, + dataDir: null, + displayName: workspace.displayName, + id: workspaceId, + isHidden: false, + kind: "remote", + notes: mergeJson(workspace.legacyNotes, { + directory: workspace.directory ?? null, + remoteType: workspace.remoteType, + }), + opencodeProjectId: null, + remoteWorkspaceId: workspace.remoteWorkspaceId ?? null, + serverId, + status: workspace.workspaceStatus ?? 
"imported", + }); + + input.repositories.workspaceRuntimeState.upsert({ + backendKind: "remote_openwork", + health: { + imported: true, + remoteServerId: serverId, + }, + lastError: null, + lastSessionRefreshAt: null, + lastSyncAt: null, + workspaceId: record.id, + }); + + return record; + }, + + listServers() { + return input.repositories.servers.list(); + }, + + listWorkspaces(includeHidden = false) { + return input.repositories.workspaces.list({ includeHidden }); + }, + + localServerId, + }; +} + +type RegistryService = ReturnType; +export type { RegistryService }; diff --git a/apps/server-v2/src/services/remote-server-service.ts b/apps/server-v2/src/services/remote-server-service.ts new file mode 100644 index 00000000..a62d45fb --- /dev/null +++ b/apps/server-v2/src/services/remote-server-service.ts @@ -0,0 +1,285 @@ +import { HTTPException } from "hono/http-exception"; +import { requestRemoteOpenwork } from "../adapters/remote-openwork.js"; +import { createRemoteWorkspaceId, createServerId } from "../database/identifiers.js"; +import type { ServerRepositories } from "../database/repositories.js"; +import type { HostingKind, JsonObject, ServerRecord, WorkspaceRecord } from "../database/types.js"; + +type RemoteWorkspaceSnapshot = { + directory: string | null; + displayName: string; + remoteWorkspaceId: string; +}; + +function normalizeUrl(value: string) { + const trimmed = value.trim(); + if (!trimmed) { + throw new Error("baseUrl is required."); + } + const withProtocol = /^https?:\/\//.test(trimmed) ? trimmed : `http://${trimmed}`; + const url = new URL(withProtocol); + return url.toString().replace(/\/+$/, ""); +} + +function stripWorkspaceMount(value: string) { + const url = new URL(normalizeUrl(value)); + const segments = url.pathname.split("/").filter(Boolean); + const last = segments[segments.length - 1] ?? ""; + const prev = segments[segments.length - 2] ?? 
""; + if (prev === "w" && last) { + url.pathname = `/${segments.slice(0, -2).join("/")}`; + } + return url.toString().replace(/\/+$/, ""); +} + +function detectRemoteHostingKind(value: string): HostingKind { + const hostname = new URL(value).hostname.toLowerCase(); + if ( + hostname === "app.openworklabs.com" + || hostname === "app.openwork.software" + || hostname.endsWith(".openworklabs.com") + || hostname.endsWith(".openwork.software") + ) { + return "cloud"; + } + return "self_hosted"; +} + +function asObject(value: unknown) { + return value && typeof value === "object" && !Array.isArray(value) ? value as Record : {}; +} + +function pickString(record: Record, keys: string[]) { + for (const key of keys) { + const value = record[key]; + if (typeof value === "string" && value.trim()) { + return value.trim(); + } + } + return null; +} + +function normalizeRemoteWorkspaceItems(payload: unknown): RemoteWorkspaceSnapshot[] { + const record = asObject(payload); + const items = Array.isArray(record.items) ? record.items : []; + return items.flatMap((entry) => { + const item = asObject(entry); + const backend = asObject(item.backend); + const local = asObject(backend.local); + const remote = asObject(backend.remote); + const remoteWorkspaceId = pickString(item, ["id"]); + if (!remoteWorkspaceId) { + return []; + } + const displayName = pickString(item, ["displayName", "name"]) ?? remoteWorkspaceId; + const directory = pickString(local, ["dataDir", "directory"]) + ?? pickString(remote, ["directory"]) + ?? 
pickString(item, ["path", "directory"]); + return [{ + directory, + displayName, + remoteWorkspaceId, + } satisfies RemoteWorkspaceSnapshot]; + }); +} + +export type RemoteServerService = ReturnType; + +export function createRemoteServerService(input: { + repositories: ServerRepositories; +}) { + function buildServerRecord(payload: { + baseUrl: string; + hostToken?: string | null; + label?: string | null; + token?: string | null; + }) { + const baseUrl = stripWorkspaceMount(payload.baseUrl); + const serverId = createServerId("remote", baseUrl); + const existing = input.repositories.servers.getById(serverId); + const auth: JsonObject = { + ...(existing?.auth ?? {}), + ...(payload.token?.trim() ? { openworkToken: payload.token.trim(), openworkClientToken: payload.token.trim() } : {}), + ...(payload.hostToken?.trim() ? { openworkHostToken: payload.hostToken.trim() } : {}), + }; + const label = payload.label?.trim() || existing?.label || new URL(baseUrl).host; + const server = input.repositories.servers.upsert({ + auth: Object.keys(auth).length > 0 ? auth : existing?.auth ?? null, + baseUrl, + capabilities: { + ...(existing?.capabilities ?? {}), + phase: 9, + remoteWorkspaceDiscovery: true, + remoteWorkspaceRouting: true, + }, + hostingKind: existing?.hostingKind ?? detectRemoteHostingKind(baseUrl), + id: serverId, + isEnabled: true, + isLocal: false, + kind: "remote", + label, + lastSeenAt: new Date().toISOString(), + notes: { + ...(existing?.notes ?? {}), + connectedVia: "server-v2-phase9", + }, + source: existing?.source ?? 
"connected", + }); + return server; + } + + async function fetchRemoteWorkspaces(server: ServerRecord) { + const response = await requestRemoteOpenwork({ + path: "/workspaces", + server, + timeoutMs: 10_000, + }); + return normalizeRemoteWorkspaceItems(response); + } + + function updateWorkspaceRuntime(workspace: WorkspaceRecord, details: Record) { + const current = input.repositories.workspaceRuntimeState.getByWorkspaceId(workspace.id); + input.repositories.workspaceRuntimeState.upsert({ + backendKind: "remote_openwork", + health: { + ...(current?.health ?? {}), + ...details, + }, + lastError: null, + lastSessionRefreshAt: current?.lastSessionRefreshAt ?? null, + lastSyncAt: new Date().toISOString(), + workspaceId: workspace.id, + }); + } + + function markMissingWorkspace(workspace: WorkspaceRecord) { + input.repositories.workspaces.upsert({ + ...workspace, + notes: { + ...(workspace.notes ?? {}), + sync: { + missing: true, + recordedAt: new Date().toISOString(), + }, + }, + status: "attention", + }); + const current = input.repositories.workspaceRuntimeState.getByWorkspaceId(workspace.id); + input.repositories.workspaceRuntimeState.upsert({ + backendKind: "remote_openwork", + health: current?.health ?? null, + lastError: { + code: "not_found", + message: "Remote workspace was not returned during the latest sync.", + recordedAt: new Date().toISOString(), + }, + lastSessionRefreshAt: current?.lastSessionRefreshAt ?? 
null, + lastSyncAt: new Date().toISOString(), + workspaceId: workspace.id, + }); + } + + function syncRemoteWorkspaceRecords(server: ServerRecord, discovered: RemoteWorkspaceSnapshot[], hints?: { directory?: string | null; workspaceId?: string | null }) { + const existing = input.repositories.workspaces.listByServerId(server.id, { includeHidden: true }).filter((workspace) => workspace.kind === "remote"); + const seenWorkspaceIds = new Set(); + const synced: WorkspaceRecord[] = []; + + for (const remoteWorkspace of discovered) { + const workspaceId = createRemoteWorkspaceId({ + baseUrl: server.baseUrl ?? "", + remoteType: "openwork", + remoteWorkspaceId: remoteWorkspace.remoteWorkspaceId, + }); + seenWorkspaceIds.add(workspaceId); + const previous = input.repositories.workspaces.getById(workspaceId); + const workspace = input.repositories.workspaces.upsert({ + configDir: null, + dataDir: null, + displayName: remoteWorkspace.displayName, + id: workspaceId, + isHidden: false, + kind: "remote", + notes: { + ...(previous?.notes ?? {}), + directory: remoteWorkspace.directory, + remoteType: "openwork", + sync: { + directoryHint: hints?.directory?.trim() || null, + syncedAt: new Date().toISOString(), + }, + }, + opencodeProjectId: null, + remoteWorkspaceId: remoteWorkspace.remoteWorkspaceId, + serverId: server.id, + slug: previous?.slug ?? workspaceId, + status: "ready", + }); + synced.push(workspace); + updateWorkspaceRuntime(workspace, { + remoteServerId: server.id, + remoteWorkspaceId: remoteWorkspace.remoteWorkspaceId, + }); + } + + for (const workspace of existing) { + if (!seenWorkspaceIds.has(workspace.id)) { + markMissingWorkspace(workspace); + } + } + + const requestedWorkspaceId = hints?.workspaceId?.trim(); + const requestedDirectory = hints?.directory?.trim(); + const selected = synced.find((workspace) => workspace.remoteWorkspaceId === requestedWorkspaceId) + ?? 
synced.find((workspace) => typeof workspace.notes?.directory === "string" && requestedDirectory && workspace.notes.directory === requestedDirectory) + ?? synced[0] + ?? null; + + return { + selectedWorkspaceId: selected?.id ?? null, + workspaces: synced, + }; + } + + return { + async connect(inputValue: { + baseUrl: string; + directory?: string | null; + hostToken?: string | null; + label?: string | null; + token?: string | null; + workspaceId?: string | null; + }) { + const server = buildServerRecord(inputValue); + const discovered = await fetchRemoteWorkspaces(server); + if (discovered.length === 0) { + throw new HTTPException(404, { message: "Remote OpenWork server did not return any visible workspaces." }); + } + const result = syncRemoteWorkspaceRecords(server, discovered, { + directory: inputValue.directory, + workspaceId: inputValue.workspaceId, + }); + return { + selectedWorkspaceId: result.selectedWorkspaceId, + server, + workspaces: result.workspaces, + }; + }, + + async sync(serverId: string, hints?: { directory?: string | null; workspaceId?: string | null }) { + const server = input.repositories.servers.getById(serverId); + if (!server || server.kind !== "remote") { + throw new HTTPException(404, { message: `Remote server not found: ${serverId}` }); + } + const discovered = await fetchRemoteWorkspaces(server); + const result = syncRemoteWorkspaceRecords(server, discovered, hints); + input.repositories.servers.upsert({ + ...server, + lastSeenAt: new Date().toISOString(), + }); + return { + selectedWorkspaceId: result.selectedWorkspaceId, + server: input.repositories.servers.getById(server.id)!, + workspaces: result.workspaces, + }; + }, + }; +} diff --git a/apps/server-v2/src/services/router-product-service.ts b/apps/server-v2/src/services/router-product-service.ts new file mode 100644 index 00000000..01328d69 --- /dev/null +++ b/apps/server-v2/src/services/router-product-service.ts @@ -0,0 +1,408 @@ +import { createHash, randomUUID } from "node:crypto"; 
+import { HTTPException } from "hono/http-exception"; +import type { ServerRepositories } from "../database/repositories.js"; +import type { JsonObject, RouterBindingRecord, RouterIdentityRecord } from "../database/types.js"; +import type { RuntimeService } from "./runtime-service.js"; +import { RouteError } from "../http.js"; + +function asObject(value: unknown): JsonObject { + return value && typeof value === "object" && !Array.isArray(value) ? { ...(value as JsonObject) } : {}; +} + +function normalizeString(value: unknown) { + return typeof value === "string" ? value.trim() : ""; +} + +function nowIso() { + return new Date().toISOString(); +} + +async function fetchTelegramBotInfo(token: string) { + const trimmed = token.trim(); + if (!trimmed) { + return null as { id: number; name?: string; username?: string } | null; + } + try { + const response = await fetch(`https://api.telegram.org/bot${trimmed}/getMe`, { + headers: { Accept: "application/json" }, + }); + if (!response.ok) { + return null; + } + const json = await response.json().catch(() => null) as Record | null; + const result = asObject(json?.result); + const id = Number(result.id); + if (!Number.isFinite(id)) { + return null; + } + return { + id, + name: typeof result.first_name === "string" ? result.first_name : undefined, + username: typeof result.username === "string" ? 
result.username : undefined, + }; + } catch { + return null; + } +} + +function createPairingCode() { + return Math.random().toString(36).slice(2, 8).toUpperCase(); +} + +function pairingCodeHash(value: string) { + return createHash("sha256").update(value).digest("hex"); +} + +export type RouterProductService = ReturnType; + +export function createRouterProductService(input: { + repositories: ServerRepositories; + runtime: RuntimeService; + serverId: string; +}) { + function getIdentityOrThrow(identityId: string) { + const identity = input.repositories.routerIdentities.getById(identityId); + if (!identity || identity.serverId !== input.serverId) { + throw new HTTPException(404, { message: `Router identity not found: ${identityId}` }); + } + return identity; + } + + function getBindingOrThrow(bindingId: string) { + const binding = input.repositories.routerBindings.getById(bindingId); + if (!binding || binding.serverId !== input.serverId) { + throw new HTTPException(404, { message: `Router binding not found: ${bindingId}` }); + } + return binding; + } + + function listIdentities(kind?: "slack" | "telegram") { + return input.repositories.routerIdentities.listByServer(input.serverId).filter((identity) => !kind || identity.kind === kind); + } + + function listBindings(filters?: { channel?: string; identityId?: string }) { + const identitiesById = new Map(listIdentities().map((identity) => [identity.id, identity] as const)); + return input.repositories.routerBindings.listByServer(input.serverId) + .filter((binding) => { + const identity = identitiesById.get(binding.routerIdentityId); + if (!identity) { + return false; + } + if (filters?.identityId?.trim() && binding.routerIdentityId !== filters.identityId.trim()) { + return false; + } + if (filters?.channel?.trim() && identity.kind !== filters.channel.trim()) { + return false; + } + return true; + }) + .map((binding) => ({ + channel: identitiesById.get(binding.routerIdentityId)?.kind ?? 
"unknown", + directory: normalizeString(asObject(binding.config).directory) || normalizeString(asObject(binding.config).workspacePath), + identityId: binding.routerIdentityId, + peerId: binding.bindingKey, + updatedAt: Date.parse(binding.updatedAt) || undefined, + })); + } + + async function apply() { + const health = await input.runtime.applyRouterConfig(); + return { + applied: health.status === "running" || health.status === "disabled", + applyError: health.status === "error" ? health.lastError ?? "Router apply failed." : undefined, + applyStatus: health.status === "error" ? 502 : undefined, + health, + ok: true, + }; + } + + function buildHealthSnapshot() { + const runtimeHealth = input.runtime.getRouterHealth(); + const telegram = listIdentities("telegram").filter((identity) => identity.isEnabled); + const slack = listIdentities("slack").filter((identity) => identity.isEnabled); + return { + config: { + groupsEnabled: false, + }, + channels: { + slack: slack.length > 0, + telegram: telegram.length > 0, + whatsapp: false, + }, + ok: runtimeHealth.status === "running" || runtimeHealth.status === "disabled", + opencode: { + healthy: runtimeHealth.status === "running", + url: runtimeHealth.baseUrl ?? runtimeHealth.healthUrl ?? "", + version: runtimeHealth.version ?? undefined, + }, + }; + } + + function buildIdentityItem(identity: RouterIdentityRecord) { + const config = asObject(identity.config); + return { + access: typeof config.access === "string" && (config.access === "private" || config.access === "public") ? 
config.access : undefined, + enabled: identity.isEnabled, + id: identity.id, + pairingRequired: config.access === "private" || undefined, + running: input.runtime.getRouterHealth().status === "running", + }; + } + + function resolveIdentityForChannel(channel: "slack" | "telegram", identityId?: string) { + if (identityId?.trim()) { + const identity = getIdentityOrThrow(identityId.trim()); + if (identity.kind !== channel) { + throw new RouteError(400, "invalid_request", `Identity ${identityId} is not a ${channel} identity.`); + } + return identity; + } + const fallback = listIdentities(channel).find((identity) => identity.isEnabled) ?? listIdentities(channel)[0] ?? null; + if (!fallback) { + throw new RouteError(400, "invalid_request", `No ${channel} identity is configured.`); + } + return fallback; + } + + async function proxyRouter(pathname: string, init?: { body?: unknown; method?: string }) { + const health = input.runtime.getRouterHealth(); + if (!health.baseUrl || health.status !== "running") { + throw new RouteError(503, "service_unavailable", "Router is not running."); + } + const response = await fetch(`${health.baseUrl.replace(/\/+$/, "")}${pathname}`, { + body: init?.body === undefined ? undefined : JSON.stringify(init.body), + headers: { Accept: "application/json", "Content-Type": "application/json" }, + method: init?.method ?? "GET", + }); + const text = await response.text(); + const json = text ? JSON.parse(text) : null; + if (!response.ok) { + throw new RouteError(response.status, "bad_gateway", typeof json?.error === "string" ? 
json.error : `Router request failed (${response.status}).`); + } + return json as T; + } + + return { + async apply() { + return apply(); + }, + + async deleteSlackIdentity(identityId: string) { + const identity = getIdentityOrThrow(identityId); + if (identity.kind !== "slack") { + throw new RouteError(400, "invalid_request", "Router identity is not a Slack identity."); + } + input.repositories.routerIdentities.deleteById(identityId); + const applied = await apply(); + return { + ...applied, + slack: { + deleted: true, + id: identityId, + }, + }; + }, + + async deleteTelegramIdentity(identityId: string) { + const identity = getIdentityOrThrow(identityId); + if (identity.kind !== "telegram") { + throw new RouteError(400, "invalid_request", "Router identity is not a Telegram identity."); + } + input.repositories.routerIdentities.deleteById(identityId); + const applied = await apply(); + return { + ...applied, + telegram: { + deleted: true, + id: identityId, + }, + }; + }, + + getHealth() { + return buildHealthSnapshot(); + }, + + async getTelegramInfo() { + const identity = listIdentities("telegram")[0] ?? 
null; + if (!identity) { + return { bot: null, configured: false, enabled: false, ok: true }; + } + const token = normalizeString(asObject(identity.auth).token) || normalizeString(asObject(identity.config).token); + return { + bot: await fetchTelegramBotInfo(token), + configured: Boolean(token), + enabled: identity.isEnabled, + ok: true, + }; + }, + + listBindings(filters?: { channel?: string; identityId?: string }) { + return { items: listBindings(filters), ok: true }; + }, + + listRouterBindings() { + return input.repositories.routerBindings.listByServer(input.serverId); + }, + + listRouterIdentities() { + return input.repositories.routerIdentities.listByServer(input.serverId); + }, + + listSlackIdentities() { + return { items: listIdentities("slack").map(buildIdentityItem), ok: true }; + }, + + listTelegramIdentities() { + return { items: listIdentities("telegram").map(buildIdentityItem), ok: true }; + }, + + async sendMessage(inputValue: { + autoBind?: boolean; + channel: "slack" | "telegram"; + directory?: string; + identityId?: string; + peerId?: string; + text: string; + }) { + const payload = { + ...(inputValue.autoBind ? { autoBind: true } : {}), + channel: inputValue.channel, + ...(normalizeString(inputValue.directory) ? { directory: normalizeString(inputValue.directory) } : {}), + ...(normalizeString(inputValue.identityId) ? { identityId: normalizeString(inputValue.identityId) } : {}), + ...(normalizeString(inputValue.peerId) ? 
{ peerId: normalizeString(inputValue.peerId) } : {}), + text: inputValue.text, + }; + return await proxyRouter>("/send", { body: payload, method: "POST" }); + }, + + async setBinding(inputValue: { channel: "slack" | "telegram"; directory: string; identityId?: string; peerId: string }) { + const identity = resolveIdentityForChannel(inputValue.channel, inputValue.identityId); + const existing = input.repositories.routerBindings.listByServer(input.serverId) + .find((binding) => binding.routerIdentityId === identity.id && binding.bindingKey === inputValue.peerId) ?? null; + input.repositories.routerBindings.upsert({ + config: { directory: inputValue.directory }, + bindingKey: inputValue.peerId, + id: existing?.id ?? `binding_${randomUUID()}`, + isEnabled: true, + routerIdentityId: identity.id, + serverId: input.serverId, + }); + await apply(); + return { ok: true }; + }, + + async setSlackTokens(botToken: string, appToken: string) { + return this.upsertSlackIdentity({ appToken, botToken, enabled: true, id: "default" }); + }, + + async setTelegramEnabled(enabled: boolean, options?: { clearToken?: boolean }) { + const identity = listIdentities("telegram")[0] ?? null; + if (!identity) { + throw new RouteError(404, "not_found", "Telegram identity is not configured."); + } + input.repositories.routerIdentities.upsert({ + ...identity, + auth: options?.clearToken ? 
{ ...identity.auth, token: null } : identity.auth, + isEnabled: enabled, + }); + const applied = await apply(); + return { + ...applied, + enabled, + }; + }, + + async setTelegramToken(token: string) { + return this.upsertTelegramIdentity({ access: "public", enabled: true, id: "default", token }); + }, + + async upsertRouterBinding(payload: Omit & { createdAt?: string; updatedAt?: string }) { + const binding = input.repositories.routerBindings.upsert(payload); + await apply(); + return binding; + }, + + async upsertRouterIdentity(payload: Omit & { createdAt?: string; updatedAt?: string }) { + const identity = input.repositories.routerIdentities.upsert(payload); + await apply(); + return identity; + }, + + async updateBinding(bindingId: string, payload: { config?: JsonObject; isEnabled?: boolean; routerIdentityId?: string }) { + const binding = getBindingOrThrow(bindingId); + return await this.upsertRouterBinding({ + ...binding, + config: payload.config ?? binding.config, + isEnabled: payload.isEnabled ?? binding.isEnabled, + routerIdentityId: payload.routerIdentityId ?? binding.routerIdentityId, + }); + }, + + async updateIdentity(identityId: string, payload: { auth?: JsonObject | null; config?: JsonObject; displayName?: string; isEnabled?: boolean }) { + const identity = getIdentityOrThrow(identityId); + return await this.upsertRouterIdentity({ + ...identity, + auth: payload.auth ?? identity.auth, + config: payload.config ?? identity.config, + displayName: payload.displayName ?? identity.displayName, + isEnabled: payload.isEnabled ?? 
identity.isEnabled, + }); + }, + + async upsertSlackIdentity(payload: { appToken: string; botToken: string; enabled?: boolean; id?: string }) { + const id = normalizeString(payload.id) || `router_slack_${randomUUID()}`; + input.repositories.routerIdentities.upsert({ + auth: { + appToken: payload.appToken.trim(), + botToken: payload.botToken.trim(), + }, + config: {}, + displayName: id, + id, + isEnabled: payload.enabled !== false, + kind: "slack", + serverId: input.serverId, + }); + const applied = await apply(); + return { + ...applied, + slack: { + enabled: payload.enabled !== false, + id, + }, + }; + }, + + async upsertTelegramIdentity(payload: { access?: "private" | "public"; enabled?: boolean; id?: string; token: string }) { + const id = normalizeString(payload.id) || `router_telegram_${randomUUID()}`; + const pairingCode = payload.access === "private" ? createPairingCode() : null; + input.repositories.routerIdentities.upsert({ + auth: { + token: payload.token.trim(), + }, + config: { + ...(payload.access ? { access: payload.access } : {}), + ...(pairingCode ? { pairingCodeHash: pairingCodeHash(pairingCode) } : {}), + }, + displayName: id, + id, + isEnabled: payload.enabled !== false, + kind: "telegram", + serverId: input.serverId, + }); + const applied = await apply(); + const bot = await fetchTelegramBotInfo(payload.token); + return { + ...applied, + telegram: { + access: payload.access, + ...(bot ? { bot } : {}), + enabled: payload.enabled !== false, + id, + ...(pairingCode ? 
{ pairingCode, pairingRequired: true } : {}), + }, + }; + }, + }; +} diff --git a/apps/server-v2/src/services/runtime-service.test.ts b/apps/server-v2/src/services/runtime-service.test.ts new file mode 100644 index 00000000..a56f2d2d --- /dev/null +++ b/apps/server-v2/src/services/runtime-service.test.ts @@ -0,0 +1,307 @@ +import { afterEach, expect, test } from "bun:test"; +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { createHash } from "node:crypto"; +import { fileURLToPath } from "node:url"; +import { createServerPersistence } from "../database/persistence.js"; +import { resolveRuntimeTarget, type RuntimeManifest } from "../runtime/manifest.js"; +import { createRuntimeService } from "./runtime-service.js"; + +const cleanupPaths: string[] = []; + +afterEach(async () => { + while (cleanupPaths.length > 0) { + const target = cleanupPaths.pop(); + if (!target) { + continue; + } + fs.rmSync(target, { force: true, recursive: true }); + } +}); + +function makeTempDir(name: string) { + const directory = fs.mkdtempSync(path.join(os.tmpdir(), `${name}-`)); + cleanupPaths.push(directory); + return directory; +} + +async function sha256(filePath: string) { + const contents = await Bun.file(filePath).arrayBuffer(); + return createHash("sha256").update(Buffer.from(contents)).digest("hex"); +} + +async function createFakeBinary(kind: "opencode" | "router", mode: string, exitAfterMs?: number) { + const wrapperDir = makeTempDir(`openwork-server-v2-${kind}`); + const binaryPath = path.join(wrapperDir, kind === "opencode" ? "opencode" : "opencode-router"); + const fixturePath = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "..", "test-fixtures", "fake-runtime.ts"); + const script = [ + "#!/bin/sh", + `export FAKE_RUNTIME_KIND=${kind}`, + `export FAKE_RUNTIME_MODE=${mode}`, + ...(exitAfterMs ? 
[`export FAKE_RUNTIME_EXIT_AFTER_MS=${exitAfterMs}`] : []), + `exec ${JSON.stringify(process.execPath)} ${JSON.stringify(fixturePath)} \"$@\"`, + "", + ].join("\n"); + fs.writeFileSync(binaryPath, script, "utf8"); + fs.chmodSync(binaryPath, 0o755); + return binaryPath; +} + +async function createFakeAssetService(opencodePath: string, routerPath: string) { + const target = resolveRuntimeTarget(); + if (!target) { + throw new Error("Unsupported test target."); + } + const opencodeExists = fs.existsSync(opencodePath); + const opencodeStats = opencodeExists ? fs.statSync(opencodePath) : { size: 0 }; + const routerStats = fs.statSync(routerPath); + const manifest: RuntimeManifest = { + files: { + opencode: { + path: path.basename(opencodePath), + sha256: opencodeExists ? await sha256(opencodePath) : "missing", + size: opencodeStats.size, + }, + "opencode-router": { + path: path.basename(routerPath), + sha256: await sha256(routerPath), + size: routerStats.size, + }, + }, + generatedAt: new Date().toISOString(), + manifestVersion: 1, + opencodeVersion: "1.2.27", + rootDir: path.dirname(opencodePath), + routerVersion: "0.11.206", + serverVersion: "0.0.0-test", + source: "development", + target, + }; + + const opencodeBinary = { + absolutePath: opencodePath, + name: "opencode" as const, + sha256: manifest.files.opencode.sha256, + size: manifest.files.opencode.size, + source: "development" as const, + stagedRoot: path.dirname(opencodePath), + target, + version: "1.2.27", + }; + const routerBinary = { + absolutePath: routerPath, + name: "opencode-router" as const, + sha256: manifest.files["opencode-router"].sha256, + size: manifest.files["opencode-router"].size, + source: "development" as const, + stagedRoot: path.dirname(routerPath), + target, + version: "0.11.206", + }; + + return { + ensureOpencodeBinary: async () => opencodeBinary, + ensureRouterBinary: async () => routerBinary, + getDevelopmentRoot: () => path.dirname(opencodePath), + getPinnedOpencodeVersion: async () 
=> "1.2.27", + getReleaseRoot: () => path.dirname(opencodePath), + getRouterVersion: async () => "0.11.206", + getSource: () => "development" as const, + getTarget: () => target, + resolveRuntimeBundle: async () => ({ + manifest, + opencode: opencodeBinary, + router: routerBinary, + }), + }; +} + +function createPersistence() { + const workingDirectory = makeTempDir("openwork-server-v2-runtime-service"); + return createServerPersistence({ + environment: "test", + localServer: { + baseUrl: null, + hostingKind: "self_hosted", + label: "Local OpenWork Server", + }, + version: "0.0.0-test", + workingDirectory, + }); +} + +test("runtime bootstrap starts OpenCode successfully and persists health", async () => { + const persistence = createPersistence(); + const opencodePath = await createFakeBinary("opencode", "success"); + const routerPath = await createFakeBinary("router", "success"); + const assetService = await createFakeAssetService(opencodePath, routerPath); + const runtime = createRuntimeService({ + assetService, + bootstrapPolicy: "manual", + environment: "test", + repositories: persistence.repositories, + restartPolicy: { backoffMs: 25, maxAttempts: 0, windowMs: 1000 }, + serverId: persistence.registry.localServerId, + serverVersion: "0.0.0-test", + workingDirectory: persistence.workingDirectory, + }); + + await runtime.bootstrap(); + + const opencode = runtime.getOpencodeHealth(); + expect(opencode.running).toBe(true); + expect(opencode.status).toBe("running"); + expect(opencode.baseUrl).toContain("http://127.0.0.1:"); + expect(persistence.repositories.serverRuntimeState.getByServerId(persistence.registry.localServerId)?.opencodeStatus).toBe("running"); + + await runtime.dispose(); + persistence.close(); +}); + +test("runtime bootstrap surfaces missing OpenCode binaries clearly", async () => { + const persistence = createPersistence(); + const routerPath = await createFakeBinary("router", "success"); + const assetService = await 
createFakeAssetService(path.join(os.tmpdir(), `does-not-exist-opencode-${Date.now()}`), routerPath); + const runtime = createRuntimeService({ + assetService, + bootstrapPolicy: "manual", + environment: "test", + repositories: persistence.repositories, + restartPolicy: { backoffMs: 25, maxAttempts: 0, windowMs: 1000 }, + serverId: persistence.registry.localServerId, + serverVersion: "0.0.0-test", + workingDirectory: persistence.workingDirectory, + }); + + await expect(runtime.bootstrap()).rejects.toThrow("executable not found"); + expect(runtime.getOpencodeHealth().status).toBe("error"); + + await runtime.dispose(); + persistence.close(); +}); + +test("runtime bootstrap surfaces OpenCode readiness timeouts", async () => { + const persistence = createPersistence(); + const opencodePath = await createFakeBinary("opencode", "timeout"); + const routerPath = await createFakeBinary("router", "success"); + const assetService = await createFakeAssetService(opencodePath, routerPath); + process.env.OPENWORK_SERVER_V2_OPENCODE_START_TIMEOUT_MS = "300"; + const runtime = createRuntimeService({ + assetService, + bootstrapPolicy: "manual", + environment: "test", + repositories: persistence.repositories, + restartPolicy: { backoffMs: 25, maxAttempts: 0, windowMs: 1000 }, + serverId: persistence.registry.localServerId, + serverVersion: "0.0.0-test", + workingDirectory: persistence.workingDirectory, + }); + + await expect(runtime.bootstrap()).rejects.toThrow("did not become ready"); + expect(runtime.getOpencodeHealth().status).toBe("error"); + delete process.env.OPENWORK_SERVER_V2_OPENCODE_START_TIMEOUT_MS; + + await runtime.dispose(); + persistence.close(); +}); + +test("runtime supervisor records post-ready OpenCode crashes", async () => { + const persistence = createPersistence(); + const opencodePath = await createFakeBinary("opencode", "success", 150); + const routerPath = await createFakeBinary("router", "success"); + const assetService = await 
createFakeAssetService(opencodePath, routerPath); + const runtime = createRuntimeService({ + assetService, + bootstrapPolicy: "manual", + environment: "test", + repositories: persistence.repositories, + restartPolicy: { backoffMs: 25, maxAttempts: 0, windowMs: 1000 }, + serverId: persistence.registry.localServerId, + serverVersion: "0.0.0-test", + workingDirectory: persistence.workingDirectory, + }); + + await runtime.bootstrap(); + await Bun.sleep(400); + + expect(runtime.getOpencodeHealth().status).toBe("crashed"); + expect(runtime.getOpencodeHealth().lastExit?.reason).toBe("unexpected_exit"); + + await runtime.dispose(); + persistence.close(); +}); + +test("runtime supervisor starts router when enabled and persists router state", async () => { + const persistence = createPersistence(); + persistence.repositories.routerIdentities.upsert({ + auth: { token: "telegram-token" }, + config: {}, + displayName: "Telegram Bot", + id: "router_identity_telegram", + isEnabled: true, + kind: "telegram", + serverId: persistence.registry.localServerId, + }); + persistence.repositories.routerBindings.upsert({ + bindingKey: "peer-1", + config: { directory: persistence.workingDirectory.rootDir }, + id: "router_binding_one", + isEnabled: true, + routerIdentityId: "router_identity_telegram", + serverId: persistence.registry.localServerId, + }); + + const opencodePath = await createFakeBinary("opencode", "success"); + const routerPath = await createFakeBinary("router", "success"); + const assetService = await createFakeAssetService(opencodePath, routerPath); + const runtime = createRuntimeService({ + assetService, + bootstrapPolicy: "manual", + environment: "test", + repositories: persistence.repositories, + restartPolicy: { backoffMs: 25, maxAttempts: 0, windowMs: 1000 }, + serverId: persistence.registry.localServerId, + serverVersion: "0.0.0-test", + workingDirectory: persistence.workingDirectory, + }); + + await runtime.bootstrap(); + + const router = runtime.getRouterHealth(); + 
expect(router.enablement.enabled).toBe(true); + expect(router.running).toBe(true); + expect(router.status).toBe("running"); + expect(router.materialization?.bindingCount).toBe(1); + expect(persistence.repositories.serverRuntimeState.getByServerId(persistence.registry.localServerId)?.routerStatus).toBe("running"); + + await runtime.dispose(); + persistence.close(); +}); + +test("runtime upgrade restarts managed children and records upgrade state", async () => { + const persistence = createPersistence(); + const opencodePath = await createFakeBinary("opencode", "success"); + const routerPath = await createFakeBinary("router", "success"); + const assetService = await createFakeAssetService(opencodePath, routerPath); + const runtime = createRuntimeService({ + assetService, + bootstrapPolicy: "manual", + environment: "test", + repositories: persistence.repositories, + restartPolicy: { backoffMs: 25, maxAttempts: 0, windowMs: 1000 }, + serverId: persistence.registry.localServerId, + serverVersion: "0.0.0-test", + workingDirectory: persistence.workingDirectory, + }); + + await runtime.bootstrap(); + const upgraded = await runtime.upgradeRuntime(); + + expect(upgraded.state.status).toBe("completed"); + expect(upgraded.summary.opencode.running).toBe(true); + expect(upgraded.summary.upgrade.status).toBe("completed"); + + await runtime.dispose(); + persistence.close(); +}); diff --git a/apps/server-v2/src/services/runtime-service.ts b/apps/server-v2/src/services/runtime-service.ts new file mode 100644 index 00000000..05e18077 --- /dev/null +++ b/apps/server-v2/src/services/runtime-service.ts @@ -0,0 +1,1141 @@ +import fs from "node:fs"; +import net from "node:net"; +import path from "node:path"; +import { Database } from "bun:sqlite"; +import type { LocalOpencodeHandle, LocalProcessExit } from "../adapters/opencode/local.js"; +import { LocalOpencodeStartupError, createLocalOpencode } from "../adapters/opencode/local.js"; +import type { ServerRepositories } from 
"../database/repositories.js"; +import type { ServerWorkingDirectory } from "../database/working-directory.js"; +import { createBoundedOutputCollector, formatRuntimeOutput, type RuntimeOutputSnapshot } from "../runtime/output-buffer.js"; +import type { ResolvedRuntimeBinary, RuntimeManifest } from "../runtime/manifest.js"; +import { createRuntimeAssetService, type RuntimeAssetService } from "../runtime/assets.js"; + +type RuntimeBootstrapPolicy = "disabled" | "eager" | "manual"; +type RuntimeChildStatus = "crashed" | "disabled" | "error" | "restart_scheduled" | "running" | "starting" | "stopped"; + +type RuntimeRestartPolicy = { + backoffMs: number; + maxAttempts: number; + windowMs: number; +}; + +type RuntimeLastExit = LocalProcessExit & { + output: RuntimeOutputSnapshot; + reason: string; +}; + +type RouterEnablementDecision = { + enabled: boolean; + enabledBindingCount: number; + enabledIdentityCount: number; + forced: boolean; + reason: string; +}; + +type RuntimeChildState = { + asset: ResolvedRuntimeBinary | null; + baseUrl: string | null; + healthUrl: string | null; + lastError: string | null; + lastExit: RuntimeLastExit | null; + lastReadyAt: string | null; + lastStartedAt: string | null; + pid: number | null; + recentOutput: RuntimeOutputSnapshot; + running: boolean; + status: RuntimeChildStatus; + version: string | null; +}; + +type RouterMaterialization = { + bindingCount: number; + configPath: string; + dataDir: string; + dbPath: string; + identityCount: number; + logFile: string; +}; + +type ManagedProcessHandle = { + close(): void; + getOutput(): RuntimeOutputSnapshot; + proc: Bun.Subprocess<"ignore", "pipe", "pipe">; + waitForExit(): Promise; +}; + +type RuntimeUpgradeState = { + error: string | null; + finishedAt: string | null; + startedAt: string | null; + status: "completed" | "failed" | "idle" | "running"; +}; + +export type RuntimeService = { + applyRouterConfig(): Promise>; + bootstrap(): Promise; + dispose(): Promise; + getBootstrapPolicy(): 
RuntimeBootstrapPolicy; + getOpencodeHealth(): { + baseUrl: string | null; + binaryPath: string | null; + diagnostics: RuntimeOutputSnapshot; + lastError: string | null; + lastExit: RuntimeLastExit | null; + lastReadyAt: string | null; + lastStartedAt: string | null; + manifest: RuntimeManifest | null; + pid: number | null; + running: boolean; + source: "development" | "release"; + status: RuntimeChildStatus; + version: string | null; + }; + getRouterHealth(): { + baseUrl: string | null; + binaryPath: string | null; + diagnostics: RuntimeOutputSnapshot; + enablement: RouterEnablementDecision; + healthUrl: string | null; + lastError: string | null; + lastExit: RuntimeLastExit | null; + lastReadyAt: string | null; + lastStartedAt: string | null; + manifest: RuntimeManifest | null; + materialization: RouterMaterialization | null; + pid: number | null; + running: boolean; + source: "development" | "release"; + status: RuntimeChildStatus; + version: string | null; + }; + getRuntimeSummary(): { + bootstrapPolicy: RuntimeBootstrapPolicy; + manifest: RuntimeManifest | null; + opencode: ReturnType; + restartPolicy: RuntimeRestartPolicy; + router: ReturnType; + upgrade: RuntimeUpgradeState; + source: "development" | "release"; + target: ReturnType; + }; + getRuntimeVersions(): { + active: { + opencodeVersion: string | null; + routerVersion: string | null; + serverVersion: string; + }; + manifest: RuntimeManifest | null; + pinned: { + opencodeVersion: string | null; + routerVersion: string | null; + serverVersion: string; + }; + target: ReturnType; + }; + getStateForPersistence(): ReturnType; + upgradeRuntime(): Promise<{ state: RuntimeUpgradeState; summary: ReturnType }>; +}; + +type CreateRuntimeServiceOptions = { + assetService?: RuntimeAssetService; + bootstrapPolicy?: RuntimeBootstrapPolicy; + environment: string; + repositories: ServerRepositories; + restartPolicy?: Partial; + serverId: string; + serverVersion: string; + workingDirectory: ServerWorkingDirectory; +}; + 
+function isTruthy(value: string | undefined) { + if (!value) { + return false; + } + + return ["1", "true", "yes", "on"].includes(value.trim().toLowerCase()); +} + +function emptyOutput(): RuntimeOutputSnapshot { + return { + combined: [], + stderr: [], + stdout: [], + totalLines: 0, + truncated: false, + }; +} + +function nowIso() { + return new Date().toISOString(); +} + +function resolveBootstrapPolicy(environment: string, explicit?: RuntimeBootstrapPolicy): RuntimeBootstrapPolicy { + if (explicit) { + return explicit; + } + + const fromEnv = process.env.OPENWORK_SERVER_V2_RUNTIME_BOOTSTRAP?.trim().toLowerCase(); + if (fromEnv === "disabled" || fromEnv === "manual" || fromEnv === "eager") { + return fromEnv; + } + + if (environment === "test") { + return "disabled"; + } + + return "eager"; +} + +function resolveRestartPolicy(overrides?: Partial): RuntimeRestartPolicy { + const maxAttempts = Number.parseInt(process.env.OPENWORK_SERVER_V2_RUNTIME_RESTART_MAX_ATTEMPTS ?? "2", 10); + const backoffMs = Number.parseInt(process.env.OPENWORK_SERVER_V2_RUNTIME_RESTART_BACKOFF_MS ?? "750", 10); + const windowMs = Number.parseInt(process.env.OPENWORK_SERVER_V2_RUNTIME_RESTART_WINDOW_MS ?? "30000", 10); + + return { + backoffMs: overrides?.backoffMs ?? (Number.isFinite(backoffMs) ? backoffMs : 750), + maxAttempts: overrides?.maxAttempts ?? (Number.isFinite(maxAttempts) ? maxAttempts : 2), + windowMs: overrides?.windowMs ?? (Number.isFinite(windowMs) ? windowMs : 30_000), + }; +} + +function pickLatestExit(opencode: RuntimeChildState, router: RuntimeChildState) { + const exits = [ + opencode.lastExit ? { component: "opencode" as const, ...opencode.lastExit } : null, + router.lastExit ? { component: "router" as const, ...router.lastExit } : null, + ].filter(Boolean) as Array; + exits.sort((left, right) => right.at.localeCompare(left.at)); + return exits[0] ?? 
null; +} + +async function getFreePort() { + return new Promise((resolve, reject) => { + const server = net.createServer(); + server.once("error", reject); + server.listen(0, "127.0.0.1", () => { + const address = server.address(); + if (!address || typeof address === "string") { + reject(new Error("Failed to allocate a free loopback port.")); + return; + } + + server.close((error) => { + if (error) { + reject(error); + return; + } + + resolve(address.port); + }); + }); + }); +} + +async function waitForOpencodeHealthy(handle: LocalOpencodeHandle, timeoutMs = 5_000, pollMs = 200) { + const startedAt = Date.now(); + let lastError = "OpenCode did not report healthy status yet."; + + while (Date.now() - startedAt < timeoutMs) { + try { + const health = await handle.client.global.health(); + const data = (health as { healthy?: boolean }).healthy; + if (data) { + return; + } + lastError = "OpenCode reported unhealthy state."; + } catch (error) { + lastError = error instanceof Error ? error.message : String(error); + } + + await Bun.sleep(pollMs); + } + + throw new Error(lastError); +} + +async function waitForHttpOk(url: string, timeoutMs = 10_000, pollMs = 250) { + const startedAt = Date.now(); + let lastError = `Timed out waiting for ${url}`; + + while (Date.now() - startedAt < timeoutMs) { + try { + const response = await fetch(url, { signal: AbortSignal.timeout(2_000) }); + if (response.ok) { + return; + } + lastError = `HTTP ${response.status} from ${url}`; + } catch (error) { + lastError = error instanceof Error ? 
error.message : String(error); + } + + await Bun.sleep(pollMs); + } + + throw new Error(lastError); +} + +async function spawnManagedBinary( + command: string[], + options: { + cwd: string; + env: Record; + timeoutMs: number; + readinessUrl: string; + }, +) { + const output = createBoundedOutputCollector({ maxBytes: 16_384, maxLines: 200 }); + const proc = Bun.spawn(command, { + cwd: options.cwd, + env: { + ...process.env, + ...options.env, + }, + stderr: "pipe", + stdin: "ignore", + stdout: "pipe", + }); + + const waitForExit = async (): Promise => { + const code = await proc.exited; + return { + at: nowIso(), + code, + signal: "signalCode" in proc && typeof proc.signalCode === "string" ? proc.signalCode : null, + }; + }; + + const pump = async (streamName: "stdout" | "stderr", stream: ReadableStream | null) => { + if (!stream) { + return; + } + const reader = stream.getReader(); + const decoder = new TextDecoder(); + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + output.finish(streamName); + return; + } + output.pushChunk(streamName, decoder.decode(value, { stream: true })); + } + } finally { + output.finish(streamName); + reader.releaseLock(); + } + }; + + void pump("stdout", proc.stdout); + void pump("stderr", proc.stderr); + + try { + await waitForHttpOk(options.readinessUrl, options.timeoutMs, 200); + } catch (error) { + proc.kill(); + const exit = await waitForExit().catch(() => ({ at: nowIso(), code: null, signal: null })); + const snapshot = output.snapshot(); + const detail = error instanceof Error ? error.message : String(error); + throw new Error( + `Router failed to become ready at ${options.readinessUrl}: ${detail}. Last exit: ${exit.code ?? 
"null"}.\nCollected output:\n${formatRuntimeOutput(snapshot)}`, + ); + } + + const handle: ManagedProcessHandle = { + close() { + proc.kill(); + }, + getOutput() { + return output.snapshot(); + }, + proc, + waitForExit, + }; + + return handle; +} + +function ensureRouterStoreSchema(database: Database) { + database.exec("PRAGMA journal_mode = WAL"); + database.exec(` + CREATE TABLE IF NOT EXISTS sessions ( + channel TEXT NOT NULL, + identity_id TEXT NOT NULL, + peer_id TEXT NOT NULL, + session_id TEXT NOT NULL, + directory TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + PRIMARY KEY (channel, identity_id, peer_id) + ); + CREATE TABLE IF NOT EXISTS allowlist ( + channel TEXT NOT NULL, + peer_id TEXT NOT NULL, + created_at INTEGER NOT NULL, + PRIMARY KEY (channel, peer_id) + ); + CREATE TABLE IF NOT EXISTS bindings ( + channel TEXT NOT NULL, + identity_id TEXT NOT NULL, + peer_id TEXT NOT NULL, + directory TEXT NOT NULL, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + PRIMARY KEY (channel, identity_id, peer_id) + ); + CREATE TABLE IF NOT EXISTS settings ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ); + `); +} + +function asRecord(value: unknown) { + return value && typeof value === "object" && !Array.isArray(value) ? 
(value as Record) : {}; +} + +function resolveDirectoryFromBindingConfig(config: unknown) { + const record = asRecord(config); + for (const key of ["directory", "dir", "path", "workspacePath"]) { + const value = record[key]; + if (typeof value === "string" && value.trim()) { + return value.trim(); + } + } + return null; +} + +function createInitialChildState(status: RuntimeChildStatus, version: string | null): RuntimeChildState { + return { + asset: null, + baseUrl: null, + healthUrl: null, + lastError: null, + lastExit: null, + lastReadyAt: null, + lastStartedAt: null, + pid: null, + recentOutput: emptyOutput(), + running: false, + status, + version, + }; +} + +export function createRuntimeService(options: CreateRuntimeServiceOptions): RuntimeService { + const bootstrapPolicy = resolveBootstrapPolicy(options.environment, options.bootstrapPolicy); + const restartPolicy = resolveRestartPolicy(options.restartPolicy); + const assetService = options.assetService ?? createRuntimeAssetService({ + environment: options.environment, + serverVersion: options.serverVersion, + workingDirectory: options.workingDirectory, + }); + const persisted = options.repositories.serverRuntimeState.getByServerId(options.serverId); + const startupDiagnostics = asRecord(asRecord(persisted?.health).startup); + const opencodeState = createInitialChildState(bootstrapPolicy === "disabled" ? "disabled" : "stopped", persisted?.opencodeVersion ?? null); + const routerState = createInitialChildState(persisted?.routerStatus === "running" ? "stopped" : "disabled", persisted?.routerVersion ?? 
null); + + let runtimeManifest: RuntimeManifest | null = null; + let routerMaterialization: RouterMaterialization | null = null; + let bootstrapPromise: Promise | null = null; + let shuttingDown = false; + let opencodeHandle: LocalOpencodeHandle | null = null; + let routerHandle: ManagedProcessHandle | null = null; + let opencodeStopping = false; + let routerStopping = false; + let routerEnablement: RouterEnablementDecision = { + enabled: false, + enabledBindingCount: 0, + enabledIdentityCount: 0, + forced: false, + reason: "router_not_evaluated", + }; + let upgradeState: RuntimeUpgradeState = { + error: null, + finishedAt: null, + startedAt: null, + status: "idle", + }; + + const restartHistory = { + opencode: [] as number[], + router: [] as number[], + }; + const restartTimers = { + opencode: null as ReturnType | null, + router: null as ReturnType | null, + }; + + const persistState = () => { + const health = { + startup: startupDiagnostics, + runtime: { + bootstrapPolicy, + manifest: runtimeManifest, + opencode: { + ...opencodeState, + binaryPath: opencodeState.asset?.absolutePath ?? null, + }, + restartPolicy, + router: { + ...routerState, + binaryPath: routerState.asset?.absolutePath ?? null, + enablement: routerEnablement, + materialization: routerMaterialization, + }, + target: assetService.getTarget(), + upgrade: upgradeState, + }, + }; + const latestExit = pickLatestExit(opencodeState, routerState); + + options.repositories.serverRuntimeState.upsert({ + health, + lastExit: latestExit, + lastStartedAt: [opencodeState.lastStartedAt, routerState.lastStartedAt].filter(Boolean).sort().reverse()[0] ?? null, + opencodeBaseUrl: opencodeState.baseUrl, + opencodeStatus: opencodeState.status, + opencodeVersion: opencodeState.version ?? runtimeManifest?.opencodeVersion ?? null, + restartPolicy: { + bootstrapPolicy, + ...restartPolicy, + }, + routerStatus: routerState.status, + routerVersion: routerState.version ?? runtimeManifest?.routerVersion ?? 
null, + runtimeVersion: options.serverVersion, + serverId: options.serverId, + }); + }; + + const withRestartRecord = (component: "opencode" | "router") => { + const now = Date.now(); + const withinWindow = restartHistory[component].filter((value) => now - value <= restartPolicy.windowMs); + restartHistory[component] = withinWindow; + if (withinWindow.length >= restartPolicy.maxAttempts) { + return false; + } + restartHistory[component].push(now); + return true; + }; + + const clearRestartTimer = (component: "opencode" | "router") => { + const timer = restartTimers[component]; + if (timer) { + clearTimeout(timer); + restartTimers[component] = null; + } + }; + + const resolveRouterEnablement = () => { + const identities = options.repositories.routerIdentities.listByServer(options.serverId); + const bindings = options.repositories.routerBindings.listByServer(options.serverId); + const enabledIdentityCount = identities.filter((identity) => identity.isEnabled).length; + const enabledBindingCount = bindings.filter((binding) => binding.isEnabled).length; + const forced = isTruthy(process.env.OPENWORK_SERVER_V2_ROUTER_FORCE) || isTruthy(process.env.OPENWORK_SERVER_V2_ROUTER_REQUIRED); + + if (forced) { + return { + enabled: true, + enabledBindingCount, + enabledIdentityCount, + forced: true, + reason: "router_forced_by_environment", + } satisfies RouterEnablementDecision; + } + + if (enabledIdentityCount > 0) { + return { + enabled: true, + enabledBindingCount, + enabledIdentityCount, + forced: false, + reason: "enabled_router_identities_present", + } satisfies RouterEnablementDecision; + } + + if (enabledBindingCount > 0) { + return { + enabled: true, + enabledBindingCount, + enabledIdentityCount, + forced: false, + reason: "enabled_router_bindings_present", + } satisfies RouterEnablementDecision; + } + + return { + enabled: false, + enabledBindingCount, + enabledIdentityCount, + forced: false, + reason: "no_enabled_router_identities_or_bindings", + } satisfies 
RouterEnablementDecision; + }; + + const materializeRouterConfig = () => { + const identities = options.repositories.routerIdentities.listByServer(options.serverId).filter((identity) => identity.isEnabled); + const bindings = options.repositories.routerBindings.listByServer(options.serverId).filter((binding) => binding.isEnabled); + const dataDir = path.join(options.workingDirectory.runtimeDir, "router"); + const configPath = path.join(dataDir, "opencode-router.json"); + const dbPath = path.join(dataDir, "opencode-router.db"); + const logFile = path.join(dataDir, "logs", "opencode-router.log"); + fs.mkdirSync(dataDir, { recursive: true }); + fs.mkdirSync(path.dirname(logFile), { recursive: true }); + const identityKindById = new Map(identities.map((identity) => [identity.id, identity.kind])); + + const telegramBots = identities.filter((identity) => identity.kind === "telegram").flatMap((identity) => { + const auth = asRecord(identity.auth); + const config = asRecord(identity.config); + const token = typeof auth.token === "string" ? auth.token.trim() : typeof config.token === "string" ? config.token.trim() : ""; + if (!token) { + return []; + } + return [{ + access: typeof config.access === "string" ? config.access : "public", + directory: typeof config.directory === "string" ? config.directory.trim() : undefined, + enabled: true, + id: identity.id, + pairingCodeHash: typeof config.pairingCodeHash === "string" ? config.pairingCodeHash.trim() : undefined, + token, + }]; + }); + const slackApps = identities.filter((identity) => identity.kind === "slack").flatMap((identity) => { + const auth = asRecord(identity.auth); + const config = asRecord(identity.config); + const botToken = typeof auth.botToken === "string" ? auth.botToken.trim() : typeof config.botToken === "string" ? config.botToken.trim() : ""; + const appToken = typeof auth.appToken === "string" ? auth.appToken.trim() : typeof config.appToken === "string" ? 
config.appToken.trim() : ""; + if (!botToken || !appToken) { + return []; + } + return [{ + appToken, + botToken, + directory: typeof config.directory === "string" ? config.directory.trim() : undefined, + enabled: true, + id: identity.id, + }]; + }); + + const configPayload = { + channels: { + slack: { + apps: slackApps, + enabled: slackApps.length > 0, + }, + telegram: { + bots: telegramBots, + enabled: telegramBots.length > 0, + }, + }, + groupsEnabled: false, + opencodeDirectory: options.workingDirectory.rootDir, + opencodeUrl: opencodeState.baseUrl ?? undefined, + version: 1, + }; + + fs.writeFileSync(configPath, `${JSON.stringify(configPayload, null, 2)}\n`, "utf8"); + + const database = new Database(dbPath, { create: true }); + try { + ensureRouterStoreSchema(database); + database.query("DELETE FROM bindings").run(); + const insert = database.query( + `INSERT OR REPLACE INTO bindings (channel, identity_id, peer_id, directory, created_at, updated_at) + VALUES (?1, ?2, ?3, ?4, ?5, ?6)`, + ); + const now = Date.now(); + let writtenBindings = 0; + for (const binding of bindings) { + const channel = identityKindById.get(binding.routerIdentityId); + const directory = resolveDirectoryFromBindingConfig(binding.config); + if ((channel !== "telegram" && channel !== "slack") || !directory) { + continue; + } + insert.run(channel, binding.routerIdentityId, binding.bindingKey, directory, now, now); + writtenBindings += 1; + } + + routerMaterialization = { + bindingCount: writtenBindings, + configPath, + dataDir, + dbPath, + identityCount: telegramBots.length + slackApps.length, + logFile, + }; + } finally { + database.close(false); + } + }; + + const updateRecentOutput = () => { + opencodeState.recentOutput = opencodeHandle?.server.getOutput() ?? opencodeState.recentOutput; + routerState.recentOutput = routerHandle?.getOutput() ?? 
routerState.recentOutput; + }; + + const stopRouter = async () => { + clearRestartTimer("router"); + if (!routerHandle) { + routerState.running = false; + routerState.pid = null; + if (routerState.status !== "disabled") { + routerState.status = "stopped"; + } + persistState(); + return; + } + + routerStopping = true; + const handle = routerHandle; + routerHandle = null; + handle.close(); + await handle.waitForExit().catch(() => null); + routerState.running = false; + routerState.pid = null; + routerState.recentOutput = handle.getOutput(); + routerState.status = routerEnablement.enabled ? "stopped" : "disabled"; + persistState(); + routerStopping = false; + }; + + const stopOpencode = async () => { + clearRestartTimer("opencode"); + if (!opencodeHandle) { + opencodeState.running = false; + opencodeState.pid = null; + opencodeState.status = bootstrapPolicy === "disabled" ? "disabled" : "stopped"; + persistState(); + return; + } + + opencodeStopping = true; + const handle = opencodeHandle; + opencodeHandle = null; + handle.server.close(); + await handle.server.waitForExit().catch(() => null); + opencodeState.running = false; + opencodeState.pid = null; + opencodeState.recentOutput = handle.server.getOutput(); + opencodeState.status = bootstrapPolicy === "disabled" ? 
"disabled" : "stopped"; + persistState(); + opencodeStopping = false; + }; + + const startRouter = async () => { + if (!routerEnablement.enabled) { + await stopRouter(); + routerState.status = "disabled"; + routerState.lastError = null; + persistState(); + return; + } + + if (routerHandle && routerState.running) { + return; + } + + if (!runtimeManifest) { + return; + } + + if (!opencodeState.running || !opencodeState.baseUrl) { + routerState.status = "error"; + routerState.lastError = "Router cannot start until OpenCode is running."; + persistState(); + return; + } + + routerState.asset = await assetService.ensureRouterBinary(); + routerState.version = routerState.asset.version; + routerState.status = "starting"; + routerState.lastError = null; + routerState.lastStartedAt = nowIso(); + materializeRouterConfig(); + const healthPort = Number.parseInt(process.env.OPENWORK_SERVER_V2_ROUTER_HEALTH_PORT ?? "0", 10) || await getFreePort(); + const healthUrl = `http://127.0.0.1:${healthPort}`; + routerState.healthUrl = healthUrl; + persistState(); + + try { + const handle = await spawnManagedBinary( + [ + routerState.asset.absolutePath, + "serve", + options.workingDirectory.rootDir, + "--opencode-url", + opencodeState.baseUrl, + ], + { + cwd: options.workingDirectory.rootDir, + env: { + OPENCODE_DIRECTORY: options.workingDirectory.rootDir, + OPENCODE_ROUTER_CONFIG_PATH: routerMaterialization?.configPath, + OPENCODE_ROUTER_DATA_DIR: routerMaterialization?.dataDir, + OPENCODE_ROUTER_DB_PATH: routerMaterialization?.dbPath, + OPENCODE_ROUTER_HEALTH_PORT: String(healthPort), + OPENCODE_ROUTER_LOG_FILE: routerMaterialization?.logFile, + OPENCODE_URL: opencodeState.baseUrl, + }, + readinessUrl: `${healthUrl}/health`, + timeoutMs: Number.parseInt(process.env.OPENWORK_SERVER_V2_ROUTER_START_TIMEOUT_MS ?? "10000", 10) || 10_000, + }, + ); + routerHandle = handle; + routerState.baseUrl = healthUrl; + routerState.lastReadyAt = nowIso(); + routerState.pid = handle.proc.pid ?? 
null; + routerState.recentOutput = handle.getOutput(); + routerState.running = true; + routerState.status = "running"; + persistState(); + + void handle.waitForExit().then((exit) => { + if (routerHandle === handle) { + routerHandle = null; + } + routerState.running = false; + routerState.pid = null; + routerState.recentOutput = handle.getOutput(); + routerState.lastExit = { + ...exit, + output: handle.getOutput(), + reason: routerStopping || shuttingDown ? "stopped" : "unexpected_exit", + }; + + if (routerStopping || shuttingDown) { + routerState.status = routerEnablement.enabled ? "stopped" : "disabled"; + persistState(); + return; + } + + routerState.status = "crashed"; + persistState(); + if (!withRestartRecord("router")) { + routerState.lastError = "Router restart policy exhausted."; + persistState(); + return; + } + + routerState.status = "restart_scheduled"; + persistState(); + clearRestartTimer("router"); + restartTimers.router = setTimeout(() => { + if (shuttingDown) { + return; + } + void startRouter().catch((error) => { + routerState.status = "error"; + routerState.lastError = error instanceof Error ? error.message : String(error); + persistState(); + }); + }, restartPolicy.backoffMs); + }); + } catch (error) { + routerState.running = false; + routerState.status = "error"; + routerState.lastError = error instanceof Error ? error.message : String(error); + persistState(); + } + }; + + const startOpencode = async () => { + const bundle = await assetService.resolveRuntimeBundle(); + runtimeManifest = bundle.manifest; + opencodeState.asset = bundle.opencode; + opencodeState.version = bundle.opencode.version; + routerState.asset = bundle.router; + routerState.version = bundle.router.version; + opencodeState.status = bootstrapPolicy === "disabled" ? "disabled" : "starting"; + opencodeState.lastError = null; + opencodeState.lastStartedAt = nowIso(); + persistState(); + + const configuredPort = Number.parseInt(process.env.OPENWORK_SERVER_V2_OPENCODE_PORT ?? 
"0", 10); + const handle = await createLocalOpencode({ + binary: bundle.opencode.absolutePath, + client: { + directory: options.workingDirectory.rootDir, + responseStyle: "data", + throwOnError: true, + }, + config: {}, + cwd: options.workingDirectory.rootDir, + hostname: process.env.OPENWORK_SERVER_V2_OPENCODE_HOST?.trim() || "127.0.0.1", + port: configuredPort > 0 ? configuredPort : await getFreePort(), + timeout: Number.parseInt(process.env.OPENWORK_SERVER_V2_OPENCODE_START_TIMEOUT_MS ?? "10000", 10) || 10_000, + }); + + try { + await waitForOpencodeHealthy(handle, 5_000, 200); + } catch (error) { + handle.server.close(); + const snapshot = handle.server.getOutput(); + throw new Error( + `OpenCode became reachable at ${handle.server.url}, but did not pass the SDK health probe: ${error instanceof Error ? error.message : String(error)}.\nCollected output:\n${formatRuntimeOutput(snapshot)}`, + ); + } + + opencodeHandle = handle; + opencodeState.baseUrl = handle.server.url; + opencodeState.lastReadyAt = nowIso(); + opencodeState.pid = handle.server.proc.pid ?? null; + opencodeState.recentOutput = handle.server.getOutput(); + opencodeState.running = true; + opencodeState.status = "running"; + persistState(); + + void handle.server.waitForExit().then(async (exit) => { + if (opencodeHandle === handle) { + opencodeHandle = null; + } + opencodeState.running = false; + opencodeState.pid = null; + opencodeState.recentOutput = handle.server.getOutput(); + opencodeState.lastExit = { + ...exit, + output: handle.server.getOutput(), + reason: opencodeStopping || shuttingDown ? "stopped" : "unexpected_exit", + }; + + await stopRouter(); + + if (opencodeStopping || shuttingDown) { + opencodeState.status = bootstrapPolicy === "disabled" ? 
"disabled" : "stopped"; + persistState(); + return; + } + + opencodeState.status = "crashed"; + persistState(); + + if (!withRestartRecord("opencode")) { + opencodeState.lastError = "OpenCode restart policy exhausted."; + persistState(); + return; + } + + opencodeState.status = "restart_scheduled"; + persistState(); + clearRestartTimer("opencode"); + restartTimers.opencode = setTimeout(() => { + if (shuttingDown) { + return; + } + + void bootstrap().catch((error) => { + opencodeState.status = "error"; + opencodeState.lastError = error instanceof Error ? error.message : String(error); + persistState(); + }); + }, restartPolicy.backoffMs); + }); + }; + + const bootstrap = async () => { + if (bootstrapPolicy === "disabled") { + opencodeState.status = "disabled"; + routerState.status = "disabled"; + persistState(); + return; + } + + if (bootstrapPromise) { + return bootstrapPromise; + } + + bootstrapPromise = (async () => { + routerEnablement = resolveRouterEnablement(); + persistState(); + try { + updateRecentOutput(); + if (!opencodeState.running) { + await startOpencode(); + } + } catch (error) { + opencodeState.running = false; + opencodeState.status = "error"; + opencodeState.lastError = error instanceof Error ? 
error.message : String(error); + if (error instanceof LocalOpencodeStartupError) { + opencodeState.recentOutput = error.output; + opencodeState.lastExit = { + at: nowIso(), + code: null, + output: error.output, + reason: error.code, + signal: null, + }; + } + persistState(); + throw error; + } + + await startRouter(); + })().finally(() => { + bootstrapPromise = null; + }); + + return bootstrapPromise; + }; + + const applyRouterConfig = async () => { + routerEnablement = resolveRouterEnablement(); + persistState(); + + if (bootstrapPolicy !== "disabled" && !opencodeState.running) { + await startOpencode(); + } + + if (routerHandle || routerState.running) { + await stopRouter(); + } + + if (bootstrapPolicy === "disabled") { + routerState.status = "disabled"; + persistState(); + return; + } + + await startRouter(); + }; + + persistState(); + + const service: RuntimeService = { + async applyRouterConfig() { + await applyRouterConfig(); + return this.getRouterHealth(); + }, + + async bootstrap() { + await bootstrap(); + }, + + async dispose() { + shuttingDown = true; + clearRestartTimer("opencode"); + clearRestartTimer("router"); + await stopRouter(); + await stopOpencode(); + persistState(); + }, + + getBootstrapPolicy() { + return bootstrapPolicy; + }, + + getOpencodeHealth() { + updateRecentOutput(); + return { + baseUrl: opencodeState.baseUrl, + binaryPath: opencodeState.asset?.absolutePath ?? null, + diagnostics: opencodeState.recentOutput, + lastError: opencodeState.lastError, + lastExit: opencodeState.lastExit, + lastReadyAt: opencodeState.lastReadyAt, + lastStartedAt: opencodeState.lastStartedAt, + manifest: runtimeManifest, + pid: opencodeState.pid, + running: opencodeState.running, + source: opencodeState.asset?.source ?? assetService.getSource(), + status: opencodeState.status, + version: opencodeState.version, + }; + }, + + getRouterHealth() { + updateRecentOutput(); + return { + baseUrl: routerState.baseUrl, + binaryPath: routerState.asset?.absolutePath ?? 
null, + diagnostics: routerState.recentOutput, + enablement: routerEnablement, + healthUrl: routerState.healthUrl, + lastError: routerState.lastError, + lastExit: routerState.lastExit, + lastReadyAt: routerState.lastReadyAt, + lastStartedAt: routerState.lastStartedAt, + manifest: runtimeManifest, + materialization: routerMaterialization, + pid: routerState.pid, + running: routerState.running, + source: routerState.asset?.source ?? assetService.getSource(), + status: routerState.status, + version: routerState.version, + }; + }, + + getRuntimeSummary() { + return { + bootstrapPolicy, + manifest: runtimeManifest, + opencode: this.getOpencodeHealth(), + restartPolicy, + router: this.getRouterHealth(), + upgrade: upgradeState, + source: assetService.getSource(), + target: assetService.getTarget(), + }; + }, + + getRuntimeVersions() { + const summary = this.getRuntimeSummary(); + return { + active: { + opencodeVersion: summary.opencode.version, + routerVersion: summary.router.version, + serverVersion: options.serverVersion, + }, + manifest: summary.manifest, + pinned: { + opencodeVersion: summary.manifest?.opencodeVersion ?? null, + routerVersion: summary.manifest?.routerVersion ?? null, + serverVersion: options.serverVersion, + }, + target: summary.target, + }; + }, + + getStateForPersistence(): ReturnType { + return this.getRuntimeSummary(); + }, + + async upgradeRuntime() { + upgradeState = { + error: null, + finishedAt: null, + startedAt: nowIso(), + status: "running", + }; + persistState(); + + try { + await stopRouter(); + await stopOpencode(); + runtimeManifest = null; + opencodeState.asset = null; + routerState.asset = null; + await bootstrap(); + upgradeState = { + error: null, + finishedAt: nowIso(), + startedAt: upgradeState.startedAt, + status: "completed", + }; + persistState(); + return { + state: upgradeState, + summary: this.getRuntimeSummary(), + }; + } catch (error) { + upgradeState = { + error: error instanceof Error ? 
error.message : String(error), + finishedAt: nowIso(), + startedAt: upgradeState.startedAt, + status: "failed", + }; + persistState(); + throw error; + } + }, + }; + + return service; +} diff --git a/apps/server-v2/src/services/scheduler-service.ts b/apps/server-v2/src/services/scheduler-service.ts new file mode 100644 index 00000000..45c5eec8 --- /dev/null +++ b/apps/server-v2/src/services/scheduler-service.ts @@ -0,0 +1,322 @@ +import { spawnSync } from "node:child_process"; +import { existsSync } from "node:fs"; +import { readdir, rm } from "node:fs/promises"; +import { homedir } from "node:os"; +import { join, resolve } from "node:path"; +import { RouteError } from "../http.js"; +import type { WorkspaceRegistryService } from "./workspace-registry-service.js"; + +export type ScheduledJobRun = { + prompt?: string; + command?: string; + arguments?: string; + files?: string[]; + agent?: string; + model?: string; + variant?: string; + title?: string; + share?: boolean; + continue?: boolean; + session?: string; + runFormat?: string; + attachUrl?: string; + port?: number; +}; + +export type ScheduledJob = { + scopeId?: string; + timeoutSeconds?: number; + invocation?: { command: string; args: string[] }; + slug: string; + name: string; + schedule: string; + prompt?: string; + attachUrl?: string; + run?: ScheduledJobRun; + source?: string; + workdir?: string; + createdAt: string; + updatedAt?: string; + lastRunAt?: string; + lastRunExitCode?: number; + lastRunError?: string; + lastRunSource?: string; + lastRunStatus?: string; +}; + +type JobEntry = { + job: ScheduledJob; + jobFile: string; +}; + +const SUPPORTED_PLATFORMS = new Set(["darwin", "linux"]); + +function ensureSchedulerSupported() { + if (SUPPORTED_PLATFORMS.has(process.platform)) { + return; + } + throw new RouteError(501, "not_implemented", "Scheduler is supported only on macOS and Linux."); +} + +function normalizePathForCompare(value: string) { + const trimmed = value.trim(); + return trimmed ? 
resolve(trimmed) : ""; +} + +function slugify(name: string) { + let out = ""; + let dash = false; + for (const char of name.trim().toLowerCase()) { + if (/[a-z0-9]/.test(char)) { + out += char; + dash = false; + continue; + } + if (!dash) { + out += "-"; + dash = true; + } + } + return out.replace(/^-+|-+$/g, ""); +} + +function findJobEntryByName(entries: JobEntry[], name: string) { + const trimmed = name.trim(); + if (!trimmed) { + return null; + } + const slug = slugify(trimmed); + const lower = trimmed.toLowerCase(); + return entries.find((entry) => + entry.job.slug === trimmed + || entry.job.slug === slug + || entry.job.slug.endsWith(`-${slug}`) + || entry.job.name.toLowerCase() === lower + || entry.job.name.toLowerCase().includes(lower), + ) ?? null; +} + +function schedulerSystemPaths(job: ScheduledJob, homeDir: string) { + const paths: string[] = []; + if (process.platform === "darwin") { + if (job.scopeId) { + paths.push(join(homeDir, "Library", "LaunchAgents", `com.opencode.job.${job.scopeId}.${job.slug}.plist`)); + } + paths.push(join(homeDir, "Library", "LaunchAgents", `com.opencode.job.${job.slug}.plist`)); + return paths; + } + + if (process.platform === "linux") { + const base = join(homeDir, ".config", "systemd", "user"); + if (job.scopeId) { + paths.push(join(base, `opencode-job-${job.scopeId}-${job.slug}.service`)); + paths.push(join(base, `opencode-job-${job.scopeId}-${job.slug}.timer`)); + } + paths.push(join(base, `opencode-job-${job.slug}.service`)); + paths.push(join(base, `opencode-job-${job.slug}.timer`)); + return paths; + } + + return paths; +} + +async function loadJobFile(path: string) { + const file = Bun.file(path); + if (!(await file.exists())) { + return null; + } + const parsed = await file.json().catch(() => null); + if (!parsed || typeof parsed !== "object") { + return null; + } + if (typeof (parsed as any).slug !== "string" || typeof (parsed as any).name !== "string" || typeof (parsed as any).schedule !== "string") { + return 
null; + } + return parsed as ScheduledJob; +} + +export type SchedulerService = ReturnType; + +export function createSchedulerService(input: { + workspaceRegistry: WorkspaceRegistryService; + homeDir?: string; +}) { + const resolvedHomeDir = (input.homeDir ?? process.env.HOME ?? homedir()).trim(); + + function requireHomeDir() { + if (!resolvedHomeDir) { + throw new RouteError(500, "internal_error", "Failed to resolve home directory."); + } + return resolvedHomeDir; + } + + function legacyJobsDir() { + return join(requireHomeDir(), ".config", "opencode", "jobs"); + } + + function schedulerScopesDir() { + return join(requireHomeDir(), ".config", "opencode", "scheduler", "scopes"); + } + + function legacyJobFilePath(slug: string) { + return join(legacyJobsDir(), `${slug}.json`); + } + + function scopedJobFilePath(scopeId: string, slug: string) { + return join(schedulerScopesDir(), scopeId, "jobs", `${slug}.json`); + } + + async function loadLegacyJobEntries() { + const jobsDir = legacyJobsDir(); + if (!existsSync(jobsDir)) { + return [] as JobEntry[]; + } + const entries = await readdir(jobsDir, { withFileTypes: true }); + const jobs: JobEntry[] = []; + for (const entry of entries) { + if (!entry.isFile() || !entry.name.endsWith(".json")) { + continue; + } + const jobFile = join(jobsDir, entry.name); + const job = await loadJobFile(jobFile); + if (job) { + jobs.push({ job, jobFile }); + } + } + return jobs; + } + + async function loadScopedJobEntries() { + const scopesDir = schedulerScopesDir(); + if (!existsSync(scopesDir)) { + return [] as JobEntry[]; + } + const scopeEntries = await readdir(scopesDir, { withFileTypes: true }); + const jobs: JobEntry[] = []; + for (const scopeEntry of scopeEntries) { + if (!scopeEntry.isDirectory()) { + continue; + } + const scopeId = scopeEntry.name; + const jobsDir = join(scopesDir, scopeId, "jobs"); + if (!existsSync(jobsDir)) { + continue; + } + const entries = await readdir(jobsDir, { withFileTypes: true }); + for (const entry 
of entries) { + if (!entry.isFile() || !entry.name.endsWith(".json")) { + continue; + } + const jobFile = join(jobsDir, entry.name); + const job = await loadJobFile(jobFile); + if (!job) { + continue; + } + jobs.push({ job: { ...job, scopeId: job.scopeId ?? scopeId }, jobFile }); + } + } + return jobs; + } + + async function loadAllJobEntries() { + const [scoped, legacy] = await Promise.all([loadScopedJobEntries(), loadLegacyJobEntries()]); + return [...scoped, ...legacy]; + } + + function requireLocalWorkspaceDataDir(workspaceId: string) { + const workspace = input.workspaceRegistry.getById(workspaceId, { includeHidden: true }); + if (!workspace) { + throw new RouteError(404, "not_found", `Workspace not found: ${workspaceId}`); + } + if (workspace.backend.kind !== "local_opencode") { + throw new RouteError(501, "not_implemented", `Scheduler jobs are not supported for ${workspace.backend.kind} workspaces.`); + } + const dataDir = workspace.backend.local?.dataDir?.trim() ?? ""; + if (!dataDir) { + throw new RouteError(400, "invalid_request", `Workspace ${workspace.id} does not have a local data directory.`); + } + return dataDir; + } + + async function uninstallJob(job: ScheduledJob) { + const homeDir = requireHomeDir(); + if (process.platform === "darwin") { + for (const plist of schedulerSystemPaths(job, homeDir)) { + if (!(await Bun.file(plist).exists())) { + continue; + } + spawnSync("launchctl", ["unload", plist]); + await rm(plist, { force: true }); + } + return; + } + + if (process.platform === "linux") { + const timerUnits = [ + job.scopeId ? 
`opencode-job-${job.scopeId}-${job.slug}.timer` : null, + `opencode-job-${job.slug}.timer`, + ].filter(Boolean) as string[]; + for (const unit of timerUnits) { + spawnSync("systemctl", ["--user", "stop", unit]); + spawnSync("systemctl", ["--user", "disable", unit]); + } + for (const filePath of schedulerSystemPaths(job, homeDir)) { + if (await Bun.file(filePath).exists()) { + await rm(filePath, { force: true }); + } + } + spawnSync("systemctl", ["--user", "daemon-reload"]); + return; + } + + ensureSchedulerSupported(); + } + + return { + async listWorkspaceJobs(workspaceId: string) { + ensureSchedulerSupported(); + const workdir = requireLocalWorkspaceDataDir(workspaceId); + const normalizedRoot = normalizePathForCompare(workdir); + const entries = await loadAllJobEntries(); + const jobs = entries + .map((entry) => entry.job) + .filter((job) => { + const jobWorkdir = job.workdir?.trim() ?? ""; + return jobWorkdir ? normalizePathForCompare(jobWorkdir) === normalizedRoot : false; + }); + jobs.sort((a, b) => a.name.toLowerCase().localeCompare(b.name.toLowerCase())); + return { items: jobs }; + }, + + async deleteWorkspaceJob(workspaceId: string, name: string) { + ensureSchedulerSupported(); + const workdir = requireLocalWorkspaceDataDir(workspaceId); + const normalizedRoot = normalizePathForCompare(workdir); + const trimmed = name.trim(); + if (!trimmed) { + throw new RouteError(400, "invalid_request", "name is required"); + } + const entries = (await loadAllJobEntries()).filter((entry) => { + const jobWorkdir = entry.job.workdir?.trim() ?? ""; + return jobWorkdir ? 
normalizePathForCompare(jobWorkdir) === normalizedRoot : false; + }); + const found = findJobEntryByName(entries, trimmed); + if (!found) { + throw new RouteError(404, "not_found", `Job "${trimmed}" not found.`); + } + await uninstallJob(found.job); + await rm(found.jobFile, { force: true }); + const legacyJobPath = legacyJobFilePath(found.job.slug); + if (legacyJobPath !== found.jobFile && await Bun.file(legacyJobPath).exists()) { + await rm(legacyJobPath, { force: true }); + } + if (found.job.scopeId) { + const scopedJobPath = scopedJobFilePath(found.job.scopeId, found.job.slug); + if (scopedJobPath !== found.jobFile && await Bun.file(scopedJobPath).exists()) { + await rm(scopedJobPath, { force: true }); + } + } + return { job: found.job }; + }, + }; +} diff --git a/apps/server-v2/src/services/server-registry-service.ts b/apps/server-v2/src/services/server-registry-service.ts new file mode 100644 index 00000000..3adf4192 --- /dev/null +++ b/apps/server-v2/src/services/server-registry-service.ts @@ -0,0 +1,90 @@ +import type { ServerRepositories } from "../database/repositories.js"; +import type { JsonObject, ServerRecord } from "../database/types.js"; + +export type ServerRegistrySummary = { + hiddenWorkspaceCount: number; + localServerId: string; + remoteServerCount: number; + totalServers: number; + visibleWorkspaceCount: number; +}; + +export type ServerInventoryItem = { + auth: { + configured: boolean; + scheme: "bearer" | "none"; + }; + baseUrl: string | null; + capabilities: JsonObject; + hostingKind: ServerRecord["hostingKind"]; + id: string; + isEnabled: boolean; + isLocal: boolean; + kind: ServerRecord["kind"]; + label: string; + lastSeenAt: string | null; + source: string; + updatedAt: string; +}; + +export type ServerRegistryService = ReturnType; + +function hasServerAuth(record: ServerRecord) { + if (!record.auth) { + return false; + } + + return Object.values(record.auth).some((value) => typeof value === "string" && value.trim().length > 0); +} + 
+export function createServerRegistryService(input: { + localServerId: string; + repositories: ServerRepositories; +}) { + const { repositories } = input; + + function serialize(record: ServerRecord, options?: { includeBaseUrl?: boolean }) { + return { + auth: { + configured: hasServerAuth(record), + scheme: hasServerAuth(record) ? "bearer" : "none", + }, + baseUrl: options?.includeBaseUrl === false ? null : record.baseUrl, + capabilities: record.capabilities, + hostingKind: record.hostingKind, + id: record.id, + isEnabled: record.isEnabled, + isLocal: record.isLocal, + kind: record.kind, + label: record.label, + lastSeenAt: record.lastSeenAt, + source: record.source, + updatedAt: record.updatedAt, + } satisfies ServerInventoryItem; + } + + return { + getById(serverId: string) { + return repositories.servers.getById(serverId); + }, + + list(options?: { includeBaseUrl?: boolean }) { + return repositories.servers.list().map((record) => serialize(record, options)); + }, + + serialize, + + summarize(): ServerRegistrySummary { + const servers = repositories.servers.list(); + const allWorkspaces = repositories.workspaces.list({ includeHidden: true }); + const hiddenWorkspaceCount = allWorkspaces.filter((workspace) => workspace.isHidden).length; + return { + hiddenWorkspaceCount, + localServerId: input.localServerId, + remoteServerCount: servers.filter((server) => server.kind === "remote").length, + totalServers: servers.length, + visibleWorkspaceCount: allWorkspaces.length - hiddenWorkspaceCount, + }; + }, + }; +} diff --git a/apps/server-v2/src/services/system-service.ts b/apps/server-v2/src/services/system-service.ts new file mode 100644 index 00000000..934d6745 --- /dev/null +++ b/apps/server-v2/src/services/system-service.ts @@ -0,0 +1,148 @@ +import type { ProcessInfoAdapter } from "../adapters/process-info.js"; +import type { DatabaseStatusProvider } from "../database/status-provider.js"; +import { routeNamespaces, workspaceResourcePattern } from 
"../routes/route-paths.js"; +import type { AuthService, RequestActor } from "./auth-service.js"; +import type { CapabilitiesService } from "./capabilities-service.js"; +import type { RuntimeService } from "./runtime-service.js"; +import type { ServerRegistryService } from "./server-registry-service.js"; +import type { WorkspaceRegistryService } from "./workspace-registry-service.js"; + +export type SystemService = ReturnType; + +export function createSystemService(input: { + auth: AuthService; + capabilities: CapabilitiesService; + environment: string; + processInfo: ProcessInfoAdapter; + database: DatabaseStatusProvider; + runtime: RuntimeService; + serverRegistry: ServerRegistryService; + startedAt: Date; + version: string; + workspaceRegistry: WorkspaceRegistryService; +}) { + const service = "openwork-server-v2" as const; + const packageName = "openwork-server-v2" as const; + + return { + getRootInfo() { + return { + service, + packageName, + version: input.version, + environment: input.environment, + routes: { + ...routeNamespaces, + workspaceResource: workspaceResourcePattern, + }, + contract: { + source: "hono-openapi" as const, + openapiPath: routeNamespaces.openapi, + sdkPackage: "@openwork/server-sdk" as const, + }, + }; + }, + + getCapabilities(actor: RequestActor) { + return input.capabilities.getCapabilities(actor); + }, + + getHealth(now: Date = new Date()) { + return { + service, + status: "ok" as const, + startedAt: input.startedAt.toISOString(), + uptimeMs: Math.max(0, now.getTime() - input.startedAt.getTime()), + database: input.database.getStatus(), + }; + }, + + getStatus(actor: RequestActor, now: Date = new Date()) { + const runtimeSummary = input.runtime.getRuntimeSummary(); + const registry = input.serverRegistry.summarize(); + return { + auth: input.auth.getSummary(actor), + capabilities: input.capabilities.getCapabilities(actor), + database: input.database.getStatus(), + environment: input.environment, + registry, + runtime: { + opencode: { 
+ baseUrl: runtimeSummary.opencode.baseUrl, + running: runtimeSummary.opencode.running, + status: runtimeSummary.opencode.status, + version: runtimeSummary.opencode.version, + }, + router: { + baseUrl: runtimeSummary.router.baseUrl, + running: runtimeSummary.router.running, + status: runtimeSummary.router.status, + version: runtimeSummary.router.version, + }, + source: runtimeSummary.source, + target: runtimeSummary.target, + }, + service, + startedAt: input.startedAt.toISOString(), + status: "ok" as const, + uptimeMs: Math.max(0, now.getTime() - input.startedAt.getTime()), + version: input.version, + }; + }, + + getMetadata(actor: RequestActor) { + return { + foundation: { + phase: 10 as const, + middlewareOrder: [ + "request-id", + "request-context", + "response-finalizer", + "request-logger", + "error-handler", + ], + routeNamespaces: { + ...routeNamespaces, + workspaceResource: workspaceResourcePattern, + }, + database: input.database.getStatus(), + startup: input.database.getStartupDiagnostics(), + }, + requestContext: { + actorKind: actor.kind, + requestIdHeader: "X-Request-Id" as const, + }, + runtime: { + environment: input.processInfo.environment, + hostname: input.processInfo.hostname, + pid: input.processInfo.pid, + platform: input.processInfo.platform, + runtime: input.processInfo.runtime, + runtimeVersion: input.processInfo.runtimeVersion, + }, + runtimeSupervisor: input.runtime.getRuntimeSummary(), + contract: { + source: "hono-openapi" as const, + openapiPath: routeNamespaces.openapi, + sdkPackage: "@openwork/server-sdk" as const, + }, + }; + }, + + listServers() { + return { + items: input.serverRegistry.list({ includeBaseUrl: true }), + }; + }, + + listWorkspaces(options?: { includeHidden?: boolean }) { + return { + items: input.workspaceRegistry.list({ includeHidden: options?.includeHidden ?? 
false }), + }; + }, + + getWorkspace(workspaceId: string, options?: { includeHidden?: boolean }) { + return input.workspaceRegistry.getById(workspaceId, { includeHidden: options?.includeHidden ?? false }); + }, + }; +} diff --git a/apps/server-v2/src/services/workspace-file-service.ts b/apps/server-v2/src/services/workspace-file-service.ts new file mode 100644 index 00000000..e2beb8c8 --- /dev/null +++ b/apps/server-v2/src/services/workspace-file-service.ts @@ -0,0 +1,1226 @@ +import fs from "node:fs"; +import path from "node:path"; +import { randomUUID } from "node:crypto"; +import { HTTPException } from "hono/http-exception"; +import type { RegistryService } from "./registry-service.js"; +import type { ServerRepositories } from "../database/repositories.js"; +import type { WorkspaceRecord } from "../database/types.js"; +import type { RuntimeService } from "./runtime-service.js"; +import type { ConfigMaterializationService } from "./config-materialization-service.js"; +import { RouteError } from "../http.js"; +import { requestRemoteOpenwork, requestRemoteOpenworkRaw, resolveRemoteWorkspaceTarget } from "../adapters/remote-openwork.js"; + +const FILE_SESSION_DEFAULT_TTL_MS = 15 * 60 * 1000; +const FILE_SESSION_MIN_TTL_MS = 30 * 1000; +const FILE_SESSION_MAX_TTL_MS = 24 * 60 * 60 * 1000; +const FILE_SESSION_MAX_BATCH_ITEMS = 64; +const FILE_SESSION_MAX_FILE_BYTES = 5_000_000; +const FILE_SESSION_CATALOG_DEFAULT_LIMIT = 2000; +const FILE_SESSION_CATALOG_MAX_LIMIT = 10000; + +type ReloadReason = "agents" | "commands" | "config" | "mcp" | "plugins" | "skills"; + +type ReloadTrigger = { + action?: "added" | "removed" | "updated"; + name?: string; + path?: string; + type: "agent" | "command" | "config" | "mcp" | "plugin" | "skill"; +}; + +type ReloadEvent = { + id: string; + reason: ReloadReason; + seq: number; + timestamp: number; + trigger?: ReloadTrigger; + workspaceId: string; +}; + +type FileCatalogEntry = { + kind: "dir" | "file"; + mtimeMs: number; + path: string; 
+ revision: string; + size: number; +}; + +type FileSessionRecord = { + actorTokenHash: string; + canWrite: boolean; + createdAt: number; + expiresAt: number; + id: string; + workspaceId: string; + workspaceRoot: string; +}; + +type FileSessionEvent = { + id: string; + path: string; + revision?: string; + seq: number; + timestamp: number; + toPath?: string; + type: "delete" | "mkdir" | "rename" | "write"; + workspaceId: string; +}; + +class LocalFileSessionStore { + private sessions = new Map(); + + private workspaceEvents = new Map(); + + close(sessionId: string) { + return this.sessions.delete(sessionId); + } + + create(input: Omit & { ttlMs: number }) { + const now = nowMs(); + const record: FileSessionRecord = { + ...input, + createdAt: now, + expiresAt: now + input.ttlMs, + id: randomUUID(), + }; + this.sessions.set(record.id, record); + return record; + } + + get(sessionId: string) { + const session = this.sessions.get(sessionId) ?? null; + if (session && session.expiresAt <= nowMs()) { + this.sessions.delete(sessionId); + return null; + } + return session; + } + + listWorkspaceEvents(workspaceId: string, since = 0) { + const state = this.workspaceEvents.get(workspaceId); + if (!state) { + return { cursor: 0, items: [] as FileSessionEvent[] }; + } + return { + cursor: state.seq, + items: state.events.filter((event) => event.seq > since), + }; + } + + recordWorkspaceEvent(input: Omit) { + const state = this.workspaceEvents.get(input.workspaceId) ?? 
{ events: [], seq: 0 }; + const event: FileSessionEvent = { + ...input, + id: randomUUID(), + seq: state.seq + 1, + timestamp: nowMs(), + }; + state.seq = event.seq; + state.events.push(event); + if (state.events.length > 500) { + state.events.splice(0, state.events.length - 500); + } + this.workspaceEvents.set(input.workspaceId, state); + return event; + } + + renew(sessionId: string, ttlMs: number) { + const session = this.get(sessionId); + if (!session) { + return null; + } + session.expiresAt = nowMs() + ttlMs; + this.sessions.set(sessionId, session); + return session; + } +} + +function nowMs() { + return Date.now(); +} + +function nowIso() { + return new Date().toISOString(); +} + +function recordAuditEntry(workspaceId: string, workspaceRoot: string, action: string, target: string, summary: string) { + const auditRoot = process.env.OPENWORK_DATA_DIR?.trim() + ? path.join(process.env.OPENWORK_DATA_DIR.trim(), "audit") + : path.join(workspaceRoot, ".opencode", "openwork"); + fs.mkdirSync(auditRoot, { recursive: true }); + const filePath = process.env.OPENWORK_DATA_DIR?.trim() + ? 
path.join(auditRoot, `${workspaceId}.jsonl`) + : path.join(auditRoot, "audit.jsonl"); + const entry = { + action, + actor: { type: "remote" }, + id: randomUUID(), + summary, + target, + timestamp: nowMs(), + workspaceId, + }; + fs.appendFileSync(filePath, `${JSON.stringify(entry)}\n`, "utf8"); +} + +function initializeWorkspaceFiles(workspaceRoot: string) { + fs.mkdirSync(path.join(workspaceRoot, ".opencode"), { recursive: true }); + fs.mkdirSync(path.join(workspaceRoot, ".opencode", "openwork"), { recursive: true }); +} + +function resolveWorkspaceOrThrow(repositories: ServerRepositories, workspaceId: string) { + const workspace = repositories.workspaces.getById(workspaceId); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + if (!workspace.dataDir?.trim()) { + throw new RouteError(400, "invalid_request", `Workspace ${workspace.id} does not have a local data directory.`); + } + return workspace; +} + +function resolveWorkspaceRecordOrThrow(repositories: ServerRepositories, workspaceId: string) { + const workspace = repositories.workspaces.getById(workspaceId); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + return workspace; +} + +function resolveWorkspaceRoot(workspace: WorkspaceRecord) { + const root = workspace.dataDir?.trim(); + if (!root) { + throw new RouteError(400, "invalid_request", `Workspace ${workspace.id} does not have a local data directory.`); + } + return root; +} + +function normalizeWorkspaceRelativePath(input: string, options: { allowSubdirs: boolean }) { + const raw = String(input ?? 
"").trim(); + if (!raw) { + throw new RouteError(400, "invalid_request", "Path is required."); + } + if (raw.includes("\u0000")) { + throw new RouteError(400, "invalid_request", "Path contains a null byte."); + } + let normalized = raw.replace(/\\/g, "/").replace(/^\/+/, "").replace(/^\.\//, "").replace(/^workspace\//, "").replace(/^\/+/, ""); + const parts = normalized.split("/").filter(Boolean); + if (!parts.length) { + throw new RouteError(400, "invalid_request", "Path is required."); + } + if (!options.allowSubdirs && parts.length > 1) { + throw new RouteError(400, "invalid_request", "Subdirectories are not allowed."); + } + for (const part of parts) { + if (part === "." || part === "..") { + throw new RouteError(400, "invalid_request", "Path traversal is not allowed."); + } + } + normalized = parts.join("/"); + return normalized; +} + +function resolveSafeChildPath(root: string, child: string) { + const rootResolved = path.resolve(root); + const candidate = path.resolve(rootResolved, child); + if (candidate === rootResolved || !candidate.startsWith(rootResolved + path.sep)) { + throw new RouteError(400, "invalid_request", "Path traversal is not allowed."); + } + return candidate; +} + +function fileRevision(info: { mtimeMs: number; size: number }) { + return `${Math.floor(info.mtimeMs)}:${info.size}`; +} + +function parseFileSessionTtlMs(input: unknown) { + const raw = typeof input === "number" && Number.isFinite(input) ? 
input : Number.NaN; + if (Number.isNaN(raw)) return FILE_SESSION_DEFAULT_TTL_MS; + const ttlMs = Math.floor(raw * 1000); + if (ttlMs < FILE_SESSION_MIN_TTL_MS) return FILE_SESSION_MIN_TTL_MS; + if (ttlMs > FILE_SESSION_MAX_TTL_MS) return FILE_SESSION_MAX_TTL_MS; + return ttlMs; +} + +function parseCatalogLimit(input: string | null) { + if (!input) return FILE_SESSION_CATALOG_DEFAULT_LIMIT; + const parsed = Number(input); + if (!Number.isFinite(parsed) || parsed <= 0) return FILE_SESSION_CATALOG_DEFAULT_LIMIT; + return Math.min(Math.floor(parsed), FILE_SESSION_CATALOG_MAX_LIMIT); +} + +function parseCursor(input: string | null) { + if (!input) return 0; + const parsed = Number(input); + if (!Number.isFinite(parsed) || parsed < 0) return 0; + return Math.floor(parsed); +} + +function parsePathFilter(input: string | null) { + if (!input?.trim()) return null; + return normalizeWorkspaceRelativePath(input, { allowSubdirs: true }); +} + +function matchesCatalogFilter(pathValue: string, filter: string | null) { + if (!filter) return true; + return pathValue === filter || pathValue.startsWith(`${filter}/`); +} + +function parseBatchPathList(input: unknown) { + if (!Array.isArray(input) || input.length === 0) { + throw new RouteError(400, "invalid_request", "paths must be a non-empty array."); + } + if (input.length > FILE_SESSION_MAX_BATCH_ITEMS) { + throw new RouteError(400, "invalid_request", `paths must include <= ${FILE_SESSION_MAX_BATCH_ITEMS} items.`); + } + return input.map((item) => normalizeWorkspaceRelativePath(String(item ?? 
""), { allowSubdirs: true })); +} + +function parseBatchWriteList(input: unknown) { + if (!Array.isArray(input) || input.length === 0) { + throw new RouteError(400, "invalid_request", "writes must be a non-empty array."); + } + if (input.length > FILE_SESSION_MAX_BATCH_ITEMS) { + throw new RouteError(400, "invalid_request", `writes must include <= ${FILE_SESSION_MAX_BATCH_ITEMS} items.`); + } + return input.map((item) => { + if (!item || typeof item !== "object") { + throw new RouteError(400, "invalid_request", "Write entries must be objects."); + } + const record = item as Record; + const contentBase64 = typeof record.contentBase64 === "string" ? record.contentBase64.trim() : ""; + if (!contentBase64) { + throw new RouteError(400, "invalid_request", "contentBase64 is required."); + } + return { + contentBase64, + force: record.force === true, + ifMatchRevision: typeof record.ifMatchRevision === "string" && record.ifMatchRevision.trim() ? record.ifMatchRevision.trim() : undefined, + path: normalizeWorkspaceRelativePath(String(record.path ?? 
""), { allowSubdirs: true }), + }; + }); +} + +function parseOperations(input: unknown) { + if (!Array.isArray(input) || input.length === 0) { + throw new RouteError(400, "invalid_request", "operations must be a non-empty array."); + } + if (input.length > FILE_SESSION_MAX_BATCH_ITEMS) { + throw new RouteError(400, "invalid_request", `operations must include <= ${FILE_SESSION_MAX_BATCH_ITEMS} items.`); + } + return input as Array>; +} + +function resolveInboxDir(workspaceRoot: string) { + return path.join(workspaceRoot, ".opencode", "openwork", "inbox"); +} + +function resolveOutboxDir(workspaceRoot: string) { + return path.join(workspaceRoot, ".opencode", "openwork", "outbox"); +} + +function encodeArtifactId(relativePath: string) { + return Buffer.from(relativePath, "utf8").toString("base64url"); +} + +function decodeArtifactId(id: string) { + const raw = id.trim(); + if (!raw) { + throw new RouteError(400, "invalid_request", "Artifact id is required."); + } + try { + return normalizeWorkspaceRelativePath(Buffer.from(raw, "base64url").toString("utf8"), { allowSubdirs: true }); + } catch { + throw new RouteError(400, "invalid_request", "Artifact id is invalid."); + } +} + +class ReloadEventStore { + private events: ReloadEvent[] = []; + + private lastRecorded = new Map(); + + private seq = 0; + + list(workspaceId: string, since = 0) { + return { + cursor: this.seq, + items: this.events.filter((event) => event.workspaceId === workspaceId && event.seq > since), + }; + } + + record(workspaceId: string, reason: ReloadReason, trigger?: ReloadTrigger, debounceMs = 750) { + const key = `${workspaceId}:${reason}:${trigger?.type ?? "unknown"}:${trigger?.path ?? ""}`; + const now = nowMs(); + const last = this.lastRecorded.get(key) ?? 0; + if (now - last < debounceMs) { + return null; + } + this.lastRecorded.set(key, now); + const event: ReloadEvent = { + id: randomUUID(), + reason, + seq: ++this.seq, + timestamp: now, + ...(trigger ? 
{ trigger } : {}), + workspaceId, + }; + this.events.push(event); + if (this.events.length > 500) { + this.events.splice(0, this.events.length - 500); + } + return event; + } +} + +export type WorkspaceFileService = ReturnType; + +export function createWorkspaceFileService(input: { + config: ConfigMaterializationService; + registry: RegistryService; + repositories: ServerRepositories; + runtime: RuntimeService; + serverId: string; +}) { + const fileSessions = new LocalFileSessionStore(); + const reloadEvents = new ReloadEventStore(); + const watcherClosers = new Map void>(); + + function getRemoteServerOrThrow(workspace: WorkspaceRecord) { + const server = input.repositories.servers.getById(workspace.serverId); + if (!server) { + throw new RouteError(502, "bad_gateway", `Workspace ${workspace.id} points at missing remote server ${workspace.serverId}.`); + } + return server; + } + + function getRemoteWorkspacePath(workspace: WorkspaceRecord, suffix: string) { + const server = getRemoteServerOrThrow(workspace); + const target = resolveRemoteWorkspaceTarget(server, workspace); + return { + path: `/workspaces/${encodeURIComponent(target.remoteWorkspaceId)}${suffix}`, + server, + }; + } + + function updateRuntimeHealth(details: Record) { + const current = input.repositories.serverRuntimeState.getByServerId(input.serverId); + const health = current?.health && typeof current.health === "object" ? { ...current.health } : {}; + const runtime = health.runtime && typeof health.runtime === "object" ? { ...(health.runtime as Record) } : {}; + runtime.phase7 = { + ...(runtime.phase7 && typeof runtime.phase7 === "object" ? runtime.phase7 as Record : {}), + ...details, + }; + health.runtime = runtime; + input.repositories.serverRuntimeState.upsert({ + health, + lastExit: current?.lastExit ?? null, + lastStartedAt: current?.lastStartedAt ?? null, + opencodeBaseUrl: current?.opencodeBaseUrl ?? null, + opencodeStatus: current?.opencodeStatus ?? 
"unknown", + opencodeVersion: current?.opencodeVersion ?? null, + restartPolicy: current?.restartPolicy ?? null, + routerStatus: current?.routerStatus ?? "disabled", + routerVersion: current?.routerVersion ?? null, + runtimeVersion: current?.runtimeVersion ?? null, + serverId: input.serverId, + }); + } + + function classifyReloadTrigger(changedPath: string): { reason: ReloadReason; trigger: ReloadTrigger } { + const normalized = changedPath.replace(/\\/g, "/"); + if (normalized.includes("/.opencode/skills/")) { + const parts = normalized.split("/"); + const name = parts[parts.length - 2] ?? "skill"; + return { reason: "skills", trigger: { action: "updated", name, path: changedPath, type: "skill" } }; + } + if (normalized.includes("/.opencode/commands/")) { + const name = path.basename(changedPath).replace(/\.md$/i, ""); + return { reason: "commands", trigger: { action: "updated", name, path: changedPath, type: "command" } }; + } + return { + reason: "config", + trigger: { action: "updated", name: path.basename(changedPath), path: changedPath, type: "config" }, + }; + } + + function startWorkspaceWatchers(workspaceId: string) { + const roots = input.config.listWatchRoots(workspaceId); + const watchers: fs.FSWatcher[] = []; + let timer: ReturnType | null = null; + const schedule = (changedPath: string) => { + const { reason, trigger } = classifyReloadTrigger(changedPath); + reloadEvents.record(workspaceId, reason, trigger); + if (timer) { + clearTimeout(timer); + } + timer = setTimeout(() => { + timer = null; + try { + input.config.absorbWorkspaceConfig(workspaceId); + } catch { + // ignore best-effort watcher repair failures + } + }, 200); + }; + + for (const root of roots) { + if (!fs.existsSync(root)) { + continue; + } + try { + const watcher = fs.watch(root, { persistent: false }, (_eventType, filename) => { + schedule(filename ? 
path.join(root, filename.toString()) : root); + }); + watchers.push(watcher); + } catch { + // ignore unsupported watcher roots + } + } + + const close = () => { + if (timer) { + clearTimeout(timer); + } + for (const watcher of watchers) { + try { + watcher.close(); + } catch { + // ignore + } + } + }; + watcherClosers.set(workspaceId, close); + } + + function startWatchers() { + for (const close of watcherClosers.values()) { + close(); + } + watcherClosers.clear(); + const workspaces = input.repositories.workspaces.list({ includeHidden: true }).filter((workspace) => workspace.kind !== "remote"); + for (const workspace of workspaces) { + startWorkspaceWatchers(workspace.id); + } + updateRuntimeHealth({ + watchedWorkspaceIds: workspaces.map((workspace) => workspace.id), + watchersStartedAt: nowMs(), + }); + } + + function reconcileAll() { + const result = input.config.reconcileAllWorkspaces(); + updateRuntimeHealth({ + lastReconciledAt: result.reconciledAt, + reconciledWorkspaceIds: result.workspaceIds, + }); + return result; + } + + reconcileAll(); + startWatchers(); + const periodicRepair = setInterval(() => { + reconcileAll(); + }, 30_000); + (periodicRepair as any).unref?.(); + + function buildActorKey(actorKey: string | undefined, kind: "client" | "host") { + return `${kind}:${actorKey?.trim() || kind}`; + } + + function resolveFileSession(workspaceId: string, sessionId: string, actorKey: string, actorKind: "client" | "host") { + const session = fileSessions.get(sessionId); + if (!session || session.workspaceId !== workspaceId) { + throw new HTTPException(404, { message: "File session not found." }); + } + if (session.actorTokenHash !== buildActorKey(actorKey, actorKind)) { + throw new HTTPException(403, { message: "File session does not belong to this actor." 
}); + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + return { session, workspace, workspaceRoot: resolveWorkspaceRoot(workspace) }; + } + + async function listArtifacts(rootDir: string) { + if (!fs.existsSync(rootDir)) { + return [] as Array<{ id: string; path: string; size: number; updatedAt: number }>; + } + const items: Array<{ id: string; path: string; size: number; updatedAt: number }> = []; + const walk = (directory: string) => { + for (const entry of fs.readdirSync(directory, { withFileTypes: true })) { + const absolute = path.join(directory, entry.name); + if (entry.isDirectory()) { + walk(absolute); + continue; + } + if (!entry.isFile()) { + continue; + } + const relativePath = normalizeWorkspaceRelativePath(path.relative(rootDir, absolute), { allowSubdirs: true }); + const info = fs.statSync(absolute); + items.push({ + id: encodeArtifactId(relativePath), + path: relativePath, + size: info.size, + updatedAt: info.mtimeMs, + }); + } + }; + walk(rootDir); + items.sort((left, right) => right.updatedAt - left.updatedAt); + return items; + } + + function listWorkspaceCatalogEntries(workspaceRoot: string) { + const items: FileCatalogEntry[] = []; + const visit = (directory: string) => { + for (const entry of fs.readdirSync(directory, { withFileTypes: true }).sort((left, right) => left.name.localeCompare(right.name))) { + const absolute = path.join(directory, entry.name); + const relativePath = normalizeWorkspaceRelativePath(path.relative(workspaceRoot, absolute), { allowSubdirs: true }); + const info = fs.statSync(absolute); + if (entry.isDirectory()) { + items.push({ kind: "dir", mtimeMs: info.mtimeMs, path: relativePath, revision: fileRevision({ mtimeMs: info.mtimeMs, size: 0 }), size: 0 }); + visit(absolute); + continue; + } + if (!entry.isFile()) { + continue; + } + items.push({ kind: "file", mtimeMs: info.mtimeMs, path: relativePath, revision: fileRevision(info), size: info.size }); + } + }; + if 
(fs.existsSync(workspaceRoot)) { + visit(workspaceRoot); + } + items.sort((left, right) => left.path.localeCompare(right.path)); + return items; + } + + async function audit(workspace: WorkspaceRecord, action: string, target: string, summary: string) { + recordAuditEntry(workspace.id, resolveWorkspaceRoot(workspace), action, target, summary); + } + + return { + activateWorkspace(workspaceId: string) { + const workspace = input.repositories.workspaces.getById(workspaceId); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + updateRuntimeHealth({ activeWorkspaceId: workspaceId, activeWorkspaceUpdatedAt: nowIso() }); + return workspaceId; + }, + + deleteWorkspace(workspaceId: string) { + const workspace = input.repositories.workspaces.getById(workspaceId); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + const deleted = input.repositories.workspaces.deleteById(workspaceId); + updateRuntimeHealth({ activeWorkspaceId: null, activeWorkspaceUpdatedAt: nowIso() }); + startWatchers(); + return { deleted, workspaceId }; + }, + + async disposeWorkspaceInstance(workspaceId: string) { + const workspace = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspace.kind === "remote") { + const remote = getRemoteWorkspacePath(workspace, "/engine/reload"); + await requestRemoteOpenwork<{ reloadedAt: number }>({ + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 20_000, + }); + return { disposed: true, workspaceId }; + } + + await input.runtime.dispose(); + await input.runtime.bootstrap(); + updateRuntimeHealth({ activeWorkspaceId: null, activeWorkspaceUpdatedAt: nowIso() }); + reloadEvents.record(workspace.id, "config", { action: "updated", name: "engine", type: "config" }, 0); + await audit(workspace, "engine.dispose", workspace.dataDir ?? 
workspace.id, "Disposed workspace runtime instance through Server V2."); + return { disposed: true, workspaceId }; + }, + + async createLocalWorkspace(inputValue: { folderPath: string; name: string; preset: string }) { + const folderPath = inputValue.folderPath.trim(); + if (!folderPath) { + throw new RouteError(400, "invalid_request", "folderPath is required."); + } + const workspaceRoot = path.resolve(folderPath); + fs.mkdirSync(workspaceRoot, { recursive: true }); + initializeWorkspaceFiles(workspaceRoot); + const record = input.registry.importLocalWorkspace({ + dataDir: workspaceRoot, + displayName: inputValue.name.trim() || path.basename(workspaceRoot), + status: "ready", + }); + input.repositories.workspaces.upsert({ + ...record, + displayName: inputValue.name.trim() || record.displayName, + status: "ready", + }); + input.config.absorbWorkspaceConfig(record.id); + startWatchers(); + updateRuntimeHealth({ activeWorkspaceId: record.id, activeWorkspaceUpdatedAt: nowIso() }); + return resolveWorkspaceOrThrow(input.repositories, record.id); + }, + + createWorkspaceFileSession(workspaceId: string, inputValue: { actorKey?: string; actorKind: "client" | "host"; ttlSeconds?: number; write?: boolean }) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, "/file-sessions"); + return requestRemoteOpenwork<{ + canWrite: boolean; + createdAt: number; + expiresAt: number; + id: string; + ttlMs: number; + workspaceId: string; + }>({ + body: { ttlSeconds: inputValue.ttlSeconds, write: inputValue.write }, + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 15_000, + }); + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + const workspaceRoot = resolveWorkspaceRoot(workspace); + const requestWrite = inputValue.write !== false; + const canWrite = requestWrite && (inputValue.actorKind === 
"host" || inputValue.actorKind === "client"); + const session = fileSessions.create({ + actorTokenHash: buildActorKey(inputValue.actorKey, inputValue.actorKind), + canWrite, + ttlMs: parseFileSessionTtlMs(inputValue.ttlSeconds), + workspaceId, + workspaceRoot, + }); + return { + canWrite: session.canWrite, + createdAt: session.createdAt, + expiresAt: session.expiresAt, + id: session.id, + ttlMs: Math.max(0, session.expiresAt - nowMs()), + workspaceId, + }; + }, + + async dispose() { + clearInterval(periodicRepair); + for (const close of watcherClosers.values()) { + close(); + } + watcherClosers.clear(); + }, + + emitReloadEvent(workspaceId: string, reason: ReloadReason, trigger?: ReloadTrigger) { + const workspace = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspace.kind === "remote") { + return null; + } + return reloadEvents.record(workspaceId, reason, trigger, 0); + }, + + async downloadArtifact(workspaceId: string, artifactId: string) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/artifacts/${encodeURIComponent(artifactId)}`); + const response = await requestRemoteOpenworkRaw({ + path: remote.path, + server: remote.server, + timeoutMs: 30_000, + }); + const buffer = Buffer.from(await response.arrayBuffer()); + return { + buffer, + filename: response.headers.get("content-disposition")?.match(/filename="?([^";]+)"?/)?.[1] ?? artifactId, + size: buffer.byteLength, + }; + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + const rootDir = resolveOutboxDir(resolveWorkspaceRoot(workspace)); + const relativePath = decodeArtifactId(artifactId); + const absolutePath = resolveSafeChildPath(rootDir, relativePath); + if (!fs.existsSync(absolutePath) || !fs.statSync(absolutePath).isFile()) { + throw new HTTPException(404, { message: "Artifact not found." 
}); + } + return { absolutePath, filename: path.basename(relativePath), size: fs.statSync(absolutePath).size }; + }, + + async downloadInboxItem(workspaceId: string, inboxId: string) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/inbox/${encodeURIComponent(inboxId)}`); + const response = await requestRemoteOpenworkRaw({ + path: remote.path, + server: remote.server, + timeoutMs: 30_000, + }); + const buffer = Buffer.from(await response.arrayBuffer()); + return { + buffer, + filename: response.headers.get("content-disposition")?.match(/filename="?([^";]+)"?/)?.[1] ?? inboxId, + size: buffer.byteLength, + }; + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + const rootDir = resolveInboxDir(resolveWorkspaceRoot(workspace)); + const relativePath = decodeArtifactId(inboxId); + const absolutePath = resolveSafeChildPath(rootDir, relativePath); + if (!fs.existsSync(absolutePath) || !fs.statSync(absolutePath).isFile()) { + throw new HTTPException(404, { message: "Inbox item not found." }); + } + return { absolutePath, filename: path.basename(relativePath), size: fs.statSync(absolutePath).size }; + }, + + getReloadEvents(workspaceId: string, since?: number) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/reload-events${typeof since === "number" ? `?since=${since}` : ""}`); + return requestRemoteOpenwork<{ cursor: number; items: ReloadEvent[] }>({ + path: remote.path, + server: remote.server, + timeoutMs: 10_000, + }); + } + resolveWorkspaceOrThrow(input.repositories, workspaceId); + return reloadEvents.list(workspaceId, since ?? 
0); + }, + + async recordWorkspaceAudit(workspaceId: string, action: string, target: string, summary: string) { + const workspace = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspace.kind === "remote") { + return null; + } + await audit(workspace, action, target, summary); + }, + + async listArtifacts(workspaceId: string) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, "/artifacts"); + return requestRemoteOpenwork<{ items: Array> }>({ + path: remote.path, + server: remote.server, + timeoutMs: 15_000, + }); + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + return { items: await listArtifacts(resolveOutboxDir(resolveWorkspaceRoot(workspace))) }; + }, + + async listFileSessionCatalogSnapshot(workspaceId: string, sessionId: string, actorKey: string, actorKind: "client" | "host", inputValue: { after?: string | null; includeDirs?: boolean; limit?: string | null; prefix?: string | null }) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const query = new URLSearchParams(); + if (inputValue.after) query.set("after", inputValue.after); + if (inputValue.includeDirs === false) query.set("includeDirs", "false"); + if (inputValue.limit) query.set("limit", inputValue.limit); + if (inputValue.prefix) query.set("prefix", inputValue.prefix); + const remote = getRemoteWorkspacePath(workspaceRecord, `/file-sessions/${encodeURIComponent(sessionId)}/catalog/snapshot${query.size ? 
`?${query.toString()}` : ""}`); + return requestRemoteOpenwork<{ + cursor: number; + generatedAt: number; + items: FileCatalogEntry[]; + nextAfter?: string; + sessionId: string; + total: number; + truncated: boolean; + workspaceId: string; + }>({ + path: remote.path, + server: remote.server, + timeoutMs: 20_000, + }); + } + const { workspaceRoot } = resolveFileSession(workspaceId, sessionId, actorKey, actorKind); + const prefix = parsePathFilter(inputValue.prefix ?? null); + const after = parsePathFilter(inputValue.after ?? null); + const includeDirs = inputValue.includeDirs !== false; + const limit = parseCatalogLimit(inputValue.limit ?? null); + const entries = listWorkspaceCatalogEntries(workspaceRoot).filter((entry) => { + if (!includeDirs && entry.kind === "dir") return false; + if (!matchesCatalogFilter(entry.path, prefix)) return false; + if (after && entry.path <= after) return false; + return true; + }); + const items = entries.slice(0, limit); + const cursor = fileSessions.listWorkspaceEvents(workspaceId, Number.MAX_SAFE_INTEGER).cursor; + return { + cursor, + generatedAt: nowMs(), + items, + nextAfter: entries.length > items.length ? items[items.length - 1]?.path : undefined, + sessionId, + total: entries.length, + truncated: entries.length > items.length, + workspaceId, + }; + }, + + listFileSessionEvents(workspaceId: string, sessionId: string, actorKey: string, actorKind: "client" | "host", since?: string | null) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/file-sessions/${encodeURIComponent(sessionId)}/catalog/events${since?.trim() ? 
`?since=${encodeURIComponent(since.trim())}` : ""}`); + return requestRemoteOpenwork<{ cursor: number; items: FileSessionEvent[] }>({ + path: remote.path, + server: remote.server, + timeoutMs: 10_000, + }); + } + resolveFileSession(workspaceId, sessionId, actorKey, actorKind); + return fileSessions.listWorkspaceEvents(workspaceId, parseCursor(since ?? null)); + }, + + async listInbox(workspaceId: string) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, "/inbox"); + return requestRemoteOpenwork<{ items: Array> }>({ + path: remote.path, + server: remote.server, + timeoutMs: 15_000, + }); + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + const items = await listArtifacts(resolveInboxDir(resolveWorkspaceRoot(workspace))); + return { + items: items.map((item) => ({ ...item, id: encodeArtifactId(item.path), name: path.basename(item.path) })), + }; + }, + + async readSimpleContent(workspaceId: string, relativePathInput: string) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/files/content?path=${encodeURIComponent(relativePathInput)}`); + return requestRemoteOpenwork<{ bytes: number; content: string; path: string; updatedAt: number; revision?: string }>({ + path: remote.path, + server: remote.server, + timeoutMs: 15_000, + }); + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + const relativePath = normalizeWorkspaceRelativePath(relativePathInput, { allowSubdirs: true }); + if (!/\.(md|mdx|markdown)$/i.test(relativePath)) { + throw new RouteError(400, "invalid_request", "Only markdown files are supported by the simple content routes."); + } + const absolutePath = resolveSafeChildPath(resolveWorkspaceRoot(workspace), 
relativePath); + if (!fs.existsSync(absolutePath) || !fs.statSync(absolutePath).isFile()) { + throw new HTTPException(404, { message: "File not found." }); + } + const info = fs.statSync(absolutePath); + if (info.size > FILE_SESSION_MAX_FILE_BYTES) { + throw new RouteError(413, "invalid_request", "File exceeds the maximum supported size."); + } + return { + bytes: info.size, + content: fs.readFileSync(absolutePath, "utf8"), + path: relativePath, + updatedAt: info.mtimeMs, + }; + }, + + async readWorkspaceFiles(workspaceId: string, sessionId: string, actorKey: string, actorKind: "client" | "host", paths: unknown) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/file-sessions/${encodeURIComponent(sessionId)}/read-batch`); + return requestRemoteOpenwork<{ items: Array> }>({ + body: { paths }, + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 20_000, + }); + } + const { workspaceRoot } = resolveFileSession(workspaceId, sessionId, actorKey, actorKind); + const items = parseBatchPathList(paths).map((relativePath) => { + try { + const absolutePath = resolveSafeChildPath(workspaceRoot, relativePath); + if (!fs.existsSync(absolutePath) || !fs.statSync(absolutePath).isFile()) { + return { code: "file_not_found", message: "File not found", ok: false, path: relativePath }; + } + const info = fs.statSync(absolutePath); + if (info.size > FILE_SESSION_MAX_FILE_BYTES) { + return { code: "file_too_large", maxBytes: FILE_SESSION_MAX_FILE_BYTES, message: "File exceeds size limit", ok: false, path: relativePath, size: info.size }; + } + return { + bytes: info.size, + contentBase64: fs.readFileSync(absolutePath).toString("base64"), + kind: "file", + ok: true, + path: relativePath, + revision: fileRevision(info), + updatedAt: info.mtimeMs, + }; + } catch (error) { + return { code: "read_failed", message: error 
instanceof Error ? error.message : "Unable to read file", ok: false, path: relativePath }; + } + }); + return { items }; + }, + + async reloadWorkspaceEngine(workspaceId: string) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, "/engine/reload"); + return requestRemoteOpenwork<{ reloadedAt: number }>({ + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 20_000, + }); + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + await input.runtime.dispose(); + await input.runtime.bootstrap(); + reloadEvents.record(workspace.id, "config", { action: "updated", name: "engine", type: "config" }, 0); + await audit(workspace, "engine.reload", workspace.dataDir ?? workspace.id, "Reloaded workspace engine through Server V2."); + return { reloadedAt: nowMs() }; + }, + + reconcileAll, + + updateWorkspaceDisplayName(workspaceId: string, displayName: string | null) { + const workspace = input.repositories.workspaces.getById(workspaceId); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + return input.repositories.workspaces.upsert({ + ...workspace, + displayName: displayName?.trim() || workspace.displayName, + }); + }, + + renewWorkspaceFileSession(workspaceId: string, sessionId: string, actorKey: string, actorKind: "client" | "host", ttlSeconds?: number) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/file-sessions/${encodeURIComponent(sessionId)}/renew`); + return requestRemoteOpenwork<{ + canWrite: boolean; + createdAt: number; + expiresAt: number; + id: string; + ttlMs: number; + workspaceId: string; + }>({ + body: ttlSeconds === undefined ? 
{} : { ttlSeconds }, + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 10_000, + }); + } + resolveFileSession(workspaceId, sessionId, actorKey, actorKind); + const renewed = fileSessions.renew(sessionId, parseFileSessionTtlMs(ttlSeconds)); + if (!renewed) { + throw new HTTPException(404, { message: "File session not found." }); + } + return { + canWrite: renewed.canWrite, + createdAt: renewed.createdAt, + expiresAt: renewed.expiresAt, + id: renewed.id, + ttlMs: Math.max(0, renewed.expiresAt - nowMs()), + workspaceId, + }; + }, + + closeWorkspaceFileSession(workspaceId: string, sessionId: string, actorKey: string, actorKind: "client" | "host") { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/file-sessions/${encodeURIComponent(sessionId)}`); + return requestRemoteOpenwork<{ closed?: boolean }>({ + method: "DELETE", + path: remote.path, + server: remote.server, + timeoutMs: 10_000, + }); + } + resolveFileSession(workspaceId, sessionId, actorKey, actorKind); + fileSessions.close(sessionId); + return { closed: true }; + }, + + async uploadInboxItem(workspaceId: string, requestedPath: string, file: File) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const form = new FormData(); + form.append("file", file); + if (requestedPath.trim()) { + form.append("path", requestedPath.trim()); + } + const remote = getRemoteWorkspacePath(workspaceRecord, "/inbox"); + const response = await requestRemoteOpenworkRaw({ + body: form, + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 30_000, + }); + const text = await response.text(); + return text.trim() ? 
JSON.parse(text) as { bytes: number; path: string } : { bytes: file.size, path: requestedPath || file.name }; + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + const rootDir = resolveInboxDir(resolveWorkspaceRoot(workspace)); + const relativePath = normalizeWorkspaceRelativePath(requestedPath || file.name, { allowSubdirs: true }); + if (file.size > FILE_SESSION_MAX_FILE_BYTES) { + throw new RouteError(413, "invalid_request", "File exceeds the maximum supported size."); + } + const absolutePath = resolveSafeChildPath(rootDir, relativePath); + fs.mkdirSync(path.dirname(absolutePath), { recursive: true }); + fs.writeFileSync(absolutePath, Buffer.from(await file.arrayBuffer())); + await audit(workspace, "workspace.inbox.upload", absolutePath, `Uploaded ${relativePath} to the workspace inbox.`); + return { bytes: file.size, path: relativePath }; + }, + + async writeSimpleContent(workspaceId: string, inputValue: { baseUpdatedAt?: number | null; content: string; force?: boolean; path: string }) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, "/files/content"); + return requestRemoteOpenwork<{ bytes: number; path: string; revision?: string; updatedAt: number }>({ + body: inputValue, + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 20_000, + }); + } + const workspace = resolveWorkspaceOrThrow(input.repositories, workspaceId); + const relativePath = normalizeWorkspaceRelativePath(inputValue.path, { allowSubdirs: true }); + if (!/\.(md|mdx|markdown)$/i.test(relativePath)) { + throw new RouteError(400, "invalid_request", "Only markdown files are supported by the simple content routes."); + } + const absolutePath = resolveSafeChildPath(resolveWorkspaceRoot(workspace), relativePath); + const before = fs.existsSync(absolutePath) ? 
fs.statSync(absolutePath) : null; + if (before && !before.isFile()) { + throw new RouteError(400, "invalid_request", "Path must point to a file."); + } + if (!inputValue.force && before && inputValue.baseUpdatedAt !== undefined && inputValue.baseUpdatedAt !== null && before.mtimeMs !== inputValue.baseUpdatedAt) { + throw new RouteError(409, "conflict", "File changed since it was loaded."); + } + if (Buffer.byteLength(inputValue.content, "utf8") > FILE_SESSION_MAX_FILE_BYTES) { + throw new RouteError(413, "invalid_request", "File exceeds the maximum supported size."); + } + fs.mkdirSync(path.dirname(absolutePath), { recursive: true }); + fs.writeFileSync(absolutePath, inputValue.content, "utf8"); + const after = fs.statSync(absolutePath); + fileSessions.recordWorkspaceEvent({ path: relativePath, revision: fileRevision(after), type: "write", workspaceId }); + reloadEvents.record(workspaceId, "config", { action: "updated", name: path.basename(relativePath), path: absolutePath, type: "config" }); + await audit(workspace, "workspace.file.write", absolutePath, `Wrote ${relativePath} through the simple content route.`); + return { bytes: Buffer.byteLength(inputValue.content, "utf8"), path: relativePath, revision: fileRevision(after), updatedAt: after.mtimeMs }; + }, + + async writeWorkspaceFiles(workspaceId: string, sessionId: string, actorKey: string, actorKind: "client" | "host", writes: unknown) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/file-sessions/${encodeURIComponent(sessionId)}/write-batch`); + return requestRemoteOpenwork<{ cursor: number; items: Array> }>({ + body: { writes }, + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 30_000, + }); + } + const { session, workspace, workspaceRoot } = resolveFileSession(workspaceId, sessionId, actorKey, actorKind); + if (!session.canWrite) { + 
throw new HTTPException(403, { message: "File session is read-only." }); + } + const items: Array> = []; + for (const write of parseBatchWriteList(writes)) { + try { + const absolutePath = resolveSafeChildPath(workspaceRoot, write.path); + const bytes = Buffer.from(write.contentBase64, "base64"); + if (bytes.byteLength > FILE_SESSION_MAX_FILE_BYTES) { + items.push({ code: "file_too_large", maxBytes: FILE_SESSION_MAX_FILE_BYTES, message: "File exceeds size limit", ok: false, path: write.path, size: bytes.byteLength }); + continue; + } + const before = fs.existsSync(absolutePath) ? fs.statSync(absolutePath) : null; + const currentRevision = before ? fileRevision(before) : null; + if (!write.force && write.ifMatchRevision && currentRevision !== write.ifMatchRevision) { + items.push({ code: "conflict", currentRevision, expectedRevision: write.ifMatchRevision, message: "File changed since it was loaded", ok: false, path: write.path }); + continue; + } + fs.mkdirSync(path.dirname(absolutePath), { recursive: true }); + fs.writeFileSync(absolutePath, bytes); + const after = fs.statSync(absolutePath); + const revision = fileRevision(after); + fileSessions.recordWorkspaceEvent({ path: write.path, revision, type: "write", workspaceId }); + reloadEvents.record(workspaceId, "config", { action: "updated", name: path.basename(write.path), path: absolutePath, type: "config" }); + await audit(workspace, "workspace.files.session.write", absolutePath, `Wrote ${write.path} through a file session.`); + items.push({ bytes: bytes.byteLength, ok: true, path: write.path, previousRevision: currentRevision, revision, updatedAt: after.mtimeMs }); + } catch (error) { + items.push({ code: "write_failed", message: error instanceof Error ? 
error.message : "Failed to write file", ok: false, path: write.path }); + } + } + return { cursor: fileSessions.listWorkspaceEvents(workspaceId, Number.MAX_SAFE_INTEGER).cursor, items }; + }, + + async workspaceFileOperations(workspaceId: string, sessionId: string, actorKey: string, actorKind: "client" | "host", operations: unknown) { + const workspaceRecord = resolveWorkspaceRecordOrThrow(input.repositories, workspaceId); + if (workspaceRecord.kind === "remote") { + const remote = getRemoteWorkspacePath(workspaceRecord, `/file-sessions/${encodeURIComponent(sessionId)}/operations`); + return requestRemoteOpenwork<{ cursor: number; items: Array> }>({ + body: { operations }, + method: "POST", + path: remote.path, + server: remote.server, + timeoutMs: 30_000, + }); + } + const { session, workspace, workspaceRoot } = resolveFileSession(workspaceId, sessionId, actorKey, actorKind); + if (!session.canWrite) { + throw new HTTPException(403, { message: "File session is read-only." }); + } + const items: Array> = []; + for (const operation of parseOperations(operations)) { + const type = String(operation.type ?? "").trim(); + try { + if (type === "mkdir") { + const relativePath = normalizeWorkspaceRelativePath(String(operation.path ?? ""), { allowSubdirs: true }); + const absolutePath = resolveSafeChildPath(workspaceRoot, relativePath); + fs.mkdirSync(absolutePath, { recursive: true }); + fileSessions.recordWorkspaceEvent({ path: relativePath, type: "mkdir", workspaceId }); + reloadEvents.record(workspaceId, "config", { action: "updated", name: path.basename(relativePath), path: absolutePath, type: "config" }); + items.push({ ok: true, path: relativePath, type }); + continue; + } + if (type === "delete") { + const relativePath = normalizeWorkspaceRelativePath(String(operation.path ?? 
""), { allowSubdirs: true }); + const absolutePath = resolveSafeChildPath(workspaceRoot, relativePath); + if (!fs.existsSync(absolutePath)) { + items.push({ code: "file_not_found", message: "Path not found", ok: false, path: relativePath, type }); + continue; + } + fs.rmSync(absolutePath, { force: false, recursive: operation.recursive === true }); + fileSessions.recordWorkspaceEvent({ path: relativePath, type: "delete", workspaceId }); + reloadEvents.record(workspaceId, "config", { action: "removed", name: path.basename(relativePath), path: absolutePath, type: "config" }); + await audit(workspace, "workspace.files.session.delete", absolutePath, `Deleted ${relativePath} through a file session.`); + items.push({ ok: true, path: relativePath, type }); + continue; + } + if (type === "rename") { + const from = normalizeWorkspaceRelativePath(String(operation.from ?? ""), { allowSubdirs: true }); + const to = normalizeWorkspaceRelativePath(String(operation.to ?? ""), { allowSubdirs: true }); + const fromAbsolute = resolveSafeChildPath(workspaceRoot, from); + const toAbsolute = resolveSafeChildPath(workspaceRoot, to); + if (!fs.existsSync(fromAbsolute)) { + items.push({ code: "file_not_found", from, message: "Source path not found", ok: false, to, type }); + continue; + } + fs.mkdirSync(path.dirname(toAbsolute), { recursive: true }); + fs.renameSync(fromAbsolute, toAbsolute); + fileSessions.recordWorkspaceEvent({ path: from, toPath: to, type: "rename", workspaceId }); + reloadEvents.record(workspaceId, "config", { action: "updated", name: path.basename(to), path: toAbsolute, type: "config" }); + await audit(workspace, "workspace.files.session.rename", `${fromAbsolute} -> ${toAbsolute}`, `Renamed ${from} to ${to} through a file session.`); + items.push({ from, ok: true, to, type }); + continue; + } + items.push({ code: "invalid_operation", message: `Unsupported operation type: ${type}`, ok: false, type }); + } catch (error) { + items.push({ code: "operation_failed", 
message: error instanceof Error ? error.message : "Operation failed", ok: false, type }); + } + } + return { cursor: fileSessions.listWorkspaceEvents(workspaceId, Number.MAX_SAFE_INTEGER).cursor, items }; + }, + }; +} diff --git a/apps/server-v2/src/services/workspace-registry-service.ts b/apps/server-v2/src/services/workspace-registry-service.ts new file mode 100644 index 00000000..a04eb30e --- /dev/null +++ b/apps/server-v2/src/services/workspace-registry-service.ts @@ -0,0 +1,191 @@ +import type { ServerRegistryService } from "./server-registry-service.js"; +import type { ServerRepositories } from "../database/repositories.js"; +import type { + BackendKind, + JsonObject, + WorkspaceKind, + WorkspaceRecord, + WorkspaceRuntimeStateRecord, +} from "../database/types.js"; + +type WorkspacePreset = "minimal" | "remote" | "starter"; + +export type WorkspaceBackend = { + kind: BackendKind; + local: null | { + configDir: string | null; + dataDir: string | null; + opencodeProjectId: string | null; + }; + remote: null | { + directory: string | null; + hostUrl: string | null; + remoteType: "openwork" | "opencode"; + remoteWorkspaceId: string | null; + workspaceName: string | null; + }; + serverId: string; +}; + +export type WorkspaceRuntimeSummary = { + backendKind: BackendKind; + health: JsonObject | null; + lastError: JsonObject | null; + lastSessionRefreshAt: string | null; + lastSyncAt: string | null; + updatedAt: string | null; +}; + +export type WorkspaceSummary = { + backend: WorkspaceBackend; + createdAt: string; + displayName: string; + hidden: boolean; + id: string; + kind: WorkspaceKind; + preset: WorkspacePreset; + runtime: WorkspaceRuntimeSummary; + server: ReturnType; + slug: string; + status: WorkspaceRecord["status"]; + updatedAt: string; +}; + +export type WorkspaceDetail = WorkspaceSummary & { + notes: JsonObject | null; +}; + +export type WorkspaceRegistryService = ReturnType; + +function asJsonObject(value: unknown): JsonObject | null { + if (!value || 
typeof value !== "object" || Array.isArray(value)) { + return null; + } + + return value as JsonObject; +} + +function readPreset(workspace: WorkspaceRecord): WorkspacePreset { + const legacyDesktop = asJsonObject(workspace.notes?.legacyDesktop); + const preset = typeof legacyDesktop?.preset === "string" ? legacyDesktop.preset.trim().toLowerCase() : ""; + if (preset === "minimal" || preset === "starter") { + return preset; + } + + return workspace.kind === "remote" ? "remote" : "starter"; +} + +function readRemoteDirectory(workspace: WorkspaceRecord) { + const value = workspace.notes?.directory; + return typeof value === "string" && value.trim() ? value.trim() : null; +} + +function readRemoteType(workspace: WorkspaceRecord): "openwork" | "opencode" { + const explicit = workspace.notes?.remoteType; + return explicit === "opencode" ? "opencode" : "openwork"; +} + +function readRemoteWorkspaceName(workspace: WorkspaceRecord) { + const legacyDesktop = asJsonObject(workspace.notes?.legacyDesktop); + const value = legacyDesktop?.openworkWorkspaceName; + return typeof value === "string" && value.trim() ? value.trim() : null; +} + +function serializeRuntimeState(runtimeState: WorkspaceRuntimeStateRecord | null, backendKind: BackendKind): WorkspaceRuntimeSummary { + return { + backendKind, + health: runtimeState?.health ?? null, + lastError: runtimeState?.lastError ?? null, + lastSessionRefreshAt: runtimeState?.lastSessionRefreshAt ?? null, + lastSyncAt: runtimeState?.lastSyncAt ?? null, + updatedAt: runtimeState?.updatedAt ?? null, + }; +} + +export function createWorkspaceRegistryService(input: { + repositories: ServerRepositories; + servers: ServerRegistryService; +}) { + const { repositories } = input; + + function resolveBackend(workspace: WorkspaceRecord): WorkspaceBackend { + const runtimeState = repositories.workspaceRuntimeState.getByWorkspaceId(workspace.id); + const backendKind = runtimeState?.backendKind ?? (workspace.kind === "remote" ? 
"remote_openwork" : "local_opencode"); + + if (backendKind === "remote_openwork") { + const server = input.servers.getById(workspace.serverId); + return { + kind: backendKind, + local: null, + remote: { + directory: readRemoteDirectory(workspace), + hostUrl: server?.baseUrl ?? null, + remoteType: readRemoteType(workspace), + remoteWorkspaceId: workspace.remoteWorkspaceId, + workspaceName: readRemoteWorkspaceName(workspace), + }, + serverId: workspace.serverId, + }; + } + + return { + kind: "local_opencode", + local: { + configDir: workspace.configDir, + dataDir: workspace.dataDir, + opencodeProjectId: workspace.opencodeProjectId, + }, + remote: null, + serverId: workspace.serverId, + }; + } + + function serializeWorkspace(workspace: WorkspaceRecord) { + const server = input.servers.getById(workspace.serverId); + if (!server) { + throw new Error(`Workspace ${workspace.id} points at missing server ${workspace.serverId}.`); + } + + const backend = resolveBackend(workspace); + const runtimeState = repositories.workspaceRuntimeState.getByWorkspaceId(workspace.id); + return { + backend, + createdAt: workspace.createdAt, + displayName: workspace.displayName, + hidden: workspace.isHidden, + id: workspace.id, + kind: workspace.kind, + notes: workspace.notes, + preset: readPreset(workspace), + runtime: serializeRuntimeState(runtimeState, backend.kind), + server: input.servers.serialize(server, { includeBaseUrl: false }), + slug: workspace.slug, + status: workspace.status, + updatedAt: workspace.updatedAt, + } satisfies WorkspaceDetail; + } + + function canReadWorkspace(workspace: WorkspaceRecord, options?: { includeHidden?: boolean }) { + return options?.includeHidden === true || !workspace.isHidden; + } + + return { + getById(workspaceId: string, options?: { includeHidden?: boolean }) { + const workspace = repositories.workspaces.getById(workspaceId); + if (!workspace || !canReadWorkspace(workspace, options)) { + return null; + } + return serializeWorkspace(workspace); + }, 
+ + list(options?: { includeHidden?: boolean }) { + return repositories.workspaces + .list({ includeHidden: options?.includeHidden ?? false }) + .filter((workspace) => canReadWorkspace(workspace, options)) + .map((workspace) => serializeWorkspace(workspace)); + }, + + resolveBackend, + serializeWorkspace, + }; +} diff --git a/apps/server-v2/src/services/workspace-session-service.ts b/apps/server-v2/src/services/workspace-session-service.ts new file mode 100644 index 00000000..5c7fdfcd --- /dev/null +++ b/apps/server-v2/src/services/workspace-session-service.ts @@ -0,0 +1,272 @@ +import { HTTPException } from "hono/http-exception"; +import type { ServerRepositories } from "../database/repositories.js"; +import type { WorkspaceRecord } from "../database/types.js"; +import { RouteError } from "../http.js"; +import type { + SessionMessageRecord, + SessionRecord, + SessionSnapshotRecord, + SessionStatusRecord, + SessionTodoRecord, + WorkspaceEventRecord, +} from "../schemas/sessions.js"; +import type { RuntimeService } from "./runtime-service.js"; +import { createLocalOpencodeSessionAdapter } from "../adapters/sessions/local-opencode.js"; +import { OpenCodeBackendError } from "../adapters/sessions/opencode-backend.js"; +import { createRemoteOpenworkSessionAdapter } from "../adapters/sessions/remote-openwork.js"; + +type SessionBackend = ReturnType; + +function toBackendKind(workspace: WorkspaceRecord) { + return workspace.kind === "remote" ? "remote_openwork" : "local_opencode"; +} + +function readRuntimeState(repositories: ServerRepositories, workspace: WorkspaceRecord) { + return repositories.workspaceRuntimeState.getByWorkspaceId(workspace.id); +} + +function recordSuccess(repositories: ServerRepositories, workspace: WorkspaceRecord, input: { refresh?: boolean; sync?: boolean }) { + const current = readRuntimeState(repositories, workspace); + const now = new Date().toISOString(); + repositories.workspaceRuntimeState.upsert({ + backendKind: current?.backendKind ?? 
toBackendKind(workspace), + health: current?.health ?? null, + lastError: null, + lastSessionRefreshAt: input.refresh ? now : current?.lastSessionRefreshAt ?? null, + lastSyncAt: input.sync ? now : current?.lastSyncAt ?? null, + workspaceId: workspace.id, + }); +} + +function recordError(repositories: ServerRepositories, workspace: WorkspaceRecord, error: RouteError | Error) { + const current = readRuntimeState(repositories, workspace); + repositories.workspaceRuntimeState.upsert({ + backendKind: current?.backendKind ?? toBackendKind(workspace), + health: current?.health ?? null, + lastError: { + code: error instanceof RouteError ? error.code : "internal_error", + message: error.message, + recordedAt: new Date().toISOString(), + }, + lastSessionRefreshAt: current?.lastSessionRefreshAt ?? null, + lastSyncAt: current?.lastSyncAt ?? null, + workspaceId: workspace.id, + }); +} + +function remapBackendError(error: unknown) { + if (error instanceof RouteError) { + throw error; + } + + if (error instanceof OpenCodeBackendError) { + if (error.status === 400) { + throw new RouteError(400, "invalid_request", "Upstream session backend rejected the request."); + } + if (error.status === 404) { + throw new HTTPException(404, { message: "Requested session resource was not found." }); + } + if (error.status === 501) { + throw new RouteError(501, "not_implemented", error.message || "Session operation is not supported by the resolved backend."); + } + throw new RouteError(502, "bad_gateway", error.message || "Resolved session backend request failed."); + } + + if (error instanceof HTTPException) { + throw error; + } + + throw new RouteError(500, "internal_error", error instanceof Error ? 
error.message : "Unexpected session service failure."); +} + +export type WorkspaceSessionService = ReturnType; + +export function createWorkspaceSessionService(input: { + repositories: ServerRepositories; + runtime: RuntimeService; +}) { + function getWorkspaceOrThrow(workspaceId: string) { + const workspace = input.repositories.workspaces.getById(workspaceId); + if (!workspace) { + throw new HTTPException(404, { message: `Workspace not found: ${workspaceId}` }); + } + return workspace; + } + + function resolveBackend(workspace: WorkspaceRecord): SessionBackend { + if (workspace.kind === "remote") { + const server = input.repositories.servers.getById(workspace.serverId); + if (!server) { + throw new RouteError(502, "bad_gateway", `Workspace ${workspace.id} points at missing server ${workspace.serverId}.`); + } + return createRemoteOpenworkSessionAdapter({ server, workspace }); + } + + return createLocalOpencodeSessionAdapter({ runtime: input.runtime, workspace }); + } + + async function runRead(workspaceId: string, operation: (backend: SessionBackend) => Promise) { + const workspace = getWorkspaceOrThrow(workspaceId); + try { + const result = await operation(resolveBackend(workspace)); + recordSuccess(input.repositories, workspace, { refresh: true }); + return result; + } catch (error) { + const remapped = (() => { + try { + remapBackendError(error); + } catch (next) { + return next; + } + return error; + })(); + recordError(input.repositories, workspace, remapped as Error); + throw remapped; + } + } + + async function runMutation(workspaceId: string, operation: (backend: SessionBackend) => Promise) { + const workspace = getWorkspaceOrThrow(workspaceId); + try { + const result = await operation(resolveBackend(workspace)); + recordSuccess(input.repositories, workspace, { refresh: true, sync: true }); + return result; + } catch (error) { + const remapped = (() => { + try { + remapBackendError(error); + } catch (next) { + return next; + } + return error; + })(); + 
recordError(input.repositories, workspace, remapped as Error); + throw remapped; + } + } + + return { + abortSession(workspaceId: string, sessionId: string) { + return runMutation(workspaceId, (backend) => backend.abortSession(sessionId)); + }, + + command(workspaceId: string, sessionId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.command(sessionId, body)); + }, + + createSession(workspaceId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.createSession(body)); + }, + + deleteMessage(workspaceId: string, sessionId: string, messageId: string) { + return runMutation(workspaceId, (backend) => backend.deleteMessage(sessionId, messageId)); + }, + + deleteMessagePart(workspaceId: string, sessionId: string, messageId: string, partId: string) { + return runMutation(workspaceId, (backend) => backend.deleteMessagePart(sessionId, messageId, partId)); + }, + + deleteSession(workspaceId: string, sessionId: string) { + return runMutation(workspaceId, (backend) => backend.deleteSession(sessionId)); + }, + + forkSession(workspaceId: string, sessionId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.forkSession(sessionId, body)); + }, + + getMessage(workspaceId: string, sessionId: string, messageId: string): Promise { + return runRead(workspaceId, (backend) => backend.getMessage(sessionId, messageId)); + }, + + getSession(workspaceId: string, sessionId: string): Promise { + return runRead(workspaceId, (backend) => backend.getSession(sessionId)); + }, + + getSessionSnapshot(workspaceId: string, sessionId: string, input?: { limit?: number }): Promise { + return runRead(workspaceId, (backend) => backend.getSessionSnapshot(sessionId, input)); + }, + + async getSessionStatus(workspaceId: string, sessionId: string): Promise { + const statuses = await runRead(workspaceId, (backend) => backend.listStatuses()); + return statuses[sessionId] ?? 
{ type: "idle" }; + }, + + initSession(workspaceId: string, sessionId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.initSession(sessionId, body)); + }, + + listMessages(workspaceId: string, sessionId: string, input?: { limit?: number }): Promise { + return runRead(workspaceId, (backend) => backend.listMessages(sessionId, input)); + }, + + listSessions(workspaceId: string, input?: { limit?: number; roots?: boolean; search?: string; start?: number }): Promise { + return runRead(workspaceId, (backend) => backend.listSessions(input)); + }, + + listSessionStatuses(workspaceId: string): Promise> { + return runRead(workspaceId, (backend) => backend.listStatuses()); + }, + + listTodos(workspaceId: string, sessionId: string): Promise { + return runRead(workspaceId, (backend) => backend.listTodos(sessionId)); + }, + + promptAsync(workspaceId: string, sessionId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.promptAsync(sessionId, body)); + }, + + revert(workspaceId: string, sessionId: string, body: { messageID: string }) { + return runMutation(workspaceId, (backend) => backend.revert(sessionId, body)); + }, + + sendMessage(workspaceId: string, sessionId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.sendMessage(sessionId, body)); + }, + + shareSession(workspaceId: string, sessionId: string) { + return runMutation(workspaceId, (backend) => backend.shareSession(sessionId)); + }, + + shell(workspaceId: string, sessionId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.shell(sessionId, body)); + }, + + async streamWorkspaceEvents(workspaceId: string, signal?: AbortSignal): Promise> { + const workspace = getWorkspaceOrThrow(workspaceId); + try { + return await resolveBackend(workspace).streamEvents(signal); + } catch (error) { + const remapped = (() => { + try { + remapBackendError(error); + } catch (next) { + return next; + } + return error; + })(); + 
recordError(input.repositories, workspace, remapped as Error); + throw remapped; + } + }, + + summarizeSession(workspaceId: string, sessionId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.summarizeSession(sessionId, body)); + }, + + unshareSession(workspaceId: string, sessionId: string) { + return runMutation(workspaceId, (backend) => backend.unshareSession(sessionId)); + }, + + unrevert(workspaceId: string, sessionId: string) { + return runMutation(workspaceId, (backend) => backend.unrevert(sessionId)); + }, + + updateMessagePart(workspaceId: string, sessionId: string, messageId: string, partId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.updateMessagePart(sessionId, messageId, partId, body)); + }, + + updateSession(workspaceId: string, sessionId: string, body: Record) { + return runMutation(workspaceId, (backend) => backend.updateSession(sessionId, body)); + }, + }; +} diff --git a/apps/server-v2/src/sessions.test.ts b/apps/server-v2/src/sessions.test.ts new file mode 100644 index 00000000..71d73db4 --- /dev/null +++ b/apps/server-v2/src/sessions.test.ts @@ -0,0 +1,308 @@ +import { afterEach, describe, expect, test } from "bun:test"; +import { createApp } from "./app.js"; +import { createAppDependencies } from "./context/app-dependencies.js"; + +type Served = { + port: number; + stop: (closeActiveConnections?: boolean) => void | Promise; +}; + +const stops: Array<() => void | Promise> = []; + +afterEach(async () => { + while (stops.length) { + await stops.pop()?.(); + } +}); + +function createTestApp() { + const dependencies = createAppDependencies({ + environment: "test", + inMemory: true, + legacy: { + desktopDataDir: `/tmp/openwork-server-v2-phase6-desktop-${Math.random().toString(16).slice(2)}`, + orchestratorDataDir: `/tmp/openwork-server-v2-phase6-orchestrator-${Math.random().toString(16).slice(2)}`, + }, + runtime: { + bootstrapPolicy: "disabled", + }, + startedAt: new 
Date("2026-04-14T00:00:00.000Z"), + version: "0.0.0-test", + }); + + return { + app: createApp({ dependencies }), + dependencies, + }; +} + +function withMockOpencodeBaseUrl(dependencies: ReturnType, baseUrl: string) { + dependencies.services.runtime.getOpencodeHealth = () => ({ + baseUrl, + binaryPath: null, + diagnostics: { combined: [], stderr: [], stdout: [], totalLines: 0, truncated: false }, + lastError: null, + lastExit: null, + lastReadyAt: null, + lastStartedAt: null, + manifest: null, + pid: 123, + running: true, + source: "development", + status: "running", + version: "1.2.3", + }); +} + +function startMockOpencode(options?: { expectBearer?: string; mountPrefix?: string }) { + const requests: Array<{ method: string; pathname: string; authorization: string | null; body: unknown }> = []; + const prefix = options?.mountPrefix?.replace(/\/+$/, "") ?? ""; + + const server = Bun.serve({ + hostname: "127.0.0.1", + port: 0, + fetch(request) { + const url = new URL(request.url); + const pathname = prefix && url.pathname.startsWith(prefix) ? 
url.pathname.slice(prefix.length) || "/" : url.pathname; + const authorization = request.headers.get("authorization"); + requests.push({ method: request.method, pathname, authorization, body: null }); + + if (options?.expectBearer) { + expect(authorization).toBe(`Bearer ${options.expectBearer}`); + } + + if (pathname === "/event") { + const stream = new ReadableStream({ + start(controller) { + controller.enqueue(`data: ${JSON.stringify({ type: "session.status", properties: { sessionID: "ses_1", status: { type: "busy" } } })}\n\n`); + controller.enqueue(`data: ${JSON.stringify({ type: "session.idle", properties: { sessionID: "ses_1" } })}\n\n`); + controller.close(); + }, + }); + return new Response(stream, { + headers: { + "Content-Type": "text/event-stream", + }, + }); + } + + if (pathname === "/session" && request.method === "GET") { + return Response.json([ + { + id: "ses_1", + title: "Session One", + directory: "/tmp/workspace", + time: { created: 100, updated: 200 }, + }, + ]); + } + + if (pathname === "/session/status" && request.method === "GET") { + return Response.json({ ses_1: { type: "busy" } }); + } + + if (pathname === "/session" && request.method === "POST") { + return Response.json({ + id: "ses_created", + title: "Created Session", + directory: "/tmp/workspace", + time: { created: 300, updated: 300 }, + }); + } + + if (pathname === "/session/ses_1" && request.method === "GET") { + return Response.json({ + id: "ses_1", + title: "Session One", + directory: "/tmp/workspace", + time: { created: 100, updated: 200 }, + }); + } + + if (pathname === "/session/ses_1" && request.method === "PATCH") { + return Response.json({ + id: "ses_1", + title: "Renamed Session", + directory: "/tmp/workspace", + time: { created: 100, updated: 250 }, + }); + } + + if (pathname === "/session/ses_1" && request.method === "DELETE") { + return new Response(null, { status: 204 }); + } + + if (pathname === "/session/ses_1/message" && request.method === "GET") { + return 
Response.json([ + { + info: { + id: "msg_1", + role: "assistant", + sessionID: "ses_1", + }, + parts: [ + { + id: "prt_1", + messageID: "msg_1", + sessionID: "ses_1", + type: "text", + text: "hello", + }, + ], + }, + ]); + } + + if (pathname === "/session/ses_1/message/msg_1" && request.method === "GET") { + return Response.json({ + info: { + id: "msg_1", + role: "assistant", + sessionID: "ses_1", + }, + parts: [ + { + id: "prt_1", + messageID: "msg_1", + sessionID: "ses_1", + type: "text", + text: "hello", + }, + ], + }); + } + + if (pathname === "/session/ses_1/todo" && request.method === "GET") { + return Response.json([ + { content: "Ship Phase 6", priority: "high", status: "completed" }, + ]); + } + + if (pathname === "/session/ses_1/prompt_async" && request.method === "POST") { + return Response.json({ ok: true }); + } + + if (pathname === "/session/ses_1/command" && request.method === "POST") { + return Response.json({ ok: true }); + } + + if (pathname === "/session/ses_1/revert" && request.method === "POST") { + return Response.json({ + id: "ses_1", + title: "Reverted Session", + directory: "/tmp/workspace", + time: { created: 100, updated: 260 }, + }); + } + + if (pathname === "/session/ses_1/unrevert" && request.method === "POST") { + return Response.json({ + id: "ses_1", + title: "Restored Session", + directory: "/tmp/workspace", + time: { created: 100, updated: 270 }, + }); + } + + return Response.json({ code: "not_found", message: "Not found" }, { status: 404 }); + }, + }) as Served; + + stops.push(() => server.stop(true)); + return { + requests, + url: `http://127.0.0.1:${server.port}`, + }; +} + +describe("workspace session routes", () => { + test("serves local workspace session reads, writes, and streaming", async () => { + const mock = startMockOpencode(); + const { app, dependencies } = createTestApp(); + const workspace = dependencies.persistence.registry.importLocalWorkspace({ + dataDir: "/tmp/workspace", + displayName: "Local Workspace", + 
status: "ready", + }); + withMockOpencodeBaseUrl(dependencies, mock.url); + + const listResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/sessions?roots=true&limit=1`); + expect(listResponse.status).toBe(200); + expect((await listResponse.json()).data.items[0].id).toBe("ses_1"); + + const snapshotResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/sessions/ses_1/snapshot?limit=5`); + expect(snapshotResponse.status).toBe(200); + const snapshot = await snapshotResponse.json(); + expect(snapshot.data.session.id).toBe("ses_1"); + expect(snapshot.data.status.type).toBe("busy"); + expect(snapshot.data.todos[0].content).toBe("Ship Phase 6"); + + const createResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/sessions`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ title: "Create" }), + }); + expect(createResponse.status).toBe(200); + expect((await createResponse.json()).data.id).toBe("ses_created"); + + const updateResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/sessions/ses_1`, { + method: "PATCH", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ title: "Rename" }), + }); + expect(updateResponse.status).toBe(200); + expect((await updateResponse.json()).data.title).toBe("Renamed Session"); + + const promptResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/sessions/ses_1/prompt_async`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ parts: [{ type: "text", text: "Hello" }] }), + }); + expect(promptResponse.status).toBe(200); + expect((await promptResponse.json()).data.accepted).toBe(true); + + const revertResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/sessions/ses_1/revert`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ messageID: 
"msg_1" }), + }); + expect(revertResponse.status).toBe(200); + expect((await revertResponse.json()).data.title).toBe("Reverted Session"); + + const eventsResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/events`); + expect(eventsResponse.status).toBe(200); + const eventsBody = await eventsResponse.text(); + expect(eventsBody).toContain("session.status"); + expect(eventsBody).toContain("session.idle"); + }); + + test("routes remote workspace sessions through the mounted remote backend", async () => { + const remote = startMockOpencode({ expectBearer: "secret", mountPrefix: "/w/alpha/opencode" }); + const { app, dependencies } = createTestApp(); + const workspace = dependencies.persistence.registry.importRemoteWorkspace({ + baseUrl: `${remote.url}/w/alpha/opencode`, + directory: "/srv/remote-alpha", + displayName: "Remote Alpha", + legacyNotes: { source: "test" }, + remoteType: "openwork", + remoteWorkspaceId: "alpha", + serverAuth: { openworkToken: "secret" }, + serverBaseUrl: remote.url, + serverHostingKind: "self_hosted", + serverLabel: "remote.example.com", + workspaceStatus: "ready", + }); + + const listResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/sessions`); + expect(listResponse.status).toBe(200); + expect((await listResponse.json()).data.items[0].id).toBe("ses_1"); + + const commandResponse = await app.request(`http://openwork.local/workspaces/${workspace.id}/sessions/ses_1/command`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ command: "review" }), + }); + expect(commandResponse.status).toBe(200); + expect((await commandResponse.json()).data.accepted).toBe(true); + }); +}); diff --git a/apps/server-v2/src/standalone.test.ts b/apps/server-v2/src/standalone.test.ts new file mode 100644 index 00000000..a55a072c --- /dev/null +++ b/apps/server-v2/src/standalone.test.ts @@ -0,0 +1,93 @@ +import { afterEach, expect, test } from "bun:test"; +import net 
from "node:net"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +const packageDir = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); +const spawnedChildren: Array = []; + +afterEach(async () => { + while (spawnedChildren.length > 0) { + const child = spawnedChildren.pop(); + if (!child) { + continue; + } + + child.kill(); + await child.exited; + } +}); + +function getFreePort() { + return new Promise((resolve, reject) => { + const server = net.createServer(); + server.listen(0, "127.0.0.1", () => { + const address = server.address(); + if (!address || typeof address === "string") { + reject(new Error("Failed to resolve a test port.")); + return; + } + + const { port } = address; + server.close((error) => { + if (error) { + reject(error); + return; + } + + resolve(port); + }); + }); + server.once("error", reject); + }); +} + +async function waitForHealth(url: string) { + const deadline = Date.now() + 10_000; + + while (Date.now() < deadline) { + try { + const response = await fetch(url); + if (response.ok) { + return response; + } + } catch { + // wait for server boot + } + + await Bun.sleep(100); + } + + throw new Error(`Timed out waiting for ${url}`); +} + +test("cli boots as a standalone process and serves health plus runtime routes", async () => { + const port = await getFreePort(); + const child = Bun.spawn(["bun", "src/cli.ts", "--port", String(port)], { + cwd: packageDir, + env: { + ...process.env, + OPENWORK_SERVER_V2_IN_MEMORY: "1", + OPENWORK_SERVER_V2_RUNTIME_BOOTSTRAP: "disabled", + }, + stderr: "pipe", + stdout: "pipe", + }); + spawnedChildren.push(child); + + const response = await waitForHealth(`http://127.0.0.1:${port}/system/health`); + const body = await response.json(); + + const runtimeSummaryResponse = await waitForHealth(`http://127.0.0.1:${port}/system/runtime/summary`); + const runtimeSummary = await runtimeSummaryResponse.json(); + + const runtimeVersionsResponse = await 
waitForHealth(`http://127.0.0.1:${port}/system/runtime/versions`); + const runtimeVersions = await runtimeVersionsResponse.json(); + + expect(body.ok).toBe(true); + expect(body.data.service).toBe("openwork-server-v2"); + expect(runtimeSummary.ok).toBe(true); + expect(runtimeSummary.data.target).toBeTruthy(); + expect(runtimeVersions.ok).toBe(true); + expect(runtimeVersions.data.pinned.serverVersion).toBeTruthy(); +}, 15_000); diff --git a/apps/server-v2/src/test-fixtures/fake-runtime.ts b/apps/server-v2/src/test-fixtures/fake-runtime.ts new file mode 100644 index 00000000..3fb8acee --- /dev/null +++ b/apps/server-v2/src/test-fixtures/fake-runtime.ts @@ -0,0 +1,142 @@ +#!/usr/bin/env bun + +import http from "node:http"; + +function readOption(name: string, fallback = "") { + return process.env[name]?.trim() || fallback; +} + +function parsePort(argv: string[]) { + const hostArg = argv.find((value) => value.startsWith("--hostname=")) ?? "--hostname=127.0.0.1"; + const portArg = argv.find((value) => value.startsWith("--port=")) ?? "--port=0"; + return { + host: hostArg.slice("--hostname=".length), + port: Number.parseInt(portArg.slice("--port=".length), 10) || 0, + }; +} + +async function startFakeOpencode(argv: string[]) { + const mode = readOption("FAKE_RUNTIME_MODE", "success"); + const version = readOption("FAKE_RUNTIME_VERSION", "1.2.27"); + + if (mode === "early-exit") { + console.error("fake opencode exiting before readiness"); + process.exit(7); + } + + if (mode === "timeout") { + console.log("fake opencode booting slowly"); + setInterval(() => {}, 1_000); + await new Promise(() => undefined); + } + + const { host, port } = parsePort(argv); + const server = http.createServer((req, res) => { + const pathname = req.url ? 
new URL(req.url, "http://localhost").pathname : ""; + if (pathname === "/health" || pathname === "/global/health") { + res.writeHead(200, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ healthy: true, version })); + return; + } + + res.writeHead(404, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ error: "not_found" })); + }); + + await new Promise((resolve, reject) => { + server.once("error", reject); + server.listen(port, host, () => resolve()); + }); + const address = server.address(); + if (!address || typeof address === "string") { + throw new Error("Failed to resolve fake opencode address."); + } + + console.log(`opencode server listening on http://${host}:${address.port}`); + + const exitAfterMs = Number.parseInt(readOption("FAKE_RUNTIME_EXIT_AFTER_MS", "0"), 10) || 0; + if (exitAfterMs > 0) { + setTimeout(() => { + server.close(() => { + process.exit(3); + }); + }, exitAfterMs); + } + + await new Promise((resolve) => { + const shutdown = () => server.close(() => resolve()); + process.on("SIGINT", shutdown); + process.on("SIGTERM", shutdown); + }); +} + +async function startFakeRouter() { + const mode = readOption("FAKE_RUNTIME_MODE", "success"); + const healthPort = Number.parseInt(readOption("OPENCODE_ROUTER_HEALTH_PORT", "0"), 10); + if (!healthPort) { + throw new Error("OPENCODE_ROUTER_HEALTH_PORT is required for the fake router."); + } + + if (mode === "early-exit") { + console.error("fake router exiting before readiness"); + process.exit(9); + } + + if (mode === "timeout") { + console.log("fake router waiting forever"); + setInterval(() => {}, 1_000); + await new Promise(() => undefined); + } + + const server = http.createServer((req, res) => { + const pathname = req.url ? 
new URL(req.url, "http://localhost").pathname : ""; + if (pathname === "/health" || pathname === "/") { + res.writeHead(200, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ ok: true })); + return; + } + res.writeHead(404, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ error: "not_found" })); + }); + + await new Promise((resolve, reject) => { + server.once("error", reject); + server.listen(healthPort, "127.0.0.1", () => resolve()); + }); + + const exitAfterMs = Number.parseInt(readOption("FAKE_RUNTIME_EXIT_AFTER_MS", "0"), 10) || 0; + if (exitAfterMs > 0) { + setTimeout(() => { + server.close(() => { + process.exit(4); + }); + }, exitAfterMs); + } + + await new Promise((resolve) => { + const shutdown = () => server.close(() => resolve()); + process.on("SIGINT", shutdown); + process.on("SIGTERM", shutdown); + }); +} + +async function main() { + const kind = readOption("FAKE_RUNTIME_KIND", "opencode"); + const [, , ...argv] = process.argv; + if (argv[0] !== "serve") { + console.log(readOption("FAKE_RUNTIME_VERSION", "1.2.27")); + return; + } + + if (kind === "router") { + await startFakeRouter(); + return; + } + + await startFakeOpencode(argv); +} + +main().catch((error) => { + console.error(error instanceof Error ? error.stack ?? error.message : String(error)); + process.exit(1); +}); diff --git a/apps/server-v2/src/version.ts b/apps/server-v2/src/version.ts new file mode 100644 index 00000000..2b70e027 --- /dev/null +++ b/apps/server-v2/src/version.ts @@ -0,0 +1,21 @@ +import packageJson from "../package.json" with { type: "json" }; + +declare const __OPENWORK_SERVER_V2_VERSION__: string | undefined; + +function normalizeVersion(value: string | undefined | null) { + const trimmed = value?.trim() ?? ""; + return trimmed || null; +} + +export function resolveServerV2Version() { + return ( + normalizeVersion(process.env.OPENWORK_SERVER_V2_VERSION) ?? + normalizeVersion( + typeof __OPENWORK_SERVER_V2_VERSION__ === "string" + ? 
__OPENWORK_SERVER_V2_VERSION__ + : null, + ) ?? + normalizeVersion(packageJson.version) ?? + "0.0.0" + ); +} diff --git a/apps/server-v2/tsconfig.json b/apps/server-v2/tsconfig.json new file mode 100644 index 00000000..9779c5ed --- /dev/null +++ b/apps/server-v2/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "strict": true, + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "resolveJsonModule": true, + "skipLibCheck": true, + "types": ["bun-types", "node"] + }, + "include": ["src", "scripts"] +} diff --git a/package.json b/package.json index d14d067f..ba4cbfc5 100644 --- a/package.json +++ b/package.json @@ -4,6 +4,8 @@ "version": "0.0.0", "scripts": { "dev": "OPENWORK_DEV_MODE=1 pnpm --filter @openwork/desktop dev", + "dev:server-v2": "node scripts/dev-server-v2.mjs", + "dev:server-v2:server": "node scripts/dev-server-v2.mjs --no-app", "dev:windows": ".\\scripts\\dev-windows.cmd", "dev:windows:x64": ".\\scripts\\dev-windows.cmd x64", "dev:ui": "OPENWORK_DEV_MODE=1 pnpm --filter @openwork/app dev", @@ -18,7 +20,10 @@ "build:ui": "pnpm --filter @openwork/app build", "build:web": "pnpm --filter @openwork-ee/den-web build", "preview": "pnpm --filter @openwork/app preview", - "typecheck": "pnpm --filter @openwork/app typecheck", + "sdk:generate": "pnpm --filter openwork-server-v2 openapi:generate && pnpm --filter @openwork/server-sdk generate", + "sdk:watch": "pnpm --filter @openwork/server-sdk watch", + "contract:check": "node scripts/check-server-v2-contract.mjs", + "typecheck": "pnpm run sdk:generate && pnpm --filter @openwork/app typecheck", "test:health": "pnpm --filter @openwork/app test:health", "test:sessions": "pnpm --filter @openwork/app test:sessions", "test:refactor": "pnpm --filter @openwork/app test:refactor", diff --git a/packages/openwork-server-sdk/openapi-ts.config.ts b/packages/openwork-server-sdk/openapi-ts.config.ts new file mode 100644 
index 00000000..2b50d7d3 --- /dev/null +++ b/packages/openwork-server-sdk/openapi-ts.config.ts @@ -0,0 +1,10 @@ +import { defineConfig } from "@hey-api/openapi-ts"; +import { dirname, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; + +const configDir = dirname(fileURLToPath(import.meta.url)); + +export default defineConfig({ + input: resolve(configDir, "../../apps/server-v2/openapi/openapi.json"), + output: resolve(configDir, "generated"), +}); diff --git a/packages/openwork-server-sdk/package.json b/packages/openwork-server-sdk/package.json new file mode 100644 index 00000000..984e17ce --- /dev/null +++ b/packages/openwork-server-sdk/package.json @@ -0,0 +1,30 @@ +{ + "name": "@openwork/server-sdk", + "private": true, + "type": "module", + "exports": { + ".": { + "types": "./src/index.ts", + "development": "./src/index.ts", + "default": "./src/index.ts" + } + }, + "files": [ + "generated", + "src", + "README.md" + ], + "scripts": { + "generate": "pnpm exec openapi-ts -f openapi-ts.config.ts", + "watch": "node ./scripts/watch.mjs", + "pretypecheck": "pnpm --dir ../.. 
run sdk:generate", + "typecheck": "tsc --noEmit" + }, + "dependencies": { + "@hey-api/client-fetch": "0.13.1" + }, + "devDependencies": { + "@hey-api/openapi-ts": "0.95.0", + "typescript": "^5.6.3" + } +} diff --git a/packages/openwork-server-sdk/scripts/watch.mjs b/packages/openwork-server-sdk/scripts/watch.mjs new file mode 100644 index 00000000..fbc1658f --- /dev/null +++ b/packages/openwork-server-sdk/scripts/watch.mjs @@ -0,0 +1,72 @@ +import { spawn } from "node:child_process"; +import { watch } from "node:fs"; +import path from "node:path"; +import process from "node:process"; +import { fileURLToPath } from "node:url"; + +const scriptDir = path.dirname(fileURLToPath(import.meta.url)); +const packageDir = path.resolve(scriptDir, ".."); +const specPath = path.resolve(packageDir, "../../apps/server-v2/openapi/openapi.json"); +const specDir = path.dirname(specPath); +const specFilename = path.basename(specPath); + +let activeChild = null; +let queued = false; +let timer = null; + +function runGenerate() { + if (activeChild) { + queued = true; + return; + } + + activeChild = spawn("pnpm", ["run", "generate"], { + cwd: packageDir, + stdio: "inherit", + env: process.env, + }); + + activeChild.once("exit", (code) => { + activeChild = null; + if (code && code !== 0) { + process.stderr.write(`[openwork-server-sdk] generation failed with exit code ${code}.\n`); + } + if (queued) { + queued = false; + scheduleGenerate(); + } + }); +} + +function scheduleGenerate() { + if (timer) { + clearTimeout(timer); + } + timer = setTimeout(() => { + timer = null; + runGenerate(); + }, 120); +} + +try { + watch(specDir, (_eventType, filename) => { + if (!filename || path.basename(String(filename)) !== specFilename) { + return; + } + scheduleGenerate(); + }); +} catch (error) { + process.stderr.write(`${error instanceof Error ? 
error.message : String(error)}\n`); + process.exit(1); +} + +runGenerate(); + +for (const signal of ["SIGINT", "SIGTERM"]) { + process.on(signal, () => { + if (activeChild && activeChild.exitCode === null) { + activeChild.kill("SIGTERM"); + } + process.exit(0); + }); +} diff --git a/packages/openwork-server-sdk/src/client.ts b/packages/openwork-server-sdk/src/client.ts new file mode 100644 index 00000000..8120e72c --- /dev/null +++ b/packages/openwork-server-sdk/src/client.ts @@ -0,0 +1,17 @@ +import { createClient } from "../generated/client/index"; +import type { Client, Config, CreateClientConfig } from "../generated/client/index"; + +export type OpenWorkServerClientConfig = Config; +export type OpenWorkServerClient = Client; +export type OpenWorkServerClientFactory = CreateClientConfig; + +export function normalizeServerBaseUrl(baseUrl: string) { + return baseUrl.replace(/\/+$/, "") || baseUrl; +} + +export function createOpenWorkServerClient(config: OpenWorkServerClientConfig = {}): OpenWorkServerClient { + return createClient({ + ...config, + baseUrl: config.baseUrl ? 
normalizeServerBaseUrl(config.baseUrl) : config.baseUrl, + }); +} diff --git a/packages/openwork-server-sdk/src/index.ts b/packages/openwork-server-sdk/src/index.ts new file mode 100644 index 00000000..46b84c38 --- /dev/null +++ b/packages/openwork-server-sdk/src/index.ts @@ -0,0 +1,18 @@ +export * from "../generated/index"; +export { createClient } from "../generated/client/index"; +export type { + Client, + ClientOptions, + Config, + CreateClientConfig, + RequestOptions, + RequestResult, +} from "../generated/client/index"; +export { + createOpenWorkServerClient, + normalizeServerBaseUrl, + type OpenWorkServerClient, + type OpenWorkServerClientConfig, + type OpenWorkServerClientFactory, +} from "./client.js"; +export * from "./streams/index.js"; diff --git a/packages/openwork-server-sdk/src/streams/index.ts b/packages/openwork-server-sdk/src/streams/index.ts new file mode 100644 index 00000000..606d66b7 --- /dev/null +++ b/packages/openwork-server-sdk/src/streams/index.ts @@ -0,0 +1,12 @@ +export { + createOpenWorkServerEventStream, + type OpenWorkServerEventStreamOptions, + type OpenWorkServerEventStreamResult, + type OpenWorkServerStreamEvent, +} from "./sse.js"; +export { + createOpenWorkServerWorkspaceEventStream, + type OpenWorkServerWorkspaceEvent, + type OpenWorkServerWorkspaceEventStreamOptions, + type OpenWorkServerWorkspaceEventStreamResult, +} from "./workspace-events.js"; diff --git a/packages/openwork-server-sdk/src/streams/sse.ts b/packages/openwork-server-sdk/src/streams/sse.ts new file mode 100644 index 00000000..ee075ccd --- /dev/null +++ b/packages/openwork-server-sdk/src/streams/sse.ts @@ -0,0 +1,10 @@ +import { createSseClient } from "../../generated/core/serverSentEvents.gen"; +import type { ServerSentEventsOptions, ServerSentEventsResult, StreamEvent } from "../../generated/core/serverSentEvents.gen"; + +export type OpenWorkServerEventStreamOptions = ServerSentEventsOptions; +export type OpenWorkServerEventStreamResult = 
ServerSentEventsResult; +export type OpenWorkServerStreamEvent = StreamEvent; + +export function createOpenWorkServerEventStream(options: OpenWorkServerEventStreamOptions) { + return createSseClient(options as ServerSentEventsOptions) as OpenWorkServerEventStreamResult; +} diff --git a/packages/openwork-server-sdk/src/streams/workspace-events.ts b/packages/openwork-server-sdk/src/streams/workspace-events.ts new file mode 100644 index 00000000..c41f43ee --- /dev/null +++ b/packages/openwork-server-sdk/src/streams/workspace-events.ts @@ -0,0 +1,30 @@ +import { normalizeServerBaseUrl } from "../client.js"; +import type { OpenWorkServerV2WorkspaceEvent } from "../../generated/types.gen"; +import { + createOpenWorkServerEventStream, + type OpenWorkServerEventStreamOptions, + type OpenWorkServerEventStreamResult, +} from "./sse.js"; + +export type OpenWorkServerWorkspaceEvent = OpenWorkServerV2WorkspaceEvent; + +export type OpenWorkServerWorkspaceEventStreamOptions = Omit< + OpenWorkServerEventStreamOptions, + "url" +> & { + baseUrl: string; + workspaceId: string; +}; + +export type OpenWorkServerWorkspaceEventStreamResult = OpenWorkServerEventStreamResult; + +export function createOpenWorkServerWorkspaceEventStream( + options: OpenWorkServerWorkspaceEventStreamOptions, +): OpenWorkServerWorkspaceEventStreamResult { + const baseUrl = normalizeServerBaseUrl(options.baseUrl); + const url = `${baseUrl}/workspaces/${encodeURIComponent(options.workspaceId)}/events`; + return createOpenWorkServerEventStream({ + ...options, + url, + }); +} diff --git a/packages/openwork-server-sdk/tsconfig.json b/packages/openwork-server-sdk/tsconfig.json new file mode 100644 index 00000000..a2be0f49 --- /dev/null +++ b/packages/openwork-server-sdk/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "moduleResolution": "Bundler", + "strict": true, + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "resolveJsonModule": true, + 
"skipLibCheck": true, + "noEmit": true + }, + "include": ["src", "generated"] +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a54516da..9672c0c0 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -267,6 +267,37 @@ importers: specifier: ^5.6.3 version: 5.9.3 + apps/server-v2: + dependencies: + '@opencode-ai/sdk': + specifier: 1.2.27 + version: 1.2.27 + hono: + specifier: 4.12.12 + version: 4.12.12 + hono-openapi: + specifier: 1.3.0 + version: 1.3.0(@hono/standard-validator@0.2.2(@standard-schema/spec@1.1.0)(hono@4.12.12))(@standard-community/standard-json@0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6))(@standard-community/standard-openapi@0.2.9(@standard-community/standard-json@0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6))(@standard-schema/spec@1.1.0)(openapi-types@12.1.3)(zod@4.3.6))(@types/json-schema@7.0.15)(hono@4.12.12)(openapi-types@12.1.3) + jsonc-parser: + specifier: ^3.3.1 + version: 3.3.1 + yaml: + specifier: ^2.8.1 + version: 2.8.2 + zod: + specifier: ^4.3.6 + version: 4.3.6 + devDependencies: + '@types/node': + specifier: ^22.10.2 + version: 22.19.7 + bun-types: + specifier: ^1.3.6 + version: 1.3.6 + typescript: + specifier: ^5.6.3 + version: 5.9.3 + apps/share: dependencies: '@vercel/blob': @@ -681,6 +712,19 @@ importers: specifier: ^5.5.4 version: 5.9.3 + packages/openwork-server-sdk: + dependencies: + '@hey-api/client-fetch': + specifier: 0.13.1 + version: 0.13.1(@hey-api/openapi-ts@0.95.0(typescript@5.9.3)) + devDependencies: + '@hey-api/openapi-ts': + specifier: 0.95.0 + version: 0.95.0(typescript@5.9.3) + typescript: + specifier: ^5.6.3 + version: 5.9.3 + packages/ui: dependencies: '@paper-design/shaders': @@ -1721,6 +1765,37 @@ packages: engines: {node: '>=6'} hasBin: true + '@hey-api/client-fetch@0.13.1': + resolution: {integrity: sha512-29jBRYNdxVGlx5oewFgOrkulZckpIpBIRHth3uHFn1PrL2ucMy52FvWOY3U3dVx2go1Z3kUmMi6lr07iOpUqqA==} + deprecated: Starting with 
v0.73.0, this package is bundled directly inside @hey-api/openapi-ts. + peerDependencies: + '@hey-api/openapi-ts': < 2 + + '@hey-api/codegen-core@0.7.4': + resolution: {integrity: sha512-DGd9yeSQzflOWO3Y5mt1GRXkXH9O/yIMgbxPjwLI3jwu/3nAjoXXD26lEeFb6tclYlg0JAqTIs5d930G/qxHeA==} + engines: {node: '>=20.19.0'} + + '@hey-api/json-schema-ref-parser@1.3.1': + resolution: {integrity: sha512-7atnpUkT8TyUPHYPLk91j/GyaqMuwTEHanLOe50Dlx0EEvNuQqFD52Yjg8x4KU0UFL1mWlyhE+sUE/wAtQ1N2A==} + engines: {node: '>=20.19.0'} + + '@hey-api/openapi-ts@0.95.0': + resolution: {integrity: sha512-lk5C+WKl5yqEmliQihEyhX/jNcWlAykTSEqkDeKa9xSq5YDAzOFvx7oos8YTqiIzdc4TemtlEaB8Rns7+8A0qg==} + engines: {node: '>=20.19.0'} + hasBin: true + peerDependencies: + typescript: '>=5.5.3 || >=6.0.0 || 6.0.1-rc' + + '@hey-api/shared@0.3.0': + resolution: {integrity: sha512-G+4GPojdLEh9bUwRG88teMPM1HdqMm/IsJ38cbnNxhyDu1FkFGwilkA1EqnULCzfTam/ZoZkaLdmAd8xEh4Xsw==} + engines: {node: '>=20.19.0'} + + '@hey-api/spec-types@0.1.0': + resolution: {integrity: sha512-StS4RrAO5pyJCBwe6uF9MAuPflkztriW+FPnVb7oEjzDYv1sxPwP+f7fL6u6D+UVrKpZ/9bPNx/xXVdkeWPU6A==} + + '@hey-api/types@0.1.4': + resolution: {integrity: sha512-thWfawrDIP7wSI9ioT13I5soaaqB5vAPIiZmgD8PbeEVKNrkonc0N/Sjj97ezl7oQgusZmaNphGdMKipPO6IBg==} + '@hono/node-server@1.19.11': resolution: {integrity: sha512-dr8/3zEaB+p0D2n/IUrlPF1HZm586qgJNXK1a9fhg/PzdtkK7Ksd5l312tJX2yBuALqDYBlG20QEbayqPyxn+g==} engines: {node: '>=18.14.1'} @@ -2027,6 +2102,9 @@ packages: '@js-sdsl/ordered-map@4.4.2': resolution: {integrity: sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==} + '@jsdevtools/ono@7.1.3': + resolution: {integrity: sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==} + '@lexical/clipboard@0.35.0': resolution: {integrity: sha512-ko7xSIIiayvDiqjNDX6fgH9RlcM6r9vrrvJYTcfGVBor5httx16lhIi0QJZ4+RNPvGtTjyFv4bwRmsixRRwImg==} @@ -2310,6 +2388,9 @@ packages: '@opencode-ai/sdk@1.1.39': 
resolution: {integrity: sha512-EUYBZAci0bzG9+a7JVINmqAqis71ipG2/D3juvmvvKFyu0YBIT/6b+g3+p82Eb5CU2dujxpPdJJCaexZ1389eQ==} + '@opencode-ai/sdk@1.2.27': + resolution: {integrity: sha512-Wk0o/I+Fo+wE3zgvlJDs8Fb67KlKqX0PrV8dK5adSDkANq6r4Z25zXJg2iOir+a8ntg3rAcpel1OY4FV/TwRUA==} + '@opentelemetry/api-logs@0.207.0': resolution: {integrity: sha512-lAb0jQRVyleQQGiuuvCOTDVspc14nx6XJjP4FspJ1sNARo3Regq4ZZbrc3rN4b1TYSuUCvgH+UXUPug4SLOqEQ==} engines: {node: '>=8.0.0'} @@ -3631,6 +3712,10 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + ansi-colors@4.1.3: + resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} + engines: {node: '>=6'} + ansi-regex@5.0.1: resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} engines: {node: '>=8'} @@ -3652,6 +3737,9 @@ packages: arg@5.0.2: resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==} + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + async-retry@1.3.3: resolution: {integrity: sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw==} @@ -3858,6 +3946,10 @@ packages: bun-webgpu@0.1.4: resolution: {integrity: sha512-Kw+HoXl1PMWJTh9wvh63SSRofTA8vYBFCw0XEP1V1fFdQEDhI8Sgf73sdndE/oDpN/7CMx0Yv/q8FCvO39ROMQ==} + bundle-name@4.1.0: + resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} + engines: {node: '>=18'} + bundle-require@5.1.0: resolution: {integrity: sha512-3WrrOuZiyaaZPWiEt4G3+IffISVC9HYlWueJEBWED4ZH4aIAC2PnkdnuRrR94M+w6yGWn4AglWtJtBI8YqvgoA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} @@ -3868,6 +3960,14 @@ packages: resolution: {integrity: 
sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==} engines: {node: '>=10.16.0'} + c12@3.3.3: + resolution: {integrity: sha512-750hTRvgBy5kcMNPdh95Qo+XUBeGo8C7nsKSmedDmaQI+E0r82DwHeM6vBewDe4rGFbnxoa4V9pw+sPh5+Iz8Q==} + peerDependencies: + magicast: '*' + peerDependenciesMeta: + magicast: + optional: true + cac@6.7.14: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} @@ -3914,10 +4014,20 @@ packages: resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} engines: {node: '>= 14.16.0'} + chokidar@5.0.0: + resolution: {integrity: sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==} + engines: {node: '>= 20.19.0'} + chownr@3.0.0: resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==} engines: {node: '>=18'} + citty@0.1.6: + resolution: {integrity: sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ==} + + citty@0.2.2: + resolution: {integrity: sha512-+6vJA3L98yv+IdfKGZHBNiGW5KHn22e/JwID0Strsz8h4S/csAu/OuICwxrg44k5MRiZHWIo8XXuJgQTriRP4w==} + cjs-module-lexer@2.2.0: resolution: {integrity: sha512-4bHTS2YuzUvtoLjdy+98ykbNB5jS0+07EvFNXerqZQJ89F7DI6ET7OQo/HJuW6K0aVsKA9hj9/RVb2kQVOrPDQ==} @@ -3939,6 +4049,10 @@ packages: color-name@1.1.4: resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + color-support@1.1.3: + resolution: {integrity: sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==} + hasBin: true + combined-stream@1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -3950,6 +4064,10 @@ packages: 
resolution: {integrity: sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==} engines: {node: '>=18'} + commander@14.0.3: + resolution: {integrity: sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==} + engines: {node: '>=20'} + commander@4.1.1: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} engines: {node: '>= 6'} @@ -3965,6 +4083,9 @@ packages: confbox@0.1.8: resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} + confbox@0.2.4: + resolution: {integrity: sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ==} + consola@3.4.2: resolution: {integrity: sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==} engines: {node: ^14.18.0 || >=16.10.0} @@ -3981,6 +4102,10 @@ packages: crelt@1.0.6: resolution: {integrity: sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==} + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + cssesc@3.0.0: resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} engines: {node: '>=4'} @@ -4160,6 +4285,18 @@ packages: decode-named-character-reference@1.3.0: resolution: {integrity: sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==} + default-browser-id@5.0.1: + resolution: {integrity: sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==} + engines: {node: '>=18'} + + default-browser@5.5.0: + resolution: {integrity: sha512-H9LMLr5zwIbSxrmvikGuI/5KGhZ8E2zH3stkMgM5LpOWDutGM2JZaj460Udnf1a+946zc7YBgrqEWwbk7zHvGw==} + engines: {node: 
'>=18'} + + define-lazy-prop@3.0.0: + resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} + engines: {node: '>=12'} + defu@6.1.4: resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} @@ -4178,6 +4315,9 @@ packages: resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} engines: {node: '>=6'} + destr@2.0.5: + resolution: {integrity: sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==} + detect-libc@2.1.2: resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} engines: {node: '>=8'} @@ -4392,6 +4532,9 @@ packages: resolution: {integrity: sha512-A5EmesHW6rfnZ9ysHQjPdJRni0SRar0tjtG5MNtm9n5TUvsYU8oozprtRD4AqHxcZWWlVuAmQo2nWKfN9oyjTw==} engines: {node: '>=0.10.0'} + exsolve@1.0.8: + resolution: {integrity: sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==} + extend@3.0.2: resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} @@ -4510,9 +4653,16 @@ packages: get-tsconfig@4.13.1: resolution: {integrity: sha512-EoY1N2xCn44xU6750Sx7OjOIT59FkmstNc3X6y5xpz7D5cBtZRe/3pSlTkDJgqsOk3WwZPkWfonhhUJfttQo3w==} + get-tsconfig@4.13.6: + resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==} + gifwrap@0.10.1: resolution: {integrity: sha512-2760b1vpJHNmLzZ/ubTtNnEx5WApN/PYWJvXvgS+tL1egTTthayFYIQQNi136FLEDcN/IyEY2EcGpIITD6eYUw==} + giget@2.0.0: + resolution: {integrity: sha512-L5bGsVkxJbJgdnwyuheIunkGatUF/zssUoxxjACCseZYAVbaqdh9Tsmmlkl8vYan09H7sbvKt4pS8GqKLBrEzA==} + hasBin: true + glob-parent@5.1.2: resolution: {integrity: 
sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} @@ -4595,6 +4745,10 @@ packages: hono: optional: true + hono@4.12.12: + resolution: {integrity: sha512-p1JfQMKaceuCbpJKAPKVqyqviZdS0eUxH9v82oWo1kb9xjQ5wA6iP3FNVAPDFlz5/p7d45lO+BpSk1tuSZMF4Q==} + engines: {node: '>=16.9.0'} + hono@4.12.8: resolution: {integrity: sha512-VJCEvtrezO1IAR+kqEYnxUOoStaQPGrCmX3j4wDTNOcD1uRPFpGlwQUIW8niPuvHXaTUxeOUl5MMDGrl+tmO9A==} engines: {node: '>=16.9.0'} @@ -4659,6 +4813,11 @@ packages: is-decimal@2.0.1: resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} + is-docker@3.0.0: + resolution: {integrity: sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + hasBin: true + is-electron@2.2.2: resolution: {integrity: sha512-FO/Rhvz5tuw4MCWkpMzHFKWD2LsfHzIb7i6MdPYZ/KW7AlxawyLkqdy+jPZP1WubqEADE3O4FUENlJHDfQASRg==} @@ -4677,6 +4836,15 @@ packages: is-hexadecimal@2.0.1: resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} + is-in-ssh@1.0.0: + resolution: {integrity: sha512-jYa6Q9rH90kR1vKB6NM7qqd1mge3Fx4Dhw5TVlK1MUBqhEOuCagrEHMevNuCcbECmXZ0ThXkRm+Ymr51HwEPAw==} + engines: {node: '>=20'} + + is-inside-container@1.0.0: + resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} + engines: {node: '>=14.16'} + hasBin: true + is-node-process@1.2.0: resolution: {integrity: sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==} @@ -4699,6 +4867,13 @@ packages: resolution: {integrity: sha512-ZhMwEosbFJkA0YhFnNDgTM4ZxDRsS6HqTo7qsZM08fehyRYIYa0yHu5R6mgo1n/8MgaPBXiPimPD77baVFYg+A==} engines: {node: '>=12.13'} + is-wsl@3.1.1: + resolution: {integrity: 
sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw==} + engines: {node: '>=16'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + isomorphic-ws@5.0.0: resolution: {integrity: sha512-muId7Zzn9ywDsyXgTIafTry2sV3nySZeUDe6YedVd1Hvuuep5AsIlqK+XefWpYTyJG5e503F2xIuT2lcU6rCSw==} peerDependencies: @@ -4732,6 +4907,10 @@ packages: js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + js-yaml@4.1.1: + resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + hasBin: true + jsesc@3.1.0: resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} engines: {node: '>=6'} @@ -5201,6 +5380,9 @@ packages: sass: optional: true + node-fetch-native@1.6.7: + resolution: {integrity: sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==} + node-fetch@2.7.0: resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} engines: {node: 4.x || >=6.0.0} @@ -5221,6 +5403,11 @@ packages: resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} engines: {node: '>=0.10.0'} + nypm@0.6.5: + resolution: {integrity: sha512-K6AJy1GMVyfyMXRVB88700BJqNUkByijGJM8kEHpLdcAt+vSQAVfkWWHYzuRXHSY6xA2sNc5RjTj0p9rE2izVQ==} + engines: {node: '>=18'} + hasBin: true + object-assign@4.1.1: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} engines: {node: '>=0.10.0'} @@ -5229,6 +5416,9 @@ packages: resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} engines: {node: '>= 6'} + 
ohash@2.0.11: + resolution: {integrity: sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ==} + omggif@1.0.10: resolution: {integrity: sha512-LMJTtvgc/nugXj0Vcrrs68Mn2D1r0zf630VNtqtpI1FEO7e+O9FP4gqs9AcnBaSEeoHIPm28u6qgPR0oyEpGSw==} @@ -5236,6 +5426,10 @@ packages: resolution: {integrity: sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==} engines: {node: '>=14.0.0'} + open@11.0.0: + resolution: {integrity: sha512-smsWv2LzFjP03xmvFoJ331ss6h+jixfA4UUV/Bsiyuu4YJPfN+FIQGOIiv4w9/+MoHkfkJ22UIaQWRVFRfH6Vw==} + engines: {node: '>=20'} + openapi-types@12.1.3: resolution: {integrity: sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==} @@ -5303,6 +5497,10 @@ packages: resolution: {integrity: sha512-qdVgY8KXmVdJZRSS1JdEPOKPdTiEK/pi0RkcT2sw1RhXxohdujUlJFPuS1TSkevZ9vzd3ZlL7ULl1MHGTApKzQ==} engines: {node: '>=14.0.0'} + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} @@ -5317,6 +5515,9 @@ packages: resolution: {integrity: sha512-ZI3LnwUv5nOGbQzD9c2iDG6toheuXSZP5esSHBjopsXH4dg19soufvpUGA3uohi5anFtGb2lhAVdHzH6R/Evvg==} engines: {node: '>=8'} + perfect-debounce@2.1.0: + resolution: {integrity: sha512-LjgdTytVFXeUgtHZr9WYViYSM/g8MkcTPYDlPa3cDqMirHjKiSZPYd6DoL7pK8AJQr+uWkQvCjHNdiMqsrJs+g==} + picocolors@1.1.1: resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} @@ -5353,6 +5554,9 @@ packages: pkg-types@1.3.1: resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + pkg-types@2.3.0: + resolution: {integrity: 
sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==} + pkg-up@3.1.0: resolution: {integrity: sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==} engines: {node: '>=8'} @@ -5454,6 +5658,10 @@ packages: resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} + powershell-utils@0.1.0: + resolution: {integrity: sha512-dM0jVuXJPsDN6DvRpea484tCUaMiXWjuCn++HGTqUWzGDjv5tZkEZldAJ/UMlqRYGFrD/etByo4/xOuC/snX2A==} + engines: {node: '>=20'} + prismjs@1.30.0: resolution: {integrity: sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==} engines: {node: '>=6'} @@ -5484,6 +5692,9 @@ packages: quick-format-unescaped@4.0.4: resolution: {integrity: sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==} + rc9@2.1.2: + resolution: {integrity: sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg==} + react-dom@18.2.0: resolution: {integrity: sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==} peerDependencies: @@ -5541,6 +5752,10 @@ packages: resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} engines: {node: '>= 14.18.0'} + readdirp@5.0.0: + resolution: {integrity: sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==} + engines: {node: '>= 20.19.0'} + real-require@0.2.0: resolution: {integrity: sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==} engines: {node: '>= 12.13.0'} @@ -5614,6 +5829,10 @@ packages: roughjs@4.6.6: resolution: {integrity: sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==} + run-applescript@7.1.0: + resolution: 
{integrity: sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==} + engines: {node: '>=18'} + run-parallel@1.2.0: resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} @@ -5647,6 +5866,11 @@ packages: resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} hasBin: true + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} + engines: {node: '>=10'} + hasBin: true + semver@7.7.4: resolution: {integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==} engines: {node: '>=10'} @@ -5669,6 +5893,14 @@ packages: resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + shell-quote@1.8.3: resolution: {integrity: sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==} engines: {node: '>= 0.4'} @@ -6151,6 +6383,11 @@ packages: whatwg-url@5.0.0: resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + wrap-ansi@7.0.0: resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} 
engines: {node: '>=10'} @@ -6167,6 +6404,10 @@ packages: utf-8-validate: optional: true + wsl-utils@0.3.1: + resolution: {integrity: sha512-g/eziiSUNBSsdDJtCLB8bdYEUMj4jR7AGeUo96p/3dTafgjHhpF4RiCFPiRILwjQoDXx5MqkBr4fwWtR3Ky4Wg==} + engines: {node: '>=20'} + xml-parse-from-string@1.0.1: resolution: {integrity: sha512-ErcKwJTF54uRzzNMXq2X5sMIy88zJvfN2DmdoQvy7PAFJ+tPRU6ydWuOKNMyfmOjdyBQTFREi60s0Y0SyI0G0g==} @@ -7504,10 +7745,69 @@ snapshots: protobufjs: 7.5.4 yargs: 17.7.2 + '@hey-api/client-fetch@0.13.1(@hey-api/openapi-ts@0.95.0(typescript@5.9.3))': + dependencies: + '@hey-api/openapi-ts': 0.95.0(typescript@5.9.3) + + '@hey-api/codegen-core@0.7.4': + dependencies: + '@hey-api/types': 0.1.4 + ansi-colors: 4.1.3 + c12: 3.3.3 + color-support: 1.1.3 + transitivePeerDependencies: + - magicast + + '@hey-api/json-schema-ref-parser@1.3.1': + dependencies: + '@jsdevtools/ono': 7.1.3 + '@types/json-schema': 7.0.15 + js-yaml: 4.1.1 + + '@hey-api/openapi-ts@0.95.0(typescript@5.9.3)': + dependencies: + '@hey-api/codegen-core': 0.7.4 + '@hey-api/json-schema-ref-parser': 1.3.1 + '@hey-api/shared': 0.3.0 + '@hey-api/spec-types': 0.1.0 + '@hey-api/types': 0.1.4 + ansi-colors: 4.1.3 + color-support: 1.1.3 + commander: 14.0.3 + get-tsconfig: 4.13.6 + typescript: 5.9.3 + transitivePeerDependencies: + - magicast + + '@hey-api/shared@0.3.0': + dependencies: + '@hey-api/codegen-core': 0.7.4 + '@hey-api/json-schema-ref-parser': 1.3.1 + '@hey-api/spec-types': 0.1.0 + '@hey-api/types': 0.1.4 + ansi-colors: 4.1.3 + cross-spawn: 7.0.6 + open: 11.0.0 + semver: 7.7.3 + transitivePeerDependencies: + - magicast + + '@hey-api/spec-types@0.1.0': + dependencies: + '@hey-api/types': 0.1.4 + + '@hey-api/types@0.1.4': {} + '@hono/node-server@1.19.11(hono@4.12.8)': dependencies: hono: 4.12.8 + '@hono/standard-validator@0.2.2(@standard-schema/spec@1.1.0)(hono@4.12.12)': + dependencies: + '@standard-schema/spec': 1.1.0 + hono: 4.12.12 + optional: true + 
'@hono/standard-validator@0.2.2(@standard-schema/spec@1.1.0)(hono@4.12.8)': dependencies: '@standard-schema/spec': 1.1.0 @@ -7843,6 +8143,8 @@ snapshots: '@js-sdsl/ordered-map@4.4.2': {} + '@jsdevtools/ono@7.1.3': {} + '@lexical/clipboard@0.35.0': dependencies: '@lexical/html': 0.35.0 @@ -8129,6 +8431,8 @@ snapshots: '@opencode-ai/sdk@1.1.39': {} + '@opencode-ai/sdk@1.2.27': {} + '@opentelemetry/api-logs@0.207.0': dependencies: '@opentelemetry/api': 1.9.0 @@ -9607,6 +9911,8 @@ snapshots: '@opentelemetry/api': 1.9.0 zod: 4.3.6 + ansi-colors@4.1.3: {} + ansi-regex@5.0.1: {} ansi-styles@4.3.0: @@ -9624,6 +9930,8 @@ snapshots: arg@5.0.2: {} + argparse@2.0.1: {} + async-retry@1.3.3: dependencies: retry: 0.13.1 @@ -9818,6 +10126,10 @@ snapshots: bun-webgpu-win32-x64: 0.1.4 optional: true + bundle-name@4.1.0: + dependencies: + run-applescript: 7.1.0 + bundle-require@5.1.0(esbuild@0.27.2): dependencies: esbuild: 0.27.2 @@ -9827,6 +10139,21 @@ snapshots: dependencies: streamsearch: 1.1.0 + c12@3.3.3: + dependencies: + chokidar: 5.0.0 + confbox: 0.2.4 + defu: 6.1.4 + dotenv: 17.3.1 + exsolve: 1.0.8 + giget: 2.0.0 + jiti: 2.6.1 + ohash: 2.0.11 + pathe: 2.0.3 + perfect-debounce: 2.1.0 + pkg-types: 2.3.0 + rc9: 2.1.2 + cac@6.7.14: {} call-bind-apply-helpers@1.0.2: @@ -9878,8 +10205,18 @@ snapshots: dependencies: readdirp: 4.1.2 + chokidar@5.0.0: + dependencies: + readdirp: 5.0.0 + chownr@3.0.0: {} + citty@0.1.6: + dependencies: + consola: 3.4.2 + + citty@0.2.2: {} + cjs-module-lexer@2.2.0: {} client-only@0.0.1: {} @@ -9898,6 +10235,8 @@ snapshots: color-name@1.1.4: {} + color-support@1.1.3: {} + combined-stream@1.0.8: dependencies: delayed-stream: 1.0.0 @@ -9906,6 +10245,8 @@ snapshots: commander@12.1.0: {} + commander@14.0.3: {} + commander@4.1.1: {} commander@7.2.0: {} @@ -9914,6 +10255,8 @@ snapshots: confbox@0.1.8: {} + confbox@0.2.4: {} + consola@3.4.2: {} convert-source-map@2.0.0: {} @@ -9928,6 +10271,12 @@ snapshots: crelt@1.0.6: {} + cross-spawn@7.0.6: + dependencies: + 
path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + cssesc@3.0.0: {} csstype@3.2.3: {} @@ -10126,6 +10475,15 @@ snapshots: dependencies: character-entities: 2.0.2 + default-browser-id@5.0.1: {} + + default-browser@5.5.0: + dependencies: + bundle-name: 4.1.0 + default-browser-id: 5.0.1 + + define-lazy-prop@3.0.0: {} + defu@6.1.4: {} delaunator@5.1.0: @@ -10138,6 +10496,8 @@ snapshots: dequal@2.0.3: {} + destr@2.0.5: {} + detect-libc@2.1.2: {} devlop@1.1.0: @@ -10319,6 +10679,8 @@ snapshots: dependencies: homedir-polyfill: 1.0.3 + exsolve@1.0.8: {} + extend@3.0.2: {} fast-glob@3.3.3: @@ -10435,11 +10797,24 @@ snapshots: dependencies: resolve-pkg-maps: 1.0.0 + get-tsconfig@4.13.6: + dependencies: + resolve-pkg-maps: 1.0.0 + gifwrap@0.10.1: dependencies: image-q: 4.0.0 omggif: 1.0.10 + giget@2.0.0: + dependencies: + citty: 0.1.6 + consola: 3.4.2 + defu: 6.1.4 + node-fetch-native: 1.6.7 + nypm: 0.6.5 + pathe: 2.0.3 + glob-parent@5.1.2: dependencies: is-glob: 4.0.3 @@ -10564,6 +10939,16 @@ snapshots: dependencies: parse-passwd: 1.0.0 + hono-openapi@1.3.0(@hono/standard-validator@0.2.2(@standard-schema/spec@1.1.0)(hono@4.12.12))(@standard-community/standard-json@0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6))(@standard-community/standard-openapi@0.2.9(@standard-community/standard-json@0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6))(@standard-schema/spec@1.1.0)(openapi-types@12.1.3)(zod@4.3.6))(@types/json-schema@7.0.15)(hono@4.12.12)(openapi-types@12.1.3): + dependencies: + '@standard-community/standard-json': 0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6) + '@standard-community/standard-openapi': 0.2.9(@standard-community/standard-json@0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6))(@standard-schema/spec@1.1.0)(openapi-types@12.1.3)(zod@4.3.6) + '@types/json-schema': 7.0.15 + openapi-types: 
12.1.3 + optionalDependencies: + '@hono/standard-validator': 0.2.2(@standard-schema/spec@1.1.0)(hono@4.12.12) + hono: 4.12.12 + hono-openapi@1.3.0(@hono/standard-validator@0.2.2(@standard-schema/spec@1.1.0)(hono@4.12.8))(@standard-community/standard-json@0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6))(@standard-community/standard-openapi@0.2.9(@standard-community/standard-json@0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6))(@standard-schema/spec@1.1.0)(openapi-types@12.1.3)(zod@4.3.6))(@types/json-schema@7.0.15)(hono@4.12.8)(openapi-types@12.1.3): dependencies: '@standard-community/standard-json': 0.3.5(@standard-schema/spec@1.1.0)(@types/json-schema@7.0.15)(quansync@0.2.11)(zod@4.3.6) @@ -10574,6 +10959,8 @@ snapshots: '@hono/standard-validator': 0.2.2(@standard-schema/spec@1.1.0)(hono@4.12.8) hono: 4.12.8 + hono@4.12.12: {} + hono@4.12.8: {} html-entities@2.3.3: {} @@ -10630,6 +11017,8 @@ snapshots: is-decimal@2.0.1: {} + is-docker@3.0.0: {} + is-electron@2.2.2: {} is-extglob@2.1.1: {} @@ -10642,6 +11031,12 @@ snapshots: is-hexadecimal@2.0.1: {} + is-in-ssh@1.0.0: {} + + is-inside-container@1.0.0: + dependencies: + is-docker: 3.0.0 + is-node-process@1.2.0: {} is-number@7.0.0: {} @@ -10654,6 +11049,12 @@ snapshots: is-what@4.1.16: {} + is-wsl@3.1.1: + dependencies: + is-inside-container: 1.0.0 + + isexe@2.0.0: {} + isomorphic-ws@5.0.0(ws@8.19.0): dependencies: ws: 8.19.0 @@ -10702,6 +11103,10 @@ snapshots: js-tokens@4.0.0: {} + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + jsesc@3.1.0: {} json-schema@0.4.0: {} @@ -11370,6 +11775,8 @@ snapshots: - '@babel/core' - babel-plugin-macros + node-fetch-native@1.6.7: {} + node-fetch@2.7.0: dependencies: whatwg-url: 5.0.0 @@ -11380,14 +11787,31 @@ snapshots: normalize-range@0.1.2: {} + nypm@0.6.5: + dependencies: + citty: 0.2.2 + pathe: 2.0.3 + tinyexec: 1.0.4 + object-assign@4.1.1: {} object-hash@3.0.0: {} + ohash@2.0.11: {} + 
omggif@1.0.10: {} on-exit-leak-free@2.1.2: {} + open@11.0.0: + dependencies: + default-browser: 5.5.0 + define-lazy-prop: 3.0.0 + is-in-ssh: 1.0.0 + is-inside-container: 1.0.0 + powershell-utils: 0.1.0 + wsl-utils: 0.3.1 + openapi-types@12.1.3: {} p-finally@1.0.0: {} @@ -11451,6 +11875,8 @@ snapshots: path-expression-matcher@1.1.3: {} + path-key@3.1.1: {} + path-parse@1.0.7: {} path-scurry@1.11.1: @@ -11462,6 +11888,8 @@ snapshots: peek-readable@4.1.0: {} + perfect-debounce@2.1.0: {} + picocolors@1.1.1: {} picomatch@2.3.1: {} @@ -11502,6 +11930,12 @@ snapshots: mlly: 1.8.2 pathe: 2.0.3 + pkg-types@2.3.0: + dependencies: + confbox: 0.2.4 + exsolve: 1.0.8 + pathe: 2.0.3 + pkg-up@3.1.0: dependencies: find-up: 3.0.0 @@ -11588,6 +12022,8 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 + powershell-utils@0.1.0: {} + prismjs@1.30.0: {} process-warning@5.0.0: {} @@ -11619,6 +12055,11 @@ snapshots: quick-format-unescaped@4.0.4: {} + rc9@2.1.2: + dependencies: + defu: 6.1.4 + destr: 2.0.5 + react-dom@18.2.0(react@18.2.0): dependencies: loose-envify: 1.4.0 @@ -11689,6 +12130,8 @@ snapshots: readdirp@4.1.2: {} + readdirp@5.0.0: {} + real-require@0.2.0: {} rehype-harden@1.1.8: @@ -11809,6 +12252,8 @@ snapshots: points-on-curve: 0.2.0 points-on-path: 0.2.1 + run-applescript@7.1.0: {} + run-parallel@1.2.0: dependencies: queue-microtask: 1.2.3 @@ -11833,6 +12278,8 @@ snapshots: semver@6.3.1: {} + semver@7.7.3: {} + semver@7.7.4: {} seroval-plugins@1.3.3(seroval@1.3.2): @@ -11874,6 +12321,12 @@ snapshots: '@img/sharp-win32-ia32': 0.34.5 '@img/sharp-win32-x64': 0.34.5 + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + shell-quote@1.8.3: {} simple-xml-to-json@1.2.3: {} @@ -12340,6 +12793,10 @@ snapshots: tr46: 0.0.3 webidl-conversions: 3.0.1 + which@2.0.2: + dependencies: + isexe: 2.0.0 + wrap-ansi@7.0.0: dependencies: ansi-styles: 4.3.0 @@ -12348,6 +12805,11 @@ snapshots: ws@8.19.0: {} + wsl-utils@0.3.1: + dependencies: + is-wsl: 3.1.1 
+ powershell-utils: 0.1.0 + xml-parse-from-string@1.0.1: {} xml2js@0.5.0: diff --git a/prds/server-v2-plan/app-audit.md b/prds/server-v2-plan/app-audit.md new file mode 100644 index 00000000..67fe26d3 --- /dev/null +++ b/prds/server-v2-plan/app-audit.md @@ -0,0 +1,350 @@ +# App Audit + +## Scope + +This audit covers `apps/app/**` only. + +## Alignment Note + +This audit documents the current client-side footprint, not the final target-state ownership boundary. + +To fully match `prds/server-v2-plan/ideal-flow.md`, cloud settings, workspace/server relationship state, config mutation, and session/runtime behavior should become server-owned, while app-local storage shrinks to transient UI state and minimal reconnect data. + +The goal is to document: + +- every meaningful feature that does not explicitly contact the OpenWork server +- every feature that does substantial local/client-side work before it eventually sends data to the server + +This document now assumes the target architecture is a single main server API surface, not a permanent split between app, orchestrator control plane, and server control plane. + +The focus is on the client-owned lifecycle: local state, browser APIs, local persistence, parsing, transformations, clipboard, dialogs, routing, rendering, Tauri-bridged local actions, and mixed local-then-server flows. + +## Disposition Labels + +- `Stay`: should remain in the app because it is transient UI state, presentation logic, or other true client behavior. +- `Move`: should move behind the server because it is real workspace behavior. +- `Split`: some UI orchestration or local preprocessing should stay, but the underlying capability should move behind the server. + +## High-Level Lifecycle + +1. The frontend boots and restores local shell state. +2. Theme, zoom, window preferences, and workspace/session preferences are restored locally. +3. Deep links and startup state are parsed locally before deciding whether to connect anywhere. +4. 
Workspace creation, connection, sharing, session composition, and settings flows do a lot of local shaping before contacting server surfaces. +5. Large parts of the UI remain purely local: layout, drafts, rendering, search, diagnostics, and clipboard/open-file helpers. + +## Shell And Persistent UI State + +Disposition guidance: + +- all items in this section -> `Stay` + +Reasoning: theme, zoom, layout, local preferences, and shell restoration are legitimate client-owned concerns. + +### `theme` + +- What it does: manages light/dark/system theme and applies it to the document. +- Called from and when: initialized during app boot and updated when the user changes theme settings. +- Ends up calling: `localStorage`, `matchMedia`, `document.documentElement.dataset.theme`, and CSS `color-scheme`; no server contact. + +### `LocalProvider` and `persisted` + +- What they do: persist app-level UI preferences and shell state such as tabs, thinking visibility, model defaults, and other local settings. +- Called from and when: mounted at app startup and used throughout the app lifecycle. +- Ends up calling: browser storage or platform storage abstractions; no server contact by themselves. + +### `useSessionDisplayPreferences` + +- What it does: stores per-user display preferences such as whether “thinking” is shown. +- Called from and when: used while rendering session pages and on settings resets. +- Ends up calling: local preference persistence and render state updates; no server contact. + +### app startup/session preference restoration in `app.tsx` + +- What it does: restores startup state such as last-selected session, selected base URL, engine source/runtime preferences, and update-related UI settings. +- Called from and when: runs during app boot. +- Ends up calling: `localStorage`, navigation setup, startup session restoration, and connection preference state; no direct server contact, though restored values may later influence server calls. 
+ +### font zoom and window chrome helpers + +- What they do: handle zoom shortcuts, persist zoom state, apply CSS fallback zoom, and toggle Tauri window decorations. +- Called from and when: initialized during app boot and triggered on keyboard shortcuts or preference changes. +- Ends up calling: `localStorage`, CSS updates, Tauri webview zoom APIs, and Tauri window APIs; no server contact. + +### workspace shell layout persistence + +- What it does: stores sidebar widths, expansion state, and other shell layout preferences. +- Called from and when: used while the session shell is open and while the user resizes or toggles layout areas. +- Ends up calling: local storage and render/layout updates only; no server contact. + +## Deep Links And Cloud Session State + +Disposition guidance: + +- deep-link bridge -> `Stay` +- deep-link parsing and controller logic -> `Split` +- OpenWork Cloud settings persistence -> `Split` +- manual cloud sign-in flow -> `Split` +- OpenWork Cloud template cache -> `Stay` + +Reasoning: parsing, routing, and lightweight cached cloud session state stay in the UI, but durable cloud settings and auth/session state should move behind the server. + +### deep-link bridge + +- What it does: queues native/browser deep links before the app is fully ready and replays them into the running UI. +- Called from and when: used at app boot and when desktop/native deep-link events arrive. +- Ends up calling: `window.__OPENWORK__` state and custom browser events; no server contact by itself. + +### deep-link parsing and controller logic + +- What it does: parses OpenWork remote-connect links, Den auth links, debug links, and cleans URL state after consuming them. +- Called from and when: runs on app boot and when deep-link events arrive. +- Ends up calling: local routing, modal state, query-param cleanup, and cloud/session settings updates; some branches eventually contact OpenWork or Den after local parsing is complete. 
+ +### OpenWork Cloud settings persistence + +- What it does: stores cloud base URL, auth token, and active org for Den/OpenWork cloud features. +- Called from and when: used by cloud settings, workspace creation, and sharing flows. +- Ends up calling: `localStorage` and local cloud-session state; in the ideal model durable cloud auth/settings move to the server DB and this becomes transient reconnect/UI state. + +### manual cloud sign-in flow + +- What it does: accepts a pasted deep link or handoff code, parses it locally, validates it, and exchanges it for a cloud token. +- Called from and when: called from the Cloud settings panel when the user signs in manually. +- Ends up calling: local parsing and status state first, then cloud auth endpoints. + +### OpenWork Cloud template cache + +- What it does: memoizes cloud template lists by cloud identity and org. +- Called from and when: used when template-driven workspace creation or cloud settings panels open. +- Ends up calling: in-memory cache and signals first; initial loads eventually fetch from cloud/server surfaces. + +## Workspace Creation And Connection + +Disposition guidance: + +- `CreateWorkspaceModal` -> `Stay` +- `createWorkspaceFlow` -> `Split` +- sandbox creation flow -> `Split` +- `createRemoteWorkspaceFlow` -> `Split` +- onboarding/bootstrap branching -> `Stay` + +Reasoning: modal state and startup branching stay in the UI, but actual workspace creation/connection/runtime behavior should move behind the server. + +### `CreateWorkspaceModal` + +- What it does: owns the local UI state for local workspace creation, remote worker connection, cloud template browsing, worker filtering, and folder selection. +- Called from and when: opened from onboarding and create/connect workspace flows. +- Ends up calling: local modal state, folder pickers, cloud template cache, browser/Tauri link opening, and then optionally remote or cloud connection flows. 
+ +### `createWorkspaceFlow` + +- What it does: orchestrates local workspace creation, derives default names, queues a starter session, updates selection state, and routes into first-session setup. +- Called from and when: called from onboarding, the create-workspace modal, and template/bundle import flows. +- Ends up calling: local busy state, selected workspace state, navigation, Tauri local workspace creation, and starter session setup; some branches eventually use local server surfaces. + +### sandbox creation flow + +- What it does: manages local progress state, Docker preflight state, debug logs, and Tauri event subscriptions for sandbox startup. +- Called from and when: called from sandbox/new worker creation UI. +- Ends up calling: local progress UI, Tauri event listeners, and debug state first, then remote/server registration once the sandbox is ready. + +### `createRemoteWorkspaceFlow` + +- What it does: normalizes the remote host URL/token, resolves remote workspace identity, updates local server settings, and persists a remote workspace record. +- Called from and when: called from deep links, onboarding connect flows, worker open actions, and remote workspace modals. +- Ends up calling: local validation, local settings persistence, routing, selected-workspace state, and then remote server requests; in the ideal model the durable remote workspace record belongs in the local server DB, not the app. + +### onboarding/bootstrap branching + +- What it does: decides whether startup should create a welcome workspace, reconnect a local runtime, reconnect a remote worker, or stay on welcome/onboarding UI. +- Called from and when: runs during app startup. +- Ends up calling: local startup-phase state, navigation, and workspace-selection logic first; some branches eventually connect to server/runtime surfaces. 
+ +## Bundle Import, Share, And Publish + +Disposition guidance: + +- bundle URL parsing and fetch fallback -> `Stay` +- bundle schema parsing -> `Stay` +- bundle workflow store -> `Split` +- workspace share/export state -> `Split` +- bundle publishing helpers -> `Split` + +Reasoning: parsing and UI state stay client-side, but import/export/share of real workspace capabilities should ultimately be server-owned. + +### bundle URL parsing and fetch fallback + +- What it does: parses bundle deep links, cleans bundle-specific query params, rewrites bundle URLs, and chooses fetch strategies. +- Called from and when: runs on app boot, bundle deep-link open, and debug-open flows. +- Ends up calling: local URL cleanup and fetch strategy selection first, then bundle fetches. + +### bundle schema parsing + +- What it does: validates imported bundle shape and normalizes names, presets, and files into app-friendly structures. +- Called from and when: used whenever a bundle is opened, previewed, or imported. +- Ends up calling: local parsing/validation only; no server contact by itself. + +### bundle workflow store + +- What it does: owns modal state and the import decision tree, including trust warnings, worker-target resolution, and import routing. +- Called from and when: entered from bundle deep links, team templates, and debug-open flows. +- Ends up calling: local modal state, navigation, worker selection, and import state first, then workspace import or worker-creation flows. + +### workspace share/export state + +- What it does: derives shareable metadata for local and remote workspaces, resolves missing workspace IDs, and tracks share modal state. +- Called from and when: used by the session share flow. +- Ends up calling: local modal state, clipboard, browser/Tauri opener, and share metadata derivation first; publish/export actions later contact OpenWork or Den. 
+ +### bundle publishing helpers + +- What they do: shape bundle payloads locally and reconcile org identity before publish. +- Called from and when: called from sharing and skills-publish flows. +- Ends up calling: local payload construction first, then OpenWork/Den publish APIs. + +## Session Composer And Session View + +Disposition guidance: + +- session draft persistence -> `Stay` +- attachment preprocessing in the composer -> `Stay` +- prompt and command file-part shaping -> `Split` +- optimistic session creation and navigation -> `Split` +- undo, redo, and compact helpers -> `Split` +- local message search and command palette -> `Stay` +- message-windowing and render throttling -> `Stay` +- local file and folder affordances -> `Stay` + +Reasoning: drafts, rendering, local search, and client-side attachment prep stay in the UI; real session operations and workspace-aware file semantics should move behind the server. + +### session draft persistence + +- What it does: stores and restores per-workspace/per-session draft text and mode. +- Called from and when: active while the user edits prompts and switches sessions. +- Ends up calling: `localStorage`, custom flush events, and local composer state only; no server contact until send. + +### attachment preprocessing in the composer + +- What it does: filters incoming files, compresses images, estimates payload size, creates preview URLs, and handles drag/paste attachment intake. +- Called from and when: called while the user edits a prompt or drops/pastes attachments. +- Ends up calling: `FileReader`, `OffscreenCanvas` or canvas APIs, object URLs, clipboard/paste handling, and local warning state; attachments may later be sent to the server when the user submits. + +### prompt and command file-part shaping in the session actions store + +- What it does: resolves file mentions, converts attachments into data URLs, builds prompt or command payload parts, and clears drafts at the right time. 
+- Called from and when: called when the user sends a prompt, retries, or creates a new session from local input. +- Ends up calling: local prompt/draft state, file/data transformation, and then final prompt/session server calls. + +### optimistic session creation and navigation + +- What it does: creates a new session flow in the UI, preserves the initial prompt locally, selects the session, refreshes sidebar state, and navigates to it. +- Called from and when: called when the user sends without an active session or explicitly creates one. +- Ends up calling: local navigation and session-list state first, then session creation on the server. + +### undo, redo, and compact helpers + +- What they do: perform local prompt/session-state coordination around history operations. +- Called from and when: called from session history controls. +- Ends up calling: local prompt/session state first, then server-backed revert or compact operations. + +### local message search and session command palette + +- What it does: builds client-side searchable message text, debounces queries, tracks hits, and scrolls to matches; also manages local session command-palette behavior. +- Called from and when: active while a session is open and the user searches or switches via palette. +- Ends up calling: local render/search state, scroll positioning, and navigation only; no server contact. + +### message-windowing and render throttling + +- What it does: windows long histories, batches streaming render commits, and tracks render/perf details locally. +- Called from and when: active during session rendering and message streaming. +- Ends up calling: render state and performance bookkeeping only; no server contact. + +### local file and folder affordances in the session UI + +- What they do: reveal workspace directories, open local files, and reveal artifact paths. +- Called from and when: called from message parts and session sidebars. 
+- Ends up calling: Tauri opener APIs and local toast state; no server contact. + +## Skills, Plugins, MCP, And Local Config Editing + +Disposition guidance: + +- cloud import metadata normalization -> `Split` +- skills and cloud-sync local prep -> `Split` +- plugin config editing -> `Move` +- MCP connection and config flow -> `Split` +- cloud provider list memoization -> `Stay` + +Reasoning: UI-owned shaping and memoization stay local, but config mutation, skills/plugins/MCP capability changes, and other workspace mutations should move behind the server. + +### cloud import metadata normalization + +- What it does: parses and rewrites `cloudImports` metadata in workspace config. +- Called from and when: used when syncing skills, providers, and hubs with cloud-backed metadata. +- Ends up calling: local config-shaping logic first; final writes should route through the server, with any Tauri-backed config path treated as temporary fallback-only debt. + +### skills and cloud-sync local prep + +- What it does: slugifies names, extracts markdown bodies, builds frontmatter, tracks imported cloud-skill maps, and stores hub preferences locally. +- Called from and when: used in skills import/edit/remove flows. +- Ends up calling: local markdown/config shaping, local state, `localStorage`, and in some cases direct local skill-file mutation; those direct local mutation paths are temporary fallback-only debt, and successful flows should ultimately go through the server or cloud APIs. + +### plugin config editing + +- What it does: parses and rewrites plugin arrays inside `opencode.json` using local JSONC edits. +- Called from and when: called from plugin/settings UI. +- Ends up calling: local config parsing and file-update shaping first; writes should move behind server-backed config APIs, and local writes should be treated as temporary fallback-only debt. 
+ +### MCP connection and config flow + +- What it does: builds MCP config objects locally, infers names, injects special Chrome DevTools env values, and edits/removes config. +- Called from and when: called from MCP connect, remove, and auth/logout UI. +- Ends up calling: local config editing, local MCP modal state, and then server-backed MCP writes/logout or external auth flows; direct local config edits here should be treated as temporary fallback-only debt. + +### cloud provider list memoization + +- What it does: caches and merges provider lists using local cloud session/org state. +- Called from and when: used while provider settings or provider-auth UI is open. +- Ends up calling: in-memory cache and local state first; refreshes eventually contact Den or the main server, and any direct OpenCode access should be treated as migration debt. + +## Diagnostics, Reset, And Desktop Utilities + +Disposition guidance: + +- OpenWork server settings persistence -> `Split` +- reset and reload state management -> `Stay` +- settings diagnostics and export helpers -> `Split` +- incidental clipboard/open-link helpers -> `Stay` + +Reasoning: client preferences, reload state, clipboard, and pure diagnostics remain UI concerns, while durable connection/auth/product state should move behind the server. + +### OpenWork server settings persistence + +- What it does: normalizes and persists host URL, token, remote-access preference, and derived base URL/client settings. +- Called from and when: used on app boot and when connection settings change. +- Ends up calling: `localStorage` and local connection state; in the ideal model this shrinks to minimal reconnect/bootstrap hints while durable connection registry and cloud auth/session metadata move behind the server. + +### reset and reload state management + +- What it does: tracks reload-required reasons, supports local resets, and relaunch/reload behavior. +- Called from and when: used by settings and reload-warning UI. 
+- Ends up calling: local storage cleanup, local state resets, and app relaunch/reload APIs; no direct server contact. + +### settings diagnostics and export helpers + +- What they do: build local debug/export payloads, copy them to clipboard, download them as files, or reveal paths in Finder. +- Called from and when: called from settings diagnostics UI. +- Ends up calling: clipboard APIs, Blob download APIs, Tauri opener APIs, and local reset helpers; most paths do not contact the server. + +### incidental clipboard/open-link helpers + +- What they do: copy links, share codes, messages, and open auth/share URLs. +- Called from and when: called from share, auth, and message actions across the UI. +- Ends up calling: clipboard and opener/browser APIs; they often follow a server action, but the helper itself is local. + +## Coverage Limits + +- This audit stays focused on `apps/app`. +- It intentionally excludes simple server fetch wrappers unless the client does meaningful local work first. +- It describes mixed flows where local parsing/state/setup happens before a server request, because those are important ownership boundaries. diff --git a/prds/server-v2-plan/architecture.md b/prds/server-v2-plan/architecture.md new file mode 100644 index 00000000..69d64dd7 --- /dev/null +++ b/prds/server-v2-plan/architecture.md @@ -0,0 +1,593 @@ +# Server V2 Architecture + +## Status: Draft +## Date: 2026-04-09 + +## Purpose + +This document expands `prds/server-v2-plan/plan.md` with a more concrete technical design for Server V2. + +The goal is to define a whole new Hono-based server package, expose a typed contract, and support incremental client migration onto that server. + +## Core Model + +Server V2 starts as a separate new server package and process. 
+ +```text +apps/server-v2/ +├── server process +├── OpenAPI contract +└── server-owned runtime/workspace behavior +``` + +This means: + +- a clean replacement server boundary +- a separate deployable/process during the transition +- new logic isolated in new files +- no need to preserve legacy server structure while designing the new architecture + +## Target End State + +The long-term target is a single main server API surface. + +Desired shape: + +```text +desktop app or CLI +-> starts or connects to one OpenWork server process +-> OpenWork server owns workspace/runtime/product behavior +-> OpenWork server supervises the local runtime pieces it needs +``` + +This means: + +- the orchestrator should stop being a separate product control plane +- orchestrator runtime/workspace APIs should be folded into the main server +- bootstrap and supervision behavior should move into the main server wherever practical + +## Design Principles + +- Server V2 code lives in new files only. +- The new server API contract is explicit and typed. +- Clients depend on generated contracts and a small app-side SDK adapter, not server internals. +- Multi-server routing is explicit at the client boundary. +- The desktop app is a thin interface layer, not a second workspace runtime. +- Workspace behavior belongs to the server, even when the server is hosted locally by the desktop app. +- Migration happens by vertical slice, not by broad framework churn. +- Legacy code should be deleted as soon as each migrated slice is complete. 
+ +## Ownership Boundary + +The architecture should enforce a simple rule: + +- the app presents and collects intent +- the server performs workspace work + +### Desktop app owns + +- local UI state +- navigation and presentation state +- drafts, filters, and transient client-side interaction state +- cached/derived visible server and workspace state returned by the server +- starting or connecting to server processes + +### Server owns + +- workspace reads +- workspace writes +- AI/session/task behavior +- project/runtime inspection +- skill, plugin, MCP, and config mutation +- OpenCode integration and sidecar/runtime coordination +- any other workspace-scoped capability that is more than transient UI state + +This boundary applies even in desktop-hosted mode. Running on the same machine does not make the UI the right owner of workspace behavior. + +The same principle applies to the orchestrator boundary: + +- product/runtime control surfaces should move into the server +- bootstrap and supervision should also move into the server wherever practical +- the desktop shell should ideally launch one server process, not a separate runtime manager + +## Server Layout + +Proposed package layout inside `apps/server-v2`: + +```text +apps/server-v2/ +├── src/ +│ ├── app.ts +│ ├── cli.ts +│ ├── bootstrap/ +│ ├── database/ +│ ├── context/ +│ ├── middleware/ +│ ├── routes/ +│ ├── services/ +│ ├── schemas/ +│ └── adapters/ +├── openapi/ +└── scripts/ +``` + +### Ownership + +- `app.ts` builds the Hono app and mounts route groups. +- `bootstrap/` owns server startup plus any runtime supervision that gets folded into the server. +- `database/` owns sqlite state, migrations, and persistence boundaries. +- `routes/` owns HTTP concerns: method, path, validation, response shape. +- `services/` owns domain workflows. +- `schemas/` owns request/response definitions. +- `adapters/` owns integration with OpenCode, storage, and runtime pieces. 
+- `middleware/` owns cross-cutting HTTP concerns. +- `context/` owns per-request wiring and shared typed context. + +## Runtime Supervision Inside The New Server + +The new server should not just proxy product logic. It should also supervise the local runtime pieces it depends on. + +That includes: + +- OpenCode +- `opencode-router` +- any other local child runtime needed for the product surface + +### Router supervision model + +Current baseline being replaced: + +- orchestrator decides whether router is enabled +- orchestrator resolves the router binary +- orchestrator spawns and supervises the router + +Target model: + +- server bootstrap decides whether router is enabled +- server bootstrap resolves the router binary +- server bootstrap spawns and supervises the router +- server API exposes router status/control behavior to the UI + +Recommended shape: + +- one `opencode-router` child per local OpenWork server +- server-owned router config materialization from sqlite or server-managed config state +- server-owned health checks, restart behavior, and status reporting + +This keeps router lifecycle under the same ownership boundary as the rest of the runtime. + +### Why one router per server + +- identities and bindings are naturally server-level +- supervision is simpler +- workspace scoping can still be enforced by server logic +- the UI does not need to understand a second runtime graph + +## Startup Strategy + +The desktop app should eventually launch the new server directly. 
+ +Target shape: + +```text +desktop app +-> launches apps/server-v2 +-> talks only to the new server process +``` + +Rules: + +- the new server should not be designed as a mounted sub-application of the old server +- startup/bootstrap should move into the new server package over time +- orchestrator control-plane routes should be replaced by main-server routes rather than preserved as a second API model + +## Typed Contract Flow + +The new server is the source of truth for its contract. + +```text +Hono route + schema definitions +-> generated OpenAPI spec +-> generated TypeScript SDK +-> app-side createSdk(serverId) adapter +-> app features +``` + +### Why this flow + +- The server owns the contract. +- The SDK stays in sync through generation. +- App code gets strong typing without importing server implementation. +- A tiny app-side adapter remains free to handle runtime-specific decisions without replacing the generated SDK. +- The app can stay thin because the contract surface represents real workspace capabilities, not just transport helpers. + +## OpenAPI and SDK Generation + +Detailed generator and script choices live in `prds/server-v2-plan/sdk-generation.md`. + +Proposed structure: + +```text +apps/server-v2/openapi/openapi.json +packages/openwork-server-sdk/generated/** +packages/openwork-server-sdk/src/index.ts +apps/app/.../createSdk({ serverId }) adapter +``` + +### Contract rules + +- The OpenAPI spec is generated, not handwritten. +- `hono-openapi` is the leading candidate for generating the new server OpenAPI spec because it is Hono-native and fits the route-first model we want. +- The generated SDK is TypeScript-first. +- The SDK should expose stable exports from `src/index.ts`. +- The app should avoid importing raw generated files directly. +- The generated SDK package should stay server-agnostic and reusable. +- The app-facing entrypoint should look like `createSdk({ serverId })`. 
+- `createSdk({ serverId })` should live in app code, resolve `serverId` into base URL, token, and capabilities locally, then prepare the generated client. +- `createSdk({ serverId })` should stay lightweight enough that it can be called per use without meaningful overhead. +- The SDK surface should grow until app-owned workspace behavior shrinks to near zero. + +`hono-openapi` should be treated as the spec-generation layer only: + +- it generates the OpenAPI contract from Hono routes and schemas +- a separate SDK generator still produces the TypeScript client package +- SSE ergonomics still likely require small handwritten helpers + +### App-facing SDK shape + +Preferred usage for standard endpoints: + +```ts +await createSdk({ serverId }).sessions.listMessages({ workspaceId, sessionId }) +``` + +This keeps: + +- server selection explicit through `serverId` +- resource hierarchy explicit through params like `workspaceId` and `sessionId` +- the client surface mostly generated rather than manually re-modeled + +### SSE contract note + +OpenAPI can document SSE endpoints, but most generated SDKs do not produce an ergonomic typed streaming API automatically. + +Because of that: + +- normal JSON endpoints should come directly from the generated SDK +- the likely one or two SSE endpoints may need small handwritten stream helpers +- those helpers should still be exported from the same SDK package +- event payload types should come from generated or shared contract output, not from server source files + +### CI rules + +CI should regenerate both the OpenAPI spec and the SDK and fail if a diff appears. + +That gives us: + +- no silent contract drift +- reproducible SDK output +- reliable local and CI behavior + +## Local Development Loop + +The local developer experience should make contract changes visible immediately. + +Detailed local watch and rebuild behavior lives in `prds/server-v2-plan/local-dev.md`. 
+ +Desired loop: + +```text +edit new-server route or schema +-> regenerate openapi/openapi.json +-> regenerate TypeScript SDK +-> app sees updated types and methods +-> continue coding without manual sync work +``` + +Recommended watch pipeline: + +- `apps/server-v2`: watch `src/**`, regenerate `openapi/openapi.json` through `hono-openapi` +- `packages/openwork-server-sdk`: watch `openapi/openapi.json`, regenerate the reusable generated client package +- `apps/app`: watch the app-side `createSdk({ serverId })` adapter alongside normal app code +- `packages/openwork-server-sdk`: optional watch build if the package publishes built output +- `apps/app`: consumes the workspace package directly + +This should keep endpoint changes and client types effectively live in monorepo development. + +The server runtime watcher should ignore generated OpenAPI and SDK files so contract regeneration does not cause unnecessary backend restart loops. + +## Client Architecture + +The client side should use a thin adapter over the generated SDK rather than a large custom wrapper hierarchy. + +```text +generated SDK +-> createSdk({ serverId }) adapter +-> app features +``` + +### Generated SDK responsibilities + +- typed request and response shapes +- typed route methods +- low-level transport helpers +- representing server-owned workspace capabilities in a reusable client surface + +### Thin adapter responsibilities + +- resolve `serverId` into current server config +- inject auth/token headers +- during migration, route features to the current or new server when needed +- prepare a lightweight client instance +- add capability checks when needed + +The adapter should not rebuild a second large API model on top of the generated SDK unless there is a strong reason. + +It also should not become a place where workspace behavior is reimplemented in the app. 
+ +## Multi-Server Target Model + +The system may know about different server destinations at the same time, so target selection must be explicit. + +The important distinction is: + +- a server target identifies which server to talk to +- a workspace ID identifies which workspace on that server to operate on + +Those are related, but they are not the same thing. + +The local OpenWork server should maintain the durable registry of servers and workspaces. The app should render or cache what the server returns. + +That model is intentionally minimal. The app only needs enough local state to know: + +- which servers exist +- which workspaces belong to which server +- which workspace is selected in the UI + +It should not need to locally own the underlying workspace behavior itself. + +That allows: + +- multiple workspaces on one server +- multiple configured servers in one app session +- one SDK creation point per server target, with workspace IDs passed into individual operations when direct server targeting is needed + +Examples: + +- local desktop-hosted OpenWork server +- remote worker-backed OpenWork server +- hosted OpenWork Cloud server + +Proposed shared shape: + +```ts +export type ServerTargetKind = "local" | "remote" + +export type ServerHostingKind = "desktop" | "self_hosted" | "cloud" + +export type ServerTarget = { + kind: ServerTargetKind + hostingKind: ServerHostingKind + baseUrl: string + token?: string + capabilities?: { + v2?: boolean + } +} +``` + +Preferred app-facing creation during migration or server-management flows: + +```ts +const sdk = createSdk({ serverId }) +``` + +Then operations should take the workspace ID explicitly: + +```ts +await sdk.sessions.list({ workspaceId }) +await sdk.sessions.get({ workspaceId, sessionId }) +await sdk.sessions.listMessages({ workspaceId, sessionId }) +``` + +Illustrative app-side model: + +```ts +type WorkspaceRecord = { + id: string + serverTargetId: string +} +``` + +In that model: + +- `serverTargetId` 
tells the app which server configuration to use +- `id` is the stable OpenWork workspace identifier the UI uses + +This avoids hidden globals and makes mixed-target flows possible while keeping server selection separate from workspace identity. + +In the ideal steady state, normal app traffic should still flow through the local OpenWork server using stable OpenWork workspace IDs, with remote OpenWork workspace IDs and OpenCode project IDs remaining server-owned mappings. + +## Migration Routing Model + +During migration, the adapter may choose between the current and new server per operation. + +Example decision inputs: + +- does the target advertise new-server capability? +- is the feature enabled for the new server? +- has this specific endpoint been ported? +- do we need a temporary fallback? + +Illustrative flow: + +```text +feature resolves workspace -> server target +-> feature calls createSdk({ serverId }).sessions.list({ workspaceId }) +-> adapter inspects target + capability + rollout settings +-> adapter calls the current or new server implementation +-> feature receives typed result +``` + +This keeps migration logic out of the UI. + +The more of the product surface we move behind the server, the less special-case behavior the app needs to keep locally. + +## Streaming Strategy + +The app should consume OpenCode-related streaming only through the OpenWork server. + +That means: + +- the desktop app never connects directly to underlying OpenCode SSE endpoints +- the new server exposes its own SSE endpoints where needed +- the new server can proxy, translate, or normalize underlying OpenCode stream events + +Because there will likely be only one or two SSE endpoints, we do not need a large custom streaming framework. 
+ +Recommended shape: + +- document the SSE routes in the new server contract +- keep event payloads typed from generated or shared contract types +- expose small handwritten streaming helpers from `packages/openwork-server-sdk` +- keep those helpers under the same `createSdk({ serverId })` entrypoint + +Illustrative usage: + +```ts +const stream = await createSdk({ serverId }).sessions.streamMessages({ + workspaceId, + sessionId, +}) + +for await (const event of stream) { + // typed SSE event +} +``` + +This gives us one unified client surface while accepting that OpenAPI generation alone is usually not enough for ergonomic typed SSE consumption. + +## Domain Slice Migration + +The preferred migration unit is a vertical slice. + +Example order: + +1. health and diagnostics +2. low-risk read endpoints +3. session reads +4. workspace reads +5. mutations +6. higher-risk workflow endpoints + +Rules: + +- migrate one slice fully enough to validate the pattern +- switch that slice's adapter routing to the new server +- remove app-owned workspace logic for that slice when the new server version is ready +- remove old-server code when the slice no longer needs it + +Example categories to move behind the server over time: + +1. workspace file reads and writes +2. workspace config mutation +3. skill/plugin/MCP mutation +4. project/runtime inspection +5. session/task execution behavior +6. orchestrator workspace/runtime control APIs +7. orchestrator-managed tool/config mutation behavior + +## Orchestrator Integration Path + +The recommended path is to collapse orchestrator responsibilities inward rather than preserve a separate orchestrator control plane forever. 
+ +### What should move into the server + +- workspace activation and disposal semantics +- runtime control/status/upgrade product APIs +- daemon-style workspace/runtime control surfaces +- config/skill/plugin/MCP mutation product capabilities +- managed OpenCode integration behavior that clients should consume through one API +- child process launch and supervision where practical +- sidecar and binary resolution where practical +- local env/port/bootstrap setup where practical +- sandbox/container startup orchestration where practical + +### Recommended migration shape + +```text +today: +desktop -> orchestrator API -> server API + +target: +desktop -> server API +desktop -> launches one server process +server -> starts and supervises local children when needed +``` + +This removes the separate orchestrator boundary rather than preserving it as a second permanent host layer. + +## Error and Compatibility Model + +The new server should improve consistency instead of repeating legacy inconsistencies. + +Targets: + +- consistent error envelopes +- predictable auth failures +- stable response schemas +- request IDs for tracing +- typed success and error bodies where practical + +During migration, the adapter may need to normalize old-server and new-server responses into one app-facing shape. + +## Testing Strategy + +We need confidence at three levels. + +### 1. Contract tests + +- route validation works +- response schemas match expectations +- generated SDK matches current spec + +### 2. Server integration tests + +- new-server routes hit real service/adapters +- auth and runtime context behave correctly +- the new server works correctly as its own process and API surface + +### 3. 
App integration tests + +- the SDK adapter calls the correct target +- adapter-based old-server/new-server switching works during migration +- desktop flows continue to work while slices are migrated + +## Exit Criteria for the Old Server + +We can remove the old server when: + +- all app consumers use new-server-backed SDK calls +- no routes still require the old server +- compatibility shims are no longer needed +- desktop startup launches only the new server + +At that point, Server V2 stops being a migration concept and becomes the server. + +The same spirit applies to the client boundary: + +- the app still owns local UI state +- but workspace capabilities should no longer be split between app and server +- the server should be the clear owner of workspace behavior + +The same spirit also applies to the orchestrator boundary: + +- runtime/workspace product capability should no longer be split between orchestrator and server +- bootstrap and supervision should also collapse into the server wherever possible +- the main server should be the canonical and primary runtime control surface + +## Open Decisions + +- whether capability detection is static, dynamic, or both +- which endpoint group becomes the first proof-of-path migration +- whether the working name `openwork-server-v2` survives to ship time or is renamed before release diff --git a/prds/server-v2-plan/current-server-audit.md b/prds/server-v2-plan/current-server-audit.md new file mode 100644 index 00000000..34673bd2 --- /dev/null +++ b/prds/server-v2-plan/current-server-audit.md @@ -0,0 +1,474 @@ +# Current Server Audit + +## Scope + +This audit covers the current server under `apps/server/**`. 
+ +The goal is to document the current server in the same framework as the other audits: + +- what the major function/module is +- what it does in human-readable language +- where it is called from and when +- what it ultimately calls or affects + +This is meant to help break down the current server into clear migration targets for the new server. + +## Overall Shape + +- The current server is still a Bun-first, custom-router server centered in `apps/server/src/server.ts`. +- Most meaningful behavior is implemented through one large route-registration function, `createRoutes`, plus focused modules for config mutation, OpenCode integration, auth/tokens, reload/watch behavior, portable export/import, and OpenCode Router bridging. +- The earlier in-place `/v2` scaffold under `apps/server/src/v2` has been removed. The real replacement server now lives separately under `apps/server-v2/**`. + +## 1. Startup, CLI, Config, And Process Boot + +### `src/cli.ts` main entrypoint + +- What it is: the packaged/server CLI entrypoint. +- What it does: parses startup args, resolves runtime config, starts the server, and prints startup information. +- Called from and when: called when the `openwork-server` binary or `bun src/cli.ts` is launched. +- What it calls: `parseCliArgs`, `resolveServerConfig`, `createServerLogger`, and `startServer`. + +### `parseCliArgs` + +- What it is: CLI argument parser. +- What it does: turns command-line flags into normalized runtime options. +- Called from and when: called immediately at process startup. +- What it calls: feeds `resolveServerConfig` with host/port/token/workspace/OpenCode/logging overrides. + +### `resolveServerConfig` + +- What it is: config resolution pipeline. +- What it does: merges CLI args, env vars, and config file state into the final runtime config. +- Called from and when: called once during boot before the server starts. 
+- What it calls: `buildWorkspaceInfos`, token defaults, approval/cors/logging/read-only/authorized-roots setup. + +### `buildWorkspaceInfos` + +- What it is: workspace config normalizer. +- What it does: turns configured workspace records into normalized `WorkspaceInfo` objects with stable IDs. +- Called from and when: called while building the final server config. +- What it calls: produces the workspace metadata used by routing, auth, proxying, export/import, and runtime flows. + +### `createServerLogger` + +- What it is: server logging factory. +- What it does: creates either plain text or OTEL-style JSON logging with a run ID. +- Called from and when: called during startup and reused for request logging. +- What it calls: all startup, request, and reload-watcher logs. + +### `startServer` + +- What it is: main server boot function. +- What it does: initializes approvals, reload events, tokens, watchers, route registration, and starts Bun HTTP serving. +- Called from and when: called once after config resolution. +- What it calls: Bun `serve`, `ApprovalService`, `TokenService`, `ReloadEventStore`, `startReloadWatchers`, proxy behavior, and all legacy routes. + +## 2. HTTP Routing And Request Dispatch + +### `startServer(...).fetch` + +- What it is: the top-level Bun request handler. +- What it does: handles every incoming request, applies CORS and request logging, routes mounted workspace paths, proxies OpenCode/OpenCode Router requests, and finally dispatches to legacy routes. +- Called from and when: called by Bun for every HTTP request. +- What it calls: `parseWorkspaceMount`, OpenCode proxy helpers, OpenCode Router proxy helpers, and `createRoutes` matches. + +### `parseWorkspaceMount` + +- What it is: mounted-workspace path parser. +- What it does: detects workspace-mounted URLs like `/w/:id/...`. +- Called from and when: called early in request dispatch. +- What it calls: enables single-workspace mounted base URL behavior. 
+ +### `createRoutes` + +- What it is: the current legacy route registration map. +- What it does: defines the bulk of the server API surface: status, tokens, workspaces, config, sessions, router, files, skills, plugins, MCP, export/import, approvals, and more. +- Called from and when: called once at startup. +- What it calls: nearly every major subsystem in the current server. + +### `withCors` + +- What it is: response header helper. +- What it does: adds CORS headers based on configured allowlist. +- Called from and when: applied to every response in the dispatcher finalization path. +- What it calls: browser access policy for the server surface. + +### `logRequest` + +- What it is: per-request log helper. +- What it does: emits structured request logs with auth/proxy metadata. +- Called from and when: called after each request resolves or fails. +- What it calls: operational visibility into status/auth/proxy usage. + +## 3. Auth, Tokens, And Approvals + +### `TokenService` + +- What it is: persisted scoped-token manager. +- What it does: manages bearer tokens with scopes like owner, collaborator, and viewer. +- Called from and when: instantiated at startup and used by auth and token-management routes. +- What it calls: reads and writes `tokens.json`, resolves token scope, issues and revokes tokens. + +### `requireClient` + +- What it is: client-auth guard. +- What it does: authenticates normal client bearer tokens. +- Called from and when: called by client-protected routes and proxy paths. +- What it calls: token resolution and `Actor` creation. + +### `requireHost` + +- What it is: host/admin auth guard. +- What it does: authenticates host token or owner bearer token. +- Called from and when: called by host-only routes like token management and approvals. +- What it calls: elevated owner-level auth flows. + +### `requireClientScope` + +- What it is: scope enforcement helper. +- What it does: enforces minimum client token scope for mutations. 
+- Called from and when: called inside many write routes. +- What it calls: permission failures for viewers or lower-scope actors. + +### `ApprovalService` + +- What it is: in-memory approval queue and responder. +- What it does: stores pending approvals and resolves allow/deny/timeout outcomes. +- Called from and when: instantiated at startup and used by approval-gated routes. +- What it calls: mutation blocking until host/admin response. + +### `requireApproval` + +- What it is: approval wrapper. +- What it does: enforces approval on sensitive writes. +- Called from and when: called by config, file, plugin, skill, MCP, command, scheduler, and router identity writes. +- What it calls: `ApprovalService`; throws `write_denied` on deny/timeout. + +### `/tokens` and `/approvals` routes + +- What they are: auth/approval control endpoints. +- What they do: expose token inventory/issuance/revocation and pending approval inventory/response actions. +- Called from and when: called by host/admin control UI or operator flows. +- What they call: `TokenService` and `ApprovalService`. + +## 4. Workspace Lifecycle, Status, And Capabilities + +### `resolveWorkspace` + +- What it is: workspace lookup and validation helper. +- What it does: resolves a workspace by ID, validates authorized-root membership, and repairs legacy commands if writable. +- Called from and when: called by almost every workspace-scoped route. +- What it calls: normalized `WorkspaceInfo` for all downstream file/config/OpenCode actions. + +### `/status` and `/workspaces` + +- What they are: core discovery/status routes. +- What they do: expose server health, config summary, capabilities, and workspace inventory. +- Called from and when: called by clients during connect, status refresh, and initial UI load. +- What they call: workspace serialization, `buildCapabilities`, bind/auth/read-only summary state. + +### `buildCapabilities` + +- What it is: capability summarizer. 
+- What it does: advertises what this server instance can do. +- Called from and when: called by `/capabilities` routes. +- What it calls: read-only mode, approvals, sandbox, browser provider, OpenCode/OpenCode Router availability. + +### `/workspaces/local` + +- What it is: local workspace creation route. +- What it does: creates a new local workspace folder and seeds starter files. +- Called from and when: called by host/admin workspace creation flows. +- What it calls: `ensureWorkspaceFiles`, workspace config persistence, audit logging. + +### workspace rename / activate / delete routes + +- What they are: workspace management endpoints. +- What they do: rename, activate, or remove a workspace from the server. +- Called from and when: called by host/admin workspace-management UI. +- What they call: in-memory config mutation, `server.json` persistence, reload watcher restart, audit logging. + +## 5. Workspace Bootstrapping And Local Config Files + +### `ensureWorkspaceFiles` + +- What it is: workspace seeding helper. +- What it does: creates starter `.opencode` state, commands, skills, agent, `opencode.json`, and `openwork.json`. +- Called from and when: called when creating a local workspace. +- What it calls: OpenWork/OpenCode starter file generation. + +### `ensureOpencodeConfig` + +- What it is: OpenCode config seeder. +- What it does: seeds `opencode.json` defaults, default agent, scheduler plugin, and starter MCP. +- Called from and when: called during `ensureWorkspaceFiles`. +- What it calls: first-run OpenCode behavior for the workspace. + +### `ensureWorkspaceOpenworkConfig` + +- What it is: OpenWork config seeder. +- What it does: seeds `openwork.json` with authorized roots, blueprint sessions, and workspace metadata. +- Called from and when: called during `ensureWorkspaceFiles`. +- What it calls: OpenWork-specific workspace behavior and starter session metadata. + +### workspace config routes + +- What they are: config read/patch/raw text endpoints. 
+- What they do: read or patch workspace `opencode` and `openwork` config, including raw OpenCode config editor flows. +- Called from and when: called by settings/config UI. +- What they call: JSONC mutation helpers, raw config file writes, reload events, and audit entries. + +## 6. OpenCode Integration And Session Read Model + +### `resolveWorkspaceOpencodeConnection` + +- What it is: OpenCode connection resolver. +- What it does: resolves OpenCode base URL and optional Basic auth for a workspace. +- Called from and when: called by OpenCode proxy and reload flows. +- What it calls: upstream OpenCode connection parameters. + +### `proxyOpencodeRequest` + +- What it is: OpenCode reverse proxy. +- What it does: forwards `/opencode` traffic to upstream OpenCode while injecting workspace directory and upstream auth. +- Called from and when: called by the main dispatcher for `/opencode` and mounted equivalents. +- What it calls: upstream OpenCode HTTP endpoints. + +### `reloadOpencodeEngine` + +- What it is: engine reload helper. +- What it does: calls OpenCode `/instance/dispose` to force an engine reload. +- Called from and when: called by `/workspace/:id/engine/reload`. +- What it calls: upstream OpenCode instance reset. + +### session routes and `session-read-model.ts` + +- What they are: session list/detail/messages/snapshot routes plus normalization helpers. +- What they do: fetch session data from OpenCode and validate/normalize the payloads. +- Called from and when: called by session UI/history surfaces. +- What they call: `fetchOpencodeJson` and `buildSessionList`, `buildSession`, `buildSessionMessages`, `buildSessionSnapshot`. + +### `seedOpencodeSessionMessages` + +- What it is: direct OpenCode DB seeding helper. +- What it does: inserts starter messages directly into the OpenCode SQLite DB for blueprint sessions. +- Called from and when: used during starter-session materialization. +- What it calls: direct OpenCode DB mutation. + +## 7. 
Reload/Watch Behavior + +### `startReloadWatchers` + +- What it is: workspace reload-watcher setup. +- What it does: starts per-workspace watchers over root config files and `.opencode` trees. +- Called from and when: called during `startServer`, restarted when workspaces change. +- What it calls: `ReloadEventStore` with debounced reload signals. + +### `ReloadEventStore` + +- What it is: reload event queue. +- What it does: stores debounced workspace-scoped reload events with cursors. +- Called from and when: instantiated at startup, used by watchers and write routes. +- What it calls: `/workspace/:id/events` polling responses. + +### `emitReloadEvent` + +- What it is: manual reload signal helper. +- What it does: records reload signals after server-side mutations. +- Called from and when: called after config/plugin/skill/MCP/command/import writes. +- What it calls: client/runtime synchronization for server-caused file changes. + +### `/workspace/:id/events` and `/engine/reload` + +- What they are: reload polling and explicit engine-reload endpoints. +- What they do: return reload events since a cursor, and explicitly reload the upstream OpenCode engine. +- Called from and when: called by clients that need hot-reload awareness or manual engine reload. +- What they call: `ReloadEventStore` and `reloadOpencodeEngine`. + +## 8. File Access, Inbox/Outbox, And Session-Scoped File Editing + +### `FileSessionStore` + +- What it is: ephemeral file-session manager. +- What it does: tracks scoped file editing sessions and workspace file-event cursors. +- Called from and when: instantiated inside `createRoutes`. +- What it calls: write eligibility, TTL, ownership, and incremental file-event streams. + +### file session routes + +- What they are: scoped file catalog/read/write/ops endpoints. +- What they do: create a file session, return a catalog snapshot, read files, write files with conflict detection, and apply mkdir/delete/rename ops. 
+- Called from and when: called by editors and remote file-management tooling. +- What they call: actual workspace filesystem reads/writes, file event logs, approvals, and audit logging. + +### simple content routes + +- What they are: markdown-oriented read/write routes. +- What they do: provide simpler file content APIs for lighter document flows. +- Called from and when: called by markdown/file editors. +- What they call: actual workspace file reads/writes plus audit and file-event signaling. + +### inbox/outbox routes + +- What they are: file ingest and artifact download endpoints. +- What they do: manage uploadable inbox files and downloadable artifact files under `.opencode/openwork`. +- Called from and when: called by file injection/download flows. +- What they call: workspace file writes, file listings, and binary download responses. + +## 9. Plugins, Skills, MCP, Commands, And Scheduler + +### plugin functions + +- What they are: `listPlugins`, `addPlugin`, `removePlugin`. +- What they do: expose and mutate OpenCode plugin config and plugin directories. +- Called from and when: called by `/workspace/:id/plugins` routes. +- What they call: `opencode.json` mutation, plugin discovery, reload events. + +### skill functions + +- What they are: `listSkills`, `upsertSkill`, `deleteSkill`, plus Skill Hub helpers. +- What they do: discover and manage local/global skills, and install remote GitHub-backed skills. +- Called from and when: called by `/workspace/:id/skills*` routes and workspace bootstrap flows. +- What they call: `.opencode/skills` reads/writes, GitHub fetches, reload events. + +### MCP functions + +- What they are: `listMcp`, `addMcp`, `removeMcp`. +- What they do: manage MCP server config in `opencode.json`. +- Called from and when: called by `/workspace/:id/mcp*` routes. +- What they call: MCP config mutation and tool availability changes. 
+ +### command functions + +- What they are: `listCommands`, `upsertCommand`, `deleteCommand`, `repairCommands`. +- What they do: manage project/global command markdown files and repair legacy frontmatter. +- Called from and when: called by `/workspace/:id/commands*` and implicitly by `resolveWorkspace`. +- What they call: `.opencode/commands` writes, frontmatter repair, reload events. + +### scheduler functions + +- What they are: scheduler job inspection/removal helpers. +- What they do: inspect and delete scheduled jobs backed by launchd/systemd and JSON job files. +- Called from and when: called by `/workspace/:id/scheduler/jobs*` routes. +- What they call: job file deletion and OS scheduler unload/remove behavior. + +## 10. OpenCode Router / Messaging Integration + +### `resolveOpenCodeRouterProxyPolicy` + +- What it is: OpenCode Router auth policy resolver. +- What it does: decides what auth and scope is required for router proxy paths. +- Called from and when: called by the main dispatcher for `/opencode-router` paths. +- What it calls: access control for bindings, identities, health, and other router APIs. + +### `proxyOpenCodeRouterRequest` + +- What it is: OpenCode Router reverse proxy. +- What it does: forwards raw OpenCode Router requests to the local router service. +- Called from and when: called for `/opencode-router` and mounted equivalents. +- What it calls: localhost OpenCode Router health/config/send endpoints. + +### router identity persistence helpers + +- What they are: Telegram/Slack identity config writers. +- What they do: persist messaging identity config into `opencode-router.json`. +- Called from and when: called by workspace router-management routes. +- What they call: local router config mutation while preserving legacy fallback fields. + +### `tryPostOpenCodeRouterHealth` / `tryFetchOpenCodeRouterHealth` + +- What they are: best-effort router apply/fetch helpers. 
+- What they do: apply or fetch router health/config state without requiring a restart. +- Called from and when: called after router config changes and health/bind/send flows. +- What they call: live router process control/status behavior. + +### workspace router routes + +- What they are: `/workspace/:id/opencode-router/*` routes. +- What they do: manage health, Telegram/Slack setup, identities, bindings, and outbound sends. +- Called from and when: called by messaging/connectors UI. +- What they call: router config files, live router process state, identity pairing state, and outbound routing behavior. + +## 11. Portable Export/Import And Sharing + +### `exportWorkspace` + +- What it is: portable workspace export builder. +- What it does: builds a portable workspace bundle including config, skills, commands, and allowed portable files. +- Called from and when: called by `/workspace/:id/export`. +- What it calls: workspace reads, config sanitization, portable file planning, and sensitive-data warnings. + +### `importWorkspace` + +- What it is: portable workspace import applier. +- What it does: applies imported bundle data into a workspace in replace or merge mode. +- Called from and when: called by `/workspace/:id/import`. +- What it calls: config writes, skills/commands writes, portable file writes, reload events. + +### portable config and file helpers + +- What they are: `sanitizePortableOpencodeConfig`, `planPortableFiles`, `listPortableFiles`, `writePortableFiles`, export-safety helpers. +- What they do: restrict export/import to portable config/files and detect or strip sensitive data. +- Called from and when: called by export/import flows. +- What they call: safe config/file selection and secret-aware export behavior. + +### shared bundle publishing/fetching + +- What they are: `publishSharedBundle`, `fetchSharedBundle`. +- What they do: publish and fetch named bundle payloads via a trusted OpenWork publisher. 
+- Called from and when: called by `/share/bundles/publish` and `/share/bundles/fetch`. +- What they call: remote publisher services and trusted-origin bundle fetch behavior. + +## 12. Audit Trail And Blueprint Session Materialization + +### audit functions + +- What they are: `recordAudit`, `readAuditEntries`, `readLastAudit`. +- What they do: append and read per-workspace JSONL audit logs. +- Called from and when: called after most mutation flows and by `/workspace/:id/audit`. +- What they call: audit persistence under OpenWork data directories. + +### blueprint session helpers + +- What they are: blueprint template normalization/materialization helpers. +- What they do: parse starter-session templates from `openwork.json`, track what was already materialized, create OpenCode sessions, and seed starter messages. +- Called from and when: called by blueprint/session materialization routes and workspace bootstrap flows. +- What they call: upstream OpenCode session creation, direct OpenCode DB seeding, and `openwork.json` updates. + +## 13. Runtime Control And Operational Endpoints + +### `/health` and related status routes + +- What they are: health and operational summary endpoints. +- What they do: report reachability, uptime, actor identity, runtime status, and toy UI/debug support. +- Called from and when: called by probes, status pages, and manual operator/debug flows. +- What they call: server uptime/version state, auth resolution, runtime control service, and toy UI assets. + +### `/runtime/versions` and `/runtime/upgrade` + +- What they are: runtime-control proxy endpoints. +- What they do: proxy runtime version and upgrade behavior. These are the legacy current-server route names; the Server V2 plan normalizes equivalent server-wide runtime endpoints under `/system/runtime/*`. +- Called from and when: called by upgrade/admin flows. +- What they call: `fetchRuntimeControl` and the configured runtime control base URL. 
+ +### `fetchRuntimeControl` + +- What it is: runtime control HTTP client. +- What it does: calls the configured runtime control base URL with bearer auth. +- Called from and when: called by runtime version/upgrade routes. +- What it calls: external runtime control plane. + +## Key Takeaways + +- The current server is dominated by one large orchestration file, `apps/server/src/server.ts`, with many meaningful domains hanging off it. +- The best decomposition candidates for the new server are: + - startup/config/runtime wiring + - auth/tokens/approvals + - workspace lifecycle/config + - OpenCode proxy + session read model + - file session API + - OpenCode Router integrations + - portable export/import + sharing + - plugins/skills/MCP/commands/scheduler +- The strongest existing seams are service-style modules such as `TokenService`, `ApprovalService`, `ReloadEventStore`, `FileSessionStore`, `session-read-model.ts`, `portable-files.ts`, `workspace-export-safety.ts`, and `skill-hub.ts`. +- The weakest area is route ownership: many domains still terminate directly inside `createRoutes` instead of domain routers/controllers. diff --git a/prds/server-v2-plan/distribution.md b/prds/server-v2-plan/distribution.md new file mode 100644 index 00000000..10d7e75b --- /dev/null +++ b/prds/server-v2-plan/distribution.md @@ -0,0 +1,614 @@ +# Server V2 Distribution + +## Status: Draft +## Date: 2026-04-13 + +## Purpose + +This document defines the preferred distribution model for the new OpenWork server. + +It covers: + +- how the new server should be built +- how `opencode` and `opencode-router` should be packaged +- how the desktop app should bundle the server +- how standalone server users should install and run it + +## Core Distribution Goal + +We want one canonical server runtime per platform. 
+ +That server runtime should: + +- be the same thing the desktop app bundles +- also be shippable as a standalone server download +- include the matching OpenCode and OpenCode Router sidecars + +## Recommended Build Model + +Recommended implementation/runtime choice: + +- implement `apps/server-v2` in TypeScript +- run it with Bun in development +- compile it with Bun for distribution + +Recommended packaging choice: + +- one compiled server executable per target platform +- embed `opencode` and `opencode-router` into that executable +- extract those sidecars into a managed runtime directory on first run +- launch them from there + +This gives us a single-file-per-platform distribution model without needing a second wrapper executable, unless Bun packaging proves insufficient in practice. + +## Why Bun Changes The Packaging Story + +Bun's `--compile` support gives us a much stronger path than a normal JS runtime build. + +Important capabilities: + +- compile TypeScript into a standalone executable +- cross-compile for other platforms +- embed arbitrary files with `with { type: "file" }` +- embed build-time constants +- produce minified and bytecode-compiled binaries for faster startup + +That means the new server can likely: + +- be built as a Bun-compiled executable +- carry embedded sidecar payloads +- self-extract those payloads on startup + +## Target Distribution Shape + +Per platform, the canonical runtime should be: + +- `openwork-server-v2` + - compiled Bun executable + - embedded `opencode` + - embedded `opencode-router` + - embedded release/runtime manifest + +One artifact per platform, for example: + +- `openwork-server-v2-darwin-arm64` +- `openwork-server-v2-darwin-x64` +- `openwork-server-v2-linux-x64` +- `openwork-server-v2-linux-arm64` +- `openwork-server-v2-windows-x64.exe` + +## Desktop Distribution + +Desktop users download the desktop app. 
+ +The desktop app should: + +- ship with the matching `openwork-server-v2` runtime embedded or bundled as an app resource +- launch only that server +- never directly launch `opencode` or `opencode-router` + +At runtime: + +1. Desktop app launches `openwork-server-v2`. +2. `openwork-server-v2` checks its managed runtime directory. +3. If needed, it extracts embedded `opencode` and `opencode-router`. +4. It starts and supervises those sidecars itself. +5. Desktop app talks only to the server over port + token. + +## Standalone Server Distribution + +Some users will want only the server. + +For them, we should publish the same canonical runtime as a standalone download. + +That means: + +- standalone users download `openwork-server-v2` for their platform +- they run it directly +- it performs the same sidecar extraction and supervision the desktop-bundled copy would do + +This keeps the runtime identical between: + +- desktop-hosted use +- standalone server use + +## Runtime Extraction Model + +The server executable should embed sidecar payloads and extract them to a persistent versioned runtime directory. + +Current implementation note: + +- Phase 10 now treats the managed app-data runtime directory as the canonical release runtime location. +- The release runtime is populated on first run from a bundled runtime source directory (for example the desktop resource sidecar directory or an executable-adjacent bundle with `manifest.json`) and is then reused across later runs. +- `apps/server-v2/script/build.ts` now also supports `--embed-runtime`, which generates a temporary build entrypoint that embeds `opencode`, `opencode-router`, and `manifest.json` directly into the compiled Server V2 binary via Bun `with { type: "file" }` imports. +- Extraction now uses a lock, temp directory, atomic replace, lease file, and conservative cleanup of stale runtime directories. 
+- The standalone embedded artifact can now boot without an adjacent sidecar bundle: when no filesystem bundle is present, Server V2 falls back to the embedded runtime payload and extracts from there.
+
+Recommended behavior:
+
+1. On startup, the server determines its runtime version.
+2. It computes a runtime directory under app-data.
+3. It checks whether the sidecars already exist and match the expected manifest/checksums.
+4. If not, it extracts them atomically.
+5. It marks executable bits where needed.
+6. It launches sidecars from that runtime directory.
+
+Recommended runtime path shape:
+
+```text
+<app-data>/runtime/server-v2/<version>/
+```
+
+Example contents:
+
+```text
+<app-data>/runtime/server-v2/0.1.0/
+  manifest.json
+  opencode
+  opencode-router
+```
+
+## Why Persistent Runtime Dir Instead Of Temp
+
+We should prefer a persistent runtime directory instead of temp.
+
+Reasons:
+
+- avoids repeated extraction on every run
+- avoids temp cleanup breaking the runtime
+- improves debuggability
+- makes versioned runtime upgrades simpler
+- makes locking and atomic replacement easier
+
+## Build Pipeline
+
+Recommended build flow:
+
+1. Build or collect the platform-specific `opencode` binary.
+2. Build or collect the platform-specific `opencode-router` binary.
+3. Generate a runtime manifest containing:
+   - server version
+   - OpenCode version
+   - router version
+   - target platform
+   - checksums
+4. Compile `apps/server-v2/src/cli.ts` with Bun.
+5. Embed the sidecars and manifest into the compiled executable.
+
+Illustrative Bun compile command:
+
+```bash
+bun build --compile --minify --bytecode --target=bun-darwin-arm64 ./src/cli.ts --outfile dist/openwork-server-v2
+```
+
+The exact build script will likely be JS-driven rather than a one-liner so it can:
+
+- prepare sidecar assets
+- generate the manifest
+- inject build-time constants
+- compile per target
+
+Current implementation note:
+
+- `pnpm --filter openwork-server-v2 build:bin` builds the plain compiled executable.
+- `pnpm --filter openwork-server-v2 build:bin:embedded --bundle-dir <bundle-dir>` builds the compiled executable with embedded runtime assets from a prepared bundle directory.
+- `pnpm --filter openwork-server-v2 build:bin:embedded:all` drives the same embedding flow across the supported Bun targets when target-specific runtime bundle files are staged.
+- The build script resolves target-specific asset filenames like `opencode-<target>` and `manifest.json-<target>` when cross-target bundles are staged.
+
+## Bun Embedding Model
+
+The preferred Bun packaging approach is:
+
+- embed sidecar files with `with { type: "file" }`
+- access them via Bun's embedded file support
+- copy them into the persistent runtime directory on first run
+
+This means we do not need a separate wrapper binary unless Bun's real-world behavior proves insufficient.
+
+## Cross-Platform Targets
+
+The server should be built in a matrix across supported targets.
+
+Initial likely targets:
+
+- `bun-darwin-arm64`
+- `bun-darwin-x64`
+- `bun-linux-x64`
+- `bun-linux-arm64`
+- `bun-windows-x64`
+
+Possible later targets:
+
+- `bun-windows-arm64`
+- musl variants for portable Linux distribution
+
+For Linux x64, baseline builds may be safer if broad CPU compatibility matters.
+
+## Version Pinning
+
+Each server release should pin:
+
+- server version
+- OpenCode version
+- router version
+
+Recommended runtime manifest shape:
+
+```json
+{
+  "serverVersion": "0.1.0",
+  "opencodeVersion": "1.2.27",
+  "routerVersion": "0.1.0",
+  "target": "bun-darwin-arm64",
+  "files": {
+    "opencode": {
+      "sha256": "..."
+    },
+    "opencode-router": {
+      "sha256": "..."
+ } + } +} +``` + +## Desktop Vs Standalone Release Model + +### Desktop release + +- desktop app contains the matching `openwork-server-v2` runtime +- user launches app +- app launches server + +### Standalone server release + +- user downloads `openwork-server-v2` for their platform +- user launches server directly +- server self-extracts sidecars and runs normally + +This gives us one runtime with two install channels. + +## Local Dev Asset Model + +Local development should preserve the same ownership model as production without requiring the final compiled single-file bundle on every edit. + +Recommended dev behavior: + +- run `apps/server-v2` directly with Bun in watch mode +- keep `opencode-router` as a locally built workspace binary from `apps/opencode-router` +- acquire `opencode` as a pinned release artifact rather than committing the binary into git +- stage both binaries into a gitignored local runtime-assets directory +- have Server V2 launch those staged binaries by absolute path + +The important rule is that development should still be deterministic: + +- no reliance on `PATH` +- no silent use of whichever `opencode` binary happens to be installed globally +- no checked-in release binaries under source control + +### Why `opencode` should not be committed into the repo + +We do not need the `opencode` binary checked into git. + +What we need is a reproducible acquisition path: + +- read the pinned version from `constants.json` +- download the matching OpenCode release artifact for the current platform +- store it in a gitignored local runtime-assets/cache location +- use that exact file for local dev and for release embedding + +This keeps local dev aligned with the pinned product version while avoiding binary churn in the repo. + +### Source of truth for the pinned version + +The OpenCode version should come from the existing root `constants.json` file. 
+
+For Server V2 planning, that means:
+
+- `constants.json` remains the version pin source of truth for `opencode`
+- local dev setup should read `opencodeVersion` from `constants.json`
+- release packaging should read the same value when embedding the final binary
+
+### Recommended local path shape
+
+Illustrative shape:
+
+```text
+<repo>/.local/runtime-assets/
+  opencode/
+    darwin-arm64/
+      v1.2.27/
+        opencode
+  opencode-router/
+    darwin-arm64/
+      dev/
+        opencode-router
+```
+
+Notes:
+
+- this directory should be gitignored
+- exact path names can change, but the shape should be versioned and platform-specific
+- `opencode-router` can use a `dev` slot because it is built from the local workspace during development
+- `opencode` should use the pinned version from `constants.json`
+
+### Recommended dev acquisition flow
+
+1. Read `opencodeVersion` from `constants.json`.
+2. Resolve the current platform/arch target.
+3. Check whether the pinned `opencode` binary already exists in the local runtime-assets cache.
+4. If not, download the matching OpenCode release artifact.
+5. Verify checksum if the release metadata supports it.
+6. Mark executable bits where needed.
+7. Build `apps/opencode-router` locally and place its binary in the staged dev runtime location.
+8. Start Server V2 and pass those absolute binary paths into runtime startup.
+
+### Dev vs release relationship
+
+The difference between dev and release should be only where the sidecar payloads come from:
+
+- release: sidecars are embedded into `openwork-server-v2` and extracted on first run
+- local dev: sidecars are staged into a gitignored local runtime-assets directory first
+
+The runtime ownership model should stay the same in both cases:
+
+- Server V2 resolves the binaries
+- Server V2 launches them
+- Server V2 supervises them
+
+## How This Differs From The Current System
+
+Today, runtime distribution is more fragmented.
+ +Current behavior is closer to: + +- desktop app bundles or prepares multiple sidecars +- desktop/Tauri still owns more startup logic +- orchestrator is a separate hosting/control layer +- `openwork-server`, `opencode`, and `opencode-router` are not yet one canonical server runtime bundle + +Target behavior becomes: + +- desktop app starts one thing: `openwork-server-v2` +- standalone users start that same `openwork-server-v2` +- `openwork-server-v2` starts and supervises its own runtime dependencies + +So the key shift is: + +- from component distribution +- to runtime-bundle distribution + +## Current Workflow Reality + +Based on the current repo workflows in this branch: + +- macOS notarization is explicitly configured +- Windows signing now has an explicit repo workflow path in `.github/workflows/windows-signed-artifacts.yml`, but it still requires a real signing certificate and Windows validation run before broad rollout + +That means: + +- we should not assume we already have a working Windows signing pipeline +- the new server distribution plan will need an explicit Windows signing step for both desktop and standalone runtime artifacts + +## Important Caveats + +This Bun-based single-file approach looks promising, but it still needs validation. + +### 1. Embedded binary extraction and execution + +We need to confirm that embedded sidecar binaries can be: + +- copied out reliably +- marked executable reliably +- launched reliably on all supported platforms + +### 2. macOS code signing and notarization + +This is especially important. 
+ +We need to validate: + +- the compiled server's codesigning story +- Bun JIT entitlements if needed +- behavior of extracted sidecars under Gatekeeper/notarization + +Important practical note: + +- the main `openwork-server-v2` executable will need a clean signing and notarization path +- extracted sidecars may also need to be signed appropriately if macOS quarantine or Gatekeeper treats them as separate executables +- we should assume that "signed main binary" does not automatically make extracted child binaries a non-issue + +Questions to validate on macOS: + +- can the signed/notarized main server extract and launch sidecars without triggering new trust prompts? +- do extracted sidecars need to preserve signatures from the embedded payloads? +- do we need to strip quarantine attributes or will that create trust problems? +- does Bun's compiled executable require specific JIT-related entitlements in our real deployment model? + +This means macOS is not just a packaging detail. It is one of the first things we should prototype before fully committing to the single-file distribution format. + +### 3. Windows AV / SmartScreen behavior + +Extracted executables may have more friction on Windows. + +We need to test: + +- first-run extraction +- launch reliability +- user-facing warnings + +Important practical note: + +- Windows Defender or third-party AV may treat a self-extracting executable plus child-process extraction as suspicious behavior +- SmartScreen reputation may apply to the main executable separately from the extracted sidecars +- repeated extraction into temp-like locations is more likely to look suspicious than extraction into a stable app-data runtime directory + +Questions to validate on Windows: + +- does first-run extraction trigger Defender or SmartScreen warnings? +- are extracted sidecars quarantined, delayed, or scanned in ways that materially hurt startup time? +- do signed extracted sidecars behave better than unsigned ones? 
+- do we need to prefer a stable per-version runtime directory to avoid repeated AV scans and trust churn? + +This means Windows should also get an early prototype, especially for first-run startup latency and user-facing trust prompts. + +## Windows Signing Plan + +We should plan to sign Windows artifacts explicitly. + +That includes: + +- desktop app executable/installer +- standalone `openwork-server-v2.exe` +- extracted Windows sidecars when they are shipped as separate signed executables inside the embedded runtime bundle + +Recommended signing model: + +- use Authenticode signing at minimum +- consider EV signing if SmartScreen reputation becomes a serious UX issue +- timestamp signatures so they remain valid after certificate rotation or expiry + +Rule of thumb: + +- every Windows executable we intentionally ship to users should be signed + +That includes: + +- the desktop app executable and/or installer +- `openwork-server-v2.exe` +- `opencode.exe` +- `opencode-router.exe` + +Important practical point: + +- signing only the main desktop executable is not enough for the server runtime model we want +- if `openwork-server-v2.exe` extracts `opencode.exe` and `opencode-router.exe`, those sidecars should ideally also be signed before embedding + +## Suggested Windows Release Flow + +### Desktop release flow + +1. Build the Windows desktop artifact. +2. Sign the desktop executable or installer. +3. Verify the signature. +4. Publish the signed asset. + +### Standalone server release flow + +1. Build `openwork-server-v2.exe` for Windows. +2. Build or collect signed Windows `opencode.exe` and `opencode-router.exe` payloads. +3. Embed those signed sidecar payloads into the server executable. +4. Sign the final `openwork-server-v2.exe`. +5. Verify the signature. +6. Publish the signed asset. 
+ +This means Windows signing happens at two layers: + +- sidecar payload signing +- final runtime signing + +## GitHub Actions Sketch + +The repo does not currently show an explicit Windows signing step, so we should plan one. + +Illustrative shape: + +```yaml +jobs: + build-windows-server-v2: + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup Bun + uses: oven-sh/setup-bun@v2 + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Build server runtime + run: pnpm --filter openwork-server-v2 build:bin:windows + + - name: Build or fetch Windows sidecars + run: pnpm --filter openwork-server-v2 build:sidecars:windows + + - name: Import signing certificate + shell: pwsh + run: | + $bytes = [Convert]::FromBase64String("${{ secrets.WINDOWS_CERT_PFX_BASE64 }}") + [IO.File]::WriteAllBytes("codesign.pfx", $bytes) + + - name: Sign sidecars + shell: pwsh + run: | + signtool sign /fd SHA256 /tr http://timestamp.digicert.com /td SHA256 /f codesign.pfx /p "${{ secrets.WINDOWS_CERT_PASSWORD }}" dist\\sidecars\\opencode.exe + signtool sign /fd SHA256 /tr http://timestamp.digicert.com /td SHA256 /f codesign.pfx /p "${{ secrets.WINDOWS_CERT_PASSWORD }}" dist\\sidecars\\opencode-router.exe + + - name: Build embedded runtime + run: pnpm --filter openwork-server-v2 package:windows + + - name: Sign final server runtime + shell: pwsh + run: | + signtool sign /fd SHA256 /tr http://timestamp.digicert.com /td SHA256 /f codesign.pfx /p "${{ secrets.WINDOWS_CERT_PASSWORD }}" dist\\openwork-server-v2.exe + + - name: Verify signature + shell: pwsh + run: | + signtool verify /pa /v dist\\openwork-server-v2.exe +``` + +The desktop Windows job would follow the same pattern, but sign the desktop executable/installer artifact. 
+ +## Secrets And Infrastructure Needed + +To support Windows signing in CI, we will likely need: + +- a Windows code signing certificate in PFX form +- a password for that PFX +- a timestamp server URL +- possibly a separate EV signing process if we choose that route later + +Likely GitHub secrets: + +- `WINDOWS_CERT_PFX_BASE64` +- `WINDOWS_CERT_PASSWORD` +- `WINDOWS_TIMESTAMP_URL` + +## Recommendation + +For now, the important planning takeaway is: + +- Windows signing is not already clearly implemented in the repo workflows +- we should treat it as a required new release capability for Server V2 distribution +- both desktop and standalone server runtimes will need explicit Windows signing support + +### 4. Upgrade and extraction locking + +We need to design: + +- atomic extraction +- concurrent launch locking +- old runtime cleanup +- rollback behavior if extraction is interrupted + +## Recommended Path + +Recommended sequence: + +1. Build the new server in Bun/TypeScript. +2. Make the server own runtime supervision logically first. +3. Prototype a multi-file per-platform build first if needed for speed. +4. Then implement the Bun single-file embedded-sidecar distribution path. +5. Use that same runtime artifact in both: + - desktop releases + - standalone server releases + +The long-term preferred model is still the Bun-based self-extracting single executable per platform. + +## Open Questions + +1. Should the extracted runtime directory be versioned only by server version, or by server+OpenCode+router tuple? +2. What exact app-data path should we standardize on for desktop-hosted and standalone modes? +3. How should old extracted runtimes be garbage-collected safely? +4. Do we want to keep a multi-file fallback distribution format even after the single-file format works? +5. What exact release pipeline should produce the platform sidecars before the Bun compile step? 
diff --git a/prds/server-v2-plan/final-cutover-checklist.md b/prds/server-v2-plan/final-cutover-checklist.md new file mode 100644 index 00000000..05e77d8b --- /dev/null +++ b/prds/server-v2-plan/final-cutover-checklist.md @@ -0,0 +1,169 @@ +# Server V2 Final Cutover Checklist + +## Status + +In progress. This checklist is the honest Phase 10 cutover and release ledger for the current worktree state. + +## Default Path Checks + +- Desktop startup default: `Legacy` +- App rollout default: legacy path unless `OPENWORK_UI_USE_SERVER_V2=1` +- Runtime ownership default: Server V2 supervises OpenCode and router +- Route mount: root-mounted (`/system/*`, `/workspaces/*`), no legacy `/v2` + +## Completed In Current Worktree + +- Composite local dev graph exists and is documented: + - `pnpm dev:server-v2` + - `pnpm dev:server-v2:server` +- OpenAPI and SDK watch loops are hardened against replace-style writes and generator side effects. +- Server V2 resolves a real package version instead of reporting `0.0.0`. +- Bun compile build exists for Server V2. +- Embedded runtime packaging exists via `apps/server-v2/script/build.ts --embed-runtime`. +- Release runtime extraction uses a persistent versioned runtime directory with lock, atomic replace, lease tracking, and cleanup. +- Desktop can bundle and start Server V2 when `OPENWORK_UI_USE_SERVER_V2=1` is enabled. +- App can route through Server V2 when `OPENWORK_UI_USE_SERVER_V2=1` is enabled. +- Windows signing workflow exists at `.github/workflows/windows-signed-artifacts.yml` for the standalone Server V2 binary, signed sidecars, and desktop Windows artifacts. + +## Remaining Release Gates + +- Delete legacy `apps/server` codepaths once no active caller needs them. +- Delete or archive obsolete orchestrator control-plane code once no active caller needs it. +- Commit or otherwise land the regenerated Server V2 contract so `pnpm contract:check` passes on a clean tree. 
+- Validate macOS signing + notarization with real signed artifacts. +- Validate Windows SmartScreen / Defender / AV behavior with real Windows artifacts. +- Capture Chrome MCP success evidence once the Docker stack session flow is fixed. + +## Current Validation Status In This Environment + +- `pnpm sdk:generate` runs successfully. +- `pnpm contract:check` still fails in this in-progress worktree because generated Phase 10 contract changes are not committed yet; it is acting as a drift detector against `HEAD`, not as a no-op generator check. +- Server V2 package tests and typecheck pass. +- App Server V2 boundary tests and app typecheck pass. +- Desktop Rust tests pass. +- Plain and embedded Server V2 Bun builds both pass. +- Embedded standalone runtime smoke passed: the compiled Server V2 binary launched from outside the bundle directory extracted and started OpenCode from the managed runtime directory using `source: release`. +- Docker dev stack now starts on the Server V2 path after moving the stack off orchestrator startup and serializing shared `pnpm install` work across containers. +- Docker API-level smoke succeeded for `GET /system/health`, `GET /system/opencode/health`, and `GET /workspaces` against the running dev stack. +- Docker product-flow API smoke now succeeds for `POST /workspaces/:id/sessions` after fixing Server V2 compatibility config materialization to emit the OpenCode-compatible `permission.external_directory` object-map format. +- Chrome MCP validation is not runnable from this environment because the current tool session does not expose Chrome DevTools MCP actions. +- macOS signing/notarization was not completed here because no signing identity or notary credentials are available in this session. +- Windows signing workflow was implemented, but end-to-end signing and SmartScreen / AV validation were not completed here because no Windows runner or Windows signing certificate is available in this session. 
+
+## Automated Validation Commands
+
+Run from repo root unless noted.
+
+```bash
+pnpm sdk:generate
+pnpm contract:check
+pnpm --filter openwork-server-v2 test
+pnpm --filter openwork-server-v2 typecheck
+pnpm --filter @openwork/app test:server-v2-boundary
+pnpm --filter @openwork/app typecheck
+cargo test --manifest-path apps/desktop/src-tauri/Cargo.toml --locked
+pnpm --filter openwork-server-v2 build:bin
+pnpm --filter openwork-server-v2 build:bin:embedded --bundle-dir ../desktop/src-tauri/sidecars
+```
+
+## macOS Manual Validation
+
+Prerequisites:
+
+- Apple signing identity available in keychain
+- Notary API key and issuer configured
+- Real release-style sidecar bundle prepared
+
+Suggested validation flow:
+
+```bash
+pnpm -C apps/desktop prepare:sidecar
+pnpm --filter openwork-server-v2 build:bin:embedded --bundle-dir ../desktop/src-tauri/sidecars
+codesign --deep --force -vvvv --sign "<signing-identity>" --entitlements apps/desktop/src-tauri/entitlements.plist apps/server-v2/dist/bin/openwork-server-v2
+codesign -vvv --verify apps/server-v2/dist/bin/openwork-server-v2
+xcrun notarytool submit apps/server-v2/dist/bin/openwork-server-v2 --key <key-path> --key-id <key-id> --issuer <issuer-id> --wait
+spctl --assess --type execute --verbose apps/server-v2/dist/bin/openwork-server-v2
+OPENWORK_SERVER_V2_WORKDIR="$(mktemp -d)" apps/server-v2/dist/bin/openwork-server-v2 --port 32123
+```
+
+Confirm:
+
+- the binary verifies after signing
+- notarization succeeds
+- first-run extraction succeeds from the embedded payload
+- extracted sidecars launch without trust prompts
+
+## Windows Manual Validation
+
+Prerequisites:
+
+- Windows runner or workstation
+- Authenticode certificate in PFX form
+- `signtool.exe`
+
+Suggested validation flow:
+
+```powershell
+pnpm install --frozen-lockfile
+pnpm -C apps/desktop prepare:sidecar
+pnpm --filter openwork-server-v2 build:bin:embedded --bundle-dir ../desktop/src-tauri/sidecars --target bun-windows-x64
+signtool sign /fd SHA256 /tr http://timestamp.digicert.com /td SHA256 /f codesign.pfx /p <password> apps\server-v2\dist\bin\openwork-server-v2-bun-windows-x64.exe
+signtool verify /pa /v apps\server-v2\dist\bin\openwork-server-v2-bun-windows-x64.exe
+```
+
+SmartScreen / AV validation:
+
+```powershell
+$env:OPENWORK_SERVER_V2_WORKDIR = Join-Path $env:TEMP "openwork-server-v2-smoke"
+New-Item -ItemType Directory -Force -Path $env:OPENWORK_SERVER_V2_WORKDIR | Out-Null
+apps\server-v2\dist\bin\openwork-server-v2-bun-windows-x64.exe --port 32123
+Invoke-WebRequest http://127.0.0.1:32123/system/opencode/health
+Invoke-WebRequest http://127.0.0.1:32123/system/runtime/summary
+```
+
+Record:
+
+- whether SmartScreen warns on first launch
+- whether Defender delays or quarantines extracted sidecars
+- first-run vs second-run startup latency
+- whether signed extracted sidecars materially reduce warnings
+
+## End-to-End Product Validation
+
+Preferred flow:
+
+```bash
+packaging/docker/dev-up.sh
+```
+
+Then validate a real UI flow with Chrome MCP:
+
+- open the printed web URL
+- navigate to the session surface
+- send a message
+- confirm the response renders
+- save screenshot evidence
+
+If Chrome MCP is unavailable in the current environment, record that explicitly and include the exact command above plus the expected manual reviewer steps.
+
+Current state from this worktree:
+
+- `packaging/docker/dev-up.sh` now reaches healthy `server`, `web`, and `share` containers on the Server V2 path.
+- Docker API smoke including session creation now succeeds:
+
+```bash
+source tmp/.dev-env-<timestamp>
+curl -H "Authorization: Bearer $OPENWORK_TOKEN" http://127.0.0.1:<port>/workspaces
+curl -X POST -H "Authorization: Bearer $OPENWORK_TOKEN" -H "Content-Type: application/json" --data '{"title":"Docker E2E"}' http://127.0.0.1:<port>/workspaces/<workspace-id>/sessions
+```
+
+- Remaining manual reviewer work:
+
+```bash
+packaging/docker/dev-up.sh
+source tmp/.dev-env-<timestamp>
+curl -H "Authorization: Bearer $OPENWORK_TOKEN" http://127.0.0.1:<port>/workspaces
+curl -X POST -H "Authorization: Bearer $OPENWORK_TOKEN" -H "Content-Type: application/json" --data '{"title":"Docker E2E"}' http://127.0.0.1:<port>/workspaces/<workspace-id>/sessions
+```
+
+and then complete the Chrome MCP UI flow in the running stack.
diff --git a/prds/server-v2-plan/ideal-flow.md b/prds/server-v2-plan/ideal-flow.md
new file mode 100644
index 00000000..52553402
--- /dev/null
+++ b/prds/server-v2-plan/ideal-flow.md
@@ -0,0 +1,597 @@
+# Server V2 Ideal Flow
+
+## Status: Draft
+## Date: 2026-04-13
+
+## Purpose
+
+This document captures the desired end-state runtime model for OpenWork Server V2.
+
+It is more concrete than the incremental migration docs. It describes the ideal product flow once the desktop app is a thin UI, the server owns workspace behavior, and orchestrator responsibilities have been folded into the server itself.
+
+## Core Principle
+
+The desktop app is just a UI.
+
+The server is the real system.
+ +That means: + +- the desktop app starts the server on launch +- the desktop app talks to the server over a port plus tokens +- all meaningful product data shown in the UI comes from the server +- all workspace, file, AI, config, and runtime behavior lives behind the server + +## High-Level Runtime Flow + +Target flow: + +```text +DesktopApp +-> starts local OpenWork server +-> connects to that server over port + token +-> asks server for workspaces +-> asks server for sessions within each workspace +-> renders server-backed state + +OpenWork server +-> owns local sqlite state +-> owns workspace/config/runtime mapping +-> talks to OpenCode via SDK +-> talks to remote OpenWork servers for remote workspaces +``` + +## Server And Workspace Registry Model + +The local OpenWork server should maintain the canonical registry of servers and workspaces. + +That registry includes: + +- one local server +- zero or more remote servers +- all local, remote, control, and help workspaces + +Each workspace points at one server. + +Important nuance: + +- `server` is a real system concept +- but it does not need to be a first-class user-facing concept yet + +The user should mainly experience: + +- workspaces +- sessions inside workspaces + +while the server keeps the canonical mapping and the app only renders or caches what the server returns. 
+ +## Desktop App Responsibilities + +The desktop app should only do these things: + +- launch the local OpenWork server +- store enough local connection state to reconnect to the server +- maintain transient UI state +- maintain knowledge of which servers it is connected to +- render workspaces, sessions, messages, settings, and actions from server data +- send user intent to the server + +The desktop app should not directly own: + +- workspace data models +- workspace config mutation +- file reads or writes +- AI/session/task behavior +- OpenCode SDK interaction +- direct runtime orchestration logic + +## Server Responsibilities + +The local OpenWork server becomes the primary owner of: + +- workspace registry +- workspace config registry +- session discovery and session interaction +- local sqlite persistence +- OpenCode project/session integration +- local runtime supervision +- remote OpenWork workspace connections +- exposing local workspaces for remote consumption + +In the target model, the local desktop-hosted server should be able to do everything the app needs without the app performing its own parallel business logic. + +## Server Database + +The server should have its own sqlite database. + +This database is the source of truth for OpenWork-managed metadata and relationships. 
+ +At minimum it should track: + +- `servers` + - known local or remote server connections + - connection metadata, base URLs, auth/token state, capability flags +- `workspaces` + - local and remote workspaces + - workspace kind, display metadata, connection mapping, status +- `workspace_sessions_cache` or equivalent lightweight indexes if needed + - optional server-side cache/index for faster listing +- `mcps` + - config and auth metadata +- `skills` + - skill metadata and OpenWork-managed ownership +- `plugins` + - plugin metadata and enabled state +- `provider_configs` + - provider definitions, auth references, workspace assignments +- `cloud_signin` + - cloud auth/session metadata for the local server +- linking tables between config items and workspaces + - so one config item can be attached to one or many workspaces + +The sqlite DB is for OpenWork-owned metadata. + +OpenCode remains the owner of session state. + +For now, the server can query sessions live from OpenCode or remote servers. + +It does not need a durable authoritative session cache yet. + +The UI can eventually keep a cache for responsiveness if needed. + +## Workspace Model + +There are three important workspace categories. + +### 1. Local workspaces + +Each local workspace: + +- belongs to the local OpenWork server +- has a stable OpenWork workspace ID +- maps to exactly one OpenCode project +- has its own OpenWork-managed config directory +- has its own user data directory / work directory + +### 2. Remote workspaces + +Each remote workspace: + +- belongs to a remote OpenWork server +- maps to exactly one workspace on that remote OpenWork server +- is consumed through the remote server API, not by talking to OpenCode directly + +### 3. 
Internal workspaces + +There should also be two local special-purpose workspaces: + +- Control Workspace +- Help Workspace + +These should: + +- be preconfigured by the local server +- exist as real server-managed workspaces +- not appear in the normal user-facing workspace list +- be accessible through dedicated UI flows instead of the standard workspace switcher + +These give the product stable built-in surfaces for control/help behavior without mixing them into the user’s normal workspace list. + +For now they should remain hidden. + +Later they can be surfaced through settings and help/support areas rather than the standard workspace list. + +## Local Workspace Mapping To OpenCode + +Each local workspace should map to one OpenCode project. + +That means: + +- OpenWork owns the workspace record +- OpenWork workspace ID remains separate from the OpenCode project ID +- OpenCode owns the underlying session state for that project +- OpenWork server is responsible for translating between workspace IDs and OpenCode project/session identifiers + +The server should query OpenCode through the SDK for: + +- sessions in that project +- messages in a session +- archived state +- session names +- forks and other session primitives +- any other OpenCode-native session/project capability we need to expose + +## Remote Workspace Mapping + +Each remote workspace should map to a single workspace on a remote OpenWork server. + +That means: + +- the local UI does not treat a remote workspace as a direct OpenCode connection +- the local server talks to the remote OpenWork server +- the remote OpenWork server is responsible for its own OpenCode integration + +This preserves one product model: + +- UI <-> OpenWork server + +instead of: + +- UI <-> OpenCode directly for some things +- UI <-> OpenWork server for others + +## Exposing A Local Workspace Remotely + +The local server should be able to expose a local workspace for remote consumption. 
+ +This should be modeled as: + +- one local OpenWork server +- one exposed workspace access surface per workspace +- remote consumers connect to the OpenWork server and are scoped to the workspace they were granted + +That access should be workspace-scoped. + +Each shared local workspace should have its own access key scoped just to that workspace. + +Important principle: + +- local configs affect the local server only +- workspace-specific config should apply only to the workspace it is attached to + +## Startup Flow + +Ideal startup flow: + +1. Desktop app launches. +2. Desktop app starts the local OpenWork server. +3. Desktop app establishes a connection to the server using port + token. +4. Desktop app asks the server for the list of visible workspaces. +5. For each workspace, the desktop app asks the server for sessions in that workspace. +6. The server resolves the workspace to its backend: + - local OpenCode project for local workspaces + - remote OpenWork server workspace for remote workspaces +7. The server queries the relevant backend live for those sessions. +8. The desktop app renders only the data returned by the server. + +The server should also expose explicit runtime health endpoints for its local dependencies, including OpenCode. + +Recommended namespace: + +- use `/system/*` for server-wide runtime and operational surfaces +- for example: `/system/opencode/health`, `/system/runtime/versions`, `/system/runtime/upgrade` + +At minimum, the UI should be able to ask whether OpenCode is running and which version is active. + +## OpenCode Router Startup Flow + +`opencode-router` should be started by the new OpenWork server, not by the desktop app. + +Current baseline being replaced: + +- today, the orchestrator decides whether router support is needed +- today, the orchestrator resolves the router binary +- today, the orchestrator spawns and supervises the router child process + +Target flow: + +1. Desktop app launches the local OpenWork server. +2. 
The server boots its sqlite state and runtime registry. +3. The server evaluates whether router support is needed. +4. If router support is needed, the server: + - resolves the `opencode-router` binary + - materializes the effective router config from server-owned state + - launches the router child process + - waits for router health + - tracks router status in memory and optionally in runtime state tables +5. The server exposes router status and router-backed capabilities through its own API. + +The desktop app should not: + +- launch `opencode-router` directly +- supervise `opencode-router` directly +- talk to `opencode-router` directly + +The server should own the full lifecycle. + +### Recommended shape + +- one router process per local OpenWork server +- server-level identities and bindings +- workspace-aware routing enforced by the server when needed + +This is simpler than one router process per workspace and fits the server-first ownership model better. + +### Startup decision model + +The server should decide whether router startup is needed based on: + +- whether any router identities or bindings are configured +- whether any server-owned features require router-backed behavior +- whether messaging-related capabilities are enabled + +Recommended behavior: + +- if no router-backed capability is configured, router can stay off +- once messaging/bindings are configured, router should be started and supervised by the server + +### Runtime behavior + +The server should also own: + +- router restart +- router health checks +- router config apply/reload behavior +- router status reporting to the UI + +That makes `opencode-router` just another runtime dependency of the OpenWork server, not a separate app-owned or orchestrator-owned control surface. + +## Session Ownership Model + +The state of a session should be managed by OpenCode. 
+ +That includes: + +- session identity +- session name +- archived state +- messages +- message ordering/history +- forks +- other OpenCode-native session semantics + +The OpenWork server should query OpenCode via the SDK to get those. + +OpenWork should not duplicate the source of truth for those fields in its own sqlite DB unless it needs a cache or index for performance. + +## Workspace Config Ownership Model + +The config of a workspace should be managed by OpenWork. + +That includes: + +- MCPs +- providers +- plugins +- skills +- any OpenWork-owned workspace settings + +When a user adds a config item: + +- by default it should be added to the workspace currently active in the UI +- the UI should also let the user apply that item to other workspaces +- nothing should be globally applied by default + +When that happens: + +1. The server creates or updates the config item in its own sqlite DB. +2. The server creates rows in a dedicated linking table that associates the config item with one or more workspaces. +3. The server materializes the effective OpenCode config for each affected workspace. +4. The server updates the OpenCode config file(s) needed for that workspace only. + +When the server starts OpenCode, it should also be able to generate the effective OpenCode config object from its own database state and pass that config into the OpenCode runtime directly. + +This lets OpenWork own the config model while still projecting it into the OpenCode format OpenCode needs. + +When a user creates a new workspace, the product should also offer a list of existing config items that can be applied to that new workspace. + +## Config Directory Separation + +The ideal layout separates: + +- user data directory / project data directory +- OpenWork-managed config directory +- OpenCode-managed runtime state + +When a user adds a new local workspace: + +1. OpenWork prompts for the directory the user wants to work with. +2. 
That chosen directory becomes the workspace data directory. +3. OpenWork creates a separate workspace config directory under an OpenWork-controlled path. +4. OpenCode is pointed at the OpenWork-controlled config directory. +5. The user’s chosen data directory is added as an authorized path in config. + +This gives a cleaner split between: + +- files owned by the user +- files controlled by OpenWork +- files controlled by OpenCode + +Initial config path direction: + +```text +/workspaces/<workspace-id>/config +``` + +## Config Absorption Model + +If a user manually adds config item files into a workspace config path, the server should detect that and absorb it into the local sqlite database. + +That means the server should eventually support: + +- watching workspace config directories +- detecting unmanaged additions/changes/removals +- parsing those items into OpenWork-owned models +- reconciling them back into the DB as imported or externally-managed records + +This gives the system a path to coexist with manual edits instead of fighting them. + +For now: + +- if a file clearly matches a known managed concept such as an MCP, Skill, Plugin, or similar item, the server can absorb it +- if it does not match a known managed concept, the server should leave it in place and not force it into the DB + +## Session Interaction Flow + +For anything related to a session, the client should always talk to the OpenWork server with the workspace ID. 
+ +Example shape: + +```text +client action +-> OpenWork server request with workspaceId (+ sessionId if needed) +-> server resolves workspace backend +-> local OpenCode project/session OR remote OpenWork workspace +-> response comes back through OpenWork server +``` + +For local workspaces: + +- the server forwards to the correct OpenCode project/session using the SDK + +For remote workspaces: + +- the server forwards to the remote OpenWork server + +The client should not need to know which backend type it is talking to beyond workspace/server identity. + +The server should maintain the mapping between: + +- OpenWork workspace ID +- backend type +- OpenCode project ID or remote OpenWork workspace ID + +## OpenCode Primitive Exposure + +To make this work, the OpenWork server will need to expose the OpenCode primitives it depends on. + +The initial minimum set of upstream OpenCode capabilities that the server must wrap should include at least: + +### Session list and lifecycle + +- `GET /session` + - list sessions +- `POST /session` + - create session +- `GET /session/status` + - get session status +- `DELETE /session/{sessionID}` + - delete session +- `PATCH /session/{sessionID}` + - update session + +### Session structure and control + +- `GET /session/{sessionID}/todo` + - get session todos +- `POST /session/{sessionID}/init` + - initialize session +- `POST /session/{sessionID}/fork` + - fork session +- `POST /session/{sessionID}/abort` + - abort session +- `POST /session/{sessionID}/share` + - share session +- `DELETE /session/{sessionID}/share` + - unshare session +- `GET /session/{sessionID}/diff` + - get message diff +- `POST /session/{sessionID}/summarize` + - summarize session + +### Messages + +- `GET /session/{sessionID}/message` + - get session messages +- `POST /session/{sessionID}/message` + - send message +- `GET /session/{sessionID}/message/{messageID}` + - get message +- `DELETE /session/{sessionID}/message/{messageID}` + - delete message +- `DELETE 
/session/{sessionID}/message/{messageID}/part/{partID}` + - delete message part +- `PATCH /session/{sessionID}/message/{messageID}/part/{partID}` + - update message part + +### Prompt and command execution + +- `POST /session/{sessionID}/prompt_async` + - send async message +- `POST /session/{sessionID}/command` + - send command +- `POST /session/{sessionID}/shell` + - run shell command +- `POST /session/{sessionID}/revert` + - revert message +- `POST /session/{sessionID}/unrevert` + - restore reverted messages + +The OpenWork server becomes the adapter and policy layer over those upstream primitives. + +The public API exposed to clients should not use those raw session-first routes directly. + +Instead, the public API should be normalized around workspace-first OpenWork routes, for example: + +- `GET /workspaces/{workspaceId}/sessions` +- `POST /workspaces/{workspaceId}/sessions` +- `GET /workspaces/{workspaceId}/sessions/status` +- `GET /workspaces/{workspaceId}/sessions/{sessionId}` +- `PATCH /workspaces/{workspaceId}/sessions/{sessionId}` +- `DELETE /workspaces/{workspaceId}/sessions/{sessionId}` +- `GET /workspaces/{workspaceId}/sessions/{sessionId}/messages` +- `POST /workspaces/{workspaceId}/sessions/{sessionId}/messages` +- `POST /workspaces/{workspaceId}/sessions/{sessionId}/fork` +- `POST /workspaces/{workspaceId}/sessions/{sessionId}/abort` + +That means the public API shape should be normalized around: + +- workspace ID first +- then session ID / message ID / other nested IDs + +instead of exposing raw OpenCode-shaped session-first routes directly to clients. 
+ +## Recommended Data Boundaries + +### OpenCode owns + +- project/session/message state +- session runtime behavior +- OpenCode-native semantics around forks/history + +### OpenWork server owns + +- workspace registry +- config registry and config/workspace assignment +- connection registry for local and remote servers +- special internal workspaces +- projection of OpenWork config into OpenCode workspace config +- exposure of workspace/session/runtime primitives to the UI + +### Desktop app owns + +- connection to the local server +- transient UI state +- rendering and interaction state + +## Implications For Migration + +This target implies: + +- Tauri-owned local workspace/config/file behavior must move behind the server +- orchestrator-owned runtime/workspace behavior must move behind the server +- the app should stop depending on direct local mutation as a normal path +- remote workspace support should normalize around "one workspace on one remote OpenWork server" + +The migration rollout itself should be handled in a separate migration plan so existing users can be moved safely and smoothly. + +## Decisions Captured So Far + +1. Session lists can be queried live from OpenCode or remote servers for now. The UI can maintain a cache later if needed. +2. The system has a real `servers` model and a real `workspaces` model. Each workspace points at exactly one server, even if `server` is not yet a first-class user-facing concept. +3. Control Workspace and Help Workspace stay hidden for now, and can later surface through settings/help areas. +4. The preferred initial config path is under the server working directory, shaped like `workspaces/<workspace-id>/config`. +5. Nothing is global by default. When a config item is created, the user chooses which workspaces it applies to. When a workspace is created, the user can choose existing config items to apply. +6. Config reconciliation should use both file watching and periodic/pull-based reconciliation over time. +7. 
Config absorption should only occur for recognized managed concepts. Unknown/manual files should remain in place. +8. A remotely exposed local workspace should use workspace-scoped access, with its own access key. +9. Local workspaces keep a stable OpenWork workspace ID that is separate from the OpenCode project ID, and the server stores the mapping. +10. Session and message primitives should be exposed through workspace-first OpenWork-shaped endpoints, not raw OpenCode endpoint shapes. +11. The migration rollout itself should be described in a separate migration plan. + +## Questions To Address Next + +1. What exact schema should represent the `servers` table, `workspaces` table, config item tables, and workspace-config linking tables? +2. Which other desktop-owned data should move into the server DB as part of this same ownership shift, such as cloud settings, user auth, and synced-item metadata? +3. What exact filesystem layout should we use around the config directory beyond the initial `workspaces/<workspace-id>/config` direction? +4. What exact workspace-first OpenWork route naming should wrap the required OpenCode session/message primitives? diff --git a/prds/server-v2-plan/local-dev.md b/prds/server-v2-plan/local-dev.md new file mode 100644 index 00000000..a2dbbd4c --- /dev/null +++ b/prds/server-v2-plan/local-dev.md @@ -0,0 +1,332 @@ +# Server V2 Local Dev Workflow + +## Status: Draft +## Date: 2026-04-09 + +## Purpose + +This document defines how Server V2, the generated SDK, and the app should stay in sync during local development without manual rebuilds or process restarts after every change. + +This doc assumes Server V2 is a separate new server package, not a mounted sub-application inside the old server. + +While Server V2 is still under validation, the current product should continue to work by default on the legacy path. 
For local testing of the new path, opt in explicitly with: + +```bash +OPENWORK_UI_USE_SERVER_V2=1 pnpm dev:server-v2 +``` + +That same logical flag should control both app routing and desktop startup behavior. + +Detailed generator selection and script shape live in `prds/server-v2-plan/sdk-generation.md`. + +## Goal + +The ideal local loop is: + +```text +edit a new-server route or schema +-> server reloads +-> OpenAPI spec regenerates +-> SDK regenerates +-> app sees the updated types and client methods +-> continue coding without restarting everything +``` + +## Principle + +We should treat local development as three separate but connected loops: + +- server runtime loop +- contract generation loop +- app dev loop + +There is also one runtime-asset loop that matters specifically for Server V2: + +- local sidecar acquisition loop + +Each loop watches its own inputs and reacts only to the changes it actually cares about. + +## Runtime Asset Loop + +For Server V2 local development, runtime ownership should match production, but asset sourcing can be lighter-weight. + +Recommended model: + +- `apps/server-v2` runs directly with Bun in dev/watch mode +- `opencode-router` is built from the local workspace source in `apps/opencode-router` +- `opencode` is downloaded from a pinned release artifact, not committed into the repo and not resolved from `PATH` +- both binaries are staged into a gitignored local runtime-assets directory +- Server V2 launches those staged binaries by absolute path + +### Pinned version source + +The pinned OpenCode version for local dev should come from the root `constants.json` file. + +That means the local-dev flow should: + +- read `opencodeVersion` from `constants.json` +- normalize the version for the upstream release download if needed +- fetch exactly that version when the local cache is missing + +### Recommended local-dev behavior + +1. 
On first local Server V2 run, check the local runtime-assets cache for the pinned `opencode` binary. +2. If the pinned binary is missing, download the matching release artifact for the current platform. +3. Build `apps/opencode-router` locally and stage the resulting binary in the same gitignored runtime-assets area. +4. Start Server V2 in Bun watch mode. +5. Have Server V2 spawn the staged binaries by absolute path. + +### Important rules + +- do not require developers to install `opencode` globally for Server V2 dev +- do not use `PATH` lookup as the default dev mechanism +- do not check the `opencode` binary into git +- prefer caching the downloaded pinned artifact locally so repeated dev restarts are fast + +## Watch Graph + +```text +apps/server-v2/src/** +-> server watch reloads server runtime +-> OpenAPI watch regenerates apps/server-v2/openapi/openapi.json +-> SDK watch regenerates packages/openwork-server-sdk/generated/** +-> app dev server sees workspace package changes +-> app recompiles with updated types and methods +``` + +## Watchers + +### 1. Server runtime watcher + +Purpose: + +- reload the backend when server code changes + +Inputs: + +- `apps/server-v2/src/**` + +Should ignore: + +- `apps/server-v2/openapi/**` +- `packages/openwork-server-sdk/**` + +Reason: + +- generated contract artifacts should not cause unnecessary backend restarts + +### 2. OpenAPI watcher + +Purpose: + +- regenerate the new-server contract when routes or schemas change + +Inputs: + +- `apps/server-v2/src/**` + +Output: + +- `apps/server-v2/openapi/openapi.json` + +Notes: + +- this should use `hono-openapi` +- it should be narrowly scoped to new-server sources +- it should debounce rapid file changes to avoid overlapping runs + +### 3. 
SDK watcher + +Purpose: + +- regenerate the TypeScript SDK when the OpenAPI spec changes + +Input: + +- `apps/server-v2/openapi/openapi.json` + +Output: + +- `packages/openwork-server-sdk/generated/**` + +Notes: + +- it should only react when the spec actually changes +- it should not trigger server reloads +- it should be fast enough to run continuously in dev + +### 4. App dev watcher + +Purpose: + +- recompile the app when app code or SDK package code changes + +Inputs: + +- `apps/app/**` +- `packages/openwork-server-sdk/**` + +Notes: + +- the app should consume the SDK package through a workspace dependency +- the app should own the thin `createSdk({ serverId })` adapter that resolves local server config +- in dev, the SDK package should preferably expose TypeScript source directly rather than requiring a full `dist/` build on every change + +## Preferred SDK Package Dev Shape + +To keep iteration fast, `packages/openwork-server-sdk` should ideally work like this in development: + +- generated files land in `generated/**` +- handwritten SDK files like `src/index.ts` and SSE helpers live beside them +- the app imports the package source through the workspace +- the app keeps `createSdk({ serverId })` in app code rather than in the reusable SDK package +- Vite and TypeScript pick up changes automatically + +That avoids a slow extra cycle like: + +```text +regenerate SDK +-> rebuild package dist +-> app sees dist change +``` + +If a build step is still needed for packaging or publishing, it should exist, but dev should prefer source consumption whenever possible. + +## Runtime Client Creation + +The app-facing entrypoint stays: + +```ts +const sdk = createSdk({ serverId }) +``` + +or directly: + +```ts +await createSdk({ serverId }).sessions.listMessages({ workspaceId, sessionId }) +``` + +`createSdk({ serverId })` should remain lightweight and app-owned. 
+ +In the ideal product model, most user-facing app traffic should still target the local OpenWork server as the canonical adapter and registry. Direct alternate `serverId` targeting is mainly for migration, testing, and explicit server-management flows. + +It should only: + +- resolve `serverId` to the latest known `baseUrl`, `token`, and capability info when direct target selection is needed +- prepare a generated SDK instance plus any app-local migration routing +- return the typed SDK object + +It should not: + +- perform network discovery by default +- make a capability request on every call +- do expensive initialization work + +This keeps per-call SDK creation cheap enough that we do not need to cache or reuse it aggressively. + +## What Changes Trigger What + +### Change: new-server route handler or schema + +- server reloads +- OpenAPI spec regenerates +- SDK regenerates +- app sees updated methods and types + +### Change: server internals only, no contract change + +- server reloads +- OpenAPI may regenerate +- SDK may regenerate to identical output +- app usually does not need any meaningful change + +### Change: generated SDK mapping or SSE helper + +- SDK package changes +- app recompiles +- server does not need to restart unless server code also changed + +### Change: app feature code only + +- app recompiles +- server and SDK do not need to restart + +## SSE in Local Dev + +There will likely be only one or two SSE endpoints. 
+ +Recommended approach: + +- document the SSE endpoints in the new server contract +- keep event payloads typed from generated or shared contract types +- expose small handwritten SSE helpers from `packages/openwork-server-sdk` +- let the app consume those helpers through the same `createSdk({ serverId })` entrypoint + +That means SSE changes still fit the same watch graph: + +- server-side event contract change -> spec generation -> SDK or helper update -> app sees new types +- helper implementation change -> app recompiles immediately + +## Avoiding Restart Loops + +The main risk in this setup is watchers causing each other to loop. + +We should prevent that by keeping responsibilities clean: + +- server watcher ignores generated spec and SDK files +- OpenAPI watcher only watches new-server source +- SDK watcher only watches the spec file +- app watcher only consumes the SDK package output, not the server source tree directly + +If needed, generation steps should write files only when contents actually change. + +## CI Mirror of the Dev Flow + +Local dev should be convenient, but CI should still enforce correctness. + +CI should run the same core graph without watch mode: + +```text +generate openapi spec +-> generate sdk +-> fail if git diff is non-empty +``` + +That ensures local convenience never replaces contract discipline. 
+ +## Suggested Scripts + +Exact tooling is still open, but the shape should look like this: + +```text +apps/server-v2 +- dev # backend watch mode +- openapi:generate # one-shot spec generation +- openapi:watch # watch new-server sources and regenerate spec + +packages/openwork-server-sdk +- generate # one-shot SDK generation +- watch # watch spec and regenerate sdk + +repo root +- dev:server-v2 # run server watch + openapi watch + sdk watch + app dev together +- dev:server-v2:server # run only the backend/spec/sdk watch graph when the app is not needed +``` + +Current implementation note: + +- `pnpm dev:server-v2` is the default composite command and includes the app dev server. +- `pnpm dev:server-v2:server` exists for backend-only work. +- The SDK watcher watches the OpenAPI directory entry instead of a single file handle so spec rewrites do not silently stop regeneration after replace-style writes. +- OpenAPI generation runs against an isolated temporary Server V2 working directory so contract generation does not touch or depend on a developer's live imported workspace state. + +## Developer Experience Target + +From a developer's point of view, the happy path should be: + +1. run one dev command +2. edit new-server routes, schemas, or app code freely +3. let watchers keep server runtime, spec, SDK, and app types synchronized +4. avoid manual kill/restart/build loops except when tooling itself changes + +That is the standard we should design toward. diff --git a/prds/server-v2-plan/orchestrator-audit.md b/prds/server-v2-plan/orchestrator-audit.md new file mode 100644 index 00000000..a352d2f5 --- /dev/null +++ b/prds/server-v2-plan/orchestrator-audit.md @@ -0,0 +1,515 @@ +# Orchestrator Audit + +## Scope + +This audit covers `apps/orchestrator/**`. + +The goal is to explain what the orchestrator is, what its major functions do, where they are called from, what they ultimately affect, and which responsibilities should ideally move into the main server over time. 
+ +## What The Orchestrator Is + +The orchestrator is the host-side runtime manager for OpenWork. + +In plain English, it is the thing that turns: + +- a workspace +- some binaries +- some ports and tokens +- an execution mode + +into a running OpenWork worker stack. + +Today it is responsible for things like: + +- starting and supervising `opencode` +- starting and supervising `openwork-server` +- optionally starting `opencode-router` +- exposing a daemon API for desktop workspace activation and disposal +- managing detached and sandboxed runtime flows +- handling host-side sidecar/binary resolution and upgrade control + +It is currently both: + +- a bootstrap shell for host/runtime startup +- a mini runtime control plane + +That split is important, because the control-plane responsibilities should move first, and the bootstrap shell responsibilities should also be reevaluated for collapse into the main server rather than preserved automatically. + +## Disposition Labels + +- `Stay`: should remain an orchestrator/host-shell concern. +- `Move`: should ideally move into the main server over time. +- `Split`: some boundary or trigger may stay in the orchestrator, but the core capability should move into the main server. + +## High-Level Lifecycle + +1. CLI or desktop launches the orchestrator. +2. The orchestrator resolves binaries, sidecars, ports, tokens, and state dirs. +3. It starts and supervises OpenCode, OpenWork server, and optional router. +4. In daemon mode, it persists workspace/runtime state and exposes a local daemon API. +5. In detached or sandboxed flows, it creates and manages those host/container runtimes. +6. It also exposes convenience CLI wrappers over some server APIs. 
+ +## Single-Workspace Host Startup And Shutdown + +Disposition guidance: + +- `main()` -> `Split` +- `runStart()` -> `Split` +- `shutdown()` inside `runStart()` -> `Split` + +Reasoning: this is current-state bootstrap logic, but the large amount of runtime capability assembled by `runStart()` should ultimately be server-owned and folded into the main server. + +### `main()` + +- What it does: dispatches the top-level orchestrator CLI commands. +- Called from and when: process entrypoint for the `openwork` CLI. +- Ends up calling: `runStart()`, daemon commands, approvals/files/status helpers, and other CLI subcommands. + +### `runStart()` + +- What it does: main coordinator for host mode; resolves workspace, ports, tokens, binaries, sandbox mode, starts services, waits for health, and prints connect info. +- Called from and when: called by `main()` for `openwork start` and `openwork serve`; also used indirectly when desktop detached mode shells out to `openwork start --detach`. +- Ends up calling: OpenCode startup, OpenWork server startup, optional OpenCodeRouter startup, sandbox/container logic, health loops, owner token issuance, and runtime control server setup. + +### `shutdown()` inside `runStart()` + +- What it does: gracefully stops children, control server, timers, and optional sandbox container. +- Called from and when: called on SIGINT/SIGTERM, fatal child exits, failed startup, and check completion. +- Ends up calling: full host-stack shutdown. 
+ +## Runtime Process Supervision And Upgrade Control + +Disposition guidance: + +- `startOpencode()` -> `Split` +- `startOpenworkServer()` -> `Split` +- `startOpenCodeRouter()` -> `Split` +- child exit/spawn error handlers -> `Split` +- runtime control server in `runStart()` -> `Move` +- `performRuntimeUpgrade()` -> `Split` + +Reasoning: native child supervision exists today in the orchestrator, but even that should be treated as transition-state behavior if the main server is going to absorb bootstrap and runtime supervision over time. + +### `startOpencode()` + +- What it does: spawns OpenCode with the chosen env, auth, reload, and logging settings. +- Called from and when: called from `runStart()` and daemon `ensureOpencode()`. +- Ends up calling: `opencode serve` and the OpenCode runtime itself. + +### `startOpenworkServer()` + +- What it does: launches `openwork-server` with the bootstrap information needed for host mode today. +- Called from and when: called from `runStart()` and restart paths. +- Ends up calling: the main OpenWork API/control layer process; over time this bootstrap contract should shrink toward the minimum needed to bring the server up. + +### `startOpenCodeRouter()` + +- What it does: launches `opencode-router` when messaging/router support is enabled. +- Called from and when: called from `runStart()` and restart paths. +- Ends up calling: the router sidecar process; launch may remain host-owned, but router product control should move into the server. + +### child exit and spawn error handlers + +- What they do: turn child process failures into orchestrator shutdown or degraded-mode handling. +- Called from and when: called during host mode whenever a managed child exits or fails to spawn. +- Ends up calling: shutdown or failure-state logic for the whole host stack. 
+ +### runtime control server in `runStart()` + +- What it does: exposes host-local legacy `/runtime/versions` and `/runtime/upgrade` endpoints; in the Server V2 target these capabilities should land under `/system/runtime/*` on the main server. +- Called from and when: started during host startup. +- Ends up calling: runtime inspection and rolling restart/upgrade behavior. + +### `performRuntimeUpgrade()` + +- What it does: re-resolves binaries, optionally installs packages, and restarts services. +- Called from and when: called through the runtime control server when a client requests an upgrade. +- Ends up calling: rolling restart of OpenCode, OpenWork server, and OpenCodeRouter. + +## Sidecar And Binary Resolution + +Disposition guidance: + +- all major functions in this section -> `Split` + +Reasoning: in the current orchestrator these are host/bootstrap concerns, but the Server V2 target changes the default distribution model. When the canonical runtime is `openwork-server-v2` bundling and extracting its own sidecars, the equivalent resolution logic should move into the server for that path. A thinner host shell may still need fallback or external-runtime resolution in some modes, so this boundary is better treated as `Split` than `Stay`. + +### `resolveOpenworkServerBin()` + +- What it does: decides which `openwork-server` binary to run. +- Called from and when: called during startup and upgrade. +- Ends up calling: local sidecar selection or downloaded/external binary resolution. + +### `resolveOpencodeBin()` + +- What it does: decides which OpenCode binary to run. +- Called from and when: called during startup, daemon mode, and upgrade. +- Ends up calling: local sidecar or installed OpenCode resolution. + +### `resolveOpenCodeRouterBin()` + +- What it does: decides which OpenCodeRouter binary to run. +- Called from and when: called when router support is enabled. +- Ends up calling: router binary resolution. 
+ +### `resolveSidecarConfig()` + +- What it does: computes sidecar directories, manifests, target triple, and download URLs. +- Called from and when: called by startup and daemon flows. +- Ends up calling: all sidecar download logic. + +### `downloadSidecarBinary()` + +- What it does: downloads and verifies sidecar binaries. +- Called from and when: called when sidecar source is downloaded or auto-resolved that way. +- Ends up calling: local sidecar cache population. + +### version verification helpers + +- What they do: verify that the runtime is actually using the intended binary/version/config. +- Called from and when: called after service health checks and during upgrade. +- Ends up calling: startup failure or diagnostic reporting if versions do not match expectations. + +## Desktop Daemon And Workspace Activation + +Disposition guidance: + +- `runDaemonCommand()` -> `Split` +- `runRouterDaemon()` -> `Split` +- `ensureRouterDaemon()` -> `Split` +- `ensureOpencode()` inside daemon mode -> `Split` +- daemon HTTP API -> `Move` +- `runWorkspaceCommand()` -> `Move` +- `runInstanceCommand()` -> `Move` + +Reasoning: a host-local daemon boundary may still exist, but workspace activation and instance disposal are product/runtime control capabilities that should move into the main server. + +### `runDaemonCommand()` + +- What it does: handles daemon subcommands like start, status, stop, and run. +- Called from and when: called by the CLI daemon entry. +- Ends up calling: daemon startup or daemon status/control requests. + +### `runRouterDaemon()` + +- What it does: runs the long-lived desktop daemon that keeps OpenCode alive, persists workspace state, and serves the daemon HTTP API. +- Called from and when: called by `openwork daemon run`; desktop also spawns it for desktop host mode. 
+- Ends up calling: daemon HTTP service, workspace registry persistence, and OpenCode lifecycle management; this should be treated as transitional control-plane behavior rather than an enduring ownership boundary. + +### `ensureRouterDaemon()` + +- What it does: auto-starts the daemon if it is not already running and waits for health. +- Called from and when: called by CLI workspace/instance helpers. +- Ends up calling: daemon process startup. + +### `ensureOpencode()` in daemon mode + +- What it does: reuses or starts the single OpenCode runtime used by the daemon. +- Called from and when: called on daemon boot and when workspace operations need OpenCode. +- Ends up calling: daemon-managed OpenCode lifecycle. + +### daemon HTTP API + +- What it does: exposes local endpoints for health, workspace listing, workspace activation, workspace path lookup, instance disposal, and shutdown. +- Called from and when: used by desktop and orchestrator CLI helpers. +- Ends up calling: daemon state mutation and workspace/runtime activation behavior. + +### `runWorkspaceCommand()` + +- What it does: thin CLI wrapper for workspace add/list/switch/info/path commands. +- Called from and when: called by `openwork workspace ...`. +- Ends up calling: daemon HTTP API. + +### `runInstanceCommand()` + +- What it does: thin CLI wrapper for instance disposal commands. +- Called from and when: called by `openwork instance dispose ...`. +- Ends up calling: daemon HTTP API. + +## Auth, Tokens, And Local State Persistence + +Disposition guidance: + +- managed OpenCode credential resolution -> `Split` +- owner token issuance -> `Move` +- OpenCode state layout resolution/creation -> `Stay` +- router/orchestrator state persistence -> `Split` + +Reasoning: local secret and filesystem layout for host relaunches is still host-owned, but token semantics and control-plane identity should move closer to the main server. 
+ +### managed OpenCode credential resolution + +- What it does: generates or reads OpenCode basic-auth credentials used by the orchestrator-managed runtime. +- Called from and when: called during host startup and daemon startup. +- Ends up calling: OpenCode auth configuration used to protect direct OpenCode access. + +### `issueOpenworkOwnerToken()` + +- What it does: asks the server to mint an owner token from a host token. +- Called from and when: called after server health checks in host and sandbox mode. +- Ends up calling: the server `/tokens` API and elevated host-control access. + +### OpenCode state layout helpers + +- What they do: choose and create OpenCode config/data/cache locations, especially in dev and orchestrated modes. +- Called from and when: called before OpenCode launch in host and daemon modes. +- Ends up calling: local directory creation and optional auth/config import. + +### orchestrator state persistence helpers + +- What they do: read and write `openwork-orchestrator-state.json` and related state snapshots. +- Called from and when: called throughout daemon lifecycle and desktop reconnect flows. +- Ends up calling: daemon/workspace/binary state persistence on disk. + +## Docker And Apple Container Sandbox Flows + +Disposition guidance: + +- all major sandbox/container functions in this section -> `Split` + +Reasoning: the container substrate itself is still host-shell territory, but the runtime graph and product behavior being started inside that substrate should converge on the same Server V2 ownership model. In practice that means some container launch hooks may stay outside the server, while runtime boot config, child supervision policy, and product-facing control semantics should collapse inward. + +### `resolveSandboxMode()` + +- What it does: decides whether the runtime should be host, Docker, Apple container, or auto-selected. +- Called from and when: called during `runStart()`. +- Ends up calling: the runtime-mode selection path. 
+
+### `resolveSandboxExtraMounts()`
+
+- What it does: validates extra host mounts for sandbox mode.
+- Called from and when: called when sandbox mounts are requested.
+- Ends up calling: host-filesystem exposure policy into the sandbox.
+
+### `stageSandboxRuntime()`
+
+- What it does: stages sidecars into a persist area for sandbox execution.
+- Called from and when: called before Docker or Apple sandbox launch.
+- Ends up calling: local staged runtime payload creation.
+
+### `writeSandboxEntrypoint()`
+
+- What it does: writes the in-container boot script that starts OpenCode, optional router, then OpenWork server.
+- Called from and when: called before container launch.
+- Ends up calling: the actual sandbox boot sequence.
+
+### `startDockerSandbox()`
+
+- What it does: starts the whole host stack inside Docker.
+- Called from and when: called from `runStart()` when Docker sandboxing is selected.
+- Ends up calling: `docker run` with mounts, env, and ports.
+
+### `startAppleContainerSandbox()`
+
+- What it does: starts the same stack inside the Apple container backend.
+- Called from and when: called from `runStart()` on supported macOS setups.
+- Ends up calling: Apple container runtime launch.
+
+## HTTP Surfaces And Control-Plane Bridging
+
+Disposition guidance:
+
+- daemon HTTP API -> `Move`
+- runtime control API -> `Move`
+- generic router request client -> `Move`
+
+Reasoning: these are useful product capabilities, but they should ideally be owned by the main server, not by a separate orchestrator control API forever.
+
+### daemon HTTP API
+
+- What it does: local control API for health, workspaces, activation, path lookup, disposal, and shutdown.
+- Called from and when: used by desktop and CLI.
+- Ends up calling: daemon state and workspace activation behavior.
+ +### runtime control API + +- What it does: local legacy `/runtime/versions` and `/runtime/upgrade` surface; the target normalized namespace is `/system/runtime/*` once this moves into the main server. +- Called from and when: started in host mode and later accessed through server proxy routes or clients. +- Ends up calling: runtime inspection and controlled restart/upgrade behavior. + +### `requestRouter()` + +- What it does: thin generic CLI client for the daemon API. +- Called from and when: used by workspace and instance commands. +- Ends up calling: daemon HTTP API requests. + +## Health, Diagnostics, Checks, And Detached Mode + +Disposition guidance: + +- local health polling helpers -> `Split` +- `runChecks()` / `runSandboxChecks()` -> `Split` +- `runStatus()` -> `Move` +- `handleDetach()` -> `Stay` + +Reasoning: host-local polling and detach UX are shell concerns, but canonical health/readiness/status surfaces should move into the main server. + +### health polling helpers + +- What they do: wait for OpenCode, server, and router health. +- Called from and when: called throughout startup, daemon, and check flows. +- Ends up calling: localhost health probes and startup gating. + +### `runChecks()` / `runSandboxChecks()` + +- What they do: run host-mode smoke tests for runtime correctness, sessions, router auth, workspaces, and optional event streams. +- Called from and when: called by `openwork start --check` and related flags. +- Ends up calling: broad end-to-end runtime verification. + +### `runStatus()` + +- What it does: reports current OpenWork/OpenCode URLs and related status in human-readable form. +- Called from and when: called by `openwork status`. +- Ends up calling: health endpoints and status rendering. + +### `handleDetach()` + +- What it does: detaches the orchestrator shell from the running child stack and leaves the runtime alive. +- Called from and when: called when detached mode is requested. 
+- Ends up calling: stdio unref/cleanup and detached runtime summary output. + +## Messaging Enablement And Managed Tool Injection + +Disposition guidance: + +- router enablement decision -> `Split` +- managed OpenCode tool injection -> `Move` +- generated router tool sources -> `Move` + +Reasoning: startup hooks may stay host-owned, but mutating OpenCode tool/config surfaces is the kind of capability that should belong to the main server/runtime layer. + +### router enablement decision helpers + +- What they do: determine whether OpenCodeRouter should run from flags, env, workspace config, or inferred defaults. +- Called from and when: called during host startup. +- Ends up calling: router startup decisions and sometimes persisted messaging defaults. + +### `ensureOpencodeManagedTools()` + +- What it does: writes managed router send/status tools into OpenCode config directories. +- Called from and when: called before OpenCode startup in host and daemon mode. +- Ends up calling: OpenCode tool-surface mutation. + +### generated router tool sources + +- What they do: generate tool implementations that send through or inspect OpenCodeRouter. +- Called from and when: called by the managed-tool injection path. +- Ends up calling: the OpenCodeRouter health/config/send surface. + +## Thin CLI Wrappers Over Server APIs + +Disposition guidance: + +- file CLI wrappers -> `Move` +- approvals CLI wrappers -> `Move` +- simple status wrapper -> `Move` + +Reasoning: these are not orchestrator responsibilities. They are convenience clients over `openwork-server` APIs. + +### `runFiles()` + +- What it does: exposes CLI wrappers for file session creation, read/write, catalog, events, mkdir, delete, and rename. +- Called from and when: called by `openwork files ...`. +- Ends up calling: OpenWork server file-session APIs. + +### `runApprovals()` + +- What it does: exposes CLI wrappers for approvals listing and replies. +- Called from and when: called by `openwork approvals ...`. 
+- Ends up calling: OpenWork server approvals APIs. + +### `runStatus()` + +- What it does: provides a convenience status CLI. +- Called from and when: called by `openwork status`. +- Ends up calling: health/status APIs. + +## Summary: What Should Stay Vs Move + +### Should stay in the orchestrator only temporarily + +- native process supervision +- sidecar and binary resolution +- host-local port/bootstrap/env setup +- Docker and Apple container lifecycle +- detach/TUI/log-multiplexing shell behavior + +### Should move into the main server + +- workspace/runtime control APIs as product capabilities +- daemon HTTP control surfaces +- file and approvals CLI wrappers over server APIs +- managed OpenCode tool/config mutation +- status/health/product control semantics that should be canonical server surfaces + +### Should be split during migration + +- host launch triggers can stay in the orchestrator while runtime capability ownership moves into the server +- local auth/state persistence for relaunch can stay host-owned while token semantics move into the server +- health polling can stay local while official health/control surfaces move into the server + +## Bottom Line + +The orchestrator is currently doing two jobs: + +1. host bootstrap shell +2. mini runtime control plane + +The mini runtime control plane should be progressively absorbed into the main server first. + +After that, the remaining bootstrap shell should also be questioned and collapsed into the main server wherever practical. + +## Can Orchestrator Disappear? + +Mostly yes, if by "orchestrator" we mean a separate control-plane product layer. + +### Full-collapse idea + +The strongest form of the target architecture is: + +```text +desktop app or CLI +-> one OpenWork server process +-> server owns workspace/runtime/product behavior +-> server supervises the local runtime pieces it needs +``` + +In that model, clients do not need to understand or talk to a separate orchestrator API surface. 
+ +### What should ideally be folded into the server too + +Even the bootstrap shell responsibilities should be treated as collapse candidates: + +- launching local child processes +- resolving sidecars and binaries +- choosing ports and env vars +- Docker and Apple container startup +- detach and local process supervision + +The strongest target is: + +- no separate orchestrator control plane +- no enduring separate orchestrator bootstrap layer +- one main server as the canonical runtime and product API + +### Recommended target architecture + +```text +desktop app +-> launches or connects to OpenWork server +-> calls one main server API surface + +OpenWork server +-> starts and supervises local OpenCode/router/container/runtime pieces as needed +``` + +### Migration implication + +As Server V2 grows, the priority should be: + +1. move orchestrator-owned workspace/runtime APIs into the server +2. move orchestrator-owned config/tool/control semantics into the server +3. move bootstrap and supervision responsibilities into the server as the final collapse step + +That is the path to "doing away with the orchestrator" in practice. diff --git a/prds/server-v2-plan/plan.md b/prds/server-v2-plan/plan.md new file mode 100644 index 00000000..5ef4ce76 --- /dev/null +++ b/prds/server-v2-plan/plan.md @@ -0,0 +1,543 @@ +# PRD: Server V2 New Server Plan + +## Status: Draft +## Date: 2026-04-09 + +## Problem + +The current server architecture is not the one we want to keep. + +We want to build a whole new server as its own server package and process, make that server the real owner of product/runtime/workspace behavior, and then switch the desktop app to start and consume that new server directly. + +For planning purposes in this doc set, `openwork-server-v2` is the working name for that new package, binary, and runtime bundle. We can rename it later without changing the architecture direction. 
+ +## Goals + +- Build a new server implementation in new files without extending the lifetime of the legacy architecture. +- Build the new server as a separate server, not as a mounted sub-application inside the old server. +- Keep full TypeScript type safety across server routes, generated clients, and the app-side SDK adapter. +- Make the desktop app a thin client that starts the server, maintains local UI state, and sends all workspace behavior through the server. +- Give the desktop app a clean migration layer so UI features can move to the new server bit by bit. +- End with the desktop app starting only the new server, with the old server removed. + +## Non-Goals + +- Doing a single big-bang rewrite. +- Repointing all desktop traffic in one release. +- Keeping both architectures around indefinitely. +- Rewriting storage or domain behavior unless it is required for the new server path. +- Preserving Tauri-only or app-only workspace capabilities as a permanent parallel system. + +## Working Approach + +### 1. Build a whole new server package + +The new server should exist as its own package and process. + +Example shape: + +```text +apps/server-v2/ +├── src/ +│ └── cli.ts +├── openapi/ +└── package.json +``` + +This gives us: + +- a clean architecture from day one +- no need to preserve old server structure while designing the new one +- a direct path to making the desktop app launch the new server when ready +- a clear ownership boundary for new work + +### 2. Put all new server work in the new server package + +Create a clearly isolated package for the replacement server so the migration is obvious and deletion is easy later. + +Proposed shape: + +```text +apps/server-v2/ +├── src/ +│ ├── app.ts +│ ├── cli.ts +│ ├── routes/ +│ ├── middleware/ +│ ├── services/ +│ ├── schemas/ +│ └── adapters/ +└── openapi/ +``` + +Rule: new server functionality goes into the new server package, not into legacy server files. + +### 3. 
Migrate the desktop app through an explicit API layer + +The desktop app should not scatter raw server paths throughout the UI. More importantly, it should stop owning workspace behavior directly. + +The target model is: + +- the desktop app spins up or connects to servers +- the desktop app maintains local UI state and a list of connected servers +- the desktop app maintains a list of workspaces that belong to those servers +- all real workspace operations go through the server + +That means the desktop app should not be the long-term owner of: + +- file reads +- file writes +- workspace mutation +- AI/session/task operations +- project/runtime inspection +- skill/plugin/config mutation +- other workspace-scoped business logic + +Those should become server responsibilities, even in desktop-hosted mode. + +To move incrementally, the app needs a small client-side API layer that can move features onto the new server without changing the rest of the UI shape all at once. + +That layer should: + +- centralize server route construction +- expose named operations instead of raw URL strings +- allow per-feature or per-endpoint migration +- make fallback possible while the new server is incomplete + +Example migration shape: + +```text +desktop feature +-> app server client module +-> legacy path or v2 path +``` + +This lets the backend and frontend migrate independently but in a coordinated way. + +## Ownership Boundary + +The long-term ownership boundary should be explicit. 
+ +### Desktop app responsibilities + +- launch or connect to one or more servers +- maintain local UI state +- maintain presentation state, navigation state, drafts, and preferences +- cache and render the visible list of servers and workspaces returned by the server +- render server-backed data and send user intent to the server + +### Server responsibilities + +- own all workspace-scoped behavior +- own all file reads and writes +- own all AI, session, and task execution behavior +- own project discovery and runtime inspection +- own skill, plugin, MCP, and config mutation +- own local-runtime integration with OpenCode and related sidecars +- expose all of that through a stable API surface for the app + +### Rule of thumb + +If something is a real workspace capability rather than transient UI state, it should live behind the server. + +The app is the interface. The server does the work. + +## Orchestrator Collapse Target + +The target architecture is not just "move app behavior behind the server". 
+ +It is also: + +- stop treating the orchestrator as a separate long-term control plane +- fold orchestrator-owned product/runtime capabilities into the main server +- fold bootstrap and supervision responsibilities into the main server itself wherever possible + +Desired end state: + +```text +desktop app or CLI +-> starts or connects to one OpenWork server process +-> OpenWork server owns workspace/runtime/product behavior +-> OpenWork server supervises the local runtime pieces it needs +``` + +Not the desired end state: + +```text +desktop app +-> orchestrator control plane +-> separate server control plane +``` + +What should move into the main server: + +- workspace activation and runtime control APIs +- runtime status and health product surfaces +- upgrade/control semantics exposed to clients +- config/skill/plugin/MCP mutation flows +- OpenCode integration behavior that is really a workspace capability +- other orchestrator control-plane logic that clients should not need to understand separately +- process supervision for OpenCode/router/runtime pieces where practical +- sidecar/binary/runtime resolution where practical +- local bootstrap logic that only exists to support the OpenWork runtime + +The desktop app should ideally only launch the main server process, not assemble and supervise a second runtime graph itself. + +## Route Strategy + +The new server should expose OpenWork-shaped routes directly. + +Recommendation: + +- use workspace-first OpenWork routes as the real public API shape +- use `/system/*` for server-level operational and runtime endpoints that are not scoped to a workspace +- do not design the route system around mounting under a legacy subpath +- treat versioning as a deployment or compatibility concern, not as the primary organizing principle of the new server + +## Contract and SDK Strategy + +The new server should be the source of truth for its API contract. 
+ +Detailed generator and script choices live in `prds/server-v2-plan/sdk-generation.md`. + +Planned approach: + +- define new-server routes in TypeScript with Hono and typed schemas +- generate an OpenAPI spec from the Hono app, likely with `hono-openapi` +- generate a TypeScript SDK from that OpenAPI spec +- consume that SDK from a small app-side `createSdk({ serverId })` adapter instead of calling raw paths directly + +This keeps the server contract synchronized through code generation instead of manual duplication. + +### Recommended package shape + +```text +apps/server-v2/ +├── src/... +└── openapi/ + └── openapi.json # generated + +packages/openwork-server-sdk/ +├── generated/ # generated from OpenAPI +├── src/index.ts # stable server-agnostic exports +└── package.json + +apps/app/ +└── ... app-side `createSdk({ serverId })` adapter +``` + +### Rules + +- The Hono route definitions and schemas are the source of truth. +- The OpenAPI spec is a generated artifact. +- `hono-openapi` is the leading candidate for spec generation because it is built for Hono and aligns with the V2 stack. +- The SDK is generated from the spec and stays TypeScript-native. +- The generated SDK package should stay server-agnostic and reusable. +- App features should call a single app-side entrypoint such as `createSdk({ serverId })`. +- `createSdk({ serverId })` should live in app code, resolve server config locally, and prepare a typed client with the correct base URL and token. +- The app should not pass raw `baseUrl` and `token` around feature code. +- The app should not implement parallel workspace behavior when that behavior can be expressed as a server capability. +- For standard JSON endpoints, the generated SDK should be the primary client surface. +- For the one or two SSE endpoints, we may need small handwritten streaming helpers exposed from the same SDK package. 
+- `hono-openapi` covers contract generation, not the full client story; SDK generation and SSE helpers remain separate concerns. + +### Why not import server code directly? + +We want shared contracts, not shared runtime implementation. + +- clients should share types and operations with the server +- clients should not import server internals, Hono handlers, or server runtime wiring +- the server must remain free to evolve internally without leaking implementation structure into the app + +### App-facing SDK shape + +Preferred app usage: + +```ts +await createSdk({ serverId }).sessions.listMessages({ workspaceId, sessionId }) +``` + +This gives us: + +- generated endpoint methods and types +- explicit server selection through `serverId` +- explicit resource selection through `workspaceId`, `sessionId`, and similar params +- no need for a large handwritten fluent wrapper layer +- no coupling between app code and server source files + +## Local Dev Contract Workflow + +The generated SDK should work in local development, not only in CI. + +Detailed watch-mode workflow lives in `prds/server-v2-plan/local-dev.md`. + +Desired loop: + +1. change a new-server Hono endpoint or schema +2. regenerate the OpenAPI spec locally +3. regenerate the TypeScript SDK locally +4. 
app code sees the updated types and client methods immediately
+
+Recommended local setup:
+
+- `apps/server-v2` watches `src/**` and regenerates `openapi/openapi.json`
+- `packages/openwork-server-sdk` watches `openapi/openapi.json` and regenerates the reusable server-agnostic client package
+- the app watches its own `createSdk({ serverId })` adapter alongside normal app code
+- the app depends on `openwork-server-sdk` through the workspace so type updates are visible immediately
+- if the SDK needs a build step, run that build in watch mode too
+
+To avoid restart loops, the server runtime watcher should ignore generated spec and SDK files.
+
+This should make endpoint changes flow into the app with minimal delay during development.
+
+### CI enforcement
+
+Local watch mode is a convenience. CI should still be the guardrail.
+
+CI should:
+
+- regenerate the OpenAPI spec
+- regenerate the SDK
+- fail if regeneration produces a git diff
+
+That makes contract drift visible immediately and keeps the generated client trustworthy.
+
+## Migration Strategy
+
+Detailed UI and desktop rollout strategy lives in `prds/server-v2-plan/ui-migration.md`.
+
+### Phase 0: Create the new server package and contract loop
+
+- Create the new server package under `apps/server-v2/`.
+- Add a minimal Hono app entrypoint.
+- Add a minimal health or test route to prove the server boots and serves requests.
+- Add OpenAPI generation for the new server, likely via `hono-openapi`.
+- Add a generated TypeScript SDK package for the new server.
+- Add an app-side `createSdk({ serverId })` adapter before migrating individual features.
+- Document which desktop-owned capabilities must move behind the server over time.
+- Define the first-run import path for existing app/orchestrator state that should move into the new server DB.
+
+Success criteria:
+
+- The new server boots independently.
+- OpenAPI generation and SDK generation succeed locally. +- The app can target the new server through one adapter layer. + +### Phase 0.5: Absorb existing local product state into the server DB + +Before feature slices can fully move, the new server needs a clear story for taking over the durable state the app and orchestrator own today. + +- import or normalize workspace records from current desktop state such as `openwork-workspaces.json` +- import remote workspace mappings and selected connection metadata into the server registry +- import or reconstruct cloud auth/session metadata into the server-owned sqlite model +- import or normalize orchestrator state snapshots that still matter for reconnect or migration +- make the migration idempotent so the server can retry safely on startup + +Success criteria: + +- the server can reconstruct its canonical sqlite state from current local product state without manual hand edits +- post-migration app startup reads server-owned workspace and connection state instead of rebuilding it locally +- migration failures are visible and recoverable instead of silently leaving split ownership behind + +### Phase 1: Move low-risk read endpoints first + +Start with read-only or low-risk endpoints so the migration path is proven before touching write flows. + +- Implement new endpoints in Hono. +- Point a small, isolated desktop surface at the new server. +- Compare behavior against the existing implementation. + +Success criteria: + +- The desktop app can use at least one new-server endpoint in production-like flows. +- The app-side adapter can route that surface to the new server cleanly. + +### Phase 2: Move mutations and workflow endpoints + +Once the structure is stable, move write paths and workflow endpoints into the new server in slices. + +- Port one capability area at a time. +- Keep domain behavior consistent while the transport layer changes. +- Avoid broad dual-write logic unless absolutely necessary. 
+ +Success criteria: + +- End-to-end feature flows work through the new server for selected areas. +- The new server becomes credible as the real future server. + +### Phase 3: Collapse orchestrator control-plane responsibilities into the server + +Once the server surface is credible, start moving orchestrator-owned product capabilities into the main server. + +- move workspace/runtime control APIs into the server +- move orchestrator daemon API semantics into server-owned routes +- move config/skill/plugin/MCP mutation ownership into the server +- move bootstrap and supervision logic into the server so clients do not depend on a separate host runtime manager + +Success criteria: + +- clients do not need a separate orchestrator API model +- server routes become the canonical runtime/workspace control surface +- orchestrator disappears as a meaningful product layer + +### Phase 4: Make the new server the default desktop runtime + +- Switch desktop startup to launch the new server. +- Switch desktop API clients to use the new server by default. +- Monitor for gaps in auth, payload shape, and error handling. + +Success criteria: + +- New desktop traffic uses the new server by default. +- The old server is no longer on the critical path for normal desktop usage. + +### Phase 5: Remove the old server and leftover orchestrator code + +- Delete the old server implementation once all consumers are moved. +- Promote the new server package to be the only server implementation that matters. +- delete or absorb orchestrator code that only existed to provide a separate control plane or bootstrap layer + +Success criteria: + +- No active desktop path depends on the old server. +- All server behavior lives in the new server package. +- orchestrator is no longer needed as a separate product/runtime layer. + +## Desktop App Requirements + +To migrate safely, the desktop app should introduce a server-facing boundary before moving features. 
+ +The desired end state is not just route migration. It is responsibility migration. + +The desktop app should become a thin client. + +Requirements: + +- one module owns server resolution from `serverId` +- features call typed operations, not literal URL paths +- route selection can happen per endpoint or per feature area while migration lasts +- the target server is selected explicitly by `serverId`, not hidden global state +- it is easy to see which calls have been moved to the new server +- the app only owns transient UI state, not durable workspace behavior +- the app can list known servers and the workspaces available within each server +- workspace reads, writes, AI actions, and config mutations should route through the server + +Nice-to-have follow-ups: + +- a feature flag or config switch for targeted rollout +- a capability probe so the app can detect new-server support from the server +- simple request logging that shows whether traffic used the current or new server during migration + +### Client SDK model + +The app may talk to multiple server destinations, but the preferred API is still one SDK entrypoint. + +Examples: + +- local desktop-hosted server +- remote worker-backed server +- hosted OpenWork Cloud server + +Because of that, SDK creation may still take an explicit `serverId` during migration and server-management flows. + +The key separation is: + +- the SDK resolves which server to call from `serverId` when needed +- each operation receives the workspace ID to use on that server + +That matters because one server can host many workspaces, and the system can know about many servers at once. 
+ +Example shape: + +```ts +const sdk = createSdk({ serverId }) + +await sdk.sessions.list({ workspaceId }) +await sdk.sessions.get({ workspaceId, sessionId }) +await sdk.sessions.listMessages({ workspaceId, sessionId }) +``` + +Illustrative app-side record while migration is in progress: + +```ts +type WorkspaceRecord = { + id: string + serverTargetId: string +} +``` + +In the ideal model, the local OpenWork server owns the durable mapping between: + +- OpenWork workspace ID +- remote OpenWork workspace ID +- OpenCode project ID +- backend server identity + +The app should usually operate on stable OpenWork workspace IDs returned by the local server, not on remote backend IDs directly. + +The generated SDK should stay transport-level and typed. The thin handwritten adapter should own: + +- server target selection +- auth headers and tokens +- temporary old-server versus new-server decision-making during migration +- lightweight client preparation +- capability checks and fallbacks + +It should not grow into a second workspace engine inside the app. + +In the ideal steady state, normal product traffic should target the local OpenWork server as the canonical adapter and registry, while direct alternate `serverId` targeting is reserved for explicit server-management or migration/testing scenarios. + +### SSE endpoint strategy + +Most new-server endpoints should be standard request/response endpoints covered directly by the generated SDK. 
+ +For the likely one or two SSE endpoints: + +- the OpenWork server should still be the only streaming surface the app talks to +- the SSE routes should still be documented in the new server contract +- event payloads should still be typed from generated or shared contract types, not imported directly from server source +- we may need a small handwritten streaming helper because most OpenAPI generators do not produce an ergonomic typed SSE client automatically + +Goal: + +- normal endpoints: fully generated TypeScript SDK methods +- SSE endpoints: small typed streaming helpers exposed from the same package so app usage still feels unified + +## Architectural Principles + +- **New code in new files**: treat the new server package as the replacement tree, not as an extension of legacy code. +- **New server first**: design the replacement as its own server, not as a mounted extension of the old one. +- **One slice at a time**: move vertical feature slices instead of mixing many partial migrations. +- **Explicit routing**: desktop traffic should move to the new server intentionally, not accidentally. +- **Server-owned workspace behavior**: file access, AI/runtime behavior, project/config mutation, and other workspace capabilities belong to the server, not the UI. +- **Thin desktop app**: the app should mainly launch/connect servers, hold local presentation state, and render server-backed workflows. +- **Delete as you go**: once a feature is fully on the new server, remove the corresponding legacy code instead of letting both versions linger. + +## Risks + +- The desktop app may have too many direct server path references, making migration noisy until a client boundary exists. +- The desktop app currently owns native and local behavior that should eventually move behind the server boundary. +- Shared auth/session/runtime behavior may be entangled with the old server boot path. 
+- Orchestrator responsibilities may be tightly coupled to host bootstrapping, making it harder to separate true bootstrap concerns from product control-plane concerns. +- Old-server and new-server payloads may drift if both are maintained for too long. + +## Open Questions + +- Which server surface is the best first slice to migrate as a proof point? +- Are there any external consumers besides the desktop app that must keep using the old server during the transition? +- At what point should desktop startup switch to the new server by default? +- What bootstrap responsibilities truly must remain outside the server process, if any, once orchestration is folded inward? + +## Immediate Next Steps + +1. Create `apps/server-v2/` as the new server package. +2. Add OpenAPI generation for the new Hono app. +3. Create a TypeScript SDK package generated from the new server OpenAPI spec. +4. Define the new server startup path the desktop app will eventually launch. +5. Add `createSdk({ serverId })` so the app resolves server config without passing raw URLs and tokens around. +6. Define the one or two SSE endpoints and their typed event payloads. +7. Inventory desktop-owned workspace capabilities and prioritize which ones move behind the server first. +8. Identify the first orchestrator-owned control-plane capability to fold into the main server. +9. Identify the first low-risk endpoint group to migrate. +10. Port the first feature slice end to end and use it as the template for the rest. diff --git a/prds/server-v2-plan/schema.md b/prds/server-v2-plan/schema.md new file mode 100644 index 00000000..60d72a92 --- /dev/null +++ b/prds/server-v2-plan/schema.md @@ -0,0 +1,497 @@ +# Server V2 Schema And State Model + +## Status: Draft +## Date: 2026-04-13 + +## Purpose + +This document defines the preferred server-owned sqlite schema direction for Server V2, along with the related filesystem layout and API-shape decisions. 
+ +It also identifies desktop-owned state that should be moved into the server database as part of the thin-client transition. + +## Core Rule + +The local OpenWork server should become the durable owner of product state that is not inherently owned by OpenCode. + +That means: + +- OpenCode owns session/project runtime state +- OpenWork server owns workspace registry, config registry, remote connections, and product metadata +- the desktop app should stop being the long-term owner of those records + +## Database Scope + +The sqlite DB is for OpenWork-owned metadata and relationships. + +It should not become a duplicate source of truth for: + +- session messages +- archived flags +- session titles +- other OpenCode-native session state + +Those should still come live from OpenCode or remote OpenWork servers. + +## Tables + +### `servers` + +Purpose: + +- store the set of known server connections the local product knows about + +Rows should represent: + +- one local server +- zero or more remote servers + +Suggested columns: + +- `id` +- `kind` (`local`, `remote`) +- `hosting_kind` (`desktop`, `self_hosted`, `cloud`) +- `label` +- `base_url` +- `token_ref` or encrypted token material +- `capabilities_json` +- `is_local` +- `is_enabled` +- `created_at` +- `updated_at` +- `last_seen_at` + +Notes: + +- `server` is a real model in the product, even if it is not yet exposed directly in the UI. +- The app can still render a workspace-first experience while the server keeps the canonical registry. +- Remote and cloud servers should both be modeled as remote OpenWork servers at the product level; `hosting_kind` captures whether a remote server is cloud-hosted or not. 
+ +### `workspaces` + +Purpose: + +- store the canonical OpenWork workspace registry + +Suggested columns: + +- `id` +- `server_id` +- `kind` (`local`, `remote`, `control`, `help`) +- `display_name` +- `slug` +- `is_hidden` +- `status` +- `opencode_project_id` nullable +- `remote_workspace_id` nullable +- `data_dir` nullable +- `config_dir` nullable +- `notes_json` nullable +- `created_at` +- `updated_at` + +Rules: + +- every workspace points at exactly one server +- local workspaces keep a stable OpenWork workspace ID that is separate from the OpenCode project ID +- remote workspaces map to exactly one workspace on a remote OpenWork server +- control/help workspaces are real workspaces with `is_hidden = true` +- the local OpenWork server is still the canonical routing layer that the app talks to, even when a workspace belongs to a remote server + +### `server_runtime_state` + +Purpose: + +- store server-wide supervision and health state for bundled runtime dependencies + +Suggested columns: + +- `server_id` +- `runtime_version` +- `opencode_status` +- `opencode_version` +- `opencode_base_url` nullable +- `router_status` +- `router_version` nullable +- `restart_policy_json` nullable +- `last_started_at` nullable +- `last_exit_json` nullable +- `updated_at` + +Notes: + +- this is the server-owned place for runtime health, crash history, and extracted-runtime metadata +- this is distinct from workspace-level diagnostic state because OpenCode and router are supervised at the server level + +### `workspace_runtime_state` + +Purpose: + +- optional server-owned state about runtime health and derived metadata for each workspace + +Suggested columns: + +- `workspace_id` +- `backend_kind` (`local_opencode`, `remote_openwork`) +- `last_sync_at` +- `last_session_refresh_at` +- `last_error_json` +- `health_json` + +Notes: + +- this is not the source of truth for sessions +- it is just runtime/cache/diagnostic state the server may want + +### `mcps` + +Purpose: + +- store MCP 
definitions and related auth/config metadata as OpenWork-managed records + +Suggested columns: + +- `id` +- `kind` +- `display_name` +- `config_json` +- `auth_json` +- `source` (`openwork_managed`, `imported`, `discovered`) +- `created_at` +- `updated_at` + +### `skills` + +Purpose: + +- store OpenWork-managed skill metadata + +Suggested columns: + +- `id` +- `slug` +- `display_name` +- `body_ref` or `content_hash` +- `source` (`openwork_managed`, `imported`, `discovered`, `cloud_synced`) +- `cloud_item_id` nullable +- `created_at` +- `updated_at` + +### `plugins` + +Purpose: + +- store plugin definitions and enabled-state metadata + +Suggested columns: + +- `id` +- `plugin_key` +- `display_name` +- `config_json` +- `source` +- `created_at` +- `updated_at` + +### `provider_configs` + +Purpose: + +- store provider definitions, credentials references, and assignment metadata + +Suggested columns: + +- `id` +- `provider_key` +- `display_name` +- `config_json` +- `auth_json` +- `source` +- `created_at` +- `updated_at` + +### `cloud_signin` + +Purpose: + +- store local server-owned cloud auth/session metadata + +Suggested columns: + +- `id` +- `cloud_base_url` +- `user_id` +- `org_id` +- `auth_json` +- `last_validated_at` +- `created_at` +- `updated_at` + +Notes: + +- cloud sign-in should stop being purely app-owned preference state +- the server should be able to own and apply this state directly + +### `workspace_shares` + +Purpose: + +- store share/access state for local workspaces that are exposed remotely + +Suggested columns: + +- `id` +- `workspace_id` +- `access_key_ref` or encrypted key material +- `status` (`active`, `revoked`, `disabled`) +- `last_used_at` nullable +- `audit_json` nullable +- `created_at` +- `updated_at` +- `revoked_at` nullable + +Notes: + +- access should be scoped to one workspace, not granted server-wide by default +- the initial product shape can assume zero or one active share record per local workspace, while leaving room for later 
expansion + +### `router_identities` + +Purpose: + +- store server-owned router identities and their persisted config/auth metadata + +Suggested columns: + +- `id` +- `server_id` +- `kind` +- `display_name` +- `config_json` +- `auth_json` +- `is_enabled` +- `created_at` +- `updated_at` + +Notes: + +- router identity state should be server-level, not app-local +- the server can still project this into router config files or runtime config as needed + +### `router_bindings` + +Purpose: + +- store server-owned router bindings and delivery targets + +Suggested columns: + +- `id` +- `server_id` +- `router_identity_id` +- `binding_key` +- `config_json` +- `is_enabled` +- `created_at` +- `updated_at` + +Notes: + +- bindings are what determine whether router startup is needed at all +- later models can add workspace scoping or policy tables without changing the basic server-owned direction + +## Linking Tables + +These tables are what let OpenWork own config items once and apply them to one or many workspaces. + +### `workspace_mcps` + +- `workspace_id` +- `mcp_id` +- `created_at` +- `updated_at` + +### `workspace_skills` + +- `workspace_id` +- `skill_id` +- `created_at` +- `updated_at` + +### `workspace_plugins` + +- `workspace_id` +- `plugin_id` +- `created_at` +- `updated_at` + +### `workspace_provider_configs` + +- `workspace_id` +- `provider_config_id` +- `created_at` +- `updated_at` + +Notes: + +- nothing should be global by default +- when a config item is created, the user chooses which workspaces it should apply to +- when a workspace is created, the server/UI can offer existing config items to attach + +## Other Desktop-Owned State That Should Move Into The Server DB + +Beyond the core tables above, the server should absorb other state that currently tends to live in the desktop app. 
+ +### Cloud settings and user auth + +Should move into the server DB: + +- cloud base URL +- user auth/session state +- selected org or cloud account metadata +- cloud validation status + +Reason: + +- this is product state, not presentation state +- the server should be able to own cloud-backed behavior directly + +### Cloud-synced item metadata + +Should move into the server DB: + +- mappings to cloud-synced skills/plugins/providers +- imported/discovered state +- sync status and timestamps + +Reason: + +- it belongs with the config and workspace registry the server owns + +### Workspace share metadata + +Should move into the server DB: + +- workspace-scoped share/access keys +- share status +- share timestamps and audit fields + +Reason: + +- remote exposure of local workspaces is a server capability +- this should land in `workspace_shares` or an equivalent server-owned table + +### Server/workspace relationship state + +Should move into the server DB: + +- which workspace belongs to which server +- hidden control/help workspace metadata +- remote workspace mappings +- local OpenWork workspace ID <-> OpenCode project ID mappings + +Reason: + +- this is the canonical product graph the app should query, not reconstruct locally + +## Session Query Strategy + +For now: + +- session lists and session state can be queried live +- local server queries OpenCode for local workspaces +- local server queries remote OpenWork servers for remote workspaces + +Optional later enhancements: + +- light server-side cache/index tables +- UI cache for responsiveness + +But the authoritative state should remain live in the backend systems. 
+
+## Config Reconciliation Strategy
+
+Config reconciliation should use both:
+
+- file watching
+- periodic or pull-based reconciliation
+
+### File watching
+
+Use for:
+
+- fast local detection of config changes in managed directories
+- quick projection/update of DB state
+
+### Periodic or pull-based reconciliation
+
+Use for:
+
+- startup repair
+- recovering from missed file watcher events
+- validating that disk state and DB state still match
+
+### Absorption rules
+
+- if a file clearly matches a known managed concept like MCP, Skill, Plugin, or Provider Config, absorb it into the DB
+- if it does not match a known managed concept, leave it in the directory for now
+
+## Filesystem Layout
+
+Current preferred direction:
+
+```text
+/workspaces/<workspace-id>/config
+```
+
+Where:
+
+- the user chooses the workspace data directory
+- OpenWork creates and owns the config directory
+- OpenCode is pointed at the OpenWork-owned config directory
+- the user data directory is added as an authorized path
+
+This gives a clean separation between:
+
+- user-owned data
+- OpenWork-owned config
+- OpenCode-owned runtime state
+
+## API Shape
+
+All session and workspace behavior should be exposed through workspace-first OpenWork routes.
+
+That means:
+
+- the client sends `workspaceId`
+- the server resolves the backend mapping
+- the server talks to OpenCode or a remote OpenWork server
+- the client never needs raw OpenCode endpoint shapes
+
+Recommended principle:
+
+- normalize everything into OpenWork-shaped endpoints
+- do not expose raw OpenCode endpoint design directly as the public product API
+
+## Initial Primitive Surface
+
+The initial OpenCode-derived session surface should still include the primitives already listed in `prds/server-v2-plan/ideal-flow.md`, but wrapped in OpenWork route design.
+ +That means: + +- session listing +- session creation/update/delete +- session status and todos +- init/fork/abort/share/unshare/summarize +- message list/get/send/update/delete +- prompt async / command / shell +- revert / unrevert + +The naming and nesting should be workspace-first. + +## Recommended Next Step + +The next useful design doc after this one is a route-shape doc that turns these schema decisions into: + +- workspace-first REST route patterns +- request/response envelope shape +- mapping rules from OpenWork routes to OpenCode SDK calls diff --git a/prds/server-v2-plan/sdk-generation.md b/prds/server-v2-plan/sdk-generation.md new file mode 100644 index 00000000..ba0953e8 --- /dev/null +++ b/prds/server-v2-plan/sdk-generation.md @@ -0,0 +1,289 @@ +# Server V2 SDK Generation + +## Status: Draft +## Date: 2026-04-09 + +## Purpose + +This document defines the preferred toolchain for generating the Server V2 TypeScript SDK and how that generation should fit into local development and CI. + +This doc assumes Server V2 is a separate new server package, not a sub-application mounted inside the old server. 
+ +## Current Recommendation + +Preferred stack: + +- OpenAPI spec generation: `hono-openapi` +- TypeScript SDK generation: `@hey-api/openapi-ts` +- Reusable client package: `packages/openwork-server-sdk` +- App entrypoint: app-owned `createSdk({ serverId })` +- SSE support: small handwritten helpers exposed from the SDK package, then wrapped by the app adapter + +## Why `@hey-api/openapi-ts` + +It is the leading SDK generator candidate because it fits the current plan well: + +- it generates TypeScript code from OpenAPI +- it supports SDK-oriented output, not just raw schema types +- it aligns better with a method-based client surface than a purely path-based fetch client +- it works well in a monorepo package setup + +Compared with `openapi-typescript` + `openapi-fetch`: + +- `openapi-fetch` is lightweight and good, but it encourages a path-shaped client surface +- `@hey-api/openapi-ts` is a better fit for the method-based SDK style we want underneath the app-side adapter + +## Important Caveat + +`@hey-api/openapi-ts` is still in active development and recommends pinning an exact version. + +We should treat that as a requirement: + +- pin an exact version in `package.json` +- upgrade intentionally +- regenerate the SDK in a dedicated PR when changing versions + +## Toolchain Roles + +### 1. `hono-openapi` + +Role: + +- derive the OpenAPI spec from the new server Hono app and its schemas + +Output: + +- `apps/server-v2/openapi/openapi.json` + +### 2. `@hey-api/openapi-ts` + +Role: + +- generate the TypeScript SDK package from `apps/server-v2/openapi/openapi.json` + +Output: + +- `packages/openwork-server-sdk/generated/**` + +### 3. Handwritten SDK package files + +Role: + +- expose server-agnostic helpers over the generated client +- expose small typed SSE helpers + +Files: + +- `packages/openwork-server-sdk/src/index.ts` +- `packages/openwork-server-sdk/src/streams/**` + +### 4. 
App-side adapter + +Role: + +- export the app-facing `createSdk({ serverId })` +- resolve `serverId` to current runtime config +- inject base URL and auth/token +- select between the current and new server behavior during migration + +Files: + +- `apps/app/.../createSdk.ts` + +## Proposed Package Layout + +```text +apps/server-v2/ +├── src/** +└── openapi/ + └── openapi.json + +packages/openwork-server-sdk/ +├── package.json +├── openapi-ts.config.ts +├── generated/** +├── src/ +│ ├── streams/ +│ └── index.ts +└── scripts/ + └── watch.mjs + +apps/app/ +└── ... app-side `createSdk({ serverId })` adapter +``` + +## App-Facing Shape + +The overall app-facing shape should be: + +```ts +await createSdk({ serverId }).sessions.listMessages({ workspaceId, sessionId }) +``` + +That means: + +- generated methods remain the main surface for normal endpoints +- `createSdk({ serverId })` is an app-owned thin runtime adapter +- the reusable SDK package stays server-agnostic +- SSE helpers live in the SDK package and are wrapped by the app adapter as needed + +## Generation Flow + +One-shot flow: + +```text +apps/server-v2/src/** +-> hono-openapi +-> apps/server-v2/openapi/openapi.json +-> @hey-api/openapi-ts +-> packages/openwork-server-sdk/generated/** +``` + +## Mixed Old/New Routing During Migration + +The generated SDK package should represent the new server contract only. + +During migration, typed fallback behavior for legacy server routes should live in app-owned adapter code rather than in the generated SDK package itself. 
+ +Recommended split: + +- `packages/openwork-server-sdk`: generated Server V2 client plus small handwritten SSE helpers for Server V2 +- `apps/app/.../createSdk.ts`: rollout checks, capability gating, and per-operation routing +- `apps/app/.../legacy/`: small handwritten compatibility shims for old-server calls that have not been ported yet + +Rules: + +- do not try to generate one SDK that merges old and new server contracts together +- keep legacy compatibility shims thin and delete them as soon as a feature slice is fully on the new server +- if a legacy route must be used temporarily, normalize its result in the app adapter before returning it to the rest of the UI +- the app-facing call site should still look like `createSdk({ serverId })...` so migration logic stays out of feature code + +## Scripts Shape + +The exact implementation can vary, but the command model should look like this. + +### `apps/server-v2/package.json` + +```json +{ + "scripts": { + "openapi:generate": "node ./scripts/generate-openapi.mjs", + "openapi:watch": "node ./scripts/watch-openapi.mjs" + } +} +``` + +Notes: + +- these scripts should load the new server Hono app and emit `openapi/openapi.json` +- they should use `hono-openapi` +- `openapi:watch` should only watch `src/**` + +### `packages/openwork-server-sdk/package.json` + +```json +{ + "scripts": { + "generate": "openapi-ts -c openapi-ts.config.ts", + "watch": "node ./scripts/watch.mjs", + "typecheck": "tsc --noEmit" + } +} +``` + +Notes: + +- `generate` should run `@hey-api/openapi-ts` against `apps/server-v2/openapi/openapi.json` +- `watch` can be a small file watcher that reruns `generate` when `openapi/openapi.json` changes +- `typecheck` ensures the generated output and handwritten SDK helpers still compile together + +### Root `package.json` + +```json +{ + "scripts": { + "dev:server-v2": "pnpm run dev:server-v2:watchers", + "dev:server-v2:watchers": "node ./scripts/dev-server-v2.mjs", + "sdk:generate": "pnpm --filter 
openwork-server-v2 openapi:generate && pnpm --filter @openwork/server-sdk generate" + } +} +``` + +Intent: + +- `dev:server-v2` starts the combined dev graph +- `sdk:generate` is the one-shot contract regeneration command for local use and CI + +## Suggested Watch Implementation + +We should not depend on every tool having perfect built-in watch support. + +Instead, prefer small repo-local watcher scripts where needed. + +Examples: + +- `apps/server-v2/scripts/watch-openapi.mjs` + - watch `src/**` + - rerun OpenAPI generation +- `packages/openwork-server-sdk/scripts/watch.mjs` + - watch `../../apps/server-v2/openapi/openapi.json` + - rerun `openapi-ts` +- `scripts/dev-server-v2.mjs` + - run backend dev watch + - run OpenAPI watch + - run SDK watch + - run app dev, which includes the app-side adapter + +This gives us full control over debounce behavior, ignores, and restart-loop prevention. + +## Runtime Choice + +The server runtime remains Bun-based. + +The code generation toolchain does not need to match the runtime exactly. + +That means: + +- `apps/server-v2` can continue running with Bun in dev and production +- code generation can run via `pnpm` and Node-based tooling where needed + +This is acceptable because code generation is a build-time/dev-time concern, not a runtime server concern. + +## CI Commands + +The CI contract check should reduce to one command or one short chain. + +Preferred shape: + +```bash +pnpm --filter openwork-server-v2 openapi:generate && pnpm --filter @openwork/server-sdk generate && git diff --exit-code +``` + +That gives us: + +- one contract regeneration path +- identical logic between local and CI flows +- immediate detection of stale generated files + +## SSE and Generation Boundary + +The one or two SSE endpoints should still appear in the new server contract, but they should not block the rest of the SDK generation plan. 
+ +Recommended split: + +- normal request/response endpoints: generated with `@hey-api/openapi-ts` +- SSE helpers: handwritten in `packages/openwork-server-sdk/src/streams/**` +- typed event payloads: generated or shared contract types only, never imported directly from server source + +This keeps the custom surface small. + +## Decision Summary + +We should plan around: + +- `hono-openapi` for OpenAPI generation +- `@hey-api/openapi-ts` for SDK generation +- app-owned `createSdk({ serverId })` as the app-facing entrypoint +- small handwritten SSE helpers for the limited streaming surface + +This is the most balanced path between strong typing, monorepo ergonomics, explicit contracts, and low ongoing maintenance. diff --git a/prds/server-v2-plan/spawning-opencode.md b/prds/server-v2-plan/spawning-opencode.md new file mode 100644 index 00000000..51596303 --- /dev/null +++ b/prds/server-v2-plan/spawning-opencode.md @@ -0,0 +1,488 @@ +# Spawning OpenCode + +## Status: Draft +## Date: 2026-04-13 + +## Purpose + +This document describes the local helper we need for starting OpenCode when the new server ships with a bundled OpenCode binary. + +The key problem is: + +- `@opencode-ai/sdk` provides a `createOpencode()` helper +- that helper expects to launch `opencode` from `PATH` +- our distribution plan wants the OpenWork server to ship its own bundled OpenCode binary +- therefore we need a local helper that mimics the useful startup behavior while allowing an explicit binary path + +## Design Goal + +Build a small local helper that: + +- starts OpenCode directly with Bun runtime process APIs +- accepts an explicit binary path +- waits until OpenCode is actually ready +- accepts a custom config object generated by the OpenWork server +- returns an OpenCode HTTP client from `@opencode-ai/sdk` +- does not modify the upstream SDK + +The SDK should still be used for the HTTP client. + +The SDK should not be used for server startup. 
+ +## Why We Need This + +In the target Server V2 distribution model: + +- `openwork-server-v2` is the primary executable +- it embeds `opencode` +- it extracts `opencode` into a managed runtime directory +- it launches that exact binary path + +So the normal case is not: + +- look on `PATH` for `opencode` + +The normal case is: + +- use the exact extracted OpenCode binary path managed by the server runtime + +That means relying on `PATH` is the wrong startup model. + +We need deterministic control over: + +- which `opencode` binary is launched +- which hostname and port it binds to +- which config blob it receives +- how readiness and failure are detected + +That config blob should come from the OpenWork server's own database-backed config model, not from ad hoc client state. + +## Proposed Helper Location + +Suggested local helper location: + +```text +apps/server-v2/src/adapters/opencode/local.ts +``` + +This should remain app-local/server-local code, not a patch to `node_modules`. + +## Proposed Public API + +Use one exported function: + +```ts +createLocalOpencode(opts?) +``` + +Suggested signature: + +```ts +type CreateLocalOpencodeOptions = { + binary?: string + hostname?: string + port?: number + timeout?: number + signal?: AbortSignal + config?: Record<string, unknown> + client?: Record<string, unknown> +} +``` + +Suggested defaults: + +- `binary`: omitted only if the helper can resolve the extracted bundled runtime binary path from server-managed runtime state; otherwise startup should fail +- `hostname`: `"127.0.0.1"` +- `port`: `4096` +- `timeout`: `5000` + +Important rule: + +- the helper should not rely on `PATH` +- the helper should always launch the bundled OpenCode binary extracted by the server runtime +- if no explicit bundled binary path is available, that should be treated as a startup error, not as a signal to fall back to a system binary + +## Runtime Behavior + +The helper should: + +1. resolve the binary path +2. spawn OpenCode with `serve` +3. 
watch stdout/stderr for readiness +4. parse the listening URL +5. create an OpenCode HTTP client using `createOpencodeClient` +6. return both the client and server process handle + +## Spawn Contract + +Spawn should use: + +```text +binary +serve +--hostname=<hostname> +--port=<port> +``` + +If `config.logLevel` exists, append: + +```text +--log-level=<config.logLevel> +``` + +Environment should merge `process.env` with: + +```text +OPENCODE_CONFIG_CONTENT=<serialized config JSON> +``` + +Where: + +```ts +JSON.stringify(config ?? {}) +``` + +## Config Source Of Truth + +The config object passed into this helper should be generated by the OpenWork server. + +That means: + +- the server reads its sqlite state +- the server resolves the effective config for the relevant runtime or workspace +- the server materializes an OpenCode config object +- the helper passes that object through `OPENCODE_CONFIG_CONTENT` + +The helper should not decide config policy. + +Its job is to: + +- receive the already-computed config object +- serialize it +- launch OpenCode with it + +This keeps the ownership boundary clean: + +- server DB and services own config state +- the spawn helper owns process startup only + +## SDK Usage + +Use the SDK only for the HTTP client: + +```ts +import { createOpencodeClient } from "@opencode-ai/sdk" +``` + +The helper should create the client after readiness: + +```ts +createOpencodeClient({ + baseUrl: url, + ...clientOpts, +}) +``` + +## Readiness Detection + +The helper should detect readiness by watching stdout and stderr for a line like: + +```text +opencode server listening on http://... +``` + +It should: + +- capture stdout and stderr incrementally +- parse the URL from the first matching line +- treat that as the canonical base URL + +Suggested parser behavior: + +- scan each emitted line +- match `http://...` or `https://...` +- store the parsed URL exactly as reported + +## Failure Handling + +The helper should fail clearly in three main cases. + +### 1. 
Missing binary / `ENOENT` + +If the binary does not exist or cannot be launched: + +- reject immediately +- include the binary path/name in the error +- include a clear explanation that the executable was not found + +Suggested error shape: + +```text +Failed to start OpenCode: executable not found at <path> +``` + +### 2. Timeout before readiness + +If startup exceeds the timeout: + +- kill the child process +- reject with a timeout error +- include collected stdout/stderr in the error message + +Suggested error shape: + +```text +OpenCode did not become ready within 5000ms. +Collected output: +... +``` + +### 3. Early exit before ready + +If the child exits before a readiness line is seen: + +- reject with an early-exit error +- include exit code or signal +- include collected stdout/stderr + +Suggested error shape: + +```text +OpenCode exited before becoming ready (exit code 1). +Collected output: +... +``` + +## Runtime Exit And Crash Handling + +Startup success is not enough. + +The server also needs to handle the case where OpenCode exits or crashes after readiness. + +The helper and the surrounding runtime supervisor should support: + +- detecting unexpected child exit after readiness +- surfacing a clear runtime state change to the rest of the server +- exposing crash/error state to clients +- controlled restart behavior + +### Required runtime behavior + +Once OpenCode is ready, the server should continue monitoring the process. 
+ +If the child exits unexpectedly: + +- mark OpenCode as unhealthy or offline in runtime state +- capture the exit code or signal +- capture recent stdout/stderr for diagnostics +- make that status visible through server APIs +- either restart automatically or leave the process down based on the server's restart policy + +### Restart policy + +The exact restart policy is still a design decision, but the system should be built so it can support: + +- no automatic restart +- bounded restart attempts with backoff +- explicit manual restart via server control surface + +The important point is that OpenCode process failure must be a first-class runtime state, not an invisible child-process problem. + +## Abort Support + +If `signal` is provided: + +- attach an abort listener +- kill the spawned child on abort if startup is still in progress +- reject with an abort-related error + +The same signal can also be used later by the caller to coordinate shutdown policy if desired. + +## Returned Shape + +Suggested return shape: + +```ts +type LocalOpencodeHandle = {
 + client: ReturnType<typeof createOpencodeClient> + server: { + url: string + close: () => void + proc: Bun.Subprocess + } +} +``` + +Notes: + +- `client` is the upstream SDK HTTP client +- `server.url` is the parsed listening URL +- `server.close()` should terminate the spawned process +- `server.proc` is the Bun subprocess handle for diagnostics and advanced lifecycle management + +The process handle should also make it possible for the server's runtime supervisor to observe post-start exit/crash behavior. + +## Suggested Implementation Notes + +Use: + +```ts +import { createOpencodeClient } from "@opencode-ai/sdk" +``` + +Preferred runtime API inside `apps/server-v2`: + +```ts +Bun.spawn(...) 
+``` + +Reasoning: + +- the new server is Bun-based +- Bun is the runtime we are standardizing on for `apps/server-v2` +- using Bun-native process APIs keeps the implementation aligned with the rest of the server runtime + +Compatibility note: + +- `node:child_process.spawn` would also work under Bun for many cases +- but it should not be the preferred framing for this helper in the design docs +- the helper should be documented as a Bun-based local runtime helper + +Implementation guidance: + +- use TypeScript +- avoid `any` +- collect output safely with bounded buffers if needed +- make timeout cleanup and process cleanup deterministic +- remove abort hooks, timers, and background readers after resolve/reject +- only resolve once +- continue observing the process after readiness so unexpected exit/crash can be surfaced to the runtime supervisor + +## Suggested Example Usage + +```ts +const opencode = await createLocalOpencode({ + binary: "/absolute/path/to/opencode", + config: { + model: "anthropic/claude-3-5-sonnet-20241022", + }, +}) + +console.log(opencode.server.url) + +opencode.server.close() +``` + +## Tiny Usage Example + +```ts +import { createLocalOpencode } from "./adapters/opencode/local" + +const opencode = await createLocalOpencode({ + binary: "/runtime/opencode", + hostname: "127.0.0.1", + port: 4096, +}) + +const projects = await opencode.client.project.list() + +console.log(projects) + +opencode.server.close() +``` + +## Expected Implementation Skeleton + +Illustrative shape only: + +```ts +import { createOpencodeClient } from "@opencode-ai/sdk" + +export async function createLocalOpencode(opts: CreateLocalOpencodeOptions = {}) { + // resolve defaults, including extracted bundled binary path when present + // spawn process with Bun.spawn + // capture stdout/stderr + // wait for readiness line + // enforce timeout + // reject on early exit + // create client with parsed URL + // return { client, server } +} +``` + +## Relationship To The 
Distribution Plan + +This helper is important because it connects the runtime distribution plan to the actual server implementation. + +In the target distribution model: + +- `openwork-server-v2` embeds `opencode` +- extracts it to a managed runtime directory +- then uses this helper to launch the extracted binary by absolute path + +That makes startup deterministic and independent from whatever happens to be on `PATH`. + +## OpenCode Health Endpoint + +The server should expose an endpoint that allows clients to check OpenCode health. + +At minimum, clients should be able to ask: + +- is OpenCode running? +- what version is running? +- what URL is it bound to? +- when did it last start or fail? + +Suggested shape: + +```text +GET /system/opencode/health +``` + +Example response shape: + +```json +{ + "running": true, + "version": "1.2.27", + "baseUrl": "http://127.0.0.1:4096", + "lastStartedAt": "2026-04-13T12:00:00Z", + "lastExit": null +} +``` + +If OpenCode crashed, the server should be able to report something like: + +```json +{ + "running": false, + "version": "1.2.27", + "baseUrl": "http://127.0.0.1:4096", + "lastStartedAt": "2026-04-13T12:00:00Z", + "lastExit": { + "code": 1, + "signal": null, + "at": "2026-04-13T12:05:00Z" + } +} +``` + +This endpoint is important because the UI should not infer OpenCode health from indirect failures alone. + +The server should make runtime state explicit. + +## Recommendation + +Implement this helper locally in the new server codebase and treat it as the canonical OpenCode startup wrapper. + +It should be designed around the Bun-based runtime model of `apps/server-v2`, not around a generic Node-only process model. + +It should also be designed as one piece of a larger runtime supervision system, not just a fire-and-forget spawn helper. 
+ +It should become the place where we later add: + +- version validation +- health probes beyond stdout readiness +- structured logs +- cleanup/restart semantics +- possibly router-adjacent startup coordination if needed diff --git a/prds/server-v2-plan/tauri-audit.md b/prds/server-v2-plan/tauri-audit.md new file mode 100644 index 00000000..cb30d958 --- /dev/null +++ b/prds/server-v2-plan/tauri-audit.md @@ -0,0 +1,568 @@ +# Tauri Audit + +## Scope + +This audit covers the desktop-native layer under `apps/desktop`, especially `apps/desktop/src-tauri/**` and the desktop scripts that are directly part of dev/build/runtime. + +## Alignment Note + +This audit documents the current desktop-native footprint, not the final target-state boundary. + +To fully match `prds/server-v2-plan/ideal-flow.md`, durable workspace registry state, workspace watchers, config and file mutation, remote workspace persistence, and runtime/workspace control should move behind the main server, leaving the desktop shell with only native bootstrap, reconnect state, and UI-hosting duties. + +The goal is to document the desktop app lifecycle and every meaningful place where the desktop shell touches the local system, native runtime, sidecars, files, or OS services. + +This document now assumes the target architecture is a single main server API surface, with bootstrap and supervision responsibilities also collapsing into the server over time. + +## Disposition Labels + +- `Stay`: should remain in the desktop shell because it is truly native-shell, OS, packaging, windowing, or local UI-hosting behavior. +- `Move`: should move behind the server because it is real workspace behavior or runtime coordination. +- `Split`: the trigger or presentation may stay in the desktop shell, but the actual workspace/runtime capability should move behind the server. + +## High-Level Lifecycle + +1. Tauri boots the desktop shell. +2. The shell registers native plugins, commands, deep-link handling, and exit behavior. 
+3. Dev/build scripts prepare sidecars and frontend assets. +4. The desktop layer starts or reconnects to the local OpenWork server host path. +5. Any deeper runtime ownership of OpenCode, router, or orchestrator behavior is migration debt that should collapse inward behind the server over time. +6. Workspace state is loaded and persisted. +7. Native file watchers, dialogs, updater checks, and process management continue during the session. + +## App Shell Bootstrap + +Disposition guidance: + +- `run()` -> `Stay` +- `stop_managed_services()` -> `Stay` +- deep-link forwarding helpers -> `Stay` +- window visibility helpers -> `Stay` +- `set_dev_app_name()` -> `Stay` + +### `run()` + +- What it does: boots the native desktop shell, installs Tauri plugins, registers all Tauri commands, and wires the application lifecycle. +- Called from and when: called from the desktop app entrypoint at application launch. +- Ends up calling: Tauri plugin setup, command registration, window lifecycle hooks, deep-link hooks, updater support, shell/process support, and cleanup on exit. + +### `stop_managed_services()` + +- What it does: best-effort shutdown for native child services managed by the desktop shell. +- Called from and when: called on app exit and exit-request flows. +- Ends up calling: child-process kill paths for the engine, orchestrator, OpenWork server, and router. + +### `forwarded_deep_links()`, `emit_native_deep_links()`, `emit_forwarded_deep_links()` + +- What they do: normalize incoming deep links and forward them into the frontend as native events. +- Called from and when: called when a second app instance is launched or when the OS opens the app via URL. +- Ends up calling: Tauri event emission into the frontend. + +### `show_main_window()` / `hide_main_window()` + +- What they do: show, focus, or hide the native window. +- Called from and when: called during reopen, second-instance handoff, and close interception flows. +- Ends up calling: native window APIs. 
+ +### `set_dev_app_name()` + +- What it does: renames the macOS process to `OpenWork - Dev` in dev mode. +- Called from and when: called during desktop setup in dev mode. +- Ends up calling: macOS process metadata APIs. + +## Dev And Build Pipeline + +Disposition guidance: + +- `tauri-before-dev.mjs` -> `Stay` +- `tauri-before-build.mjs` -> `Stay` +- `prepare-sidecar.mjs` -> `Stay` +- `dev-windows.mjs` -> `Stay` +- `chrome-devtools-mcp-shim.ts` -> `Stay` +- `build.rs` -> `Stay` + +### `tauri-before-dev.mjs` + +- What it does: prepares sidecars, validates Linux desktop dependencies, detects or starts the UI dev server, and keeps process trees under control in dev mode. +- Called from and when: called by Tauri `beforeDevCommand` during `pnpm dev` for the desktop app. +- Ends up calling: sidecar preparation, `pkg-config`, the app dev server, process spawn/kill logic, and local HTTP checks against the Vite server. + +### `tauri-before-build.mjs` + +- What it does: prepares sidecars and builds the frontend before packaging. +- Called from and when: called by Tauri `beforeBuildCommand` during desktop builds. +- Ends up calling: sidecar prep and frontend build commands. + +### `prepare-sidecar.mjs` + +- What it does: builds and stages all bundled sidecars and downloads the pinned OpenCode binary. +- Called from and when: called by both dev and build prep scripts. +- Ends up calling: Bun compile steps, GitHub release downloads, filesystem copies into `src-tauri/sidecars`, and version metadata generation. + +### `dev-windows.mjs` + +- What it does: Windows-specific desktop dev launcher that cleans up stale processes and injects the right toolchain environment before running Tauri. +- Called from and when: called by `pnpm dev:windows`. +- Ends up calling: PowerShell/taskkill-style cleanup, Windows compiler environment setup, and `tauri dev`. + +### `chrome-devtools-mcp-shim.ts` + +- What it does: creates the bundled shim for Chrome DevTools MCP. 
+- Called from and when: built during sidecar prep, later executed when that sidecar is needed. +- Ends up calling: `npm exec` for the Chrome DevTools MCP package. + +### `build.rs` + +- What it does: injects build metadata and ensures sidecar placeholders or binaries exist during Rust compilation. +- Called from and when: called by Cargo during desktop compilation. +- Ends up calling: compile-time file generation/copy behavior and build metadata injection. + +## OpenCode Engine Lifecycle + +Disposition guidance: + +- `engine_start()` -> `Move` +- `spawn_engine()` -> `Move` +- `find_free_port()` / `build_engine_args()` -> `Move` +- `engine_stop()` / `engine_restart()` -> `Move` +- `engine_info()` -> `Split` +- `engine_doctor()` and related helpers -> `Split` +- `engine_install()` -> `Split` +- `EngineManager` -> `Move` +- `bun_env_overrides()` -> `Move` + +Reasoning: the desktop app should keep the ability to launch the OpenWork server, but OpenCode runtime ownership and coordination should move behind the server boundary over time. + +### `engine_start()` + +- What it does: starts the local OpenCode runtime and the supporting native stack around it. +- Called from and when: called from the UI when the user starts a local runtime, creates a workspace, reconnects, or begins a local session flow. +- Ends up calling: local project-directory setup, `opencode.json` seeding, port selection, credential generation, engine spawn, OpenWork server startup, and OpenCodeRouter startup. + +### `spawn_engine()` + +- What it does: launches `opencode serve` directly, either from a bundled sidecar or an installed binary. +- Called from and when: called by `engine_start()` in direct-engine mode. +- Ends up calling: process spawning, working-directory setup, PATH/env overrides, and OpenCode HTTP serving. + +### `find_free_port()` / `build_engine_args()` + +- What they do: allocate a local port and define the exact command arguments for OpenCode serve. 
+- Called from and when: called during engine startup. +- Ends up calling: localhost TCP allocation and process argument setup. + +### `engine_stop()` / `engine_restart()` + +- What they do: stop or restart the local engine stack. +- Called from and when: called from UI controls and restart/recovery flows. +- Ends up calling: child-process shutdown/restart across engine, router, orchestrator, and hosted OpenWork server. + +### `engine_info()` + +- What it does: reports current engine/runtime status, including reconnecting to orchestrator auth/state after relaunch. +- Called from and when: called by the UI during startup probes, status refresh, and settings pages. +- Ends up calling: in-memory manager state access and orchestrator state/auth file reads. + +### `engine_doctor()` and related helpers + +- What they do: detect whether OpenCode is installed and usable, inspect binary paths, and run lightweight capability probes. +- Called from and when: called from onboarding and settings diagnostics. +- Ends up calling: filesystem path checks and child-process execution such as `--version` and `serve --help`. + +### `engine_install()` + +- What it does: installs OpenCode from the official install script. +- Called from and when: called from onboarding or settings when the user asks to install OpenCode. +- Ends up calling: shell execution, network download, and writes under the user install directory. + +### `EngineManager` + +- What it does: stores the live engine child process, ports, credentials, and captured logs. +- Called from and when: used throughout engine startup, status, stop, and reconnect flows. +- Ends up calling: process lifecycle and in-memory runtime state management. + +### `bun_env_overrides()` + +- What it does: normalizes Bun-related environment for child processes. +- Called from and when: used whenever the desktop shell launches Bun-based sidecars. +- Ends up calling: child-process env mutation. 
+ +## Orchestrator Lifecycle + +Disposition guidance: + +- all orchestrator lifecycle and sandbox functions in this section -> `Move` + +Reasoning: these are runtime and workspace orchestration concerns, not UI concerns. + +### `spawn_orchestrator_daemon()` + +- What it does: launches the OpenWork orchestrator daemon sidecar. +- Called from and when: called by `engine_start()` when the runtime mode uses the orchestrator. +- Ends up calling: child-process spawn, daemon HTTP startup, OpenCode management, PATH/env setup, and sidecar discovery. + +### `wait_for_orchestrator()` / `fetch_orchestrator_health()` / `fetch_orchestrator_workspaces_with_timeout()` + +- What they do: poll the orchestrator until it is healthy and query its workspace/runtime state. +- Called from and when: called during startup and UI status refresh. +- Ends up calling: localhost HTTP requests against orchestrator endpoints. + +### `resolve_orchestrator_data_dir()`, `read_orchestrator_state()`, `read_orchestrator_auth()`, `write_orchestrator_auth()`, `clear_orchestrator_auth()` + +- What they do: manage orchestrator runtime state and auth snapshots on disk. +- Called from and when: used during engine/orchestrator startup, reconnect, shutdown, and recovery. +- Ends up calling: reads and writes under the orchestrator data directory. + +### `request_orchestrator_shutdown()` / `OrchestratorManager::stop_locked()` + +- What they do: gracefully stop the orchestrator if possible, and kill it if needed. +- Called from and when: called during engine stop and app exit. +- Ends up calling: orchestrator `/shutdown` and child-process kill fallback. + +### `orchestrator_status()` + +- What it does: reports orchestrator/OpenCode/workspace status to the UI. +- Called from and when: called by settings and runtime status UI. +- Ends up calling: JSON state reads and localhost health/workspace requests. 
+ +### `orchestrator_workspace_activate()` + +- What it does: registers and activates a workspace inside the orchestrator. +- Called from and when: called when the user points the app at a local workspace in orchestrator mode. +- Ends up calling: orchestrator workspace creation and activation endpoints. + +### `orchestrator_instance_dispose()` + +- What it does: disposes a running workspace instance. +- Called from and when: called from cleanup/dispose UI. +- Ends up calling: orchestrator disposal endpoints. + +### `orchestrator_start_detached()` + +- What it does: starts a detached host stack, optionally sandboxed, and reports progress back to the UI. +- Called from and when: called when the user creates a detached worker or remote sandbox. +- Ends up calling: `openwork-orchestrator start --detach`, token generation, health polling, optional owner-token issuance, and progress event emission. + +### `sandbox_doctor()`, `sandbox_stop()`, `sandbox_cleanup_openwork_containers()`, `sandbox_debug_probe()` + +- What they do: inspect and clean up Docker-based sandbox environments. +- Called from and when: called from developer/debug settings and recovery flows. +- Ends up calling: Docker CLI commands, temporary probe workspaces, and local filesystem cleanup. + +## Hosted OpenWork Server Lifecycle + +Disposition guidance: + +- `start_openwork_server()` -> `Split` +- `spawn_openwork_server()` -> `Stay` +- `resolve_openwork_port()` -> `Stay` +- token and state helpers in `openwork_server/mod.rs` -> `Split` +- `build_urls()` -> `Stay` +- `openwork_server_info()` / `openwork_server_restart()` -> `Split` +- `OpenworkServerManager` -> `Split` + +Reasoning: the desktop shell should keep launch/supervision of the local OpenWork server process, but token semantics, runtime info, and restart/control behavior should keep shrinking into the server over time. 
+ +### `start_openwork_server()` + +- What it does: starts the local desktop-hosted OpenWork server and tracks its tokens, URLs, and health. +- Called from and when: called by engine startup and explicit OpenWork server restart flows. +- Ends up calling: port selection, token generation/load, server spawn, health checks, optional owner-token issuance, and persistence of port/token state; the launch path should stay, while token/control semantics should move inward. + +### `spawn_openwork_server()` + +- What it does: launches the `openwork-server` sidecar. +- Called from and when: called by `start_openwork_server()`. +- Ends up calling: child-process spawn, cwd selection, and env var injection including OpenCode base URL, OpenCode creds, and OpenWork tokens. + +### `resolve_openwork_port()` + +- What it does: picks an OpenWork server port with reuse and conflict avoidance. +- Called from and when: called on host-mode server start. +- Ends up calling: local TCP port selection logic. + +### token and state helpers in `openwork_server/mod.rs` + +- What they do: load/create workspace client/host/owner tokens and persist preferred ports and state. +- Called from and when: called on every hosted-server start and reconnect. +- Ends up calling: filesystem reads and writes in app data; the minimum host bootstrap state may stay, but owner/control semantics should move behind the server. + +### `build_urls()` + +- What it does: derives shareable LAN and mDNS URLs when remote access is enabled. +- Called from and when: called during hosted-server startup. +- Ends up calling: hostname and local-IP discovery. + +### `openwork_server_info()` / `openwork_server_restart()` + +- What they do: expose OpenWork server runtime info and restart it with the current engine credentials/workspaces. +- Called from and when: called from settings, remote-access, and recovery UI. 
+- Ends up calling: manager state reads or a full restart path; these are transitional control-plane responsibilities that should simplify as the server absorbs more runtime ownership. + +### `OpenworkServerManager` + +- What it does: stores hosted-server child state, URLs, tokens, and captured logs. +- Called from and when: used throughout hosted-server lifecycle management. +- Ends up calling: in-memory child-process/runtime state management, including some control-plane state that should shrink over time. + +## OpenCodeRouter Lifecycle + +Disposition guidance: + +- router child launch and stop helpers -> `Split` +- router status/config/product API functions -> `Move` + +Reasoning: router config, status, and product-facing control should become part of server-owned workspace behavior, and even router child launch should ideally end up under server-owned supervision. + +### `opencodeRouter_start()` + +- What it does: starts the router sidecar and captures its startup state. +- Called from and when: called automatically after engine start and from router settings UI. +- Ends up calling: router spawn, health-port allocation, and child-process log capture. + +### `spawn_opencode_router()` + +- What it does: launches `opencode-router serve` with the active OpenCode connection. +- Called from and when: called by `opencodeRouter_start()`. +- Ends up calling: child-process spawn and localhost router startup. + +### `opencodeRouter_status()` / `opencodeRouter_info()` + +- What they do: report router health, config, and CLI-derived status. +- Called from and when: called from router settings/status UI. +- Ends up calling: localhost router health/config endpoints and router CLI commands. + +### `opencodeRouter_stop()` + +- What it does: stops the router process. +- Called from and when: called from UI stop/recovery actions. +- Ends up calling: child-process kill. + +### `opencodeRouter_config_set()` + +- What it does: mutates router configuration via the router CLI. 
+- Called from and when: called from router settings UI. +- Ends up calling: router CLI config writes. + +## Workspace State, Files, And Watchers + +Disposition guidance: + +- `workspace_bootstrap()` -> `Split` +- workspace state load/save/repair helpers -> `Split` +- `workspace_create()` -> `Move` +- `ensure_workspace_files()` -> `Move` +- enterprise creator skills seeding helpers -> `Move` +- remote workspace create/update/forget/set-selected/set-runtime-active helpers -> `Split` +- authorized-root and `openwork.json` read/write helpers -> `Move` +- workspace import/export config helpers -> `Move` +- workspace watch/update helpers -> `Move` + +Reasoning: the app should keep only transient selection and reconnect state. The durable registry of servers/workspaces, plus mutation, file writes, config writes, import/export, and reload watching, should move behind the server. + +### `workspace_bootstrap()` + +- What it does: loads persisted workspace state, repairs it, and starts the native file watcher. +- Called from and when: called very early from the UI during app bootstrap. +- Ends up calling: workspace-state reads and watcher setup. + +### `load_workspace_state()`, `load_workspace_state_fast()`, `save_workspace_state()`, `repair_workspace_state()` + +- What they do: persist and normalize the desktop app’s current workspace registry. +- Called from and when: used throughout startup, workspace mutation, and selection flows. +- Ends up calling: `openwork-workspaces.json` reads/writes and path canonicalization; in the ideal model this becomes transitional reconnect/cache state rather than the canonical registry. + +### `workspace_create()` + +- What it does: creates a new local workspace and seeds its initial OpenWork/OpenCode files. +- Called from and when: called from onboarding and create-workspace UI. +- Ends up calling: directory creation, starter file writes, and workspace-state updates. 
+ +### `ensure_workspace_files()` + +- What it does: writes `.opencode` and related starter config files into a workspace. +- Called from and when: called by `workspace_create()`. +- Ends up calling: filesystem writes under the workspace path. + +### `spawn_enterprise_creator_skills_seed()` / `seed_enterprise_creator_skills()` + +- What they do: download and unpack starter skills from GitHub in the background. +- Called from and when: called for starter workspace setup. +- Ends up calling: network download, ZIP extraction, and workspace file writes. + +### `workspace_create_remote()`, `workspace_update_remote()`, `workspace_forget()`, `workspace_set_selected()`, `workspace_set_runtime_active()` + +- What they do: manage the app-side list of local and remote workspaces in the current implementation. +- Called from and when: called from workspace selection, remote connect, and remote disconnect flows. +- Ends up calling: workspace-state mutation and watcher target changes; durable remote workspace persistence should move behind the server. + +### `workspace_add_authorized_root()`, `workspace_openwork_read()`, `workspace_openwork_write()` + +- What they do: manage `.opencode/openwork.json` data for authorized roots and related settings. +- Called from and when: called from settings/permissions UI. +- Ends up calling: workspace-local config file reads and writes; in the ideal model this is server-side materialization into the OpenWork-managed workspace config directory. + +### `workspace_export_config()` / `workspace_import_config()` + +- What they do: export or import portable workspace config bundles. +- Called from and when: called from import/export UI. +- Ends up calling: zip creation/extraction and workspace filesystem mutation. + +### `update_workspace_watch()` / `WorkspaceWatchState` + +- What they do: watch the active workspace root and `.opencode/` for changes that require reload handling. 
+- Called from and when: watcher is armed at bootstrap and updated when runtime-active workspace changes. +- Ends up calling: native filesystem watching and frontend reload events. + +## Local Config, Commands, And Skills + +Disposition guidance: + +- `read_opencode_config()` / `write_opencode_config()` -> `Move` +- command-file functions -> `Move` +- skill functions -> `Move` +- `copy_dir_recursive()` -> `Move` + +Reasoning: these are explicit workspace/config mutation capabilities and belong to the server. + +### `read_opencode_config()` / `write_opencode_config()` + +- What they do: read and write local or global `opencode.json[c]` files. +- Called from and when: called by settings/config editors and by startup paths that seed project config. +- Ends up calling: config file reads and writes. + +### command-file functions in `command_files.rs` + +- What they do: list, write, and delete local OpenCode command markdown files. +- Called from and when: called from command-management UI. +- Ends up calling: filesystem mutation under `.opencode/commands`. + +### skill functions in `skills.rs` + +- What they do: list, read, write, install, import, and remove local skill directories. +- Called from and when: called from skills UI and local import/install flows. +- Ends up calling: skill directory reads, recursive copies, and deletions. + +### `copy_dir_recursive()` + +- What it does: recursively copies local directories. +- Called from and when: used when importing local skills. +- Ends up calling: filesystem traversal and file copy operations. + +## Dialogs, Windowing, Deep Links, Updater + +Disposition guidance: + +- dialog wrappers -> `Stay` +- `set_window_decorations()` -> `Stay` +- deep-link scheme config and bridge -> `Stay` +- `updater_environment()` -> `Stay` + +Reasoning: these are true desktop-shell concerns. + +### dialog wrappers used by the frontend + +- What they do: open native file/folder/save dialogs. 
+- Called from and when: called when the user picks folders, files, or export destinations. +- Ends up calling: Tauri dialog plugin and OS-native dialogs. + +### `set_window_decorations()` + +- What it does: toggles native window decorations. +- Called from and when: called from hide-titlebar preference changes. +- Ends up calling: native window APIs. + +### deep-link scheme config and bridge + +- What they do: register `openwork://` and `openwork-dev://`, receive deep links, and forward them into the UI. +- Called from and when: called at OS/app integration time and when deep links are opened. +- Ends up calling: OS URL-handler integration and frontend event routing. + +### `updater_environment()` + +- What it does: detects whether native auto-update is safe/supported on the current install. +- Called from and when: called from startup and settings/about UI. +- Ends up calling: executable/app-bundle path inspection. + +## Scheduler, Reset, Cache, Auth + +Disposition guidance: + +- scheduler commands -> `Move` +- `reset_opencode_cache()` -> `Split` +- `reset_openwork_state()` -> `Split` +- `nuke_openwork_and_opencode_config_and_exit()` -> `Split` +- `opencode_mcp_auth()` -> `Move` +- `app_build_info()` -> `Stay` + +Reasoning: job/MCP/runtime behavior should move to the server, while destructive app reset and app metadata remain partly shell-owned. + +### scheduler commands + +- What they do: inspect and remove scheduled OpenCode jobs. +- Called from and when: called from scheduler UI. +- Ends up calling: scheduler file reads plus OS scheduler removal through `launchctl` or `systemctl --user`. + +### `reset_opencode_cache()` + +- What it does: deletes local OpenCode cache directories. +- Called from and when: called from reset/repair UI. +- Ends up calling: filesystem deletion under XDG/macOS/Windows cache locations. + +### `reset_openwork_state()` + +- What it does: stops managed services and clears OpenWork desktop state. 
+- Called from and when: called from reset UI. +- Ends up calling: process shutdown and filesystem deletion in app-data/config/cache paths. + +### `nuke_openwork_and_opencode_config_and_exit()` + +- What it does: performs a broader local reset across OpenWork and OpenCode state, then exits. +- Called from and when: called from destructive settings/reset flows. +- Ends up calling: large-scale filesystem deletion and app exit. + +### `opencode_mcp_auth()` + +- What it does: runs `opencode mcp auth ` in an authorized workspace. +- Called from and when: called from MCP authorization UI. +- Ends up calling: local process spawn, path validation, and any browser/OAuth flow the child process triggers. + +### `app_build_info()` + +- What it does: exposes desktop build/version metadata. +- Called from and when: called from about/settings UI. +- Ends up calling: compile-time metadata reads only. + +## Supporting Native Path And Platform Helpers + +Disposition guidance: + +- PATH helpers -> `Stay` +- platform process-launch helpers -> `Stay` +- Tauri capability manifest -> `Stay` + +Reasoning: these are implementation details of the native desktop shell itself. + +### `prepended_path_env()` / `sidecar_path_candidates()` + +- What they do: construct a safe PATH that includes bundled sidecars and common tool locations. +- Called from and when: used when launching engine, orchestrator, router, and similar sidecars. +- Ends up calling: child-process env mutation. + +### platform `command_for_program()` / `configure_hidden()` helpers + +- What they do: normalize native process spawning behavior across Unix and Windows. +- Called from and when: used by diagnostics, sandbox helpers, and auth/process tools. +- Ends up calling: OS-specific process-launch behavior. + +### Tauri capability manifest + +- What it does: grants the desktop app permission to use native dialogs, deep links, updater, HTTP, process, opener, and shell features. 
+- Called from and when: applied by the Tauri runtime. +- Ends up calling: Tauri capability gating for the whole native shell. + +## Coverage Limits + +- This audit stays focused on code in `apps/desktop`. +- It describes the frontend boundary only when needed to explain who triggers a native call. +- It does not attempt to re-document all downstream behavior inside `apps/server`, `apps/orchestrator`, or OpenCode itself. diff --git a/prds/server-v2-plan/ui-migration.md b/prds/server-v2-plan/ui-migration.md new file mode 100644 index 00000000..f86ff821 --- /dev/null +++ b/prds/server-v2-plan/ui-migration.md @@ -0,0 +1,436 @@ +# Server V2 UI Migration + +## Status: Draft +## Date: 2026-04-14 + +## Purpose + +This document defines how the app and desktop shell should migrate from the current mixed legacy call patterns to the new Server V2 SDK and startup model. + +Current Phase 10 note: + +- The migration rollout flag remains the practical opt-in switch while Server V2 is still being validated. +- Current app routing and desktop startup should continue to use the legacy path by default. +- Server V2 should be enabled explicitly with `OPENWORK_UI_USE_SERVER_V2=1` (or the Vite-exposed equivalent in the frontend build). + +It focuses on one practical requirement: + +- we need to move without breaking the current app +- we need a clean way to opt into the new server path +- we need to avoid sprinkling backend-selection logic across random UI call sites + +## Core Idea + +Yes, the overall migration approach makes sense. 
+ +The right shape is: + +- Server V2 has its own generated SDK +- the current UI remains the default path at first +- a single rollout env var enables the new Server V2 path +- that same rollout decision also controls desktop startup so Tauri launches the new server instead of the old runtime stack +- during migration, each networked capability has two implementations behind one app-facing function: + - current legacy path by default + - new Server V2 path when the rollout flag is enabled + +The important refinement is how much refactoring we require up front. + +We should not block migration on first rewriting the whole UI behind one perfect adapter layer. + +Instead, we should support incremental per-call-site branching with one shared rollout helper. + +## Migration Goals + +- keep the current product working while Server V2 is still incomplete +- let developers boot the whole app against Server V2 with one flag +- make it easy to port one feature slice at a time +- keep UI call sites stable while the transport and runtime ownership change underneath +- make it easy to delete legacy paths once a feature area is fully migrated + +## One Logical Rollout Flag + +Use one logical rollout flag for the migration. 
+
+Recommended behavior:
+
+- flag off or unset: current app behavior and current desktop runtime startup
+- flag on: app routes eligible calls through the new Server V2 adapter path, and desktop startup launches Server V2 instead of the old local stack
+
+Example conceptual flag:
+
+```text
+OPENWORK_UI_USE_SERVER_V2=1
+```
+
+Current implementation note:
+
+- legacy/default behavior should continue when the flag is unset
+- Server V2 should only become the active app + desktop path when this flag is explicitly enabled
+- do not remove this flag until the old path is intentionally retired
+
+Implementation note:
+
+- the frontend build may still need a platform-specific env bridge such as a Vite-exposed variable
+- that should still be treated as one logical rollout flag, not as two separate product switches
+
+## Main Rule: Use One Shared Flag Helper
+
+The migration can look like this in many places during rollout:
+
+```ts
+const isServerV2 = checkServerFlag()
+
+if (isServerV2) {
+  // new server path
+} else {
+  // old path
+}
+```
+
+That is acceptable for this migration, as long as the branching stays disciplined.
+
+Specifically:
+
+- use one shared helper such as `checkServerFlag()` or `isServerV2Enabled()`
+- avoid reading raw env state differently in many places
+- branch at the existing call site when that is the lowest-risk migration path
+- only introduce a deeper adapter when it clearly reduces repeated migration logic
+
+This keeps the migration practical without forcing a whole-app client refactor before the server migration can begin.
+ +## Recommended Helper Shape + +Recommended minimal shape: + +```text +apps/app/src/app/kernel/server-version/ +├── flag.ts # shared UI-side rollout helper +├── sdk.ts # Server V2 client creation helpers +└── index.ts + +apps/desktop/src-tauri/src/ +└── openwork_server/ + └── startup_mode.rs # desktop startup branch for old vs Server V2 +``` + +Example UI-side helper shape: + +```ts +export function checkServerFlag() { + return import.meta.env.VITE_OPENWORK_UI_USE_SERVER_V2 === "1" +} +``` + +Example usage at an existing call site: + +```ts +const isServerV2 = checkServerFlag() + +if (isServerV2) { + return createSdk({ serverId }).workspaces.list() +} + +return listWorkspacesLegacy() +``` + +Example desktop-side shape: + +```rust +if check_server_v2_flag() { + start_server_v2(...) +} else { + start_legacy_runtime(...) +} +``` + +Notes: + +- exact filenames can change, but the flag helper should live in one obvious shell or kernel location +- the helper should return a boolean and hide platform-specific env lookup details +- Tauri should follow the same logical flag, even if its implementation reads that value through a different runtime or build-time path +- `createSdk({ serverId })` should sit near the same shell or kernel area so the new-server branch is easy to reuse + +## Desired Layering + +Recommended split: + +- `packages/openwork-server-sdk` + - generated SDK for Server V2 only +- `apps/app/.../checkServerFlag.ts` or equivalent + - one shared helper for rollout checks +- `apps/app/.../createSdk.ts` + - app-owned runtime adapter for places that do use Server V2 calls +- `apps/app/.../legacy/` + - thin handwritten legacy fetch/client helpers for flows not yet migrated +- `apps/desktop/src-tauri/**` + - startup and runtime wiring that chooses whether to launch the old stack or the new server + +The stable rule is: + +- generated SDK for new server +- one shared rollout helper for migration checks +- thin legacy shims for old server +- reuse existing call 
sites where that is cheaper than introducing a new abstraction first + +## What Counts As A Migration Target + +The UI currently reaches behavior through several kinds of call sites. + +We should treat all of these as migration targets: + +- direct fetches to current OpenWork server endpoints +- direct calls to handwritten server client helpers +- Tauri commands that really represent server or runtime capabilities +- startup and reconnect flows that assemble local runtime pieces in the desktop shell +- local config/file mutations that exist only because the server does not own that capability yet + +In other words, the migration is not just about replacing `fetch()` calls. + +It also includes shrinking Tauri-owned runtime orchestration and local workspace mutation paths. + +It is fine if some migrated places switch to SDK calls while others still branch between raw endpoints, local files, or Tauri commands during the rollout period. + +## Dual-Path Pattern Per Capability + +For each capability being migrated, use the lightest-weight branch that gets the job done safely. + +Sometimes that will be a stable exported domain function. + +Sometimes that will simply be an existing function or store action updated to branch on the shared rollout helper. 
+ +Example shape: + +```ts +const isServerV2 = checkServerFlag() + +export async function listWorkspaceSessions(args: ListWorkspaceSessionsArgs) { + if (isServerV2) { + return createSdk({ serverId }).sessions.list({ workspaceId: args.workspaceId }) + } + + return listWorkspaceSessionsLegacy(args) +} +``` + +Rules: + +- prefer a shared helper instead of repeated raw env lookups +- if the code already has a natural function boundary, branch there +- the Server V2 branch should use the generated SDK where possible +- the legacy branch should remain thin and isolated +- the function should normalize outputs so the rest of the UI sees one stable shape + +This gives us one place to remove the legacy path later, without requiring that every feature be fully re-layered first. + +## Startup Migration Pattern + +The rollout flag should affect both app request routing and desktop startup. + +### Flag Off + +- keep the current startup graph +- keep the current app network and Tauri paths +- keep legacy behavior as the safe default + +### Flag On + +- desktop starts Server V2 instead of assembling the old local runtime stack +- app connects to Server V2 using the normal port and token model +- eligible app capability adapters call Server V2 through its SDK +- legacy fallbacks remain only for capability areas not yet ported + +This matters because we do not just want a new SDK. + +We want the app to experience the new ownership model end to end. 
+ +## App Adapter Strategy + +The app-facing shape should stay consistent with the rest of the plan: + +```ts +await createSdk({ serverId }).sessions.listMessages({ workspaceId, sessionId }) +``` + +Recommended responsibilities for `createSdk({ serverId })` during migration: + +- prepare the Server V2 SDK client for code paths that are on the new server +- create the Server V2 SDK client when that path is enabled +- hide base URL and token handling from feature code + +It should not: + +- expose raw `baseUrl` or `token` handling to domain code +- force the whole app to migrate behind one adapter before we can ship any Server V2 path + +Important clarification: + +- `createSdk({ serverId })` is for the new-server branch +- it does not need to absorb every legacy code path immediately +- some areas can branch locally and only use `createSdk({ serverId })` inside the Server V2 branch + +## Desktop / Tauri Strategy + +The same rollout decision should control desktop-native startup. + +Recommended behavior: + +- when the flag is off, the desktop shell uses the current startup path +- when the flag is on, the desktop shell launches Server V2 and connects the app to it + +That means the desktop migration should be treated as part of the same UI migration, not as a separate unrelated effort. + +Practical effect: + +- the UI should not talk to a legacy startup stack while pretending it is on the new server model +- if the flag says Server V2, startup should align with that choice as much as possible + +## Where To Put The Branches + +Recommended branching layers: + +### 1. Shell-level rollout resolver + +- parse the env var once +- expose a small `checkServerFlag()` or equivalent helper + +### 2. Desktop startup boundary + +- choose old runtime startup versus Server V2 startup + +### 3. 
Existing feature call sites or domain helpers + +- choose legacy versus Server V2 implementation for each migrated capability +- use the shared rollout helper rather than ad hoc env parsing + +### 4. Legacy compatibility helpers + +- preserve old transport details in a quarantined area until they can be deleted + +Not recommended: + +- reading env vars directly in many different ways +- large pre-migration refactors whose only purpose is to create a perfect adapter layer +- mixing transport selection with rendering logic when a nearby helper or action can own the branch instead + +## Migration Order + +Recommended order: + +### Phase 1: Introduce the rollout boundary + +- add the logical rollout flag +- add one shared rollout resolver +- add desktop startup branching for old versus new server launch + +Success criteria: + +- one switch can boot the app in legacy mode or Server V2 mode +- migrated call sites can switch behavior using one shared helper without duplicating env parsing logic + +### Phase 2: Wrap existing calls before porting behavior + +- inventory current direct fetches, client helpers, and Tauri commands that represent server behavior +- add branches only where needed for the next migration slice +- keep behavior unchanged at first unless the branch is explicitly being ported + +Success criteria: + +- the next feature slice can migrate without a large unrelated app refactor +- rollout logic is centralized in a shared helper even if branching still happens in many places + +### Phase 3: Port read paths first + +- move status, discovery, workspace list, session list, and similar read-heavy surfaces first +- use the generated Server V2 SDK for the new path +- keep legacy fallback until confidence is high + +Success criteria: + +- the app can render selected surfaces fully from Server V2 with the flag enabled + +### Phase 4: Port write and workflow paths + +- move mutations, workspace creation flows, config mutation, share/import/export flows, and 
runtime control flows in slices +- keep transport normalization as close to the migration branch as practical + +Success criteria: + +- whole workflows succeed in Server V2 mode without Tauri-only fallback paths for the migrated areas + +### Phase 5: Flip the default + +- once the new path is credible, make Server V2 the default +- keep a temporary escape hatch for the old path only if needed for rollback + +### Phase 6: Delete legacy paths + +- remove legacy helper modules +- remove old startup branching +- remove the rollout flag once it is no longer needed + +## Normalization Rule + +During migration, old and new backends may not return identical shapes at first. + +The UI should not absorb those differences directly. + +Instead: + +- normalize old and new responses inside the adapter layer +- return one stable shape to the rest of the UI +- keep temporary compatibility code close to the transport boundary + +This is especially important for: + +- workspace summaries +- session list/detail payloads +- runtime status and health payloads +- settings and config mutation responses + +## What Should Stay Local In The UI + +The migration does not mean everything moves out of the app. + +These should still stay UI-local: + +- modal state +- routing and presentation state +- draft text and transient form state +- attachment preprocessing before upload +- client-only search, scrolling, and render performance logic +- clipboard and opener helpers + +The migration target is server and runtime ownership, not the removal of normal UI state. 
+ +## Guardrails + +- default to legacy behavior until a feature slice is proven +- use one shared rollout helper for all migration checks +- never require feature code to pass around raw server URLs and tokens +- do not dual-write unless there is no safer option +- keep the legacy path isolated and deletable +- prefer deleting a migrated legacy branch quickly instead of letting both paths fossilize +- do not treat Tauri-only filesystem mutation as a permanent parallel capability set + +## Relationship To Other Docs + +- `prds/server-v2-plan/plan.md` + - overall server migration phases +- `prds/server-v2-plan/sdk-generation.md` + - generated SDK plus legacy compatibility split +- `prds/server-v2-plan/app-audit.md` + - UI-owned areas and migration targets inside the app +- `prds/server-v2-plan/tauri-audit.md` + - desktop-native startup and runtime responsibilities that should shrink over time + +## Decision Summary + +The migration model should be: + +- one logical env var controls whether the app is in legacy mode or Server V2 mode +- that same rollout decision affects both UI network routing and desktop startup +- the new server uses its own generated SDK +- each migrated area can branch locally using one shared rollout helper +- `createSdk({ serverId })` powers the Server V2 path without requiring a whole-app adapter refactor first + +That is the safest way to migrate the UI incrementally without locking the app into a permanent dual-architecture mess. 
diff --git a/scripts/check-server-v2-contract.mjs b/scripts/check-server-v2-contract.mjs new file mode 100644 index 00000000..f0a0fc61 --- /dev/null +++ b/scripts/check-server-v2-contract.mjs @@ -0,0 +1,43 @@ +import { spawn } from "node:child_process"; +import path from "node:path"; +import process from "node:process"; +import { fileURLToPath } from "node:url"; + +const scriptDir = path.dirname(fileURLToPath(import.meta.url)); +const repoDir = path.resolve(scriptDir, ".."); + +function run(command, args) { + return new Promise((resolve, reject) => { + const child = spawn(command, args, { + cwd: repoDir, + env: process.env, + stdio: "inherit", + }); + + child.once("error", reject); + child.once("exit", (code, signal) => { + if (code === 0) { + resolve(); + return; + } + + reject(new Error(`${command} ${args.join(" ")} failed with ${signal ? `signal ${signal}` : `exit code ${code ?? 1}`}`)); + }); + }); +} + +async function main() { + await run("pnpm", ["run", "sdk:generate"]); + await run("git", [ + "diff", + "--exit-code", + "--", + "apps/server-v2/openapi/openapi.json", + "packages/openwork-server-sdk/generated", + ]); +} + +main().catch((error) => { + process.stderr.write(`${error instanceof Error ? error.stack ?? error.message : String(error)}\n`); + process.exit(1); +}); diff --git a/scripts/dev-server-v2.mjs b/scripts/dev-server-v2.mjs new file mode 100644 index 00000000..b84972d2 --- /dev/null +++ b/scripts/dev-server-v2.mjs @@ -0,0 +1,96 @@ +import { spawn } from "node:child_process"; +import process from "node:process"; + +const includeApp = !process.argv.includes("--no-app"); + +function run(command, args) { + return new Promise((resolve, reject) => { + const child = spawn(command, args, { + stdio: "inherit", + env: process.env, + cwd: process.cwd(), + }); + + child.once("error", reject); + child.once("exit", (code, signal) => { + if (code === 0) { + resolve(); + return; + } + + reject(new Error(`${command} ${args.join(" ")} exited with ${code ?? 
signal ?? "unknown status"}`)); + }); + }); +} + +const commands = [ + { + name: "server", + args: ["--filter", "openwork-server-v2", "dev"], + }, + { + name: "openapi", + args: ["--filter", "openwork-server-v2", "openapi:watch"], + }, + { + name: "sdk", + args: ["--filter", "@openwork/server-sdk", "watch"], + }, +]; + +if (includeApp) { + commands.push({ + name: "app", + args: ["dev:ui"], + }); +} + +const children = []; +let shuttingDown = false; + +function stopAll(exitCode = 0) { + if (shuttingDown) { + return; + } + + shuttingDown = true; + + for (const child of children) { + if (child.exitCode === null) { + child.kill("SIGTERM"); + } + } + + setTimeout(() => { + process.exit(exitCode); + }, 100); +} + +for (const signal of ["SIGINT", "SIGTERM"]) { + process.on(signal, () => stopAll(0)); +} + +async function main() { + await run("pnpm", ["run", "sdk:generate"]); + + for (const command of commands) { + const child = spawn("pnpm", command.args, { + stdio: "inherit", + env: process.env, + cwd: process.cwd(), + }); + children.push(child); + child.once("exit", (code, signal) => { + if (shuttingDown) { + return; + } + const exitCode = code ?? (signal ? 1 : 0); + stopAll(exitCode); + }); + } +} + +main().catch((error) => { + process.stderr.write(`${error instanceof Error ? error.message : String(error)}\n`); + process.exit(1); +});