diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..333d6134 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,10 @@ +.git +.github +.opencode +node_modules +**/node_modules +tmp +dist +**/dist +.env +.env.* diff --git a/.github/workflows/deploy-den-v2.yml b/.github/workflows/deploy-den-v2.yml new file mode 100644 index 00000000..911941e4 --- /dev/null +++ b/.github/workflows/deploy-den-v2.yml @@ -0,0 +1,394 @@ +name: Deploy Den v2 + +on: + push: + branches: + - dev + paths: + - "services/den-v2/**" + - "packages/den-db/**" + - "packages/utils/**" + - ".github/workflows/deploy-den-v2.yml" + workflow_dispatch: + inputs: + render_service_id: + description: "Optional Render service id override for test/staging deploys" + required: false + type: string + +permissions: + contents: read + +concurrency: + group: deploy-den-v2-${{ github.ref }} + cancel-in-progress: true + +jobs: + deploy: + runs-on: ubuntu-latest + if: github.repository == 'different-ai/openwork' + steps: + - name: Validate required secrets + env: + RENDER_API_KEY: ${{ secrets.RENDER_API_KEY }} + RENDER_DEN_CONTROL_PLANE_SERVICE_ID: ${{ inputs.render_service_id || 'srv-d6sajsua2pns7383mis0' }} + RENDER_OWNER_ID: ${{ secrets.RENDER_OWNER_ID }} + DEN_DATABASE_URL: ${{ secrets.DEN_DATABASE_URL || secrets.DATABASE_URL }} + DEN_DATABASE_HOST: ${{ secrets.DEN_DATABASE_HOST || secrets.DATABASE_HOST }} + DEN_DATABASE_USERNAME: ${{ secrets.DEN_DATABASE_USERNAME || secrets.DATABASE_USERNAME }} + DEN_DATABASE_PASSWORD: ${{ secrets.DEN_DATABASE_PASSWORD || secrets.DATABASE_PASSWORD }} + DEN_BETTER_AUTH_SECRET: ${{ secrets.DEN_BETTER_AUTH_SECRET }} + DEN_GITHUB_CLIENT_ID: ${{ secrets.DEN_GITHUB_CLIENT_ID }} + DEN_GITHUB_CLIENT_SECRET: ${{ secrets.DEN_GITHUB_CLIENT_SECRET }} + DEN_GOOGLE_CLIENT_ID: ${{ secrets.DEN_GOOGLE_CLIENT_ID }} + DEN_GOOGLE_CLIENT_SECRET: ${{ secrets.DEN_GOOGLE_CLIENT_SECRET }} + DAYTONA_API_KEY: ${{ secrets.DAYTONA_API_KEY }} + POLAR_ACCESS_TOKEN: ${{ 
secrets.POLAR_ACCESS_TOKEN }} + POLAR_PRODUCT_ID: ${{ secrets.POLAR_PRODUCT_ID }} + POLAR_BENEFIT_ID: ${{ secrets.POLAR_BENEFIT_ID }} + VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }} + DEN_PROVISIONER_MODE: ${{ vars.DEN_PROVISIONER_MODE }} + DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX: ${{ vars.DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX }} + DEN_DAYTONA_WORKER_PROXY_BASE_URL: ${{ vars.DEN_DAYTONA_WORKER_PROXY_BASE_URL }} + DEN_POLAR_FEATURE_GATE_ENABLED: ${{ vars.DEN_POLAR_FEATURE_GATE_ENABLED }} + run: | + missing=0 + for key in RENDER_API_KEY RENDER_DEN_CONTROL_PLANE_SERVICE_ID RENDER_OWNER_ID DEN_BETTER_AUTH_SECRET; do + if [ -z "${!key}" ]; then + echo "::error::Missing required secret: $key" + missing=1 + fi + done + + if [ -z "$DEN_DATABASE_URL" ]; then + for key in DEN_DATABASE_HOST DEN_DATABASE_USERNAME DEN_DATABASE_PASSWORD; do + if [ -z "${!key}" ]; then + echo "::error::Missing required database secret: $key (required when DEN_DATABASE_URL is not set)" + missing=1 + fi + done + fi + + vanity_suffix="${DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX:-openwork.studio}" + if [ -n "$vanity_suffix" ] && [ -z "$VERCEL_TOKEN" ]; then + echo "::error::Missing required secret: VERCEL_TOKEN (required when vanity domains are enabled)" + missing=1 + fi + + feature_enabled="${DEN_POLAR_FEATURE_GATE_ENABLED:-false}" + feature_enabled="$(echo "$feature_enabled" | tr '[:upper:]' '[:lower:]')" + provisioner_mode="${DEN_PROVISIONER_MODE:-daytona}" + provisioner_mode="$(echo "$provisioner_mode" | tr '[:upper:]' '[:lower:]')" + + if [ "$provisioner_mode" = "daytona" ]; then + if [ -z "$DAYTONA_API_KEY" ]; then + echo "::error::Missing required secret: DAYTONA_API_KEY (required when DEN_PROVISIONER_MODE=daytona)" + missing=1 + fi + + if [ -z "$DEN_DAYTONA_WORKER_PROXY_BASE_URL" ]; then + echo "::error::Missing required variable: DEN_DAYTONA_WORKER_PROXY_BASE_URL (required when DEN_PROVISIONER_MODE=daytona)" + missing=1 + fi + fi + + if [ "$feature_enabled" = "true" ]; then + for key in 
POLAR_ACCESS_TOKEN POLAR_PRODUCT_ID POLAR_BENEFIT_ID; do + if [ -z "${!key}" ]; then + echo "::error::Missing required paywall secret: $key" + missing=1 + fi + done + fi + + if [ -n "$DEN_GITHUB_CLIENT_ID" ] && [ -z "$DEN_GITHUB_CLIENT_SECRET" ]; then + echo "::error::Missing required secret: DEN_GITHUB_CLIENT_SECRET (required when DEN_GITHUB_CLIENT_ID is set)" + missing=1 + fi + + if [ -n "$DEN_GITHUB_CLIENT_SECRET" ] && [ -z "$DEN_GITHUB_CLIENT_ID" ]; then + echo "::error::Missing required secret: DEN_GITHUB_CLIENT_ID (required when DEN_GITHUB_CLIENT_SECRET is set)" + missing=1 + fi + + if [ -n "$DEN_GOOGLE_CLIENT_ID" ] && [ -z "$DEN_GOOGLE_CLIENT_SECRET" ]; then + echo "::error::Missing required secret: DEN_GOOGLE_CLIENT_SECRET (required when DEN_GOOGLE_CLIENT_ID is set)" + missing=1 + fi + + if [ -n "$DEN_GOOGLE_CLIENT_SECRET" ] && [ -z "$DEN_GOOGLE_CLIENT_ID" ]; then + echo "::error::Missing required secret: DEN_GOOGLE_CLIENT_ID (required when DEN_GOOGLE_CLIENT_SECRET is set)" + missing=1 + fi + + if [ "$missing" -ne 0 ]; then + exit 1 + fi + + - name: Sync Render env vars and deploy latest commit + env: + RENDER_API_KEY: ${{ secrets.RENDER_API_KEY }} + RENDER_DEN_CONTROL_PLANE_SERVICE_ID: ${{ inputs.render_service_id || 'srv-d6sajsua2pns7383mis0' }} + RENDER_OWNER_ID: ${{ secrets.RENDER_OWNER_ID }} + DEN_DATABASE_URL: ${{ secrets.DEN_DATABASE_URL || secrets.DATABASE_URL }} + DEN_DATABASE_HOST: ${{ secrets.DEN_DATABASE_HOST || secrets.DATABASE_HOST }} + DEN_DATABASE_USERNAME: ${{ secrets.DEN_DATABASE_USERNAME || secrets.DATABASE_USERNAME }} + DEN_DATABASE_PASSWORD: ${{ secrets.DEN_DATABASE_PASSWORD || secrets.DATABASE_PASSWORD }} + DEN_BETTER_AUTH_SECRET: ${{ secrets.DEN_BETTER_AUTH_SECRET }} + DEN_GITHUB_CLIENT_ID: ${{ secrets.DEN_GITHUB_CLIENT_ID }} + DEN_GITHUB_CLIENT_SECRET: ${{ secrets.DEN_GITHUB_CLIENT_SECRET }} + DEN_GOOGLE_CLIENT_ID: ${{ secrets.DEN_GOOGLE_CLIENT_ID }} + DEN_GOOGLE_CLIENT_SECRET: ${{ secrets.DEN_GOOGLE_CLIENT_SECRET }} + 
DAYTONA_API_KEY: ${{ secrets.DAYTONA_API_KEY }} + DEN_BETTER_AUTH_URL: ${{ vars.DEN_BETTER_AUTH_URL }} + DEN_PROVISIONER_MODE: ${{ vars.DEN_PROVISIONER_MODE }} + DEN_RENDER_WORKER_PLAN: ${{ vars.DEN_RENDER_WORKER_PLAN }} + DEN_RENDER_WORKER_OPENWORK_VERSION: ${{ vars.DEN_RENDER_WORKER_OPENWORK_VERSION }} + DEN_CORS_ORIGINS: ${{ vars.DEN_CORS_ORIGINS }} + DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX: ${{ vars.DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX }} + DEN_RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS: ${{ vars.DEN_RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS }} + DEN_VERCEL_API_BASE: ${{ vars.DEN_VERCEL_API_BASE }} + DEN_VERCEL_TEAM_ID: ${{ vars.DEN_VERCEL_TEAM_ID }} + DEN_VERCEL_TEAM_SLUG: ${{ vars.DEN_VERCEL_TEAM_SLUG }} + DEN_VERCEL_DNS_DOMAIN: ${{ vars.DEN_VERCEL_DNS_DOMAIN }} + DEN_DAYTONA_API_URL: ${{ vars.DEN_DAYTONA_API_URL }} + DEN_DAYTONA_TARGET: ${{ vars.DEN_DAYTONA_TARGET }} + DEN_DAYTONA_SNAPSHOT: ${{ vars.DEN_DAYTONA_SNAPSHOT }} + DEN_DAYTONA_WORKER_PROXY_BASE_URL: ${{ vars.DEN_DAYTONA_WORKER_PROXY_BASE_URL }} + DEN_DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: ${{ vars.DEN_DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS }} + DEN_DAYTONA_OPENWORK_VERSION: ${{ vars.DEN_DAYTONA_OPENWORK_VERSION }} + DEN_POLAR_FEATURE_GATE_ENABLED: ${{ vars.DEN_POLAR_FEATURE_GATE_ENABLED }} + DEN_POLAR_API_BASE: ${{ vars.DEN_POLAR_API_BASE }} + DEN_POLAR_SUCCESS_URL: ${{ vars.DEN_POLAR_SUCCESS_URL }} + DEN_POLAR_RETURN_URL: ${{ vars.DEN_POLAR_RETURN_URL }} + VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }} + POLAR_ACCESS_TOKEN: ${{ secrets.POLAR_ACCESS_TOKEN }} + POLAR_PRODUCT_ID: ${{ secrets.POLAR_PRODUCT_ID }} + POLAR_BENEFIT_ID: ${{ secrets.POLAR_BENEFIT_ID }} + run: | + python3 <<'PY' + import json + import os + import time + import urllib.error + import urllib.parse + import urllib.request + + api_key = os.environ["RENDER_API_KEY"] + service_id = os.environ["RENDER_DEN_CONTROL_PLANE_SERVICE_ID"] + owner_id = os.environ["RENDER_OWNER_ID"] + openwork_version = 
os.environ.get("DEN_RENDER_WORKER_OPENWORK_VERSION") + worker_plan = os.environ.get("DEN_RENDER_WORKER_PLAN") or "standard" + provisioner_mode = (os.environ.get("DEN_PROVISIONER_MODE") or "daytona").strip().lower() or "daytona" + configured_cors_origins = os.environ.get("DEN_CORS_ORIGINS") or "" + worker_public_domain_suffix = os.environ.get("DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX") or "openwork.studio" + custom_domain_ready_timeout_ms = os.environ.get("DEN_RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS") or "240000" + vercel_api_base = os.environ.get("DEN_VERCEL_API_BASE") or "https://api.vercel.com" + vercel_team_id = os.environ.get("DEN_VERCEL_TEAM_ID") or "" + vercel_team_slug = os.environ.get("DEN_VERCEL_TEAM_SLUG") or "prologe" + vercel_dns_domain = os.environ.get("DEN_VERCEL_DNS_DOMAIN") or worker_public_domain_suffix + vercel_token = os.environ.get("VERCEL_TOKEN") or "" + daytona_api_url = os.environ.get("DEN_DAYTONA_API_URL") or "https://app.daytona.io/api" + daytona_api_key = os.environ.get("DAYTONA_API_KEY") or "" + daytona_target = os.environ.get("DEN_DAYTONA_TARGET") or "" + daytona_snapshot = os.environ.get("DEN_DAYTONA_SNAPSHOT") or "" + daytona_worker_proxy_base_url = os.environ.get("DEN_DAYTONA_WORKER_PROXY_BASE_URL") or "" + daytona_signed_preview_expires_seconds = os.environ.get("DEN_DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS") or "86400" + daytona_openwork_version = os.environ.get("DEN_DAYTONA_OPENWORK_VERSION") or "" + paywall_enabled = (os.environ.get("DEN_POLAR_FEATURE_GATE_ENABLED") or "false").lower() == "true" + polar_api_base = os.environ.get("DEN_POLAR_API_BASE") or "https://api.polar.sh" + polar_success_url = os.environ.get("DEN_POLAR_SUCCESS_URL") or "https://app.openwork.software" + polar_return_url = os.environ.get("DEN_POLAR_RETURN_URL") or polar_success_url + polar_access_token = os.environ.get("POLAR_ACCESS_TOKEN") or "" + polar_product_id = os.environ.get("POLAR_PRODUCT_ID") or "" + polar_benefit_id = os.environ.get("POLAR_BENEFIT_ID") or "" 
+ github_client_id = os.environ.get("DEN_GITHUB_CLIENT_ID") or "" + github_client_secret = os.environ.get("DEN_GITHUB_CLIENT_SECRET") or "" + google_client_id = os.environ.get("DEN_GOOGLE_CLIENT_ID") or "" + google_client_secret = os.environ.get("DEN_GOOGLE_CLIENT_SECRET") or "" + better_auth_url = os.environ.get("DEN_BETTER_AUTH_URL") or "https://app.openwork.software" + + if bool(github_client_id) != bool(github_client_secret): + raise RuntimeError( + "DEN_GITHUB_CLIENT_ID and DEN_GITHUB_CLIENT_SECRET must either both be set or both be empty" + ) + + if bool(google_client_id) != bool(google_client_secret): + raise RuntimeError( + "DEN_GOOGLE_CLIENT_ID and DEN_GOOGLE_CLIENT_SECRET must either both be set or both be empty" + ) + + def validate_redirect_url(name: str, value: str): + parsed = urllib.parse.urlparse(value) + if parsed.scheme not in {"http", "https"} or not parsed.netloc: + raise RuntimeError(f"{name} must be an absolute http(s) URL, got: {value}") + + validate_redirect_url("DEN_POLAR_SUCCESS_URL", polar_success_url) + validate_redirect_url("DEN_POLAR_RETURN_URL", polar_return_url) + validate_redirect_url("DEN_BETTER_AUTH_URL", better_auth_url) + + if provisioner_mode == "daytona": + if not daytona_api_key: + raise RuntimeError("DEN_PROVISIONER_MODE=daytona requires DAYTONA_API_KEY") + if not daytona_worker_proxy_base_url: + raise RuntimeError("DEN_PROVISIONER_MODE=daytona requires DEN_DAYTONA_WORKER_PROXY_BASE_URL") + validate_redirect_url("DEN_DAYTONA_WORKER_PROXY_BASE_URL", daytona_worker_proxy_base_url) + + if paywall_enabled and (not polar_access_token or not polar_product_id or not polar_benefit_id): + raise RuntimeError( + "DEN_POLAR_FEATURE_GATE_ENABLED=true requires POLAR_ACCESS_TOKEN, POLAR_PRODUCT_ID, and POLAR_BENEFIT_ID" + ) + + def normalize_origin(value: str) -> str: + trimmed = value.strip() + if trimmed == "*": + return trimmed + return trimmed.rstrip("/") + + def build_cors_origins(raw: str, defaults: list[str]) -> str: + candidates: 
list[str] = [] + if raw.strip(): + candidates.extend(raw.split(",")) + candidates.extend(defaults) + + seen = set() + normalized = [] + for value in candidates: + origin = normalize_origin(value) + if not origin or origin in seen: + continue + seen.add(origin) + normalized.append(origin) + + if not normalized: + raise RuntimeError("Unable to derive CORS_ORIGINS for Den deployment") + + return ",".join(normalized) + + headers = { + "Authorization": f"Bearer {api_key}", + "Accept": "application/json", + "Content-Type": "application/json", + } + + def request(method: str, path: str, body=None): + url = f"https://api.render.com/v1{path}" + data = None + if body is not None: + data = json.dumps(body).encode("utf-8") + req = urllib.request.Request(url, data=data, method=method, headers=headers) + try: + with urllib.request.urlopen(req, timeout=60) as resp: + text = resp.read().decode("utf-8") + return resp.status, json.loads(text) if text else None + except urllib.error.HTTPError as err: + text = err.read().decode("utf-8", "replace") + raise RuntimeError(f"{method} {path} failed ({err.code}): {text[:600]}") + + _, service = request("GET", f"/services/{service_id}") + service_url = (service.get("serviceDetails") or {}).get("url") + if not service_url: + raise RuntimeError(f"Render service {service_id} has no public URL") + + cors_origins = build_cors_origins( + configured_cors_origins, + [ + "https://app.openwork.software", + "https://api.openwork.software", + service_url, + ], + ) + + env_vars = [ + {"key": "BETTER_AUTH_SECRET", "value": os.environ["DEN_BETTER_AUTH_SECRET"]}, + {"key": "BETTER_AUTH_URL", "value": better_auth_url}, + {"key": "GITHUB_CLIENT_ID", "value": github_client_id}, + {"key": "GITHUB_CLIENT_SECRET", "value": github_client_secret}, + {"key": "GOOGLE_CLIENT_ID", "value": google_client_id}, + {"key": "GOOGLE_CLIENT_SECRET", "value": google_client_secret}, + {"key": "CORS_ORIGINS", "value": cors_origins}, + {"key": "PROVISIONER_MODE", "value": 
provisioner_mode}, + {"key": "RENDER_API_BASE", "value": "https://api.render.com/v1"}, + {"key": "RENDER_API_KEY", "value": api_key}, + {"key": "RENDER_OWNER_ID", "value": owner_id}, + {"key": "RENDER_WORKER_REPO", "value": "https://github.com/different-ai/openwork"}, + {"key": "RENDER_WORKER_BRANCH", "value": "dev"}, + {"key": "RENDER_WORKER_ROOT_DIR", "value": "services/den-worker-runtime"}, + {"key": "RENDER_WORKER_PLAN", "value": worker_plan}, + {"key": "RENDER_WORKER_REGION", "value": "oregon"}, + {"key": "RENDER_WORKER_NAME_PREFIX", "value": "den-worker-openwork"}, + {"key": "RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX", "value": worker_public_domain_suffix}, + {"key": "RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS", "value": custom_domain_ready_timeout_ms}, + {"key": "RENDER_PROVISION_TIMEOUT_MS", "value": "900000"}, + {"key": "RENDER_HEALTHCHECK_TIMEOUT_MS", "value": "180000"}, + {"key": "RENDER_POLL_INTERVAL_MS", "value": "5000"}, + {"key": "VERCEL_API_BASE", "value": vercel_api_base}, + {"key": "VERCEL_TOKEN", "value": vercel_token}, + {"key": "VERCEL_TEAM_ID", "value": vercel_team_id}, + {"key": "VERCEL_TEAM_SLUG", "value": vercel_team_slug}, + {"key": "VERCEL_DNS_DOMAIN", "value": vercel_dns_domain}, + {"key": "POLAR_FEATURE_GATE_ENABLED", "value": "true" if paywall_enabled else "false"}, + {"key": "POLAR_API_BASE", "value": polar_api_base}, + {"key": "POLAR_ACCESS_TOKEN", "value": polar_access_token}, + {"key": "POLAR_PRODUCT_ID", "value": polar_product_id}, + {"key": "POLAR_BENEFIT_ID", "value": polar_benefit_id}, + {"key": "POLAR_SUCCESS_URL", "value": polar_success_url}, + {"key": "POLAR_RETURN_URL", "value": polar_return_url}, + ] + + database_url = os.environ.get("DEN_DATABASE_URL") or "" + database_host = os.environ.get("DEN_DATABASE_HOST") or "" + database_username = os.environ.get("DEN_DATABASE_USERNAME") or "" + database_password = os.environ.get("DEN_DATABASE_PASSWORD") or "" + + if database_url: + env_vars.append({"key": "DATABASE_URL", "value": 
database_url}) + else: + env_vars.extend( + [ + {"key": "DATABASE_HOST", "value": database_host}, + {"key": "DATABASE_USERNAME", "value": database_username}, + {"key": "DATABASE_PASSWORD", "value": database_password}, + ] + ) + + if provisioner_mode == "daytona": + env_vars.extend( + [ + {"key": "DAYTONA_API_URL", "value": daytona_api_url}, + {"key": "DAYTONA_API_KEY", "value": daytona_api_key}, + {"key": "DAYTONA_TARGET", "value": daytona_target}, + {"key": "DAYTONA_SNAPSHOT", "value": daytona_snapshot}, + {"key": "DAYTONA_WORKER_PROXY_BASE_URL", "value": daytona_worker_proxy_base_url}, + {"key": "DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS", "value": daytona_signed_preview_expires_seconds}, + ] + ) + + if daytona_openwork_version: + env_vars.append({"key": "DAYTONA_OPENWORK_VERSION", "value": daytona_openwork_version}) + + if openwork_version: + env_vars.append({"key": "RENDER_WORKER_OPENWORK_VERSION", "value": openwork_version}) + + request("PUT", f"/services/{service_id}/env-vars", env_vars) + _, deploy = request("POST", f"/services/{service_id}/deploys", {}) + deploy_id = deploy.get("id") or (deploy.get("deploy") or {}).get("id") + if not deploy_id: + raise RuntimeError(f"Unexpected deploy response: {deploy}") + + terminal = {"live", "update_failed", "build_failed", "canceled"} + started = time.time() + + while time.time() - started < 1800: + _, deploys = request("GET", f"/services/{service_id}/deploys?limit=1") + latest = deploys[0]["deploy"] if deploys else None + if latest and latest.get("id") == deploy_id and latest.get("status") in terminal: + status = latest.get("status") + if status != "live": + raise RuntimeError(f"Render deploy {deploy_id} ended with {status}") + print(f"Render deploy {deploy_id} is live at {service_url}") + break + time.sleep(10) + else: + raise RuntimeError(f"Timed out waiting for deploy {deploy_id}") + PY diff --git a/.github/workflows/deploy-den.yml b/.github/workflows/deploy-den.yml new file mode 100644 index 00000000..52087e30 --- 
/dev/null +++ b/.github/workflows/deploy-den.yml @@ -0,0 +1,17 @@ +name: Deploy Den (disabled) + +on: + workflow_dispatch: + +jobs: + disabled: + if: false + runs-on: ubuntu-latest + steps: + - name: Disabled + run: | + echo "deploy-den.yml is intentionally disabled" + +# Historical workflow intentionally commented out. +# The previous Render deployment automation for the legacy Den service +# should not run anymore. diff --git a/packages/den-db/.env.example b/packages/den-db/.env.example new file mode 100644 index 00000000..346af836 --- /dev/null +++ b/packages/den-db/.env.example @@ -0,0 +1,10 @@ +# MySQL mode: if DATABASE_URL is set, den-db uses mysql/mysql2. +DATABASE_URL= + +# PlanetScale mode: used when DATABASE_URL is not set. +DATABASE_HOST= +DATABASE_USERNAME= +DATABASE_PASSWORD= + +# Optional explicit env file path for Drizzle commands. +# OPENWORK_DEN_DB_ENV_PATH=/absolute/path/to/.env.production diff --git a/packages/den-db/drizzle.config.ts b/packages/den-db/drizzle.config.ts new file mode 100644 index 00000000..dbe1865f --- /dev/null +++ b/packages/den-db/drizzle.config.ts @@ -0,0 +1,36 @@ +import "./src/load-env.ts" +import path from "node:path" +import { fileURLToPath } from "node:url" +import { defineConfig } from "drizzle-kit" +import { parseMySqlConnectionConfig } from "./src/mysql-config.ts" + +const currentDir = path.dirname(fileURLToPath(import.meta.url)) + +const databaseUrl = process.env.DATABASE_URL?.trim() + +function resolveDrizzleDbCredentials() { + if (databaseUrl) { + return parseMySqlConnectionConfig(databaseUrl) + } + + const host = process.env.DATABASE_HOST?.trim() + const user = process.env.DATABASE_USERNAME?.trim() + const password = process.env.DATABASE_PASSWORD ?? 
"" + + if (!host || !user) { + throw new Error("Provide DATABASE_URL for mysql or DATABASE_HOST/DATABASE_USERNAME/DATABASE_PASSWORD for planetscale") + } + + return { + host, + user, + password, + } +} + +export default defineConfig({ + dialect: "mysql", + schema: path.join(currentDir, "src", "schema.ts"), + out: path.join(currentDir, "..", "..", "services", "den", "drizzle"), + dbCredentials: resolveDrizzleDbCredentials(), +}) diff --git a/packages/den-db/package.json b/packages/den-db/package.json new file mode 100644 index 00000000..f186eae8 --- /dev/null +++ b/packages/den-db/package.json @@ -0,0 +1,31 @@ +{ + "name": "@openwork/den-db", + "private": true, + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "exports": { + ".": "./dist/index.js", + "./drizzle": "./dist/drizzle.js", + "./typeid": "./dist/typeid.js" + }, + "scripts": { + "build": "pnpm run build:utils && tsc -p tsconfig.json", + "build:utils": "pnpm --dir ../utils run build", + "db:generate": "pnpm run build && node --import tsx ./node_modules/drizzle-kit/bin.cjs generate --config drizzle.config.ts", + "db:migrate": "pnpm run build && node --import tsx ./node_modules/drizzle-kit/bin.cjs migrate --config drizzle.config.ts", + "db:push": "pnpm run build && node --import tsx ./node_modules/drizzle-kit/bin.cjs push --config drizzle.config.ts" + }, + "dependencies": { + "@different-ai/openwork-utils": "workspace:*", + "@planetscale/database": "^1.19.0", + "drizzle-orm": "^0.45.1", + "mysql2": "^3.11.3" + }, + "devDependencies": { + "@types/node": "^20.11.30", + "drizzle-kit": "^0.31.9", + "tsx": "^4.21.0", + "typescript": "^5.5.4" + } +} diff --git a/packages/den-db/src/client.ts b/packages/den-db/src/client.ts new file mode 100644 index 00000000..1f981c58 --- /dev/null +++ b/packages/den-db/src/client.ts @@ -0,0 +1,182 @@ +import { Client } from "@planetscale/database" +import { drizzle } from "drizzle-orm/mysql2" +import { drizzle as drizzlePlanetScale } from 
"drizzle-orm/planetscale-serverless" +import type { FieldPacket, QueryOptions, QueryResult } from "mysql2" +import mysql from "mysql2/promise" +import { parseMySqlConnectionConfig } from "./mysql-config.js" +import * as schema from "./schema.js" + +export type DenDbMode = "mysql" | "planetscale" +type DenDb = ReturnType +export type PlanetScaleCredentials = { + host: string + username: string + password: string +} + +const TRANSIENT_DB_ERROR_CODES = new Set([ + "ECONNRESET", + "EPIPE", + "ETIMEDOUT", + "PROTOCOL_CONNECTION_LOST", + "PROTOCOL_ENQUEUE_AFTER_FATAL_ERROR", +]) + +const RETRYABLE_QUERY_PREFIXES = ["select", "show", "describe", "explain"] + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null +} + +function getErrorCode(error: unknown): string | null { + if (!isRecord(error)) { + return null + } + + if (typeof error.code === "string") { + return error.code + } + + return getErrorCode(error.cause) +} + +export function isTransientDbConnectionError(error: unknown): boolean { + const code = getErrorCode(error) + if (!code) { + return false + } + return TRANSIENT_DB_ERROR_CODES.has(code) +} + +function extractSql(value: unknown): string | null { + if (typeof value === "string") { + return value + } + + if (!isRecord(value)) { + return null + } + + if (typeof value.sql === "string") { + return value.sql + } + + return null +} + +function isRetryableReadQuery(sql: string | null): boolean { + if (!sql) { + return false + } + + const normalized = sql.trimStart().toLowerCase() + return RETRYABLE_QUERY_PREFIXES.some((prefix) => normalized.startsWith(prefix)) +} + +async function retryReadQuery(label: "query" | "execute", sql: string | null, run: () => Promise): Promise { + try { + return await run() + } catch (error) { + if (!isRetryableReadQuery(sql) || !isTransientDbConnectionError(error)) { + throw error + } + + const queryType = sql?.trimStart().split(/\s+/, 1)[0]?.toUpperCase() ?? 
"QUERY" + console.warn(`[db] transient mysql error on ${label} (${queryType}); retrying once`) + return run() + } +} + +function parsePlanetScaleConfigFromDatabaseUrl(databaseUrl: string): PlanetScaleCredentials { + const parsed = new URL(databaseUrl) + if (!parsed.hostname || !parsed.username) { + throw new Error("DATABASE_URL must include host and username when DB_MODE=planetscale") + } + + return { + host: parsed.hostname, + username: decodeURIComponent(parsed.username), + password: decodeURIComponent(parsed.password), + } +} + +function resolveDbMode(input: { mode?: DenDbMode; databaseUrl?: string | null }): DenDbMode { + if (input.mode) { + return input.mode + } + + return input.databaseUrl ? "mysql" : "planetscale" +} + +export function createDenDb(input: { + databaseUrl?: string | null + mode?: DenDbMode + planetscale?: PlanetScaleCredentials | null +}) { + const mode = resolveDbMode(input) + + if (mode === "planetscale") { + const credentials = input.planetscale ?? (input.databaseUrl ? 
parsePlanetScaleConfigFromDatabaseUrl(input.databaseUrl) : null) + if (!credentials) { + throw new Error("PlanetScale mode requires DATABASE_HOST, DATABASE_USERNAME, and DATABASE_PASSWORD") + } + + const client = new Client(credentials) + return { + client, + db: drizzlePlanetScale(client, { schema }) as unknown as DenDb, + } + } + + if (!input.databaseUrl) { + throw new Error("MySQL mode requires DATABASE_URL") + } + + const client = mysql.createPool({ + ...parseMySqlConnectionConfig(input.databaseUrl), + waitForConnections: true, + connectionLimit: 10, + maxIdle: 10, + idleTimeout: 60_000, + queueLimit: 0, + enableKeepAlive: true, + keepAliveInitialDelay: 0, + }) + + const query = client.query.bind(client) + + async function retryingQuery(sql: string): Promise<[T, FieldPacket[]]> + async function retryingQuery(sql: string, values: unknown): Promise<[T, FieldPacket[]]> + async function retryingQuery(options: QueryOptions): Promise<[T, FieldPacket[]]> + async function retryingQuery(options: QueryOptions, values: unknown): Promise<[T, FieldPacket[]]> + async function retryingQuery( + sqlOrOptions: string | QueryOptions, + values?: unknown, + ): Promise<[T, FieldPacket[]]> { + const sql = extractSql(sqlOrOptions) + return retryReadQuery("query", sql, () => query(sqlOrOptions as never, values as never)) + } + + client.query = retryingQuery + + const execute = client.execute.bind(client) + + async function retryingExecute(sql: string): Promise<[T, FieldPacket[]]> + async function retryingExecute(sql: string, values: unknown): Promise<[T, FieldPacket[]]> + async function retryingExecute(options: QueryOptions): Promise<[T, FieldPacket[]]> + async function retryingExecute(options: QueryOptions, values: unknown): Promise<[T, FieldPacket[]]> + async function retryingExecute( + sqlOrOptions: string | QueryOptions, + values?: unknown, + ): Promise<[T, FieldPacket[]]> { + const sql = extractSql(sqlOrOptions) + return retryReadQuery("execute", sql, () => execute(sqlOrOptions as 
never, values as never)) + } + + client.execute = retryingExecute + + return { + client, + db: drizzle(client, { schema, mode: "default" }) as unknown as DenDb, + } +} diff --git a/packages/den-db/src/columns.ts b/packages/den-db/src/columns.ts new file mode 100644 index 00000000..d082254e --- /dev/null +++ b/packages/den-db/src/columns.ts @@ -0,0 +1,28 @@ +import { customType, varchar } from "drizzle-orm/mysql-core" +import { + type DenTypeId, + type DenTypeIdName, + normalizeDenTypeId, +} from "@different-ai/openwork-utils/typeid" + +const INTERNAL_ID_LENGTH = 64 +const AUTH_EXTERNAL_ID_LENGTH = 36 + +export const authExternalIdColumn = (columnName: string) => + varchar(columnName, { length: AUTH_EXTERNAL_ID_LENGTH }) + +export const denTypeIdColumn = ( + name: TName, + columnName: string, +) => + customType<{ data: DenTypeId; driverData: string }>({ + dataType() { + return `varchar(${INTERNAL_ID_LENGTH})` + }, + toDriver(value) { + return normalizeDenTypeId(name, value) + }, + fromDriver(value) { + return normalizeDenTypeId(name, value) + }, + })(columnName) diff --git a/packages/den-db/src/drizzle.ts b/packages/den-db/src/drizzle.ts new file mode 100644 index 00000000..3888f93b --- /dev/null +++ b/packages/den-db/src/drizzle.ts @@ -0,0 +1 @@ +export { and, asc, desc, eq, gt, isNotNull, isNull, sql } from "drizzle-orm" diff --git a/packages/den-db/src/index.ts b/packages/den-db/src/index.ts new file mode 100644 index 00000000..60eb8c4a --- /dev/null +++ b/packages/den-db/src/index.ts @@ -0,0 +1,5 @@ +export * from "./client.js" +export * from "./columns.js" +export * from "./mysql-config.js" +export * from "./schema.js" +export * from "./typeid.js" diff --git a/packages/den-db/src/load-env.ts b/packages/den-db/src/load-env.ts new file mode 100644 index 00000000..008463c4 --- /dev/null +++ b/packages/den-db/src/load-env.ts @@ -0,0 +1,75 @@ +import { existsSync, readFileSync } from "node:fs" +import path from "node:path" +import { fileURLToPath } from "node:url" + 
+function findUpwards(startDir: string, fileName: string, maxDepth = 6) { + let current = startDir + + for (let depth = 0; depth <= maxDepth; depth += 1) { + const candidate = path.join(current, fileName) + if (existsSync(candidate)) { + return candidate + } + + const parent = path.dirname(current) + if (parent === current) { + break + } + current = parent + } + + return null +} + +function parseEnvFile(contents: string) { + for (const rawLine of contents.split(/\r?\n/)) { + const line = rawLine.trim() + if (!line || line.startsWith("#")) { + continue + } + + const match = line.match(/^([A-Za-z_][A-Za-z0-9_]*)\s*=\s*(.*)$/) + if (!match) { + continue + } + + const key = match[1] + let value = match[2] ?? "" + + if ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'"))) { + value = value.slice(1, -1) + } + + if (process.env[key] === undefined) { + process.env[key] = value + } + } +} + +function loadEnvFile(filePath: string) { + if (!existsSync(filePath)) { + return + } + + parseEnvFile(readFileSync(filePath, "utf8")) +} + +const currentDir = path.dirname(fileURLToPath(import.meta.url)) +const packageDir = path.resolve(currentDir, "..") + +for (const filePath of [ + path.join(packageDir, ".env.local"), + path.join(packageDir, ".env"), +]) { + loadEnvFile(filePath) +} + +const explicitEnvPath = + process.env.OPENWORK_DEN_DB_ENV_PATH?.trim() || + process.env.DATABASE_ENV_FILE?.trim() +const detectedRootEnvPath = findUpwards(path.resolve(packageDir, "..", ".."), ".env") +const envPath = explicitEnvPath || detectedRootEnvPath + +if (envPath) { + loadEnvFile(envPath) +} diff --git a/packages/den-db/src/mysql-config.ts b/packages/den-db/src/mysql-config.ts new file mode 100644 index 00000000..aa6f6f19 --- /dev/null +++ b/packages/den-db/src/mysql-config.ts @@ -0,0 +1,48 @@ +type ParsedMySqlConfig = { + host: string + port: number + user: string + password: string + database: string + ssl?: { + rejectUnauthorized: boolean + } +} + 
+function readSslSettings(parsed: URL) { + const sslAccept = parsed.searchParams.get("sslaccept")?.trim().toLowerCase() + const sslMode = + parsed.searchParams.get("sslmode")?.trim().toLowerCase() ?? + parsed.searchParams.get("ssl-mode")?.trim().toLowerCase() + + const needsSsl = Boolean(sslAccept || sslMode) + if (!needsSsl) { + return undefined + } + + const rejectUnauthorized = + sslAccept === "strict" || + sslMode === "verify-ca" || + sslMode === "verify-full" || + sslMode === "require" + + return { rejectUnauthorized } +} + +export function parseMySqlConnectionConfig(databaseUrl: string): ParsedMySqlConfig { + const parsed = new URL(databaseUrl) + const database = parsed.pathname.replace(/^\//, "") + + if (!parsed.hostname || !parsed.username || !database) { + throw new Error("DATABASE_URL must include host, username, and database for mysql mode") + } + + return { + host: parsed.hostname, + port: Number(parsed.port || "3306"), + user: decodeURIComponent(parsed.username), + password: decodeURIComponent(parsed.password), + database, + ssl: readSslSettings(parsed), + } +} diff --git a/packages/den-db/src/schema.ts b/packages/den-db/src/schema.ts new file mode 100644 index 00000000..2fe716f6 --- /dev/null +++ b/packages/den-db/src/schema.ts @@ -0,0 +1,235 @@ +import { sql } from "drizzle-orm" +import { + boolean, + index, + json, + mysqlEnum, + mysqlTable, + text, + timestamp, + uniqueIndex, + varchar, +} from "drizzle-orm/mysql-core" +import { denTypeIdColumn } from "./columns.js" + +const timestamps = { + created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + updated_at: timestamp("updated_at", { fsp: 3 }) + .notNull() + .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), +} + +export const OrgRole = ["owner", "member"] as const +export const WorkerDestination = ["local", "cloud"] as const +export const WorkerStatus = ["provisioning", "healthy", "failed", "stopped"] as const +export const TokenScope = ["client", "host"] as 
const + +export const AuthUserTable = mysqlTable( + "user", + { + id: denTypeIdColumn("user", "id").notNull().primaryKey(), + name: varchar("name", { length: 255 }).notNull(), + email: varchar("email", { length: 255 }).notNull(), + emailVerified: boolean("email_verified").notNull().default(false), + image: text("image"), + createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + updatedAt: timestamp("updated_at", { fsp: 3 }) + .notNull() + .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), + }, + (table) => [uniqueIndex("user_email").on(table.email)], +) + +export const AuthSessionTable = mysqlTable( + "session", + { + id: denTypeIdColumn("session", "id").notNull().primaryKey(), + userId: denTypeIdColumn("user", "user_id").notNull(), + token: varchar("token", { length: 255 }).notNull(), + expiresAt: timestamp("expires_at", { fsp: 3 }).notNull(), + ipAddress: text("ip_address"), + userAgent: text("user_agent"), + createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + updatedAt: timestamp("updated_at", { fsp: 3 }) + .notNull() + .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), + }, + (table) => [ + uniqueIndex("session_token").on(table.token), + index("session_user_id").on(table.userId), + ], +) + +export const AuthAccountTable = mysqlTable( + "account", + { + id: denTypeIdColumn("account", "id").notNull().primaryKey(), + userId: denTypeIdColumn("user", "user_id").notNull(), + accountId: text("account_id").notNull(), + providerId: text("provider_id").notNull(), + accessToken: text("access_token"), + refreshToken: text("refresh_token"), + accessTokenExpiresAt: timestamp("access_token_expires_at", { fsp: 3 }), + refreshTokenExpiresAt: timestamp("refresh_token_expires_at", { fsp: 3 }), + scope: text("scope"), + idToken: text("id_token"), + password: text("password"), + createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + updatedAt: timestamp("updated_at", { fsp: 3 }) + .notNull() + 
.default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), + }, + (table) => [index("account_user_id").on(table.userId)], +) + +export const AuthVerificationTable = mysqlTable( + "verification", + { + id: denTypeIdColumn("verification", "id").notNull().primaryKey(), + identifier: varchar("identifier", { length: 255 }).notNull(), + value: text("value").notNull(), + expiresAt: timestamp("expires_at", { fsp: 3 }).notNull(), + createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + updatedAt: timestamp("updated_at", { fsp: 3 }) + .notNull() + .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), + }, + (table) => [index("verification_identifier").on(table.identifier)], +) + +export const user = AuthUserTable +export const session = AuthSessionTable +export const account = AuthAccountTable +export const verification = AuthVerificationTable + +export const OrgTable = mysqlTable( + "org", + { + id: denTypeIdColumn("org", "id").notNull().primaryKey(), + name: varchar("name", { length: 255 }).notNull(), + slug: varchar("slug", { length: 255 }).notNull(), + owner_user_id: denTypeIdColumn("user", "owner_user_id").notNull(), + ...timestamps, + }, + (table) => [uniqueIndex("org_slug").on(table.slug), index("org_owner_user_id").on(table.owner_user_id)], +) + +export const OrgMembershipTable = mysqlTable( + "org_membership", + { + id: denTypeIdColumn("orgMembership", "id").notNull().primaryKey(), + org_id: denTypeIdColumn("org", "org_id").notNull(), + user_id: denTypeIdColumn("user", "user_id").notNull(), + role: mysqlEnum("role", OrgRole).notNull(), + created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + }, + (table) => [index("org_membership_org_id").on(table.org_id), index("org_membership_user_id").on(table.user_id)], +) + +export const AdminAllowlistTable = mysqlTable( + "admin_allowlist", + { + id: denTypeIdColumn("adminAllowlist", "id").notNull().primaryKey(), + email: varchar("email", { length: 255 }).notNull(), + 
note: varchar("note", { length: 255 }), + ...timestamps, + }, + (table) => [uniqueIndex("admin_allowlist_email").on(table.email)], +) + +export const WorkerTable = mysqlTable( + "worker", + { + id: denTypeIdColumn("worker", "id").notNull().primaryKey(), + org_id: denTypeIdColumn("org", "org_id").notNull(), + created_by_user_id: denTypeIdColumn("user", "created_by_user_id"), + name: varchar("name", { length: 255 }).notNull(), + description: varchar("description", { length: 1024 }), + destination: mysqlEnum("destination", WorkerDestination).notNull(), + status: mysqlEnum("status", WorkerStatus).notNull(), + image_version: varchar("image_version", { length: 128 }), + workspace_path: varchar("workspace_path", { length: 1024 }), + sandbox_backend: varchar("sandbox_backend", { length: 64 }), + ...timestamps, + }, + (table) => [ + index("worker_org_id").on(table.org_id), + index("worker_created_by_user_id").on(table.created_by_user_id), + index("worker_status").on(table.status), + ], +) + +export const WorkerInstanceTable = mysqlTable( + "worker_instance", + { + id: denTypeIdColumn("workerInstance", "id").notNull().primaryKey(), + worker_id: denTypeIdColumn("worker", "worker_id").notNull(), + provider: varchar("provider", { length: 64 }).notNull(), + region: varchar("region", { length: 64 }), + url: varchar("url", { length: 2048 }).notNull(), + status: mysqlEnum("status", WorkerStatus).notNull(), + ...timestamps, + }, + (table) => [index("worker_instance_worker_id").on(table.worker_id)], +) + +export const DaytonaSandboxTable = mysqlTable( + "daytona_sandbox", + { + id: denTypeIdColumn("daytonaSandbox", "id").notNull().primaryKey(), + worker_id: denTypeIdColumn("worker", "worker_id").notNull(), + sandbox_id: varchar("sandbox_id", { length: 128 }).notNull(), + workspace_volume_id: varchar("workspace_volume_id", { length: 128 }).notNull(), + data_volume_id: varchar("data_volume_id", { length: 128 }).notNull(), + signed_preview_url: varchar("signed_preview_url", { length: 
2048 }).notNull(), + signed_preview_url_expires_at: timestamp("signed_preview_url_expires_at", { fsp: 3 }).notNull(), + region: varchar("region", { length: 64 }), + ...timestamps, + }, + (table) => [ + uniqueIndex("daytona_sandbox_worker_id").on(table.worker_id), + uniqueIndex("daytona_sandbox_sandbox_id").on(table.sandbox_id), + ], +) + +export const WorkerTokenTable = mysqlTable( + "worker_token", + { + id: denTypeIdColumn("workerToken", "id").notNull().primaryKey(), + worker_id: denTypeIdColumn("worker", "worker_id").notNull(), + scope: mysqlEnum("scope", TokenScope).notNull(), + token: varchar("token", { length: 128 }).notNull(), + created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + revoked_at: timestamp("revoked_at", { fsp: 3 }), + }, + (table) => [ + index("worker_token_worker_id").on(table.worker_id), + uniqueIndex("worker_token_token").on(table.token), + ], +) + +export const WorkerBundleTable = mysqlTable( + "worker_bundle", + { + id: denTypeIdColumn("workerBundle", "id").notNull().primaryKey(), + worker_id: denTypeIdColumn("worker", "worker_id").notNull(), + storage_url: varchar("storage_url", { length: 2048 }).notNull(), + status: varchar("status", { length: 64 }).notNull(), + created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + }, + (table) => [index("worker_bundle_worker_id").on(table.worker_id)], +) + +export const AuditEventTable = mysqlTable( + "audit_event", + { + id: denTypeIdColumn("auditEvent", "id").notNull().primaryKey(), + org_id: denTypeIdColumn("org", "org_id").notNull(), + worker_id: denTypeIdColumn("worker", "worker_id"), + actor_user_id: denTypeIdColumn("user", "actor_user_id").notNull(), + action: varchar("action", { length: 128 }).notNull(), + payload: json("payload"), + created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), + }, + (table) => [index("audit_event_org_id").on(table.org_id), index("audit_event_worker_id").on(table.worker_id)], +) diff --git 
a/packages/den-db/src/typeid.ts b/packages/den-db/src/typeid.ts new file mode 100644 index 00000000..975cdda6 --- /dev/null +++ b/packages/den-db/src/typeid.ts @@ -0,0 +1 @@ +export * from "@different-ai/openwork-utils/typeid" diff --git a/packages/den-db/tsconfig.json b/packages/den-db/tsconfig.json new file mode 100644 index 00000000..850ef6bb --- /dev/null +++ b/packages/den-db/tsconfig.json @@ -0,0 +1,15 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "moduleResolution": "Bundler", + "rootDir": "src", + "outDir": "dist", + "declaration": true, + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "resolveJsonModule": true + }, + "include": ["src"] +} diff --git a/packages/utils/package.json b/packages/utils/package.json new file mode 100644 index 00000000..d2683e83 --- /dev/null +++ b/packages/utils/package.json @@ -0,0 +1,21 @@ +{ + "name": "@different-ai/openwork-utils", + "private": true, + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "exports": { + ".": "./dist/index.js", + "./typeid": "./dist/typeid.js" + }, + "scripts": { + "build": "tsc -p tsconfig.json" + }, + "dependencies": { + "typeid-js": "^1.2.0" + }, + "devDependencies": { + "@types/node": "^20.11.30", + "typescript": "^5.5.4" + } +} diff --git a/packages/utils/src/index.ts b/packages/utils/src/index.ts new file mode 100644 index 00000000..c2cb6c54 --- /dev/null +++ b/packages/utils/src/index.ts @@ -0,0 +1 @@ +export * from "./typeid.js" diff --git a/packages/utils/src/typeid.ts b/packages/utils/src/typeid.ts new file mode 100644 index 00000000..db788435 --- /dev/null +++ b/packages/utils/src/typeid.ts @@ -0,0 +1,55 @@ +import { fromString, getType, typeid } from "typeid-js" + +export const denTypeIdPrefixes = { + user: "usr", + session: "ses", + account: "acc", + verification: "ver", + org: "org", + orgMembership: "om", + adminAllowlist: "aal", + worker: "wrk", + workerInstance: "wki", + daytonaSandbox: "dts", + 
workerToken: "wkt", + workerBundle: "wkb", + auditEvent: "aev", +} as const + +export type DenTypeIdName = keyof typeof denTypeIdPrefixes +export type DenTypeIdPrefix<TName extends DenTypeIdName> = (typeof denTypeIdPrefixes)[TName] +export type DenTypeId<TName extends DenTypeIdName> = `${DenTypeIdPrefix<TName>}_${string}` + +export function createDenTypeId<TName extends DenTypeIdName>(name: TName): DenTypeId<TName> { + return typeid(denTypeIdPrefixes[name]).toString() as DenTypeId<TName> +} + +export function normalizeDenTypeId<TName extends DenTypeIdName>( + name: TName, + value: string, +): DenTypeId<TName> { + const parsed = fromString(value) + const expectedPrefix = denTypeIdPrefixes[name] + + if (getType(parsed) !== expectedPrefix) { + throw new Error(`invalid_den_typeid_prefix:${name}:${getType(parsed)}`) + } + + return parsed as DenTypeId<TName> +} + +export function isDenTypeId<TName extends DenTypeIdName>( + name: TName, + value: unknown, +): value is DenTypeId<TName> { + if (typeof value !== "string") { + return false + } + + try { + normalizeDenTypeId(name, value) + return true + } catch { + return false + } +} diff --git a/packages/utils/tsconfig.json b/packages/utils/tsconfig.json new file mode 100644 index 00000000..850ef6bb --- /dev/null +++ b/packages/utils/tsconfig.json @@ -0,0 +1,15 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "moduleResolution": "Bundler", + "rootDir": "src", + "outDir": "dist", + "declaration": true, + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "resolveJsonModule": true + }, + "include": ["src"] +} diff --git a/packages/web/components/cloud-control.tsx b/packages/web/components/cloud-control.tsx index 59858fb6..46fd4b76 100644 --- a/packages/web/components/cloud-control.tsx +++ b/packages/web/components/cloud-control.tsx @@ -463,7 +463,7 @@ function getWorker(payload: unknown): WorkerLaunch | null { return { workerId: worker.id, workerName: worker.name, - status: typeof worker.status === "string" ? worker.status : "unknown", + status: getEffectiveWorkerStatus(worker.status, instance), provider: instance && typeof instance.provider === "string" ?
instance.provider : null, instanceUrl: instance && typeof instance.url === "string" ? instance.url : null, openworkUrl: instance && typeof instance.url === "string" ? instance.url : null, @@ -489,7 +489,7 @@ function getWorkerSummary(payload: unknown): WorkerSummary | null { return { workerId: worker.id, workerName: worker.name, - status: typeof worker.status === "string" ? worker.status : "unknown", + status: getEffectiveWorkerStatus(worker.status, instance), instanceUrl: instance && typeof instance.url === "string" ? instance.url : null, provider: instance && typeof instance.provider === "string" ? instance.provider : null, isMine: worker.isMine === true @@ -667,7 +667,7 @@ function parseWorkerListItem(value: unknown): WorkerListItem | null { return { workerId, workerName, - status: typeof value.status === "string" ? value.status : "unknown", + status: getEffectiveWorkerStatus(value.status, instance), instanceUrl: instance && typeof instance.url === "string" ? instance.url : null, provider: instance && typeof instance.provider === "string" ? instance.provider : null, isMine: value.isMine === true, @@ -728,6 +728,22 @@ function getWorkerStatusCopy(status: string): string { } } +function getEffectiveWorkerStatus(workerStatus: unknown, instance: Record<string, unknown> | null): string { + const normalizedWorkerStatus = typeof workerStatus === "string" ? workerStatus : "unknown"; + const normalized = normalizedWorkerStatus.trim().toLowerCase(); + const instanceStatus = instance && typeof instance.status === "string" ?
instance.status.trim().toLowerCase() : null; + + if (!instanceStatus) { + return normalizedWorkerStatus; + } + + if (normalized === "provisioning" || normalized === "starting") { + return instanceStatus; + } + + return normalizedWorkerStatus; +} + function isWorkerLaunch(value: unknown): value is WorkerLaunch { if (!isRecord(value)) { return false; diff --git a/packaging/docker/Dockerfile.den b/packaging/docker/Dockerfile.den new file mode 100644 index 00000000..fe498b74 --- /dev/null +++ b/packaging/docker/Dockerfile.den @@ -0,0 +1,26 @@ +FROM node:22-bookworm-slim + +RUN corepack enable + +WORKDIR /app + +COPY package.json pnpm-lock.yaml pnpm-workspace.yaml /app/ +COPY .npmrc /app/.npmrc +COPY patches /app/patches +COPY packages/utils/package.json /app/packages/utils/package.json +COPY packages/den-db/package.json /app/packages/den-db/package.json +COPY services/den/package.json /app/services/den/package.json + +RUN pnpm install --frozen-lockfile + +COPY packages/utils /app/packages/utils +COPY packages/den-db /app/packages/den-db +COPY services/den /app/services/den + +RUN pnpm --dir /app/packages/utils run build +RUN pnpm --dir /app/packages/den-db run build +RUN pnpm --dir /app/services/den run build + +EXPOSE 8788 + +CMD ["sh", "-lc", "node dist/index.js"] diff --git a/packaging/docker/Dockerfile.den-web b/packaging/docker/Dockerfile.den-web new file mode 100644 index 00000000..b2fc3658 --- /dev/null +++ b/packaging/docker/Dockerfile.den-web @@ -0,0 +1,13 @@ +FROM node:22-bookworm-slim + +WORKDIR /app/packages/web + +COPY packages/web/package.json /app/packages/web/package.json + +RUN npm install --no-package-lock --no-fund --no-audit + +COPY packages/web /app/packages/web + +EXPOSE 3005 + +CMD ["npm", "run", "dev"] diff --git a/packaging/docker/Dockerfile.den-worker-proxy b/packaging/docker/Dockerfile.den-worker-proxy new file mode 100644 index 00000000..3ead446a --- /dev/null +++ b/packaging/docker/Dockerfile.den-worker-proxy @@ -0,0 +1,26 @@ +FROM 
node:22-bookworm-slim + +RUN corepack enable + +WORKDIR /app + +COPY package.json pnpm-lock.yaml pnpm-workspace.yaml /app/ +COPY .npmrc /app/.npmrc +COPY patches /app/patches +COPY packages/utils/package.json /app/packages/utils/package.json +COPY packages/den-db/package.json /app/packages/den-db/package.json +COPY services/den-worker-proxy/package.json /app/services/den-worker-proxy/package.json + +RUN pnpm install --frozen-lockfile + +COPY packages/utils /app/packages/utils +COPY packages/den-db /app/packages/den-db +COPY services/den-worker-proxy /app/services/den-worker-proxy + +RUN pnpm --dir /app/packages/utils run build +RUN pnpm --dir /app/packages/den-db run build +RUN pnpm --dir /app/services/den-worker-proxy run build + +EXPOSE 8789 + +CMD ["sh", "-lc", "node dist/server.js"] diff --git a/packaging/docker/den-dev-up.sh b/packaging/docker/den-dev-up.sh index fc88277e..b3aeaf9a 100755 --- a/packaging/docker/den-dev-up.sh +++ b/packaging/docker/den-dev-up.sh @@ -14,6 +14,7 @@ set -euo pipefail ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" COMPOSE_FILE="$ROOT_DIR/packaging/docker/docker-compose.den-dev.yml" RUNTIME_DIR="$ROOT_DIR/tmp/docker-den-dev" +DAYTONA_ENV_FILE="${DAYTONA_ENV_FILE:-$ROOT_DIR/.env.daytona}" if ! 
command -v docker >/dev/null 2>&1; then echo "docker is required" >&2 @@ -37,72 +38,42 @@ random_hex() { node -e "console.log(require('crypto').randomBytes(${bytes}).toString('hex'))" } -detect_public_host() { - if [ -n "${DEN_PUBLIC_HOST:-}" ]; then - printf '%s\n' "$DEN_PUBLIC_HOST" - return - fi - - local host - host="$(hostname -s 2>/dev/null || hostname 2>/dev/null || true)" - host="${host//$'\n'/}" - host="${host// /}" - if [ -n "$host" ]; then - printf '%s\n' "$host" - return - fi - - printf '%s\n' "localhost" -} - -detect_lan_ipv4() { - node -e ' - const os = require("os"); - const nets = os.networkInterfaces(); - for (const entries of Object.values(nets)) { - for (const entry of entries || []) { - if (!entry || entry.internal || entry.family !== "IPv4") continue; - if (entry.address.startsWith("127.")) continue; - process.stdout.write(entry.address); - process.exit(0); - } - } - ' -} - -join_csv_unique() { - printf "%s\n" "$@" | awk 'NF && !seen[$0]++' | paste -sd, - -} - DEV_ID="$(node -e "console.log(require('crypto').randomUUID().slice(0, 8))")" PROJECT="openwork-den-dev-$DEV_ID" DEN_API_PORT="${DEN_API_PORT:-$(pick_port)}" DEN_WEB_PORT="${DEN_WEB_PORT:-$(pick_port)}" +DEN_WORKER_PROXY_PORT="${DEN_WORKER_PROXY_PORT:-$(pick_port)}" +DEN_MYSQL_PORT="${DEN_MYSQL_PORT:-$(pick_port)}" if [ "$DEN_WEB_PORT" = "$DEN_API_PORT" ]; then DEN_WEB_PORT="$(pick_port)" fi - -PUBLIC_HOST="$(detect_public_host)" -LAN_IPV4="$(detect_lan_ipv4 || true)" - -DEN_BETTER_AUTH_SECRET="${DEN_BETTER_AUTH_SECRET:-$(random_hex 32)}" -DEN_BETTER_AUTH_URL="${DEN_BETTER_AUTH_URL:-http://$PUBLIC_HOST:$DEN_WEB_PORT}" -DEN_PROVISIONER_MODE="${DEN_PROVISIONER_MODE:-stub}" -DEN_WORKER_URL_TEMPLATE="${DEN_WORKER_URL_TEMPLATE:-https://workers.local/{workerId}}" -if [ -z "${DEN_CORS_ORIGINS:-}" ]; then - DEN_CORS_ORIGINS="$(join_csv_unique \ - "http://$PUBLIC_HOST:$DEN_WEB_PORT" \ - "http://$PUBLIC_HOST:$DEN_API_PORT" \ - "http://localhost:$DEN_WEB_PORT" \ - "http://127.0.0.1:$DEN_WEB_PORT" \ 
- "http://localhost:$DEN_API_PORT" \ - "http://127.0.0.1:$DEN_API_PORT" \ - "${LAN_IPV4:+http://$LAN_IPV4:$DEN_WEB_PORT}" \ - "${LAN_IPV4:+http://$LAN_IPV4:$DEN_API_PORT}")" +if [ "$DEN_WORKER_PROXY_PORT" = "$DEN_API_PORT" ] || [ "$DEN_WORKER_PROXY_PORT" = "$DEN_WEB_PORT" ]; then + DEN_WORKER_PROXY_PORT="$(pick_port)" +fi +if [ "$DEN_MYSQL_PORT" = "$DEN_API_PORT" ] || [ "$DEN_MYSQL_PORT" = "$DEN_WEB_PORT" ] || [ "$DEN_MYSQL_PORT" = "$DEN_WORKER_PROXY_PORT" ]; then + DEN_MYSQL_PORT="$(pick_port)" fi -DEN_BETTER_AUTH_TRUSTED_ORIGINS="${DEN_BETTER_AUTH_TRUSTED_ORIGINS:-$DEN_CORS_ORIGINS}" +DEN_BETTER_AUTH_SECRET="${DEN_BETTER_AUTH_SECRET:-$(random_hex 32)}" +DEN_BETTER_AUTH_URL="${DEN_BETTER_AUTH_URL:-http://localhost:$DEN_WEB_PORT}" +DEN_PROVISIONER_MODE="${DEN_PROVISIONER_MODE:-stub}" +DEN_WORKER_URL_TEMPLATE="${DEN_WORKER_URL_TEMPLATE:-https://workers.local/{workerId}}" +DEN_DAYTONA_WORKER_PROXY_BASE_URL="${DEN_DAYTONA_WORKER_PROXY_BASE_URL:-http://localhost:$DEN_WORKER_PROXY_PORT}" +DEN_CORS_ORIGINS="${DEN_CORS_ORIGINS:-http://localhost:$DEN_WEB_PORT,http://127.0.0.1:$DEN_WEB_PORT,http://localhost:$DEN_API_PORT,http://127.0.0.1:$DEN_API_PORT}" + +if [ "$DEN_PROVISIONER_MODE" = "daytona" ] && [ -f "$DAYTONA_ENV_FILE" ]; then + set -a + # shellcheck disable=SC1090 + source "$DAYTONA_ENV_FILE" + set +a +fi + +if [ "$DEN_PROVISIONER_MODE" = "daytona" ] && [ -z "${DAYTONA_API_KEY:-}" ]; then + echo "DAYTONA_API_KEY is required when DEN_PROVISIONER_MODE=daytona" >&2 + echo "Set DAYTONA_ENV_FILE to your .env.daytona path or export DAYTONA_API_KEY before running den-dev-up.sh" >&2 + exit 1 +fi mkdir -p "$RUNTIME_DIR" RUNTIME_FILE="$ROOT_DIR/tmp/.den-dev-env-$DEV_ID" @@ -111,9 +82,12 @@ cat > "$RUNTIME_FILE" <&2 echo "- DEN_API_PORT=$DEN_API_PORT" >&2 echo "- DEN_WEB_PORT=$DEN_WEB_PORT" >&2 +echo "- DEN_WORKER_PROXY_PORT=$DEN_WORKER_PROXY_PORT" >&2 +echo "- DEN_MYSQL_PORT=$DEN_MYSQL_PORT" >&2 echo "- DEN_BETTER_AUTH_URL=$DEN_BETTER_AUTH_URL" >&2 -echo "- 
DEN_BETTER_AUTH_TRUSTED_ORIGINS=$DEN_BETTER_AUTH_TRUSTED_ORIGINS" >&2 -echo "- DEN_CORS_ORIGINS=$DEN_CORS_ORIGINS" >&2 echo "- DEN_PROVISIONER_MODE=$DEN_PROVISIONER_MODE" >&2 +if [ "$DEN_PROVISIONER_MODE" = "daytona" ]; then + echo "- DAYTONA_API_URL=${DAYTONA_API_URL:-https://app.daytona.io/api}" >&2 + if [ -n "${DAYTONA_TARGET:-}" ]; then + echo "- DAYTONA_TARGET=$DAYTONA_TARGET" >&2 + fi +fi if ! DEN_API_PORT="$DEN_API_PORT" \ DEN_WEB_PORT="$DEN_WEB_PORT" \ + DEN_WORKER_PROXY_PORT="$DEN_WORKER_PROXY_PORT" \ + DEN_MYSQL_PORT="$DEN_MYSQL_PORT" \ DEN_BETTER_AUTH_SECRET="$DEN_BETTER_AUTH_SECRET" \ DEN_BETTER_AUTH_URL="$DEN_BETTER_AUTH_URL" \ - DEN_BETTER_AUTH_TRUSTED_ORIGINS="$DEN_BETTER_AUTH_TRUSTED_ORIGINS" \ DEN_CORS_ORIGINS="$DEN_CORS_ORIGINS" \ DEN_PROVISIONER_MODE="$DEN_PROVISIONER_MODE" \ DEN_WORKER_URL_TEMPLATE="$DEN_WORKER_URL_TEMPLATE" \ - docker compose -p "$PROJECT" -f "$COMPOSE_FILE" up -d --wait; then + DEN_DAYTONA_WORKER_PROXY_BASE_URL="$DEN_DAYTONA_WORKER_PROXY_BASE_URL" \ + DAYTONA_API_URL="${DAYTONA_API_URL:-}" \ + DAYTONA_API_KEY="${DAYTONA_API_KEY:-}" \ + DAYTONA_TARGET="${DAYTONA_TARGET:-}" \ + DAYTONA_SNAPSHOT="${DAYTONA_SNAPSHOT:-}" \ + DAYTONA_OPENWORK_VERSION="${DAYTONA_OPENWORK_VERSION:-}" \ + docker compose -p "$PROJECT" -f "$COMPOSE_FILE" up -d --build --wait; then echo "Den Docker stack failed to start. 
Recent logs:" >&2 docker compose -p "$PROJECT" -f "$COMPOSE_FILE" logs --tail=200 >&2 || true exit 1 @@ -142,15 +129,9 @@ fi echo "" >&2 echo "OpenWork Cloud web UI: http://localhost:$DEN_WEB_PORT" >&2 -echo "OpenWork Cloud web UI (LAN/public): http://$PUBLIC_HOST:$DEN_WEB_PORT" >&2 -if [ -n "$LAN_IPV4" ]; then - echo "OpenWork Cloud web UI (LAN IP): http://$LAN_IPV4:$DEN_WEB_PORT" >&2 -fi echo "Den demo/API: http://localhost:$DEN_API_PORT" >&2 -echo "Den demo/API (LAN/public): http://$PUBLIC_HOST:$DEN_API_PORT" >&2 -if [ -n "$LAN_IPV4" ]; then - echo "Den demo/API (LAN IP): http://$LAN_IPV4:$DEN_API_PORT" >&2 -fi +echo "Worker proxy: http://localhost:$DEN_WORKER_PROXY_PORT" >&2 +echo "MySQL: mysql://root:password@127.0.0.1:$DEN_MYSQL_PORT/openwork_den" >&2 echo "Health check: http://localhost:$DEN_API_PORT/health" >&2 echo "Runtime env file: $RUNTIME_FILE" >&2 echo "" >&2 diff --git a/packaging/docker/docker-compose.den-dev.yml b/packaging/docker/docker-compose.den-dev.yml index 4bfaba18..48b4c9c0 100644 --- a/packaging/docker/docker-compose.den-dev.yml +++ b/packaging/docker/docker-compose.den-dev.yml @@ -8,80 +8,54 @@ # Env overrides (optional, via export or .env): # DEN_API_PORT — host port to map to Den control plane :8788 # DEN_WEB_PORT — host port to map to the cloud web app :3005 +# DEN_WORKER_PROXY_PORT — host port to map to the worker proxy :8789 +# DEN_MYSQL_PORT — host port to map to MySQL :3306 # DEN_BETTER_AUTH_SECRET — Better Auth secret (auto-generated by den-dev-up.sh) -# DEN_PUBLIC_HOST — host used to build default auth origin + LAN/public URLs -# DEN_BETTER_AUTH_URL — browser-facing auth origin (default: http://:) -# DEN_BETTER_AUTH_TRUSTED_ORIGINS — Better Auth trusted origins (defaults to DEN_CORS_ORIGINS) -# DEN_CORS_ORIGINS — comma-separated trusted origins for Express CORS (auto-populated by den-dev-up.sh) -# DEN_PROVISIONER_MODE — stub or render (default: stub) +# DEN_BETTER_AUTH_URL — browser-facing auth origin (default: http://localhost:) 
+# DEN_CORS_ORIGINS — comma-separated trusted origins for Better Auth + CORS +# DEN_PROVISIONER_MODE — stub, render, or daytona (default: stub) # DEN_WORKER_URL_TEMPLATE — worker URL template used by stub provisioning +# DAYTONA_API_URL / DAYTONA_API_KEY / DAYTONA_TARGET / DAYTONA_SNAPSHOT / DAYTONA_OPENWORK_VERSION +# — optional Daytona passthrough vars when DEN_PROVISIONER_MODE=daytona x-shared: &shared - image: node:22-bookworm-slim - working_dir: /app - volumes: - - ../../:/app - - pnpm-store:/root/.local/share/pnpm/store + restart: unless-stopped services: mysql: image: mysql:8.4 command: - --performance_schema=OFF - - --innodb_buffer_pool_size=128M - - --innodb_log_buffer_size=8M - - --max_connections=30 - - --table_open_cache=128 - - --tmp_table_size=16M - - --max_heap_table_size=16M + - --innodb-buffer-pool-size=64M + - --innodb-log-buffer-size=8M + - --tmp-table-size=16M + - --max-heap-table-size=16M environment: MYSQL_ROOT_PASSWORD: password MYSQL_DATABASE: openwork_den - MYSQL_INITDB_SKIP_TZINFO: "1" healthcheck: test: ["CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -ppassword --silent"] interval: 5s timeout: 5s retries: 30 start_period: 10s + ports: + - "${DEN_MYSQL_PORT:-3306}:3306" volumes: - den-mysql-data:/var/lib/mysql den: <<: *shared + build: + context: ../../ + dockerfile: packaging/docker/Dockerfile.den depends_on: mysql: condition: service_healthy - entrypoint: ["/bin/sh", "-c"] - command: - - | - set -e - - apt-get update -qq && apt-get install -y -qq --no-install-recommends \ - curl ca-certificates >/dev/null 2>&1 - - corepack enable && corepack prepare pnpm@10.27.0 --activate - - echo "[den] Installing dependencies..." - pnpm install --no-frozen-lockfile --network-concurrency 1 --child-concurrency 1 - - echo "[den] Running migrations..." 
- pnpm --filter @openwork/den db:migrate - - echo "" - echo "============================================" - echo " Den control plane" - echo " Demo/API: http://localhost:${DEN_API_PORT:-8788}" - echo " Health: http://localhost:${DEN_API_PORT:-8788}/health" - echo " Auth URL: ${DEN_BETTER_AUTH_URL:-http://localhost:3005}" - echo "============================================" - echo "" - - exec pnpm --filter @openwork/den dev ports: - "${DEN_API_PORT:-8788}:8788" healthcheck: - test: ["CMD-SHELL", "curl -sf http://localhost:8788/health || exit 1"] + test: ["CMD", "node", "-e", "fetch('http://127.0.0.1:8788/health').then((res)=>process.exit(res.ok?0:1)).catch(()=>process.exit(1))"] interval: 5s timeout: 5s retries: 30 @@ -92,44 +66,57 @@ services: DATABASE_URL: mysql://root:password@mysql:3306/openwork_den BETTER_AUTH_SECRET: ${DEN_BETTER_AUTH_SECRET:-dev-den-local-auth-secret-please-override-1234567890} BETTER_AUTH_URL: ${DEN_BETTER_AUTH_URL:-http://localhost:3005} - DEN_BETTER_AUTH_TRUSTED_ORIGINS: ${DEN_BETTER_AUTH_TRUSTED_ORIGINS:-${DEN_CORS_ORIGINS:-http://localhost:3005,http://127.0.0.1:3005,http://localhost:8788,http://127.0.0.1:8788}} PORT: "8788" CORS_ORIGINS: ${DEN_CORS_ORIGINS:-http://localhost:3005,http://127.0.0.1:3005,http://localhost:8788,http://127.0.0.1:8788} PROVISIONER_MODE: ${DEN_PROVISIONER_MODE:-stub} WORKER_URL_TEMPLATE: ${DEN_WORKER_URL_TEMPLATE:-} POLAR_FEATURE_GATE_ENABLED: "false" + DAYTONA_API_URL: ${DAYTONA_API_URL:-} + DAYTONA_API_KEY: ${DAYTONA_API_KEY:-} + DAYTONA_TARGET: ${DAYTONA_TARGET:-} + DAYTONA_SNAPSHOT: ${DAYTONA_SNAPSHOT:-} + DAYTONA_OPENWORK_VERSION: ${DAYTONA_OPENWORK_VERSION:-} + DAYTONA_WORKER_PROXY_BASE_URL: ${DEN_DAYTONA_WORKER_PROXY_BASE_URL:-http://localhost:8789} + + worker-proxy: + <<: *shared + build: + context: ../../ + dockerfile: packaging/docker/Dockerfile.den-worker-proxy + depends_on: + mysql: + condition: service_healthy + ports: + - "${DEN_WORKER_PROXY_PORT:-8789}:8789" + healthcheck: + test: ["CMD", "node", 
"-e", "fetch('http://127.0.0.1:8789/unknown').then((res)=>process.exit([404,502].includes(res.status)?0:1)).catch(()=>process.exit(1))"] + interval: 5s + timeout: 5s + retries: 30 + start_period: 90s + environment: + CI: "true" + DATABASE_URL: mysql://root:password@mysql:3306/openwork_den + PORT: "8789" + OPENWORK_DAYTONA_ENV_PATH: ${OPENWORK_DAYTONA_ENV_PATH:-} + DAYTONA_API_URL: ${DAYTONA_API_URL:-} + DAYTONA_API_KEY: ${DAYTONA_API_KEY:-} + DAYTONA_TARGET: ${DAYTONA_TARGET:-} + DAYTONA_OPENWORK_PORT: ${DAYTONA_OPENWORK_PORT:-8787} + DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: ${DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS:-86400} web: <<: *shared + build: + context: ../../ + dockerfile: packaging/docker/Dockerfile.den-web depends_on: den: condition: service_healthy - entrypoint: ["/bin/sh", "-c"] - command: - - | - set -e - - apt-get update -qq && apt-get install -y -qq --no-install-recommends \ - curl ca-certificates >/dev/null 2>&1 - - corepack enable && corepack prepare pnpm@10.27.0 --activate - - echo "[den-web] Installing dependencies..." 
- pnpm install --no-frozen-lockfile --network-concurrency 1 --child-concurrency 1 - - echo "" - echo "============================================" - echo " OpenWork Cloud web app" - echo " URL: http://localhost:${DEN_WEB_PORT:-3005}" - echo " Den API: http://localhost:${DEN_API_PORT:-8788}" - echo "============================================" - echo "" - - exec pnpm --filter @different-ai/openwork-web dev ports: - "${DEN_WEB_PORT:-3005}:3005" healthcheck: - test: ["CMD-SHELL", "curl -sf http://localhost:3005/api/den/health || exit 1"] + test: ["CMD", "node", "-e", "fetch('http://127.0.0.1:3005/api/den/health').then((res)=>process.exit(res.ok?0:1)).catch(()=>process.exit(1))"] interval: 5s timeout: 10s retries: 30 @@ -144,4 +131,3 @@ services: volumes: den-mysql-data: - pnpm-store: diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index e0bf989e..a7a1b82b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -101,6 +101,34 @@ importers: specifier: ^2.11.0 version: 2.11.10(solid-js@1.9.10)(vite@6.4.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + packages/den-db: + dependencies: + '@different-ai/openwork-utils': + specifier: workspace:* + version: link:../utils + '@planetscale/database': + specifier: ^1.19.0 + version: 1.19.0 + drizzle-orm: + specifier: ^0.45.1 + version: 0.45.1(@opentelemetry/api@1.9.0)(@planetscale/database@1.19.0)(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4) + mysql2: + specifier: ^3.11.3 + version: 3.17.4 + devDependencies: + '@types/node': + specifier: ^20.11.30 + version: 20.12.12 + drizzle-kit: + specifier: ^0.31.9 + version: 0.31.9 + tsx: + specifier: ^4.21.0 + version: 4.21.0 + typescript: + specifier: ^5.5.4 + version: 5.9.3 + packages/desktop: devDependencies: '@tauri-apps/cli': @@ -120,7 +148,7 @@ importers: version: 0.577.0(react@18.2.0) next: specifier: 14.2.5 - version: 14.2.5(@playwright/test@1.58.2)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + version: 
14.2.5(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) react: specifier: 18.2.0 version: 18.2.0 @@ -255,11 +283,24 @@ importers: specifier: ^5.6.3 version: 5.9.3 + packages/utils: + dependencies: + typeid-js: + specifier: ^1.2.0 + version: 1.2.0 + devDependencies: + '@types/node': + specifier: ^20.11.30 + version: 20.12.12 + typescript: + specifier: ^5.5.4 + version: 5.9.3 + packages/web: dependencies: next: specifier: 14.2.5 - version: 14.2.5(@playwright/test@1.58.2)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + version: 14.2.5(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) react: specifier: 18.2.0 version: 18.2.0 @@ -291,9 +332,12 @@ importers: services/den: dependencies: + '@daytonaio/sdk': + specifier: ^0.150.0 + version: 0.150.0(ws@8.19.0) better-auth: specifier: ^1.4.18 - version: 1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.45.1(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4))(mysql2@3.17.4)(next@16.1.6(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(solid-js@1.9.10) + version: 1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.45.1(@opentelemetry/api@1.9.0)(@planetscale/database@1.19.0)(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4))(mysql2@3.17.4)(next@16.1.6(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(solid-js@1.9.10) cors: specifier: ^2.8.5 version: 2.8.6 @@ -302,7 +346,7 @@ importers: version: 16.6.1 drizzle-orm: specifier: ^0.45.1 - version: 0.45.1(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4) + version: 0.45.1(@opentelemetry/api@1.9.0)(@planetscale/database@1.19.0)(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4) express: specifier: ^4.19.2 version: 4.22.1 @@ -332,6 +376,80 @@ importers: specifier: ^5.5.4 version: 5.9.3 + services/den-v2: + dependencies: + '@daytonaio/sdk': + specifier: ^0.150.0 + 
version: 0.150.0(ws@8.19.0) + better-auth: + specifier: ^1.4.18 + version: 1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.45.1(@opentelemetry/api@1.9.0)(@planetscale/database@1.19.0)(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4))(mysql2@3.17.4)(next@16.1.6(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(solid-js@1.9.10) + cors: + specifier: ^2.8.5 + version: 2.8.6 + dotenv: + specifier: ^16.4.5 + version: 16.6.1 + drizzle-orm: + specifier: ^0.45.1 + version: 0.45.1(@opentelemetry/api@1.9.0)(@planetscale/database@1.19.0)(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4) + express: + specifier: ^4.19.2 + version: 4.22.1 + mysql2: + specifier: ^3.11.3 + version: 3.17.4 + zod: + specifier: ^4.3.6 + version: 4.3.6 + devDependencies: + '@types/cors': + specifier: ^2.8.17 + version: 2.8.19 + '@types/express': + specifier: ^4.17.21 + version: 4.17.25 + '@types/node': + specifier: ^20.11.30 + version: 20.12.12 + drizzle-kit: + specifier: ^0.31.9 + version: 0.31.9 + tsx: + specifier: ^4.15.7 + version: 4.21.0 + typescript: + specifier: ^5.5.4 + version: 5.9.3 + + services/den-worker-proxy: + dependencies: + '@daytonaio/sdk': + specifier: ^0.150.0 + version: 0.150.0(ws@8.19.0) + '@hono/node-server': + specifier: ^1.13.8 + version: 1.19.11(hono@4.12.8) + dotenv: + specifier: ^16.4.5 + version: 16.6.1 + hono: + specifier: ^4.7.2 + version: 4.12.8 + zod: + specifier: ^4.3.6 + version: 4.3.6 + devDependencies: + '@types/node': + specifier: ^20.11.30 + version: 20.12.12 + tsx: + specifier: ^4.15.7 + version: 4.21.0 + typescript: + specifier: ^5.5.4 + version: 5.9.3 + services/openwork-share: dependencies: '@paper-design/shaders-react': @@ -345,7 +463,7 @@ importers: version: 3.3.1 next: specifier: 16.1.6 - version: 16.1.6(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + version: 
16.1.6(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) react: specifier: 19.2.4 version: 19.2.4 @@ -388,6 +506,171 @@ packages: resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} engines: {node: '>=6.0.0'} + '@aws-crypto/crc32@5.2.0': + resolution: {integrity: sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg==} + engines: {node: '>=16.0.0'} + + '@aws-crypto/crc32c@5.2.0': + resolution: {integrity: sha512-+iWb8qaHLYKrNvGRbiYRHSdKRWhto5XlZUEBwDjYNf+ly5SVYG6zEoYIdxvf5R3zyeP16w4PLBn3rH1xc74Rag==} + + '@aws-crypto/sha1-browser@5.2.0': + resolution: {integrity: sha512-OH6lveCFfcDjX4dbAvCFSYUjJZjDr/3XJ3xHtjn3Oj5b9RjojQo8npoLeA/bNwkOkrSQ0wgrHzXk4tDRxGKJeg==} + + '@aws-crypto/sha256-browser@5.2.0': + resolution: {integrity: sha512-AXfN/lGotSQwu6HNcEsIASo7kWXZ5HYWvfOmSNKDsEqC4OashTp8alTmaz+F7TC2L083SFv5RdB+qU3Vs1kZqw==} + + '@aws-crypto/sha256-js@5.2.0': + resolution: {integrity: sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA==} + engines: {node: '>=16.0.0'} + + '@aws-crypto/supports-web-crypto@5.2.0': + resolution: {integrity: sha512-iAvUotm021kM33eCdNfwIN//F77/IADDSs58i+MDaOqFrVjZo9bAal0NK7HurRuWLLpF1iLX7gbWrjHjeo+YFg==} + + '@aws-crypto/util@5.2.0': + resolution: {integrity: sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==} + + '@aws-sdk/client-s3@3.1009.0': + resolution: {integrity: sha512-luy8CxallkoiGWTqU86ca/BbvkWJjs0oala7uIIRN1JtQxMb5i4Yl/PBZVcQFhbK9kQi0PK0GfD8gIpLkI91fw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/core@3.973.20': + resolution: {integrity: sha512-i3GuX+lowD892F3IuJf8o6AbyDupMTdyTxQrCJGcn71ni5hTZ82L4nQhcdumxZ7XPJRJJVHS/CR3uYOIIs0PVA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/crc64-nvme@3.972.5': + resolution: {integrity: 
sha512-2VbTstbjKdT+yKi8m7b3a9CiVac+pL/IY2PHJwsaGkkHmuuqkJZIErPck1h6P3T9ghQMLSdMPyW6Qp7Di5swFg==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-env@3.972.18': + resolution: {integrity: sha512-X0B8AlQY507i5DwjLByeU2Af4ARsl9Vr84koDcXCbAkplmU+1xBFWxEPrWRAoh56waBne/yJqEloSwvRf4x6XA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-http@3.972.20': + resolution: {integrity: sha512-ey9Lelj001+oOfrbKmS6R2CJAiXX7QKY4Vj9VJv6L2eE6/VjD8DocHIoYqztTm70xDLR4E1jYPTKfIui+eRNDA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-ini@3.972.20': + resolution: {integrity: sha512-5flXSnKHMloObNF+9N0cupKegnH1Z37cdVlpETVgx8/rAhCe+VNlkcZH3HDg2SDn9bI765S+rhNPXGDJJPfbtA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-login@3.972.20': + resolution: {integrity: sha512-gEWo54nfqp2jABMu6HNsjVC4hDLpg9HC8IKSJnp0kqWtxIJYHTmiLSsIfI4ScQjxEwpB+jOOH8dOLax1+hy/Hw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-node@3.972.21': + resolution: {integrity: sha512-hah8if3/B/Q+LBYN5FukyQ1Mym6PLPDsBOBsIgNEYD6wLyZg0UmUF/OKIVC3nX9XH8TfTPuITK+7N/jenVACWA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-process@3.972.18': + resolution: {integrity: sha512-Tpl7SRaPoOLT32jbTWchPsn52hYYgJ0kpiFgnwk8pxTANQdUymVSZkzFvv1+oOgZm1CrbQUP9MBeoMZ9IzLZjA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-sso@3.972.20': + resolution: {integrity: sha512-p+R+PYR5Z7Gjqf/6pvbCnzEHcqPCpLzR7Yf127HjJ6EAb4hUcD+qsNRnuww1sB/RmSeCLxyay8FMyqREw4p1RA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-web-identity@3.972.20': + resolution: {integrity: sha512-rWCmh8o7QY4CsUj63qopzMzkDq/yPpkrpb+CnjBEFSOg/02T/we7sSTVg4QsDiVS9uwZ8VyONhq98qt+pIh3KA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/lib-storage@3.1009.0': + resolution: {integrity: sha512-gHQh1sNeTuxZxPSMSQWOq/Xli8I5499uWyRKMakMSv8N7IYfoyDdyT52Ul6697qcqVaoPHixmYTllfEWMo1AKg==} + engines: {node: '>=20.0.0'} + peerDependencies: + '@aws-sdk/client-s3': 
^3.1009.0 + + '@aws-sdk/middleware-bucket-endpoint@3.972.8': + resolution: {integrity: sha512-WR525Rr2QJSETa9a050isktyWi/4yIGcmY3BQ1kpHqb0LqUglQHCS8R27dTJxxWNZvQ0RVGtEZjTCbZJpyF3Aw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-expect-continue@3.972.8': + resolution: {integrity: sha512-5DTBTiotEES1e2jOHAq//zyzCjeMB78lEHd35u15qnrid4Nxm7diqIf9fQQ3Ov0ChH1V3Vvt13thOnrACmfGVQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-flexible-checksums@3.973.6': + resolution: {integrity: sha512-0nYEgkJH7Yt9k+nZJyllTghnkKaz17TWFcr5Mi0XMVMzYlF4ytDZADQpF2/iJo36cKL5AYSzRsvlykE4M/ErTA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-host-header@3.972.8': + resolution: {integrity: sha512-wAr2REfKsqoKQ+OkNqvOShnBoh+nkPurDKW7uAeVSu6kUECnWlSJiPvnoqxGlfousEY/v9LfS9sNc46hjSYDIQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-location-constraint@3.972.8': + resolution: {integrity: sha512-KaUoFuoFPziIa98DSQsTPeke1gvGXlc5ZGMhy+b+nLxZ4A7jmJgLzjEF95l8aOQN2T/qlPP3MrAyELm8ExXucw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-logger@3.972.8': + resolution: {integrity: sha512-CWl5UCM57WUFaFi5kB7IBY1UmOeLvNZAZ2/OZ5l20ldiJ3TiIz1pC65gYj8X0BCPWkeR1E32mpsCk1L1I4n+lA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-recursion-detection@3.972.8': + resolution: {integrity: sha512-BnnvYs2ZEpdlmZ2PNlV2ZyQ8j8AEkMTjN79y/YA475ER1ByFYrkVR85qmhni8oeTaJcDqbx364wDpitDAA/wCA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-sdk-s3@3.972.20': + resolution: {integrity: sha512-yhva/xL5H4tWQgsBjwV+RRD0ByCzg0TcByDCLp3GXdn/wlyRNfy8zsswDtCvr1WSKQkSQYlyEzPuWkJG0f5HvQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-ssec@3.972.8': + resolution: {integrity: sha512-wqlK0yO/TxEC2UsY9wIlqeeutF6jjLe0f96Pbm40XscTo57nImUk9lBcw0dPgsm0sppFtAkSlDrfpK+pC30Wqw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-user-agent@3.972.21': + resolution: {integrity: sha512-62XRl1GDYPpkt7cx1AX1SPy9wgNE9Iw/NPuurJu4lmhCWS7sGKO+kS53TQ8eRmIxy3skmvNInnk0ZbWrU5Dpyg==} + 
engines: {node: '>=20.0.0'} + + '@aws-sdk/nested-clients@3.996.10': + resolution: {integrity: sha512-SlDol5Z+C7Ivnc2rKGqiqfSUmUZzY1qHfVs9myt/nxVwswgfpjdKahyTzLTx802Zfq0NFRs7AejwKzzzl5Co2w==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/region-config-resolver@3.972.8': + resolution: {integrity: sha512-1eD4uhTDeambO/PNIDVG19A6+v4NdD7xzwLHDutHsUqz0B+i661MwQB2eYO4/crcCvCiQG4SRm1k81k54FEIvw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/signature-v4-multi-region@3.996.8': + resolution: {integrity: sha512-n1qYFD+tbqZuyskVaxUE+t10AUz9g3qzDw3Tp6QZDKmqsjfDmZBd4GIk2EKJJNtcCBtE5YiUjDYA+3djFAFBBg==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/token-providers@3.1009.0': + resolution: {integrity: sha512-KCPLuTqN9u0Rr38Arln78fRG9KXpzsPWmof+PZzfAHMMQq2QED6YjQrkrfiH7PDefLWEposY1o4/eGwrmKA4JA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/types@3.973.6': + resolution: {integrity: sha512-Atfcy4E++beKtwJHiDln2Nby8W/mam64opFPTiHEqgsthqeydFS1pY+OUlN1ouNOmf8ArPU/6cDS65anOP3KQw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/util-arn-parser@3.972.3': + resolution: {integrity: sha512-HzSD8PMFrvgi2Kserxuff5VitNq2sgf3w9qxmskKDiDTThWfVteJxuCS9JXiPIPtmCrp+7N9asfIaVhBFORllA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/util-endpoints@3.996.5': + resolution: {integrity: sha512-Uh93L5sXFNbyR5sEPMzUU8tJ++Ku97EY4udmC01nB8Zu+xfBPwpIwJ6F7snqQeq8h2pf+8SGN5/NoytfKgYPIw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/util-locate-window@3.965.5': + resolution: {integrity: sha512-WhlJNNINQB+9qtLtZJcpQdgZw3SCDCpXdUJP7cToGwHbCWCnRckGlc6Bx/OhWwIYFNAn+FIydY8SZ0QmVu3xTQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/util-user-agent-browser@3.972.8': + resolution: {integrity: sha512-B3KGXJviV2u6Cdw2SDY2aDhoJkVfY/Q/Trwk2CMSkikE1Oi6gRzxhvhIfiRpHfmIsAhV4EA54TVEX8K6CbHbkA==} + + '@aws-sdk/util-user-agent-node@3.973.7': + resolution: {integrity: sha512-Hz6EZMUAEzqUd7e+vZ9LE7mn+5gMbxltXy18v+YSFY+9LBJz15wkNZvw5JqfX3z0FS9n3bgUtz3L5rAsfh4YlA==} + engines: {node: '>=20.0.0'} + peerDependencies: + aws-crt: '>=1.0.0' + 
peerDependenciesMeta: + aws-crt: + optional: true + + '@aws-sdk/xml-builder@3.972.11': + resolution: {integrity: sha512-iitV/gZKQMvY9d7ovmyFnFuTHbBAtrmLnvaSb/3X8vOKyevwtpmEtyc8AdhVWZe0pI/1GsHxlEvQeOePFzy7KQ==} + engines: {node: '>=20.0.0'} + + '@aws/lambda-invoke-store@0.2.4': + resolution: {integrity: sha512-iY8yvjE0y651BixKNPgmv1WrQc+GZ142sb0z4gYnChDDY2YqI4P/jsSopBWrKfAt7LOJAkOXt7rC/hms+WclQQ==} + engines: {node: '>=18.0.0'} + '@babel/code-frame@7.28.6': resolution: {integrity: sha512-JYgintcMjRiCvS8mMECzaEn+m3PfoQiyqukOMCCVQtoJGYJw8j/8LBJEiqkHLkfwCcs74E3pbAUFNg7d9VNJ+Q==} engines: {node: '>=6.9.0'} @@ -576,6 +859,15 @@ packages: '@codemirror/view@6.39.14': resolution: {integrity: sha512-WJcvgHm/6Q7dvGT0YFv/6PSkoc36QlR0VCESS6x9tGsnF1lWLmmYxOgX3HH6v8fo6AvSLgpcs+H0Olre6MKXlg==} + '@daytonaio/api-client@0.150.0': + resolution: {integrity: sha512-NXGE1sgd8+VBzu3B7P/pLrlpci9nMoZecvLmK3zFDh8hr5Ra5vuXJN9pEVJmev93zUItQxHbuvaxaWrYzHevVA==} + + '@daytonaio/sdk@0.150.0': + resolution: {integrity: sha512-JmNulFaLhmpjVVFtaRDZa84fxPuy0axQYVLrj1lvRgcZzcrwJRdHv9FZPMLbKdrbicMh3D7GYA9XeBMYVZBTIg==} + + '@daytonaio/toolbox-api-client@0.150.0': + resolution: {integrity: sha512-7MCbD1FrzYjOaOmqpMDQe7cyoQTSImEOjQ+6Js4NlBOwPlz2PMi352XuG9qrBp9ngNpo8fpduYr35iDOjrpIVg==} + '@dimforge/rapier2d-simd-compat@0.17.3': resolution: {integrity: sha512-bijvwWz6NHsNj5e5i1vtd3dU2pDhthSaTUZSh14DUGGKJfw8eMnlWZsxwHBxB/a3AXVNDjL9abuHw1k9FGR+jg==} @@ -1044,6 +1336,24 @@ packages: '@grammyjs/types@3.23.0': resolution: {integrity: sha512-D3jQ4UWERPsyR3op/YFudMMIPNTU47vy7L51uO9/73tMELmjO/+LX5N36/Y0CG5IQfIsz43MxiHI5rgsK0/k+g==} + '@grpc/grpc-js@1.14.3': + resolution: {integrity: sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA==} + engines: {node: '>=12.10.0'} + + '@grpc/proto-loader@0.8.0': + resolution: {integrity: sha512-rc1hOQtjIWGxcxpb9aHAfLpIctjEnsDehj0DAiVfBlmT84uvR0uUtN2hEi/ecvWVjXUGf5qPF4qEgiLOx1YIMQ==} + engines: {node: '>=6'} + hasBin: true + + 
'@hono/node-server@1.19.11': + resolution: {integrity: sha512-dr8/3zEaB+p0D2n/IUrlPF1HZm586qgJNXK1a9fhg/PzdtkK7Ksd5l312tJX2yBuALqDYBlG20QEbayqPyxn+g==} + engines: {node: '>=18.14.1'} + peerDependencies: + hono: ^4 + + '@iarna/toml@2.2.5': + resolution: {integrity: sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==} + '@img/colour@1.1.0': resolution: {integrity: sha512-Td76q7j57o/tLVdgS746cYARfSyxk8iEfRxewL9h4OMzYhbW4TAcppl0mT4eyqXddh6L/jwoM75mo7ixa/pCeQ==} engines: {node: '>=18'} @@ -1189,6 +1499,10 @@ packages: resolution: {integrity: sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==} engines: {node: 20 || >=22} + '@isaacs/fs-minipass@4.0.1': + resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==} + engines: {node: '>=18.0.0'} + '@jimp/core@1.6.0': resolution: {integrity: sha512-EQQlKU3s9QfdJqiSrZWNTxBs3rKXgO2W+GxNXDtwchF3a4IqxDheFX1ti+Env9hdJXDiYLp2jTRjlxhPthsk8w==} engines: {node: '>=18'} @@ -1317,6 +1631,9 @@ packages: '@jridgewell/trace-mapping@0.3.31': resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + '@js-sdsl/ordered-map@4.4.2': + resolution: {integrity: sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==} + '@lezer/common@1.5.1': resolution: {integrity: sha512-6YRVG9vBkaY7p1IVxL4s44n5nUnaNnGM2/AckNgYOnxTG2kWh1vR8BMxPseWPjRNpb5VtXnMpeYAEAADoRV1Iw==} @@ -1472,6 +1789,192 @@ packages: '@opencode-ai/sdk@1.1.39': resolution: {integrity: sha512-EUYBZAci0bzG9+a7JVINmqAqis71ipG2/D3juvmvvKFyu0YBIT/6b+g3+p82Eb5CU2dujxpPdJJCaexZ1389eQ==} + '@opentelemetry/api-logs@0.207.0': + resolution: {integrity: sha512-lAb0jQRVyleQQGiuuvCOTDVspc14nx6XJjP4FspJ1sNARo3Regq4ZZbrc3rN4b1TYSuUCvgH+UXUPug4SLOqEQ==} + engines: {node: '>=8.0.0'} + + '@opentelemetry/api@1.9.0': + resolution: {integrity: 
sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} + engines: {node: '>=8.0.0'} + + '@opentelemetry/context-async-hooks@2.2.0': + resolution: {integrity: sha512-qRkLWiUEZNAmYapZ7KGS5C4OmBLcP/H2foXeOEaowYCR0wi89fHejrfYfbuLVCMLp/dWZXKvQusdbUEZjERfwQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/core@2.2.0': + resolution: {integrity: sha512-FuabnnUm8LflnieVxs6eP7Z383hgQU4W1e3KJS6aOG3RxWxcHyBxH8fDMHNgu/gFx/M2jvTOW/4/PHhLz6bjWw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/core@2.6.0': + resolution: {integrity: sha512-HLM1v2cbZ4TgYN6KEOj+Bbj8rAKriOdkF9Ed3tG25FoprSiQl7kYc+RRT6fUZGOvx0oMi5U67GoFdT+XUn8zEg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/exporter-logs-otlp-grpc@0.207.0': + resolution: {integrity: sha512-K92RN+kQGTMzFDsCzsYNGqOsXRUnko/Ckk+t/yPJao72MewOLgBUTWVHhebgkNfRCYqDz1v3K0aPT9OJkemvgg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-logs-otlp-http@0.207.0': + resolution: {integrity: sha512-JpOh7MguEUls8eRfkVVW3yRhClo5b9LqwWTOg8+i4gjr/+8eiCtquJnC7whvpTIGyff06cLZ2NsEj+CVP3Mjeg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-logs-otlp-proto@0.207.0': + resolution: {integrity: sha512-RQJEV/K6KPbQrIUbsrRkEe0ufks1o5OGLHy6jbDD8tRjeCsbFHWfg99lYBRqBV33PYZJXsigqMaAbjWGTFYzLw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-metrics-otlp-grpc@0.207.0': + resolution: {integrity: sha512-6flX89W54gkwmqYShdcTBR1AEF5C1Ob0O8pDgmLPikTKyEv27lByr9yBmO5WrP0+5qJuNPHrLfgFQFYi6npDGA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + 
'@opentelemetry/exporter-metrics-otlp-http@0.207.0': + resolution: {integrity: sha512-fG8FAJmvXOrKXGIRN8+y41U41IfVXxPRVwyB05LoMqYSjugx/FSBkMZUZXUT/wclTdmBKtS5MKoi0bEKkmRhSw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-metrics-otlp-proto@0.207.0': + resolution: {integrity: sha512-kDBxiTeQjaRlUQzS1COT9ic+et174toZH6jxaVuVAvGqmxOkgjpLOjrI5ff8SMMQE69r03L3Ll3nPKekLopLwg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-prometheus@0.207.0': + resolution: {integrity: sha512-Y5p1s39FvIRmU+F1++j7ly8/KSqhMmn6cMfpQqiDCqDjdDHwUtSq0XI0WwL3HYGnZeaR/VV4BNmsYQJ7GAPrhw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-grpc@0.207.0': + resolution: {integrity: sha512-7u2ZmcIx6D4KG/+5np4X2qA0o+O0K8cnUDhR4WI/vr5ZZ0la9J9RG+tkSjC7Yz+2XgL6760gSIM7/nyd3yaBLA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-http@0.207.0': + resolution: {integrity: sha512-HSRBzXHIC7C8UfPQdu15zEEoBGv0yWkhEwxqgPCHVUKUQ9NLHVGXkVrf65Uaj7UwmAkC1gQfkuVYvLlD//AnUQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-proto@0.207.0': + resolution: {integrity: sha512-ruUQB4FkWtxHjNmSXjrhmJZFvyMm+tBzHyMm7YPQshApy4wvZUTcrpPyP/A/rCl/8M4BwoVIZdiwijMdbZaq4w==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-zipkin@2.2.0': + resolution: {integrity: sha512-VV4QzhGCT7cWrGasBWxelBjqbNBbyHicWWS/66KoZoe9BzYwFB72SH2/kkc4uAviQlO8iwv2okIJy+/jqqEHTg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/instrumentation-http@0.207.0': + resolution: {integrity: 
sha512-FC4i5hVixTzuhg4SV2ycTEAYx+0E2hm+GwbdoVPSA6kna0pPVI4etzaA9UkpJ9ussumQheFXP6rkGIaFJjMxsw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/instrumentation@0.207.0': + resolution: {integrity: sha512-y6eeli9+TLKnznrR8AZlQMSJT7wILpXH+6EYq5Vf/4Ao+huI7EedxQHwRgVUOMLFbe7VFDvHJrX9/f4lcwnJsA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-exporter-base@0.207.0': + resolution: {integrity: sha512-4RQluMVVGMrHok/3SVeSJ6EnRNkA2MINcX88sh+d/7DjGUrewW/WT88IsMEci0wUM+5ykTpPPNbEOoW+jwHnbw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-grpc-exporter-base@0.207.0': + resolution: {integrity: sha512-eKFjKNdsPed4q9yYqeI5gBTLjXxDM/8jwhiC0icw3zKxHVGBySoDsed5J5q/PGY/3quzenTr3FiTxA3NiNT+nw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-transformer@0.207.0': + resolution: {integrity: sha512-+6DRZLqM02uTIY5GASMZWUwr52sLfNiEe20+OEaZKhztCs3+2LxoTjb6JxFRd9q1qNqckXKYlUKjbH/AhG8/ZA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/propagator-b3@2.2.0': + resolution: {integrity: sha512-9CrbTLFi5Ee4uepxg2qlpQIozoJuoAZU5sKMx0Mn7Oh+p7UrgCiEV6C02FOxxdYVRRFQVCinYR8Kf6eMSQsIsw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/propagator-jaeger@2.2.0': + resolution: {integrity: sha512-FfeOHOrdhiNzecoB1jZKp2fybqmqMPJUXe2ZOydP7QzmTPYcfPeuaclTLYVhK3HyJf71kt8sTl92nV4YIaLaKA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/resources@2.2.0': + resolution: {integrity: sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: 
+ '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/resources@2.6.0': + resolution: {integrity: sha512-D4y/+OGe3JSuYUCBxtH5T9DSAWNcvCb/nQWIga8HNtXTVPQn59j0nTBAgaAXxUVBDl40mG3Tc76b46wPlZaiJQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-logs@0.207.0': + resolution: {integrity: sha512-4MEQmn04y+WFe6cyzdrXf58hZxilvY59lzZj2AccuHW/+BxLn/rGVN/Irsi/F0qfBOpMOrrCLKTExoSL2zoQmg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@2.2.0': + resolution: {integrity: sha512-G5KYP6+VJMZzpGipQw7Giif48h6SGQ2PFKEYCybeXJsOCB4fp8azqMAAzE5lnnHK3ZVwYQrgmFbsUJO/zOnwGw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.9.0 <1.10.0' + + '@opentelemetry/sdk-node@0.207.0': + resolution: {integrity: sha512-hnRsX/M8uj0WaXOBvFenQ8XsE8FLVh2uSnn1rkWu4mx+qu7EKGUZvZng6y/95cyzsqOfiaDDr08Ek4jppkIDNg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@2.2.0': + resolution: {integrity: sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@2.6.0': + resolution: {integrity: sha512-g/OZVkqlxllgFM7qMKqbPV9c1DUPhQ7d4n3pgZFcrnrNft9eJXZM2TNHTPYREJBrtNdRytYyvwjgL5geDKl3EQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-node@2.2.0': + resolution: {integrity: sha512-+OaRja3f0IqGG2kptVeYsrZQK9nKRSpfFrKtRBq4uh6nIB8bTBgaGvYQrQoRrQWQMA5dK5yLhDMDc0dvYvCOIQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/semantic-conventions@1.40.0': + resolution: {integrity: 
sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw==} + engines: {node: '>=14'} + '@opentui/core-darwin-arm64@0.1.77': resolution: {integrity: sha512-SNqmygCMEsPCW7xWjzCZ5caBf36xaprwVdAnFijGDOuIzLA4iaDa6um8cj3TJh7awenN3NTRsuRc7OuH42UH+g==} cpu: [arm64] @@ -1527,11 +2030,45 @@ packages: '@pinojs/redact@0.4.0': resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==} + '@planetscale/database@1.19.0': + resolution: {integrity: sha512-Tv4jcFUFAFjOWrGSio49H6R2ijALv0ZzVBfJKIdm+kl9X046Fh4LLawrF9OMsglVbK6ukqMJsUCeucGAFTBcMA==} + engines: {node: '>=16'} + '@playwright/test@1.58.2': resolution: {integrity: sha512-akea+6bHYBBfA9uQqSYmlJXn61cTa+jbO87xVLCWbTqbWadRVmhxlXATaOjOgcBaWU4ePo0wB41KMFv3o35IXA==} engines: {node: '>=18'} hasBin: true + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: 
sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + '@radix-ui/colors@3.0.0': resolution: {integrity: sha512-FUOsGBkHrYJwCSEtWRCIfQbZG7q1e6DgxCIOe1SUQzDe/7rXXeA47s8yCn6fuTNQAj1Zq4oTFi9Yjp3wzElcxg==} @@ -1676,6 +2213,222 @@ packages: resolution: {integrity: sha512-ERcExbWrnkDN8ovoWWe6Wgt/usanj1dWUd18dJLpctUI4mlPS0nKt81Joh8VI+OPbNnY1lIilVt9gdMBD9U2ig==} engines: {node: '>= 18', npm: '>= 8.6.0'} + '@smithy/abort-controller@4.2.12': + resolution: {integrity: sha512-xolrFw6b+2iYGl6EcOL7IJY71vvyZ0DJ3mcKtpykqPe2uscwtzDZJa1uVQXyP7w9Dd+kGwYnPbMsJrGISKiY/Q==} + engines: {node: '>=18.0.0'} + + '@smithy/chunked-blob-reader-native@4.2.3': + resolution: {integrity: sha512-jA5k5Udn7Y5717L86h4EIv06wIr3xn8GM1qHRi/Nf31annXcXHJjBKvgztnbn2TxH3xWrPBfgwHsOwZf0UmQWw==} + engines: {node: '>=18.0.0'} + + '@smithy/chunked-blob-reader@5.2.2': + resolution: {integrity: sha512-St+kVicSyayWQca+I1rGitaOEH6uKgE8IUWoYnnEX26SWdWQcL6LvMSD19Lg+vYHKdT9B2Zuu7rd3i6Wnyb/iw==} + engines: {node: '>=18.0.0'} + + '@smithy/config-resolver@4.4.11': + resolution: {integrity: sha512-YxFiiG4YDAtX7WMN7RuhHZLeTmRRAOyCbr+zB8e3AQzHPnUhS8zXjB1+cniPVQI3xbWsQPM0X2aaIkO/ME0ymw==} + engines: {node: '>=18.0.0'} + + '@smithy/core@3.23.11': + resolution: {integrity: sha512-952rGf7hBRnhUIaeLp6q4MptKW8sPFe5VvkoZ5qIzFAtx6c/QZ/54FS3yootsyUSf9gJX/NBqEBNdNR7jMIlpQ==} + engines: {node: '>=18.0.0'} + + '@smithy/credential-provider-imds@4.2.12': + resolution: {integrity: sha512-cr2lR792vNZcYMriSIj+Um3x9KWrjcu98kn234xA6reOAFMmbRpQMOv8KPgEmLLtx3eldU6c5wALKFqNOhugmg==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-codec@4.2.12': + resolution: {integrity: 
sha512-FE3bZdEl62ojmy8x4FHqxq2+BuOHlcxiH5vaZ6aqHJr3AIZzwF5jfx8dEiU/X0a8RboyNDjmXjlbr8AdEyLgiA==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-browser@4.2.12': + resolution: {integrity: sha512-XUSuMxlTxV5pp4VpqZf6Sa3vT/Q75FVkLSpSSE3KkWBvAQWeuWt1msTv8fJfgA4/jcJhrbrbMzN1AC/hvPmm5A==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-config-resolver@4.3.12': + resolution: {integrity: sha512-7epsAZ3QvfHkngz6RXQYseyZYHlmWXSTPOfPmXkiS+zA6TBNo1awUaMFL9vxyXlGdoELmCZyZe1nQE+imbmV+Q==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-node@4.2.12': + resolution: {integrity: sha512-D1pFuExo31854eAvg89KMn9Oab/wEeJR6Buy32B49A9Ogdtx5fwZPqBHUlDzaCDpycTFk2+fSQgX689Qsk7UGA==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-universal@4.2.12': + resolution: {integrity: sha512-+yNuTiyBACxOJUTvbsNsSOfH9G9oKbaJE1lNL3YHpGcuucl6rPZMi3nrpehpVOVR2E07YqFFmtwpImtpzlouHQ==} + engines: {node: '>=18.0.0'} + + '@smithy/fetch-http-handler@5.3.15': + resolution: {integrity: sha512-T4jFU5N/yiIfrtrsb9uOQn7RdELdM/7HbyLNr6uO/mpkj1ctiVs7CihVr51w4LyQlXWDpXFn4BElf1WmQvZu/A==} + engines: {node: '>=18.0.0'} + + '@smithy/hash-blob-browser@4.2.13': + resolution: {integrity: sha512-YrF4zWKh+ghLuquldj6e/RzE3xZYL8wIPfkt0MqCRphVICjyyjH8OwKD7LLlKpVEbk4FLizFfC1+gwK6XQdR3g==} + engines: {node: '>=18.0.0'} + + '@smithy/hash-node@4.2.12': + resolution: {integrity: sha512-QhBYbGrbxTkZ43QoTPrK72DoYviDeg6YKDrHTMJbbC+A0sml3kSjzFtXP7BtbyJnXojLfTQldGdUR0RGD8dA3w==} + engines: {node: '>=18.0.0'} + + '@smithy/hash-stream-node@4.2.12': + resolution: {integrity: sha512-O3YbmGExeafuM/kP7Y8r6+1y0hIh3/zn6GROx0uNlB54K9oihAL75Qtc+jFfLNliTi6pxOAYZrRKD9A7iA6UFw==} + engines: {node: '>=18.0.0'} + + '@smithy/invalid-dependency@4.2.12': + resolution: {integrity: sha512-/4F1zb7Z8LOu1PalTdESFHR0RbPwHd3FcaG1sI3UEIriQTWakysgJr65lc1jj6QY5ye7aFsisajotH6UhWfm/g==} + engines: {node: '>=18.0.0'} + + '@smithy/is-array-buffer@2.2.0': + resolution: {integrity: 
sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA==} + engines: {node: '>=14.0.0'} + + '@smithy/is-array-buffer@4.2.2': + resolution: {integrity: sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow==} + engines: {node: '>=18.0.0'} + + '@smithy/md5-js@4.2.12': + resolution: {integrity: sha512-W/oIpHCpWU2+iAkfZYyGWE+qkpuf3vEXHLxQQDx9FPNZTTdnul0dZ2d/gUFrtQ5je1G2kp4cjG0/24YueG2LbQ==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-content-length@4.2.12': + resolution: {integrity: sha512-YE58Yz+cvFInWI/wOTrB+DbvUVz/pLn5mC5MvOV4fdRUc6qGwygyngcucRQjAhiCEbmfLOXX0gntSIcgMvAjmA==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-endpoint@4.4.25': + resolution: {integrity: sha512-dqjLwZs2eBxIUG6Qtw8/YZ4DvzHGIf0DA18wrgtfP6a50UIO7e2nY0FPdcbv5tVJKqWCCU5BmGMOUwT7Puan+A==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-retry@4.4.42': + resolution: {integrity: sha512-vbwyqHRIpIZutNXZpLAozakzamcINaRCpEy1MYmK6xBeW3xN+TyPRA123GjXnuxZIjc9848MRRCugVMTXxC4Eg==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-serde@4.2.14': + resolution: {integrity: sha512-+CcaLoLa5apzSRtloOyG7lQvkUw2ZDml3hRh4QiG9WyEPfW5Ke/3tPOPiPjUneuT59Tpn8+c3RVaUvvkkwqZwg==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-stack@4.2.12': + resolution: {integrity: sha512-kruC5gRHwsCOuyCd4ouQxYjgRAym2uDlCvQ5acuMtRrcdfg7mFBg6blaxcJ09STpt3ziEkis6bhg1uwrWU7txw==} + engines: {node: '>=18.0.0'} + + '@smithy/node-config-provider@4.3.12': + resolution: {integrity: sha512-tr2oKX2xMcO+rBOjobSwVAkV05SIfUKz8iI53rzxEmgW3GOOPOv0UioSDk+J8OpRQnpnhsO3Af6IEBabQBVmiw==} + engines: {node: '>=18.0.0'} + + '@smithy/node-http-handler@4.4.16': + resolution: {integrity: sha512-ULC8UCS/HivdCB3jhi+kLFYe4B5gxH2gi9vHBfEIiRrT2jfKiZNiETJSlzRtE6B26XbBHjPtc8iZKSNqMol9bw==} + engines: {node: '>=18.0.0'} + + '@smithy/property-provider@4.2.12': + resolution: {integrity: 
sha512-jqve46eYU1v7pZ5BM+fmkbq3DerkSluPr5EhvOcHxygxzD05ByDRppRwRPPpFrsFo5yDtCYLKu+kreHKVrvc7A==} + engines: {node: '>=18.0.0'} + + '@smithy/protocol-http@5.3.12': + resolution: {integrity: sha512-fit0GZK9I1xoRlR4jXmbLhoN0OdEpa96ul8M65XdmXnxXkuMxM0Y8HDT0Fh0Xb4I85MBvBClOzgSrV1X2s1Hxw==} + engines: {node: '>=18.0.0'} + + '@smithy/querystring-builder@4.2.12': + resolution: {integrity: sha512-6wTZjGABQufekycfDGMEB84BgtdOE/rCVTov+EDXQ8NHKTUNIp/j27IliwP7tjIU9LR+sSzyGBOXjeEtVgzCHg==} + engines: {node: '>=18.0.0'} + + '@smithy/querystring-parser@4.2.12': + resolution: {integrity: sha512-P2OdvrgiAKpkPNKlKUtWbNZKB1XjPxM086NeVhK+W+wI46pIKdWBe5QyXvhUm3MEcyS/rkLvY8rZzyUdmyDZBw==} + engines: {node: '>=18.0.0'} + + '@smithy/service-error-classification@4.2.12': + resolution: {integrity: sha512-LlP29oSQN0Tw0b6D0Xo6BIikBswuIiGYbRACy5ujw/JgWSzTdYj46U83ssf6Ux0GyNJVivs2uReU8pt7Eu9okQ==} + engines: {node: '>=18.0.0'} + + '@smithy/shared-ini-file-loader@4.4.7': + resolution: {integrity: sha512-HrOKWsUb+otTeo1HxVWeEb99t5ER1XrBi/xka2Wv6NVmTbuCUC1dvlrksdvxFtODLBjsC+PHK+fuy2x/7Ynyiw==} + engines: {node: '>=18.0.0'} + + '@smithy/signature-v4@5.3.12': + resolution: {integrity: sha512-B/FBwO3MVOL00DaRSXfXfa/TRXRheagt/q5A2NM13u7q+sHS59EOVGQNfG7DkmVtdQm5m3vOosoKAXSqn/OEgw==} + engines: {node: '>=18.0.0'} + + '@smithy/smithy-client@4.12.5': + resolution: {integrity: sha512-UqwYawyqSr/aog8mnLnfbPurS0gi4G7IYDcD28cUIBhsvWs1+rQcL2IwkUQ+QZ7dibaoRzhNF99fAQ9AUcO00w==} + engines: {node: '>=18.0.0'} + + '@smithy/types@4.13.1': + resolution: {integrity: sha512-787F3yzE2UiJIQ+wYW1CVg2odHjmaWLGksnKQHUrK/lYZSEcy1msuLVvxaR/sI2/aDe9U+TBuLsXnr3vod1g0g==} + engines: {node: '>=18.0.0'} + + '@smithy/url-parser@4.2.12': + resolution: {integrity: sha512-wOPKPEpso+doCZGIlr+e1lVI6+9VAKfL4kZWFgzVgGWY2hZxshNKod4l2LXS3PRC9otH/JRSjtEHqQ/7eLciRA==} + engines: {node: '>=18.0.0'} + + '@smithy/util-base64@4.3.2': + resolution: {integrity: 
sha512-XRH6b0H/5A3SgblmMa5ErXQ2XKhfbQB+Fm/oyLZ2O2kCUrwgg55bU0RekmzAhuwOjA9qdN5VU2BprOvGGUkOOQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-body-length-browser@4.2.2': + resolution: {integrity: sha512-JKCrLNOup3OOgmzeaKQwi4ZCTWlYR5H4Gm1r2uTMVBXoemo1UEghk5vtMi1xSu2ymgKVGW631e2fp9/R610ZjQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-body-length-node@4.2.3': + resolution: {integrity: sha512-ZkJGvqBzMHVHE7r/hcuCxlTY8pQr1kMtdsVPs7ex4mMU+EAbcXppfo5NmyxMYi2XU49eqaz56j2gsk4dHHPG/g==} + engines: {node: '>=18.0.0'} + + '@smithy/util-buffer-from@2.2.0': + resolution: {integrity: sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA==} + engines: {node: '>=14.0.0'} + + '@smithy/util-buffer-from@4.2.2': + resolution: {integrity: sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q==} + engines: {node: '>=18.0.0'} + + '@smithy/util-config-provider@4.2.2': + resolution: {integrity: sha512-dWU03V3XUprJwaUIFVv4iOnS1FC9HnMHDfUrlNDSh4315v0cWyaIErP8KiqGVbf5z+JupoVpNM7ZB3jFiTejvQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-defaults-mode-browser@4.3.41': + resolution: {integrity: sha512-M1w1Ux0rSVvBOxIIiqbxvZvhnjQ+VUjJrugtORE90BbadSTH+jsQL279KRL3Hv0w69rE7EuYkV/4Lepz/NBW9g==} + engines: {node: '>=18.0.0'} + + '@smithy/util-defaults-mode-node@4.2.44': + resolution: {integrity: sha512-YPze3/lD1KmWuZsl9JlfhcgGLX7AXhSoaCDtiPntUjNW5/YY0lOHjkcgxyE9x/h5vvS1fzDifMGjzqnNlNiqOQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-endpoints@3.3.3': + resolution: {integrity: sha512-VACQVe50j0HZPjpwWcjyT51KUQ4AnsvEaQ2lKHOSL4mNLD0G9BjEniQ+yCt1qqfKfiAHRAts26ud7hBjamrwig==} + engines: {node: '>=18.0.0'} + + '@smithy/util-hex-encoding@4.2.2': + resolution: {integrity: sha512-Qcz3W5vuHK4sLQdyT93k/rfrUwdJ8/HZ+nMUOyGdpeGA1Wxt65zYwi3oEl9kOM+RswvYq90fzkNDahPS8K0OIg==} + engines: {node: '>=18.0.0'} + + '@smithy/util-middleware@4.2.12': + resolution: {integrity: 
sha512-Er805uFUOvgc0l8nv0e0su0VFISoxhJ/AwOn3gL2NWNY2LUEldP5WtVcRYSQBcjg0y9NfG8JYrCJaYDpupBHJQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-retry@4.2.12': + resolution: {integrity: sha512-1zopLDUEOwumjcHdJ1mwBHddubYF8GMQvstVCLC54Y46rqoHwlIU+8ZzUeaBcD+WCJHyDGSeZ2ml9YSe9aqcoQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-stream@4.5.19': + resolution: {integrity: sha512-v4sa+3xTweL1CLO2UP0p7tvIMH/Rq1X4KKOxd568mpe6LSLMQCnDHs4uv7m3ukpl3HvcN2JH6jiCS0SNRXKP/w==} + engines: {node: '>=18.0.0'} + + '@smithy/util-uri-escape@4.2.2': + resolution: {integrity: sha512-2kAStBlvq+lTXHyAZYfJRb/DfS3rsinLiwb+69SstC9Vb0s9vNWkRwpnj918Pfi85mzi42sOqdV72OLxWAISnw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-utf8@2.3.0': + resolution: {integrity: sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A==} + engines: {node: '>=14.0.0'} + + '@smithy/util-utf8@4.2.2': + resolution: {integrity: sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-waiter@4.2.13': + resolution: {integrity: sha512-2zdZ9DTHngRtcYxJK1GUDxruNr53kv5W2Lupe0LMU+Imr6ohQg8M2T14MNkj1Y0wS3FFwpgpGQyvuaMF7CiTmQ==} + engines: {node: '>=18.0.0'} + + '@smithy/uuid@1.1.2': + resolution: {integrity: sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g==} + engines: {node: '>=18.0.0'} + '@solid-primitives/event-bus@1.1.2': resolution: {integrity: sha512-l+n10/51neGcMaP3ypYt21bXfoeWh8IaC8k7fYuY3ww2a8S1Zv2N2a7FF5Qn+waTu86l0V8/nRHjkyqVIZBYwA==} peerDependencies: @@ -2007,6 +2760,24 @@ packages: resolution: {integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==} engines: {node: '>= 0.6'} + acorn-import-attributes@1.9.5: + resolution: {integrity: sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==} + peerDependencies: + acorn: ^8 + + acorn@8.16.0: + 
resolution: {integrity: sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==} + engines: {node: '>=0.4.0'} + hasBin: true + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + any-base@1.1.0: resolution: {integrity: sha512-uMgjozySS8adZZYePpaWs8cxB9/kdzmpX6SgJZ+wbz1K5eYk5QMYDVJaZKhxyIHUdnnJkfR7SVgStgH7LkGUyg==} @@ -2051,6 +2822,9 @@ packages: axios@1.13.4: resolution: {integrity: sha512-1wVkUaAO6WyaYtCkcYCOx12ZgpGf9Zif+qXa4n+oYzK558YryKqiL6UWwd5DqiH3VRW0GYhTZQ/vlgJrCoNQlg==} + axios@1.13.6: + resolution: {integrity: sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ==} + babel-plugin-jsx-dom-expressions@0.40.3: resolution: {integrity: sha512-5HOwwt0BYiv/zxl7j8Pf2bGL6rDXfV6nUhLs8ygBX+EFJXzBPHM/euj9j/6deMZ6wa52Wb2PBaAV5U/jKwIY1w==} peerDependencies: @@ -2159,6 +2933,9 @@ packages: resolution: {integrity: sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} + bowser@2.14.1: + resolution: {integrity: sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg==} + brace-expansion@2.0.2: resolution: {integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==} @@ -2174,6 +2951,9 @@ packages: buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + buffer@5.6.0: + resolution: {integrity: sha512-/gDYp/UtU0eA1ys8bOs9J6a+E/KWIY+DZ+Q2WESNUA0jFRsJOc0SNUO6xJ5SGA1xueg3NL65W6s+NY5l9cunuw==} + buffer@6.0.3: resolution: {integrity: 
sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} @@ -2235,9 +3015,27 @@ packages: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} + chownr@3.0.0: + resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==} + engines: {node: '>=18'} + + cjs-module-lexer@2.2.0: + resolution: {integrity: sha512-4bHTS2YuzUvtoLjdy+98ykbNB5jS0+07EvFNXerqZQJ89F7DI6ET7OQo/HJuW6K0aVsKA9hj9/RVb2kQVOrPDQ==} + client-only@0.0.1: resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==} + cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + combined-stream@1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -2337,6 +3135,10 @@ packages: resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==} engines: {node: '>=12'} + dotenv@17.3.1: + resolution: {integrity: sha512-IO8C/dzEb6O3F9/twg6ZLXz164a2fhTnEWb95H23Dm4OuN+92NmEAlTrupP9VW6Jm3sO26tQlqyvyi4CsnY9GA==} + engines: {node: '>=12'} + drizzle-kit@0.31.9: resolution: {integrity: sha512-GViD3IgsXn7trFyBUUHyTFBpH/FsHTxYJ66qdbVggxef4UBPHRYxQaRzYLTuekYnk9i5FIEL9pbBIwMqX/Uwrg==} hasBin: true @@ -2443,6 +3245,9 @@ packages: electron-to-chromium@1.5.267: resolution: {integrity: 
sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==} + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + encodeurl@2.0.0: resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} engines: {node: '>= 0.8'} @@ -2519,6 +3324,10 @@ packages: exif-parser@0.1.12: resolution: {integrity: sha512-c2bQfLNbMzLPmzQuOr8fy0csy84WmwnER81W88DzTp9CYNPJ6yzOj2EZAh9pywYpqHnshVLHQJ8WzldAyfY+Iw==} + expand-tilde@2.0.2: + resolution: {integrity: sha512-A5EmesHW6rfnZ9ysHQjPdJRni0SRar0tjtG5MNtm9n5TUvsYU8oozprtRD4AqHxcZWWlVuAmQo2nWKfN9oyjTw==} + engines: {node: '>=0.10.0'} + express@4.22.1: resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==} engines: {node: '>= 0.10.0'} @@ -2527,6 +3336,13 @@ packages: resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} engines: {node: '>=8.6.0'} + fast-xml-builder@1.1.3: + resolution: {integrity: sha512-1o60KoFw2+LWKQu3IdcfcFlGTW4dpqEWmjhYec6H82AYZU2TVBXep6tMl8Z1Y+wM+ZrzCwe3BZ9Vyd9N2rIvmg==} + + fast-xml-parser@5.4.1: + resolution: {integrity: sha512-BQ30U1mKkvXQXXkAGcuyUA/GA26oEB7NzOtsxCDtyu62sjGw5QraKFhx2Em3WQNjPw9PG6MQ9yuIIgkSDfGu5A==} + hasBin: true + fastq@1.20.1: resolution: {integrity: sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==} @@ -2571,6 +3387,9 @@ packages: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} + forwarded-parse@2.1.2: + resolution: {integrity: sha512-alTFZZQDKMporBH77856pXgzhEzaUVmLCDk+egLgIgHst3Tpndzz8MnKe+GzRJRfvVdn69HhpW7cmXzvtLvJAw==} + forwarded@0.2.0: resolution: {integrity: 
sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} engines: {node: '>= 0.6'} @@ -2622,6 +3441,10 @@ packages: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + get-intrinsic@1.3.0: resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} engines: {node: '>= 0.4'} @@ -2672,6 +3495,14 @@ packages: resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} engines: {node: '>= 0.4'} + homedir-polyfill@1.0.3: + resolution: {integrity: sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==} + engines: {node: '>=0.10.0'} + + hono@4.12.8: + resolution: {integrity: sha512-VJCEvtrezO1IAR+kqEYnxUOoStaQPGrCmX3j4wDTNOcD1uRPFpGlwQUIW8niPuvHXaTUxeOUl5MMDGrl+tmO9A==} + engines: {node: '>=16.9.0'} + html-entities@2.3.3: resolution: {integrity: sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA==} @@ -2693,6 +3524,9 @@ packages: image-q@4.0.0: resolution: {integrity: sha512-PfJGVgIfKQJuq3s0tTDOKtztksibuUEbJQIYT3by6wctQo+Rdlh7ef4evJ5NCdxY4CfMbvFkocEwbl4BF8RlJw==} + import-in-the-middle@2.0.6: + resolution: {integrity: sha512-3vZV3jX0XRFW3EJDTwzWoZa+RH1b8eTTx6YOCjglrLyPuepwoBti1k3L2dKwdCUrnVEfc5CuRuGstaC/uQJJaw==} + inherits@2.0.4: resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} @@ -2719,6 +3553,10 @@ packages: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} + 
is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + is-glob@4.0.3: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} @@ -2741,6 +3579,11 @@ packages: resolution: {integrity: sha512-ZhMwEosbFJkA0YhFnNDgTM4ZxDRsS6HqTo7qsZM08fehyRYIYa0yHu5R6mgo1n/8MgaPBXiPimPD77baVFYg+A==} engines: {node: '>=12.13'} + isomorphic-ws@5.0.0: + resolution: {integrity: sha512-muId7Zzn9ywDsyXgTIafTry2sV3nySZeUDe6YedVd1Hvuuep5AsIlqK+XefWpYTyJG5e503F2xIuT2lcU6rCSw==} + peerDependencies: + ws: '*' + jimp@1.6.0: resolution: {integrity: sha512-YcwCHw1kiqEeI5xRpDlPPBGL2EOpBKLwO4yIBJcXWHPj5PnA5urGq0jbyhM5KoNpypQ6VboSoxc9D8HyfvngSg==} engines: {node: '>=18'} @@ -2864,6 +3707,9 @@ packages: resolution: {integrity: sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==} engines: {node: '>=6'} + lodash.camelcase@4.3.0: + resolution: {integrity: sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==} + long@5.3.2: resolution: {integrity: sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==} @@ -2960,6 +3806,13 @@ packages: resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} engines: {node: '>=16 || 14 >=14.17'} + minizlib@3.1.0: + resolution: {integrity: sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==} + engines: {node: '>= 18'} + + module-details-from-path@1.0.4: + resolution: {integrity: sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==} + motion-dom@12.35.1: resolution: {integrity: sha512-7n6r7TtNOsH2UFSAXzTkfzOeO5616v9B178qBIjmu/WgEyJK0uqwytCEhwKBTuM/HJA40ptAw7hLFpxtPAMRZQ==} @@ -3119,6 
+3972,10 @@ packages: parse-bmfont-xml@1.1.6: resolution: {integrity: sha512-0cEliVMZEhrFDwMh4SxIyVJpqYoOWDJ9P895tFuS+XuNzI5UBmBk5U5O4KuJdTnZpSBI4LFA2+ZiJaiwfSwlMA==} + parse-passwd@1.0.0: + resolution: {integrity: sha512-1Y1A//QUXEZK7YKz+rD9WydcE1+EuPr6ZBgKecAB8tmoW6UFv0NREVJe1p+jRxtThkcbbKkfwIbWJe/IeE6m2Q==} + engines: {node: '>=0.10.0'} + parse5@7.3.0: resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} @@ -3130,6 +3987,10 @@ packages: resolution: {integrity: sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==} engines: {node: '>=4'} + path-expression-matcher@1.1.3: + resolution: {integrity: sha512-qdVgY8KXmVdJZRSS1JdEPOKPdTiEK/pi0RkcT2sw1RhXxohdujUlJFPuS1TSkevZ9vzd3ZlL7ULl1MHGTApKzQ==} + engines: {node: '>=14.0.0'} + path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} @@ -3140,6 +4001,9 @@ packages: path-to-regexp@0.1.12: resolution: {integrity: sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==} + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + peek-readable@4.1.0: resolution: {integrity: sha512-ZI3LnwUv5nOGbQzD9c2iDG6toheuXSZP5esSHBjopsXH4dg19soufvpUGA3uohi5anFtGb2lhAVdHzH6R/Evvg==} engines: {node: '>=8'} @@ -3261,6 +4125,10 @@ packages: resolution: {integrity: sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==} engines: {node: '>= 0.6.0'} + protobufjs@7.5.4: + resolution: {integrity: sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==} + engines: {node: '>=12.0.0'} + proxy-addr@2.0.7: resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} engines: {node: '>= 0.10'} @@ -3307,6 
+4175,10 @@ packages: read-cache@1.0.0: resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} + readable-stream@3.6.2: + resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} + engines: {node: '>= 6'} + readable-stream@4.7.0: resolution: {integrity: sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -3323,6 +4195,14 @@ packages: resolution: {integrity: sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==} engines: {node: '>= 12.13.0'} + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + require-in-the-middle@8.0.1: + resolution: {integrity: sha512-QT7FVMXfWOYFbeRBF6nu+I6tr2Tf3u0q8RIEjNob/heKY/nh7drD/k7eeMFmSQgnTtCzLDcCu/XEnpW2wk4xCQ==} + engines: {node: '>=9.3.0 || >=8.10.0 <9.0.0'} + reselect@4.1.8: resolution: {integrity: sha512-ab9EmR80F/zQTMNeneUr4cv+jSwPJgIlvEmVwLerwrWVbpLlBuls9XHzIeTFy4cegU2NHBp3va0LKOzU5qFEYQ==} @@ -3413,6 +4293,10 @@ packages: resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + shell-quote@1.8.3: + resolution: {integrity: sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==} + engines: {node: '>= 0.4'} + side-channel-list@1.0.0: resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} engines: {node: '>= 0.4'} @@ -3474,13 +4358,27 @@ packages: resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} engines: {node: '>= 0.8'} + stream-browserify@3.0.0: + 
resolution: {integrity: sha512-H73RAHsVBapbim0tU2JwwOiXUj+fikfiaoYAKHF3VJfA0pe2BCzkhAHBlLG6REzE+2WNZcxOXjK7lkso+9euLA==} + streamsearch@1.1.0: resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} engines: {node: '>=10.0.0'} + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + string_decoder@1.3.0: resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strnum@2.2.0: + resolution: {integrity: sha512-Y7Bj8XyJxnPAORMZj/xltsfo55uOiyHcU2tnAVzHUnSJR/KsEX+9RoDeXEnsXtl/CX4fAcrt64gZ13aGaWPeBg==} + strtok3@6.3.0: resolution: {integrity: sha512-fZtbhtvI9I48xDSywd/somNqgUHl2L2cstmXCCif0itOf96jeW18MBSyrLuNicYQVkvpOxkZtkzujiTJ9LW5Jw==} engines: {node: '>=10'} @@ -3535,6 +4433,10 @@ packages: resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==} engines: {node: '>=6'} + tar@7.5.11: + resolution: {integrity: sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ==} + engines: {node: '>=18'} + thenify-all@1.6.0: resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} engines: {node: '>=0.8'} @@ -3589,6 +4491,9 @@ packages: resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} engines: {node: '>= 0.6'} + typeid-js@1.2.0: + resolution: {integrity: sha512-t76ZucAnvGC60ea/HjVsB0TSoB0cw9yjnfurUgtInXQWUI/VcrlZGpO23KN3iSe8yOGUgb1zr7W7uEzJ3hSljA==} + typescript@5.4.5: resolution: {integrity: 
sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==} engines: {node: '>=14.17'} @@ -3636,6 +4541,10 @@ packages: resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} engines: {node: '>= 0.4.0'} + uuid@10.0.0: + resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==} + hasBin: true + vary@1.1.2: resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} engines: {node: '>= 0.8'} @@ -3715,6 +4624,10 @@ packages: whatwg-url@5.0.0: resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + ws@8.19.0: resolution: {integrity: sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==} engines: {node: '>=10.0.0'} @@ -3738,14 +4651,30 @@ packages: resolution: {integrity: sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==} engines: {node: '>=4.0'} + y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + yallist@3.1.1: resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + yallist@5.0.0: + resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==} + engines: {node: '>=18'} + yaml@2.8.2: resolution: {integrity: sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==} engines: {node: '>= 14.6'} hasBin: true + yargs-parser@21.1.1: + resolution: {integrity: 
sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + + yargs@17.7.2: + resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: '>=12'} + yoga-layout@3.2.1: resolution: {integrity: sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ==} @@ -3764,6 +4693,461 @@ snapshots: '@jridgewell/gen-mapping': 0.3.13 '@jridgewell/trace-mapping': 0.3.31 + '@aws-crypto/crc32@5.2.0': + dependencies: + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.6 + tslib: 2.8.1 + + '@aws-crypto/crc32c@5.2.0': + dependencies: + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.6 + tslib: 2.8.1 + + '@aws-crypto/sha1-browser@5.2.0': + dependencies: + '@aws-crypto/supports-web-crypto': 5.2.0 + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-locate-window': 3.965.5 + '@smithy/util-utf8': 2.3.0 + tslib: 2.8.1 + + '@aws-crypto/sha256-browser@5.2.0': + dependencies: + '@aws-crypto/sha256-js': 5.2.0 + '@aws-crypto/supports-web-crypto': 5.2.0 + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-locate-window': 3.965.5 + '@smithy/util-utf8': 2.3.0 + tslib: 2.8.1 + + '@aws-crypto/sha256-js@5.2.0': + dependencies: + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.6 + tslib: 2.8.1 + + '@aws-crypto/supports-web-crypto@5.2.0': + dependencies: + tslib: 2.8.1 + + '@aws-crypto/util@5.2.0': + dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/util-utf8': 2.3.0 + tslib: 2.8.1 + + '@aws-sdk/client-s3@3.1009.0': + dependencies: + '@aws-crypto/sha1-browser': 5.2.0 + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/credential-provider-node': 3.972.21 + '@aws-sdk/middleware-bucket-endpoint': 3.972.8 + '@aws-sdk/middleware-expect-continue': 3.972.8 + '@aws-sdk/middleware-flexible-checksums': 3.973.6 + 
'@aws-sdk/middleware-host-header': 3.972.8 + '@aws-sdk/middleware-location-constraint': 3.972.8 + '@aws-sdk/middleware-logger': 3.972.8 + '@aws-sdk/middleware-recursion-detection': 3.972.8 + '@aws-sdk/middleware-sdk-s3': 3.972.20 + '@aws-sdk/middleware-ssec': 3.972.8 + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/region-config-resolver': 3.972.8 + '@aws-sdk/signature-v4-multi-region': 3.996.8 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@aws-sdk/util-user-agent-browser': 3.972.8 + '@aws-sdk/util-user-agent-node': 3.973.7 + '@smithy/config-resolver': 4.4.11 + '@smithy/core': 3.23.11 + '@smithy/eventstream-serde-browser': 4.2.12 + '@smithy/eventstream-serde-config-resolver': 4.3.12 + '@smithy/eventstream-serde-node': 4.2.12 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/hash-blob-browser': 4.2.13 + '@smithy/hash-node': 4.2.12 + '@smithy/hash-stream-node': 4.2.12 + '@smithy/invalid-dependency': 4.2.12 + '@smithy/md5-js': 4.2.12 + '@smithy/middleware-content-length': 4.2.12 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-retry': 4.4.42 + '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-stack': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/node-http-handler': 4.4.16 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.41 + '@smithy/util-defaults-mode-node': 4.2.44 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 + '@smithy/util-stream': 4.5.19 + '@smithy/util-utf8': 4.2.2 + '@smithy/util-waiter': 4.2.13 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/core@3.973.20': + dependencies: + '@aws-sdk/types': 3.973.6 + '@aws-sdk/xml-builder': 3.972.11 + '@smithy/core': 3.23.11 + 
'@smithy/node-config-provider': 4.3.12 + '@smithy/property-provider': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/signature-v4': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/util-base64': 4.3.2 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/crc64-nvme@3.972.5': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-env@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-http@3.972.20': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/node-http-handler': 4.4.16 + '@smithy/property-provider': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/util-stream': 4.5.19 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-ini@3.972.20': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/credential-provider-env': 3.972.18 + '@aws-sdk/credential-provider-http': 3.972.20 + '@aws-sdk/credential-provider-login': 3.972.20 + '@aws-sdk/credential-provider-process': 3.972.18 + '@aws-sdk/credential-provider-sso': 3.972.20 + '@aws-sdk/credential-provider-web-identity': 3.972.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/credential-provider-imds': 4.2.12 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/credential-provider-login@3.972.20': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + transitivePeerDependencies: + - 
aws-crt + + '@aws-sdk/credential-provider-node@3.972.21': + dependencies: + '@aws-sdk/credential-provider-env': 3.972.18 + '@aws-sdk/credential-provider-http': 3.972.20 + '@aws-sdk/credential-provider-ini': 3.972.20 + '@aws-sdk/credential-provider-process': 3.972.18 + '@aws-sdk/credential-provider-sso': 3.972.20 + '@aws-sdk/credential-provider-web-identity': 3.972.20 + '@aws-sdk/types': 3.973.6 + '@smithy/credential-provider-imds': 4.2.12 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/credential-provider-process@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-sso@3.972.20': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/token-providers': 3.1009.0 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/credential-provider-web-identity@3.972.20': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/lib-storage@3.1009.0(@aws-sdk/client-s3@3.1009.0)': + dependencies: + '@aws-sdk/client-s3': 3.1009.0 + '@smithy/abort-controller': 4.2.12 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/smithy-client': 4.12.5 + buffer: 5.6.0 + events: 3.3.0 + stream-browserify: 3.0.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-bucket-endpoint@3.972.8': + dependencies: + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-arn-parser': 3.972.3 + 
'@smithy/node-config-provider': 4.3.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-config-provider': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/middleware-expect-continue@3.972.8': + dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/middleware-flexible-checksums@3.973.6': + dependencies: + '@aws-crypto/crc32': 5.2.0 + '@aws-crypto/crc32c': 5.2.0 + '@aws-crypto/util': 5.2.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/crc64-nvme': 3.972.5 + '@aws-sdk/types': 3.973.6 + '@smithy/is-array-buffer': 4.2.2 + '@smithy/node-config-provider': 4.3.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-stream': 4.5.19 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/middleware-host-header@3.972.8': + dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/middleware-location-constraint@3.972.8': + dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/middleware-logger@3.972.8': + dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/middleware-recursion-detection@3.972.8': + dependencies: + '@aws-sdk/types': 3.973.6 + '@aws/lambda-invoke-store': 0.2.4 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/middleware-sdk-s3@3.972.20': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-arn-parser': 3.972.3 + '@smithy/core': 3.23.11 + '@smithy/node-config-provider': 4.3.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/signature-v4': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/util-config-provider': 4.2.2 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-stream': 4.5.19 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/middleware-ssec@3.972.8': 
+ dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/middleware-user-agent@3.972.21': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@smithy/core': 3.23.11 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-retry': 4.2.12 + tslib: 2.8.1 + + '@aws-sdk/nested-clients@3.996.10': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/middleware-host-header': 3.972.8 + '@aws-sdk/middleware-logger': 3.972.8 + '@aws-sdk/middleware-recursion-detection': 3.972.8 + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/region-config-resolver': 3.972.8 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@aws-sdk/util-user-agent-browser': 3.972.8 + '@aws-sdk/util-user-agent-node': 3.973.7 + '@smithy/config-resolver': 4.4.11 + '@smithy/core': 3.23.11 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/hash-node': 4.2.12 + '@smithy/invalid-dependency': 4.2.12 + '@smithy/middleware-content-length': 4.2.12 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-retry': 4.4.42 + '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-stack': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/node-http-handler': 4.4.16 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.41 + '@smithy/util-defaults-mode-node': 4.2.44 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/region-config-resolver@3.972.8': + dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/config-resolver': 
4.4.11 + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/signature-v4-multi-region@3.996.8': + dependencies: + '@aws-sdk/middleware-sdk-s3': 3.972.20 + '@aws-sdk/types': 3.973.6 + '@smithy/protocol-http': 5.3.12 + '@smithy/signature-v4': 5.3.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/token-providers@3.1009.0': + dependencies: + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/types@3.973.6': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@aws-sdk/util-arn-parser@3.972.3': + dependencies: + tslib: 2.8.1 + + '@aws-sdk/util-endpoints@3.996.5': + dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-endpoints': 3.3.3 + tslib: 2.8.1 + + '@aws-sdk/util-locate-window@3.965.5': + dependencies: + tslib: 2.8.1 + + '@aws-sdk/util-user-agent-browser@3.972.8': + dependencies: + '@aws-sdk/types': 3.973.6 + '@smithy/types': 4.13.1 + bowser: 2.14.1 + tslib: 2.8.1 + + '@aws-sdk/util-user-agent-node@3.973.7': + dependencies: + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/types': 3.973.6 + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-config-provider': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/xml-builder@3.972.11': + dependencies: + '@smithy/types': 4.13.1 + fast-xml-parser: 5.4.1 + tslib: 2.8.1 + + '@aws/lambda-invoke-store@0.2.4': {} + '@babel/code-frame@7.28.6': dependencies: '@babel/helper-validator-identifier': 7.28.5 @@ -4089,6 +5473,49 @@ snapshots: style-mod: 4.1.3 w3c-keyname: 2.2.8 + '@daytonaio/api-client@0.150.0': + dependencies: + axios: 1.13.6 + transitivePeerDependencies: + - debug + + '@daytonaio/sdk@0.150.0(ws@8.19.0)': + dependencies: + '@aws-sdk/client-s3': 3.1009.0 + 
'@aws-sdk/lib-storage': 3.1009.0(@aws-sdk/client-s3@3.1009.0) + '@daytonaio/api-client': 0.150.0 + '@daytonaio/toolbox-api-client': 0.150.0 + '@iarna/toml': 2.2.5 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/exporter-trace-otlp-http': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-http': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-node': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + axios: 1.13.6 + busboy: 1.6.0 + dotenv: 17.3.1 + expand-tilde: 2.0.2 + fast-glob: 3.3.3 + form-data: 4.0.5 + isomorphic-ws: 5.0.0(ws@8.19.0) + pathe: 2.0.3 + shell-quote: 1.8.3 + tar: 7.5.11 + transitivePeerDependencies: + - aws-crt + - debug + - supports-color + - ws + + '@daytonaio/toolbox-api-client@0.150.0': + dependencies: + axios: 1.13.6 + transitivePeerDependencies: + - debug + '@dimforge/rapier2d-simd-compat@0.17.3': optional: true @@ -4335,6 +5762,24 @@ snapshots: '@grammyjs/types@3.23.0': {} + '@grpc/grpc-js@1.14.3': + dependencies: + '@grpc/proto-loader': 0.8.0 + '@js-sdsl/ordered-map': 4.4.2 + + '@grpc/proto-loader@0.8.0': + dependencies: + lodash.camelcase: 4.3.0 + long: 5.3.2 + protobufjs: 7.5.4 + yargs: 17.7.2 + + '@hono/node-server@1.19.11(hono@4.12.8)': + dependencies: + hono: 4.12.8 + + '@iarna/toml@2.2.5': {} + '@img/colour@1.1.0': {} '@img/sharp-darwin-arm64@0.34.5': @@ -4437,6 +5882,10 @@ snapshots: dependencies: '@isaacs/balanced-match': 4.0.1 + '@isaacs/fs-minipass@4.0.1': + dependencies: + minipass: 7.1.2 + '@jimp/core@1.6.0': dependencies: '@jimp/file-ops': 1.6.0 @@ -4645,6 +6094,8 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.5 + '@js-sdsl/ordered-map@4.4.2': {} + '@lezer/common@1.5.1': {} '@lezer/css@1.3.0': @@ -4753,6 +6204,261 @@ snapshots: 
'@opencode-ai/sdk@1.1.39': {} + '@opentelemetry/api-logs@0.207.0': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/api@1.9.0': {} + + '@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/exporter-logs-otlp-grpc@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.207.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-logs-otlp-http@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.207.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.207.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-logs-otlp-proto@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.207.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.207.0(@opentelemetry/api@1.9.0) + 
'@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-grpc@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-http@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-proto@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-prometheus@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 
2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-grpc@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-http@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-proto@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-zipkin@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/instrumentation-http@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + 
'@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + forwarded-parse: 2.1.2 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/instrumentation@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.207.0 + import-in-the-middle: 2.0.6 + require-in-the-middle: 8.0.1 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/otlp-exporter-base@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-grpc-exporter-base@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.207.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-transformer@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.207.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + protobufjs: 7.5.4 + + '@opentelemetry/propagator-b3@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/propagator-jaeger@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + + 
'@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/sdk-logs@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.207.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-node@0.207.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.207.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-grpc': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-http': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-proto': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-grpc': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-proto': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-prometheus': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-grpc': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-http': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-proto': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-zipkin': 2.2.0(@opentelemetry/api@1.9.0) + 
'@opentelemetry/instrumentation': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-b3': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-jaeger': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.207.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-node': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/sdk-trace-node@2.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/context-async-hooks': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/semantic-conventions@1.40.0': {} + '@opentui/core-darwin-arm64@0.1.77': {} '@opentui/core-darwin-x64@0.1.77': {} @@ -4822,10 +6528,35 @@ snapshots: '@pinojs/redact@0.4.0': {} + '@planetscale/database@1.19.0': {} + '@playwright/test@1.58.2': dependencies: playwright: 1.58.2 + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + 
'@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + '@radix-ui/colors@3.0.0': {} '@rollup/rollup-android-arm-eabi@4.55.1': @@ -4939,6 +6670,345 @@ snapshots: transitivePeerDependencies: - debug + '@smithy/abort-controller@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/chunked-blob-reader-native@4.2.3': + dependencies: + '@smithy/util-base64': 4.3.2 + tslib: 2.8.1 + + '@smithy/chunked-blob-reader@5.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/config-resolver@4.4.11': + dependencies: + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-config-provider': 4.2.2 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 + tslib: 2.8.1 + + '@smithy/core@3.23.11': + dependencies: + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-stream': 4.5.19 + '@smithy/util-utf8': 4.2.2 + '@smithy/uuid': 1.1.2 + tslib: 2.8.1 + + '@smithy/credential-provider-imds@4.2.12': + dependencies: + '@smithy/node-config-provider': 4.3.12 + '@smithy/property-provider': 4.2.12 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + tslib: 2.8.1 + + '@smithy/eventstream-codec@4.2.12': + dependencies: + '@aws-crypto/crc32': 5.2.0 + '@smithy/types': 4.13.1 + '@smithy/util-hex-encoding': 4.2.2 + tslib: 2.8.1 + + '@smithy/eventstream-serde-browser@4.2.12': + dependencies: + '@smithy/eventstream-serde-universal': 4.2.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/eventstream-serde-config-resolver@4.3.12': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/eventstream-serde-node@4.2.12': + dependencies: + '@smithy/eventstream-serde-universal': 4.2.12 + 
'@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/eventstream-serde-universal@4.2.12': + dependencies: + '@smithy/eventstream-codec': 4.2.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/fetch-http-handler@5.3.15': + dependencies: + '@smithy/protocol-http': 5.3.12 + '@smithy/querystring-builder': 4.2.12 + '@smithy/types': 4.13.1 + '@smithy/util-base64': 4.3.2 + tslib: 2.8.1 + + '@smithy/hash-blob-browser@4.2.13': + dependencies: + '@smithy/chunked-blob-reader': 5.2.2 + '@smithy/chunked-blob-reader-native': 4.2.3 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/hash-node@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/hash-stream-node@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/invalid-dependency@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/is-array-buffer@2.2.0': + dependencies: + tslib: 2.8.1 + + '@smithy/is-array-buffer@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/md5-js@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/middleware-content-length@4.2.12': + dependencies: + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/middleware-endpoint@4.4.25': + dependencies: + '@smithy/core': 3.23.11 + '@smithy/middleware-serde': 4.2.14 + '@smithy/node-config-provider': 4.3.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-middleware': 4.2.12 + tslib: 2.8.1 + + '@smithy/middleware-retry@4.4.42': + dependencies: + '@smithy/node-config-provider': 4.3.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/service-error-classification': 4.2.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 + '@smithy/uuid': 1.1.2 + tslib: 2.8.1 + + 
'@smithy/middleware-serde@4.2.14': + dependencies: + '@smithy/core': 3.23.11 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/middleware-stack@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/node-config-provider@4.3.12': + dependencies: + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/node-http-handler@4.4.16': + dependencies: + '@smithy/abort-controller': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/querystring-builder': 4.2.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/property-provider@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/protocol-http@5.3.12': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/querystring-builder@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + '@smithy/util-uri-escape': 4.2.2 + tslib: 2.8.1 + + '@smithy/querystring-parser@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/service-error-classification@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + + '@smithy/shared-ini-file-loader@4.4.7': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/signature-v4@5.3.12': + dependencies: + '@smithy/is-array-buffer': 4.2.2 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-hex-encoding': 4.2.2 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-uri-escape': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/smithy-client@4.12.5': + dependencies: + '@smithy/core': 3.23.11 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-stack': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-stream': 4.5.19 + tslib: 2.8.1 + + '@smithy/types@4.13.1': + dependencies: + tslib: 2.8.1 + + '@smithy/url-parser@4.2.12': + dependencies: + '@smithy/querystring-parser': 4.2.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 
+ + '@smithy/util-base64@4.3.2': + dependencies: + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/util-body-length-browser@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/util-body-length-node@4.2.3': + dependencies: + tslib: 2.8.1 + + '@smithy/util-buffer-from@2.2.0': + dependencies: + '@smithy/is-array-buffer': 2.2.0 + tslib: 2.8.1 + + '@smithy/util-buffer-from@4.2.2': + dependencies: + '@smithy/is-array-buffer': 4.2.2 + tslib: 2.8.1 + + '@smithy/util-config-provider@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/util-defaults-mode-browser@4.3.41': + dependencies: + '@smithy/property-provider': 4.2.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/util-defaults-mode-node@4.2.44': + dependencies: + '@smithy/config-resolver': 4.4.11 + '@smithy/credential-provider-imds': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/property-provider': 4.2.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/util-endpoints@3.3.3': + dependencies: + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/util-hex-encoding@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/util-middleware@4.2.12': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/util-retry@4.2.12': + dependencies: + '@smithy/service-error-classification': 4.2.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/util-stream@4.5.19': + dependencies: + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/node-http-handler': 4.4.16 + '@smithy/types': 4.13.1 + '@smithy/util-base64': 4.3.2 + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-hex-encoding': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/util-uri-escape@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/util-utf8@2.3.0': + dependencies: + '@smithy/util-buffer-from': 2.2.0 + tslib: 2.8.1 + + '@smithy/util-utf8@4.2.2': + dependencies: + 
'@smithy/util-buffer-from': 4.2.2 + tslib: 2.8.1 + + '@smithy/util-waiter@4.2.13': + dependencies: + '@smithy/abort-controller': 4.2.12 + '@smithy/types': 4.13.1 + tslib: 2.8.1 + + '@smithy/uuid@1.1.2': + dependencies: + tslib: 2.8.1 + '@solid-primitives/event-bus@1.1.2(solid-js@1.9.10)': dependencies: '@solid-primitives/utils': 6.3.2(solid-js@1.9.10) @@ -5248,6 +7318,18 @@ snapshots: mime-types: 2.1.35 negotiator: 0.6.3 + acorn-import-attributes@1.9.5(acorn@8.16.0): + dependencies: + acorn: 8.16.0 + + acorn@8.16.0: {} + + ansi-regex@5.0.1: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + any-base@1.1.0: {} any-promise@1.3.0: {} @@ -5291,6 +7373,14 @@ snapshots: transitivePeerDependencies: - debug + axios@1.13.6: + dependencies: + follow-redirects: 1.15.11 + form-data: 4.0.5 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + babel-plugin-jsx-dom-expressions@0.40.3(@babel/core@7.28.0): dependencies: '@babel/core': 7.28.0 @@ -5337,7 +7427,7 @@ snapshots: baseline-browser-mapping@2.9.14: {} - better-auth@1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.45.1(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4))(mysql2@3.17.4)(next@16.1.6(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(solid-js@1.9.10): + better-auth@1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.45.1(@opentelemetry/api@1.9.0)(@planetscale/database@1.19.0)(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4))(mysql2@3.17.4)(next@16.1.6(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(solid-js@1.9.10): dependencies: '@better-auth/core': 1.4.18(@better-auth/utils@0.3.0)(@better-fetch/fetch@1.1.21)(better-call@1.1.8(zod@4.3.6))(jose@6.1.3)(kysely@0.28.11)(nanostores@1.1.0) '@better-auth/telemetry': 
1.4.18(@better-auth/core@1.4.18(@better-auth/utils@0.3.0)(@better-fetch/fetch@1.1.21)(better-call@1.1.8(zod@4.3.6))(jose@6.1.3)(kysely@0.28.11)(nanostores@1.1.0)) @@ -5353,9 +7443,9 @@ snapshots: zod: 4.3.6 optionalDependencies: drizzle-kit: 0.31.9 - drizzle-orm: 0.45.1(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4) + drizzle-orm: 0.45.1(@opentelemetry/api@1.9.0)(@planetscale/database@1.19.0)(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4) mysql2: 3.17.4 - next: 16.1.6(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + next: 16.1.6(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) react: 19.2.4 react-dom: 19.2.4(react@19.2.4) solid-js: 1.9.10 @@ -5390,6 +7480,8 @@ snapshots: transitivePeerDependencies: - supports-color + bowser@2.14.1: {} + brace-expansion@2.0.2: dependencies: balanced-match: 1.0.2 @@ -5408,6 +7500,11 @@ snapshots: buffer-from@1.1.2: {} + buffer@5.6.0: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + buffer@6.0.3: dependencies: base64-js: 1.5.1 @@ -5475,8 +7572,24 @@ snapshots: optionalDependencies: fsevents: 2.3.3 + chownr@3.0.0: {} + + cjs-module-lexer@2.2.0: {} + client-only@0.0.1: {} + cliui@8.0.1: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + combined-stream@1.0.8: dependencies: delayed-stream: 1.0.0 @@ -5536,6 +7649,8 @@ snapshots: dotenv@16.6.1: {} + dotenv@17.3.1: {} + drizzle-kit@0.31.9: dependencies: '@drizzle-team/brocli': 0.10.2 @@ -5545,8 +7660,10 @@ snapshots: transitivePeerDependencies: - supports-color - drizzle-orm@0.45.1(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4): + drizzle-orm@0.45.1(@opentelemetry/api@1.9.0)(@planetscale/database@1.19.0)(bun-types@1.3.6)(kysely@0.28.11)(mysql2@3.17.4): optionalDependencies: + '@opentelemetry/api': 1.9.0 + '@planetscale/database': 1.19.0 bun-types: 1.3.6 kysely: 0.28.11 mysql2: 3.17.4 @@ -5561,6 
+7678,8 @@ snapshots: electron-to-chromium@1.5.267: {} + emoji-regex@8.0.0: {} + encodeurl@2.0.0: {} enhanced-resolve@5.18.4: @@ -5691,6 +7810,10 @@ snapshots: exif-parser@0.1.12: {} + expand-tilde@2.0.2: + dependencies: + homedir-polyfill: 1.0.3 + express@4.22.1: dependencies: accepts: 1.3.8 @@ -5735,6 +7858,15 @@ snapshots: merge2: 1.4.1 micromatch: 4.0.8 + fast-xml-builder@1.1.3: + dependencies: + path-expression-matcher: 1.1.3 + + fast-xml-parser@5.4.1: + dependencies: + fast-xml-builder: 1.1.3 + strnum: 2.2.0 + fastq@1.20.1: dependencies: reusify: 1.1.0 @@ -5783,6 +7915,8 @@ snapshots: hasown: 2.0.2 mime-types: 2.1.35 + forwarded-parse@2.1.2: {} + forwarded@0.2.0: {} fraction.js@4.3.7: {} @@ -5816,6 +7950,8 @@ snapshots: gensync@1.0.0-beta.2: {} + get-caller-file@2.0.5: {} + get-intrinsic@1.3.0: dependencies: call-bind-apply-helpers: 1.0.2 @@ -5882,6 +8018,12 @@ snapshots: dependencies: function-bind: 1.1.2 + homedir-polyfill@1.0.3: + dependencies: + parse-passwd: 1.0.0 + + hono@4.12.8: {} + html-entities@2.3.3: {} http-errors@2.0.1: @@ -5906,6 +8048,13 @@ snapshots: dependencies: '@types/node': 16.9.1 + import-in-the-middle@2.0.6: + dependencies: + acorn: 8.16.0 + acorn-import-attributes: 1.9.5(acorn@8.16.0) + cjs-module-lexer: 2.2.0 + module-details-from-path: 1.0.4 + inherits@2.0.4: {} ipaddr.js@1.9.1: {} @@ -5924,6 +8073,8 @@ snapshots: is-extglob@2.1.1: {} + is-fullwidth-code-point@3.0.0: {} + is-glob@4.0.3: dependencies: is-extglob: 2.1.1 @@ -5938,6 +8089,10 @@ snapshots: is-what@4.1.16: {} + isomorphic-ws@5.0.0(ws@8.19.0): + dependencies: + ws: 8.19.0 + jimp@1.6.0: dependencies: '@jimp/core': 1.6.0 @@ -6046,6 +8201,8 @@ snapshots: p-locate: 3.0.0 path-exists: 3.0.0 + lodash.camelcase@4.3.0: {} + long@5.3.2: {} loose-envify@1.4.0: @@ -6115,6 +8272,12 @@ snapshots: minipass@7.1.2: {} + minizlib@3.1.0: + dependencies: + minipass: 7.1.2 + + module-details-from-path@1.0.4: {} + motion-dom@12.35.1: dependencies: motion-utils: 12.29.2 @@ -6152,7 +8315,7 @@ 
snapshots: negotiator@0.6.3: {} - next@14.2.5(@playwright/test@1.58.2)(react-dom@18.2.0(react@18.2.0))(react@18.2.0): + next@14.2.5(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@18.2.0(react@18.2.0))(react@18.2.0): dependencies: '@next/env': 14.2.5 '@swc/helpers': 0.5.5 @@ -6173,12 +8336,13 @@ snapshots: '@next/swc-win32-arm64-msvc': 14.2.5 '@next/swc-win32-ia32-msvc': 14.2.5 '@next/swc-win32-x64-msvc': 14.2.5 + '@opentelemetry/api': 1.9.0 '@playwright/test': 1.58.2 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros - next@16.1.6(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4): + next@16.1.6(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4): dependencies: '@next/env': 16.1.6 '@swc/helpers': 0.5.15 @@ -6197,6 +8361,7 @@ snapshots: '@next/swc-linux-x64-musl': 16.1.6 '@next/swc-win32-arm64-msvc': 16.1.6 '@next/swc-win32-x64-msvc': 16.1.6 + '@opentelemetry/api': 1.9.0 '@playwright/test': 1.58.2 sharp: 0.34.5 transitivePeerDependencies: @@ -6264,6 +8429,8 @@ snapshots: xml-parse-from-string: 1.0.1 xml2js: 0.5.0 + parse-passwd@1.0.0: {} + parse5@7.3.0: dependencies: entities: 6.0.1 @@ -6272,6 +8439,8 @@ snapshots: path-exists@3.0.0: {} + path-expression-matcher@1.1.3: {} + path-parse@1.0.7: {} path-scurry@1.11.1: @@ -6281,6 +8450,8 @@ snapshots: path-to-regexp@0.1.12: {} + pathe@2.0.3: {} + peek-readable@4.1.0: {} picocolors@1.1.1: {} @@ -6391,6 +8562,21 @@ snapshots: process@0.11.10: {} + protobufjs@7.5.4: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 20.12.12 + long: 5.3.2 + proxy-addr@2.0.7: dependencies: forwarded: 0.2.0 @@ -6436,6 +8622,12 @@ snapshots: dependencies: pify: 2.3.0 + 
readable-stream@3.6.2: + dependencies: + inherits: 2.0.4 + string_decoder: 1.3.0 + util-deprecate: 1.0.2 + readable-stream@4.7.0: dependencies: abort-controller: 3.0.0 @@ -6454,6 +8646,15 @@ snapshots: real-require@0.2.0: {} + require-directory@2.1.1: {} + + require-in-the-middle@8.0.1: + dependencies: + debug: 4.4.3 + module-details-from-path: 1.0.4 + transitivePeerDependencies: + - supports-color + reselect@4.1.8: {} resolve-pkg-maps@1.0.0: {} @@ -6593,6 +8794,8 @@ snapshots: '@img/sharp-win32-ia32': 0.34.5 '@img/sharp-win32-x64': 0.34.5 + shell-quote@1.8.3: {} + side-channel-list@1.0.0: dependencies: es-errors: 1.3.0 @@ -6666,12 +8869,29 @@ snapshots: statuses@2.0.2: {} + stream-browserify@3.0.0: + dependencies: + inherits: 2.0.4 + readable-stream: 3.6.2 + streamsearch@1.1.0: {} + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + string_decoder@1.3.0: dependencies: safe-buffer: 5.2.1 + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strnum@2.2.0: {} + strtok3@6.3.0: dependencies: '@tokenizer/token': 0.3.0 @@ -6732,6 +8952,14 @@ snapshots: tapable@2.3.0: {} + tar@7.5.11: + dependencies: + '@isaacs/fs-minipass': 4.0.1 + chownr: 3.0.0 + minipass: 7.1.2 + minizlib: 3.1.0 + yallist: 5.0.0 + thenify-all@1.6.0: dependencies: thenify: 3.3.1 @@ -6785,6 +9013,10 @@ snapshots: media-typer: 0.3.0 mime-types: 2.1.35 + typeid-js@1.2.0: + dependencies: + uuid: 10.0.0 + typescript@5.4.5: {} typescript@5.9.3: {} @@ -6817,6 +9049,8 @@ snapshots: utils-merge@1.0.1: {} + uuid@10.0.0: {} + vary@1.1.2: {} vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@6.4.1(@types/node@25.4.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)): @@ -6863,6 +9097,12 @@ snapshots: tr46: 0.0.3 webidl-conversions: 3.0.1 + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + ws@8.19.0: {} xml-parse-from-string@1.0.1: {} @@ -6874,10 +9114,26 @@ snapshots: xmlbuilder@11.0.1: {} + 
y18n@5.0.8: {} + yallist@3.1.1: {} + yallist@5.0.0: {} + yaml@2.8.2: {} + yargs-parser@21.1.1: {} + + yargs@17.7.2: + dependencies: + cliui: 8.0.1 + escalade: 3.2.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + yoga-layout@3.2.1: {} zod@3.25.76: {} diff --git a/scripts/create-daytona-openwork-snapshot.sh b/scripts/create-daytona-openwork-snapshot.sh new file mode 100755 index 00000000..c56c1104 --- /dev/null +++ b/scripts/create-daytona-openwork-snapshot.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +DOCKERFILE="$ROOT_DIR/services/den-worker-runtime/Dockerfile.daytona-snapshot" +DAYTONA_ENV_FILE="${DAYTONA_ENV_FILE:-$ROOT_DIR/.env.daytona}" + +if ! command -v docker >/dev/null 2>&1; then + echo "docker is required" >&2 + exit 1 +fi + +if ! command -v daytona >/dev/null 2>&1; then + echo "daytona CLI is required" >&2 + exit 1 +fi + +if [ -f "$DAYTONA_ENV_FILE" ]; then + set -a + # shellcheck disable=SC1090 + source "$DAYTONA_ENV_FILE" + set +a +fi + +SNAPSHOT_NAME="${1:-${DAYTONA_SNAPSHOT_NAME:-openwork-runtime}}" +SNAPSHOT_REGION="${DAYTONA_SNAPSHOT_REGION:-${DAYTONA_TARGET:-}}" +SNAPSHOT_CPU="${DAYTONA_SNAPSHOT_CPU:-1}" +SNAPSHOT_MEMORY="${DAYTONA_SNAPSHOT_MEMORY:-2}" +SNAPSHOT_DISK="${DAYTONA_SNAPSHOT_DISK:-8}" +LOCAL_IMAGE_TAG="${DAYTONA_LOCAL_IMAGE_TAG:-openwork-daytona-snapshot:${SNAPSHOT_NAME//[^a-zA-Z0-9_.-]/-}}" + +OPENWORK_ORCHESTRATOR_VERSION="${OPENWORK_ORCHESTRATOR_VERSION:-$(node -e 'const fs=require("fs"); const pkg=JSON.parse(fs.readFileSync(process.argv[1], "utf8")); process.stdout.write(String(pkg.version));' "$ROOT_DIR/packages/orchestrator/package.json")}" +OPENCODE_VERSION="${OPENCODE_VERSION:-$(node -e 'const fs=require("fs"); const pkg=JSON.parse(fs.readFileSync(process.argv[1], "utf8")); process.stdout.write(String(pkg.opencodeVersion));' "$ROOT_DIR/packages/orchestrator/package.json")}" + +echo 
"Building local image $LOCAL_IMAGE_TAG" >&2 +echo "- openwork-orchestrator@$OPENWORK_ORCHESTRATOR_VERSION" >&2 +echo "- opencode@$OPENCODE_VERSION" >&2 + +docker buildx build \ + --platform linux/amd64 \ + -t "$LOCAL_IMAGE_TAG" \ + -f "$DOCKERFILE" \ + --build-arg "OPENWORK_ORCHESTRATOR_VERSION=$OPENWORK_ORCHESTRATOR_VERSION" \ + --build-arg "OPENCODE_VERSION=$OPENCODE_VERSION" \ + --load \ + "$ROOT_DIR" + +args=(snapshot push "$LOCAL_IMAGE_TAG" --name "$SNAPSHOT_NAME" --cpu "$SNAPSHOT_CPU" --memory "$SNAPSHOT_MEMORY" --disk "$SNAPSHOT_DISK") +if [ -n "$SNAPSHOT_REGION" ]; then + args+=(--region "$SNAPSHOT_REGION") +fi + +echo "Pushing Daytona snapshot $SNAPSHOT_NAME" >&2 +daytona "${args[@]}" + +echo >&2 +echo "Snapshot ready: $SNAPSHOT_NAME" >&2 +echo "Set DAYTONA_SNAPSHOT=$SNAPSHOT_NAME in .env.daytona before starting Den." >&2 diff --git a/services/den-v2/.env.example b/services/den-v2/.env.example new file mode 100644 index 00000000..6beb8722 --- /dev/null +++ b/services/den-v2/.env.example @@ -0,0 +1,73 @@ +DATABASE_URL= +DATABASE_HOST= +DATABASE_USERNAME= +DATABASE_PASSWORD= +DB_MODE= +BETTER_AUTH_SECRET= +BETTER_AUTH_URL=http://localhost:8788 +DEN_BETTER_AUTH_TRUSTED_ORIGINS=http://localhost:3005,http://localhost:5173 +GITHUB_CLIENT_ID= +GITHUB_CLIENT_SECRET= +GOOGLE_CLIENT_ID= +GOOGLE_CLIENT_SECRET= +PORT=8788 +WORKER_PROXY_PORT=8789 +CORS_ORIGINS=http://localhost:3005,http://localhost:5173 +PROVISIONER_MODE=stub +OPENWORK_DAYTONA_ENV_PATH= +WORKER_URL_TEMPLATE=https://workers.example.com/{workerId} +RENDER_API_BASE=https://api.render.com/v1 +RENDER_API_KEY= +RENDER_OWNER_ID= +RENDER_WORKER_REPO=https://github.com/different-ai/openwork +RENDER_WORKER_BRANCH=dev +RENDER_WORKER_ROOT_DIR=services/den-worker-runtime +RENDER_WORKER_PLAN=standard +RENDER_WORKER_REGION=oregon +RENDER_WORKER_OPENWORK_VERSION=0.11.113 +RENDER_WORKER_NAME_PREFIX=den-worker +RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX=openwork.studio +RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS=240000 
+RENDER_PROVISION_TIMEOUT_MS=900000 +RENDER_HEALTHCHECK_TIMEOUT_MS=180000 +RENDER_POLL_INTERVAL_MS=5000 +VERCEL_API_BASE=https://api.vercel.com +VERCEL_TOKEN= +VERCEL_TEAM_ID= +VERCEL_TEAM_SLUG=prologe +VERCEL_DNS_DOMAIN=openwork.studio +POLAR_FEATURE_GATE_ENABLED=false +POLAR_API_BASE=https://api.polar.sh +POLAR_ACCESS_TOKEN= +POLAR_PRODUCT_ID= +POLAR_BENEFIT_ID= +POLAR_SUCCESS_URL=http://localhost:8788 +POLAR_RETURN_URL=http://localhost:8788 +DAYTONA_API_URL=https://app.daytona.io/api +DAYTONA_API_KEY= +DAYTONA_TARGET= +DAYTONA_SNAPSHOT= +DAYTONA_SANDBOX_IMAGE=node:20-bookworm +DAYTONA_SANDBOX_CPU=2 +DAYTONA_SANDBOX_MEMORY=4 +DAYTONA_SANDBOX_DISK=8 +DAYTONA_SANDBOX_PUBLIC=false +DAYTONA_SANDBOX_AUTO_STOP_INTERVAL=0 +DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL=10080 +DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL=-1 +DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS=86400 +DAYTONA_WORKER_PROXY_BASE_URL=https://workers.den.openworklabs +DAYTONA_SANDBOX_NAME_PREFIX=den-daytona-worker +DAYTONA_VOLUME_NAME_PREFIX=den-daytona-worker +DAYTONA_WORKSPACE_MOUNT_PATH=/workspace +DAYTONA_DATA_MOUNT_PATH=/persist/openwork +DAYTONA_RUNTIME_WORKSPACE_PATH=/tmp/openwork-workspace +DAYTONA_RUNTIME_DATA_PATH=/tmp/openwork-data +DAYTONA_SIDECAR_DIR=/tmp/openwork-sidecars +DAYTONA_OPENWORK_PORT=8787 +DAYTONA_OPENCODE_PORT=4096 +DAYTONA_OPENWORK_VERSION= +DAYTONA_CREATE_TIMEOUT_SECONDS=300 +DAYTONA_DELETE_TIMEOUT_SECONDS=120 +DAYTONA_HEALTHCHECK_TIMEOUT_MS=300000 +DAYTONA_POLL_INTERVAL_MS=5000 diff --git a/services/den-v2/README.md b/services/den-v2/README.md new file mode 100644 index 00000000..90e28507 --- /dev/null +++ b/services/den-v2/README.md @@ -0,0 +1,218 @@ +# Den v2 Service + +Control plane for hosted workers. Provides Better Auth, worker CRUD, and provisioning hooks. 
+
+## Quick start
+
+```bash
+pnpm install
+cp .env.example .env
+pnpm dev
+```
+
+## Docker dev stack
+
+For a one-command local stack with MySQL + the Den cloud web app, run this from the repo root:
+
+```bash
+./packaging/docker/den-dev-up.sh
+```
+
+That brings up:
+- local MySQL for Den
+- the Den control plane on a randomized host port
+- the OpenWork Cloud web app on a randomized host port
+
+The script prints the exact URLs and `docker compose ... down` command to use for cleanup.
+
+## Environment
+
+- `DATABASE_URL` MySQL connection URL
+- `BETTER_AUTH_SECRET` 32+ char secret
+- `BETTER_AUTH_URL` public base URL Better Auth uses for OAuth redirects and callbacks
+- `DEN_BETTER_AUTH_TRUSTED_ORIGINS` optional comma-separated trusted origins for Better Auth origin validation (defaults to `CORS_ORIGINS`)
+- `GITHUB_CLIENT_ID` optional OAuth app client ID for GitHub sign-in
+- `GITHUB_CLIENT_SECRET` optional OAuth app client secret for GitHub sign-in
+- `GOOGLE_CLIENT_ID` optional OAuth app client ID for Google sign-in
+- `GOOGLE_CLIENT_SECRET` optional OAuth app client secret for Google sign-in
+- `PORT` server port
+
+- `CORS_ORIGINS` comma-separated list of trusted browser origins (used for Better Auth origin validation + Express CORS)
+- `PROVISIONER_MODE` `stub`, `render`, or `daytona`
+- `OPENWORK_DAYTONA_ENV_PATH` optional path to a shared `.env.daytona` file; when unset, Den searches upwards from the repo for `.env.daytona`
+- `WORKER_URL_TEMPLATE` template string with `{workerId}`
+- `RENDER_API_BASE` Render API base URL (default `https://api.render.com/v1`)
+- `RENDER_API_KEY` Render API key (required for `PROVISIONER_MODE=render`)
+- `RENDER_OWNER_ID` Render workspace owner id (required for `PROVISIONER_MODE=render`)
+- `RENDER_WORKER_REPO` repository URL used to create worker services
+- `RENDER_WORKER_BRANCH` branch used for worker services
+- `RENDER_WORKER_ROOT_DIR` render `rootDir` for worker services
+- `RENDER_WORKER_PLAN` Render 
plan for worker services +- `RENDER_WORKER_REGION` Render region for worker services +- `RENDER_WORKER_OPENWORK_VERSION` `openwork-orchestrator` npm version installed in workers; the worker build uses its `opencodeVersion` metadata to bundle a matching `opencode` binary into the Render deploy +- `RENDER_WORKER_NAME_PREFIX` service name prefix +- `RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX` optional domain suffix for worker custom URLs (e.g. `openwork.studio` -> `.openwork.studio`) +- `RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS` max time to wait for vanity URL health before falling back to Render URL +- `RENDER_PROVISION_TIMEOUT_MS` max time to wait for deploy to become live +- `RENDER_HEALTHCHECK_TIMEOUT_MS` max time to wait for worker health checks +- `RENDER_POLL_INTERVAL_MS` polling interval for deploy + health checks +- `VERCEL_API_BASE` Vercel API base URL (default `https://api.vercel.com`) +- `VERCEL_TOKEN` Vercel API token used to upsert worker DNS records +- `VERCEL_TEAM_ID` optional Vercel team id for scoped API calls +- `VERCEL_TEAM_SLUG` optional Vercel team slug for scoped API calls (used when `VERCEL_TEAM_ID` is unset) +- `VERCEL_DNS_DOMAIN` Vercel-managed DNS zone used for worker records (default `openwork.studio`) +- `POLAR_FEATURE_GATE_ENABLED` enable cloud-worker paywall (`true` or `false`) +- `POLAR_API_BASE` Polar API base URL (default `https://api.polar.sh`) +- `POLAR_ACCESS_TOKEN` Polar organization access token (required when paywall enabled) +- `POLAR_PRODUCT_ID` Polar product ID used for checkout sessions (required when paywall enabled) +- `POLAR_BENEFIT_ID` Polar benefit ID required to unlock cloud workers (required when paywall enabled) +- `POLAR_SUCCESS_URL` redirect URL after successful checkout (required when paywall enabled) +- `POLAR_RETURN_URL` return URL shown in checkout (required when paywall enabled) +- Daytona: + - `DAYTONA_API_KEY` API key used to create sandboxes and volumes + - `DAYTONA_API_URL` Daytona API base URL (default 
`https://app.daytona.io/api`) + - `DAYTONA_TARGET` optional Daytona region/target + - `DAYTONA_SNAPSHOT` optional snapshot name; if omitted Den creates workers from `DAYTONA_SANDBOX_IMAGE` + - `DAYTONA_SANDBOX_IMAGE` sandbox base image when no snapshot is provided (default `node:20-bookworm`) + - `DAYTONA_SANDBOX_CPU`, `DAYTONA_SANDBOX_MEMORY`, `DAYTONA_SANDBOX_DISK` resource sizing when image-backed sandboxes are used + - `DAYTONA_SANDBOX_AUTO_STOP_INTERVAL`, `DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL`, `DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL` lifecycle controls + - `DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS` TTL for the signed OpenWork preview URL returned to Den clients (Daytona currently caps this at 24 hours) + - `DAYTONA_SANDBOX_NAME_PREFIX`, `DAYTONA_VOLUME_NAME_PREFIX` resource naming prefixes + - `DAYTONA_WORKSPACE_MOUNT_PATH`, `DAYTONA_DATA_MOUNT_PATH` volume mount paths inside the sandbox + - `DAYTONA_RUNTIME_WORKSPACE_PATH`, `DAYTONA_RUNTIME_DATA_PATH`, `DAYTONA_SIDECAR_DIR` local sandbox paths used for the live OpenWork runtime; the mounted Daytona volumes are linked into the runtime workspace under `volumes/` + - `DAYTONA_OPENWORK_PORT`, `DAYTONA_OPENCODE_PORT` ports used when launching `openwork serve` + - `DAYTONA_OPENWORK_VERSION` optional npm version to install instead of latest `openwork-orchestrator` + - `DAYTONA_CREATE_TIMEOUT_SECONDS`, `DAYTONA_DELETE_TIMEOUT_SECONDS`, `DAYTONA_HEALTHCHECK_TIMEOUT_MS`, `DAYTONA_POLL_INTERVAL_MS` provisioning timeouts + +For local Daytona development, place your Daytona API credentials in `/_repos/openwork/.env.daytona` and Den will pick them up automatically, including from task worktrees. + +## Building a Daytona snapshot + +If you want Daytona workers to start from a prebuilt runtime instead of a generic base image, create a snapshot and point Den at it. 
+ +The snapshot builder for this repo lives at: + +- `scripts/create-daytona-openwork-snapshot.sh` +- `services/den-worker-runtime/Dockerfile.daytona-snapshot` + +It builds a Linux image with: + +- `openwork-orchestrator` +- `opencode` + +Prerequisites: + +- Docker running locally +- Daytona CLI installed and logged in +- a valid `.env.daytona` with at least `DAYTONA_API_KEY` + +From the OpenWork repo root: + +```bash +./scripts/create-daytona-openwork-snapshot.sh +``` + +To publish a custom-named snapshot: + +```bash +./scripts/create-daytona-openwork-snapshot.sh openwork-runtime +``` + +Useful optional overrides: + +- `DAYTONA_SNAPSHOT_NAME` +- `DAYTONA_SNAPSHOT_REGION` +- `DAYTONA_SNAPSHOT_CPU` +- `DAYTONA_SNAPSHOT_MEMORY` +- `DAYTONA_SNAPSHOT_DISK` +- `OPENWORK_ORCHESTRATOR_VERSION` +- `OPENCODE_VERSION` + +After the snapshot is pushed, set it in `.env.daytona`: + +```env +DAYTONA_SNAPSHOT=openwork-runtime +``` + +Then start Den in Daytona mode: + +```bash +DEN_PROVISIONER_MODE=daytona packaging/docker/den-dev-up.sh +``` + +If you do not set `DAYTONA_SNAPSHOT`, Den falls back to `DAYTONA_SANDBOX_IMAGE` and installs runtime dependencies at sandbox startup. + +## Auth setup (Better Auth) + +Generate Better Auth schema (Drizzle): + +```bash +npx @better-auth/cli@latest generate --config src/auth.ts --output src/db/better-auth.schema.ts --yes +``` + +Apply migrations: + +```bash +pnpm db:generate +pnpm db:migrate + +# or use the SQL migration runner used by Docker +pnpm db:migrate:sql +``` + +## API + +- `GET /health` +- `GET /` demo web app (sign-up + auth + worker launch) +- `GET /v1/me` +- `GET /v1/workers` (list recent workers for signed-in user/org) +- `POST /v1/workers` + - Cloud launches return `202` quickly with worker `status=provisioning` and continue provisioning asynchronously. + - Returns `402 payment_required` with Polar checkout URL when paywall is enabled and entitlement is missing. 
+ - Existing Polar customers are matched by `external_customer_id` first, then by email to preserve access for pre-existing paid users. +- `GET /v1/workers/:id` + - Includes latest instance metadata when available. +- `POST /v1/workers/:id/tokens` +- `DELETE /v1/workers/:id` + - Deletes worker records and attempts to tear down the backing cloud runtime when destination is `cloud`. + +## CI deployment (dev == prod) + +The workflow `.github/workflows/deploy-den.yml` updates Render env vars and deploys the service on every push to `dev` when this service changes. + +Required GitHub Actions secrets: + +- `RENDER_API_KEY` +- `RENDER_DEN_CONTROL_PLANE_SERVICE_ID` +- `RENDER_OWNER_ID` +- `DEN_DATABASE_URL` +- `DEN_BETTER_AUTH_SECRET` + +Optional GitHub Actions secrets (enable GitHub social sign-in): + +- `DEN_GITHUB_CLIENT_ID` +- `DEN_GITHUB_CLIENT_SECRET` +- `DEN_GOOGLE_CLIENT_ID` +- `DEN_GOOGLE_CLIENT_SECRET` + +Optional GitHub Actions variable: + +- `DEN_RENDER_WORKER_PLAN` (defaults to `standard`) +- `DEN_RENDER_WORKER_OPENWORK_VERSION` pins the `openwork-orchestrator` npm version installed in workers; the worker build bundles the matching `opencode` release asset into the Render image +- `DEN_CORS_ORIGINS` (defaults to `https://app.openwork.software,https://api.openwork.software,`) +- `DEN_BETTER_AUTH_TRUSTED_ORIGINS` (defaults to `DEN_CORS_ORIGINS`) +- `DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX` (defaults to `openwork.studio`) +- `DEN_RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS` (defaults to `240000`) +- `DEN_BETTER_AUTH_URL` (defaults to `https://app.openwork.software`) +- `DEN_VERCEL_API_BASE` (defaults to `https://api.vercel.com`) +- `DEN_VERCEL_TEAM_ID` (optional) +- `DEN_VERCEL_TEAM_SLUG` (optional, defaults to `prologe`) +- `DEN_VERCEL_DNS_DOMAIN` (defaults to `openwork.studio`) +- `DEN_POLAR_FEATURE_GATE_ENABLED` (`true`/`false`, defaults to `false`) +- `DEN_POLAR_API_BASE` (defaults to `https://api.polar.sh`) +- `DEN_POLAR_SUCCESS_URL` (defaults to 
`https://app.openwork.software`) +- `DEN_POLAR_RETURN_URL` (defaults to `DEN_POLAR_SUCCESS_URL`) + +Required additional secret when using vanity worker domains: + +- `VERCEL_TOKEN` diff --git a/services/den-v2/package.json b/services/den-v2/package.json new file mode 100644 index 00000000..833d7aa2 --- /dev/null +++ b/services/den-v2/package.json @@ -0,0 +1,36 @@ +{ + "name": "@openwork/den-v2", + "private": true, + "type": "module", + "scripts": { + "dev": "npm run build:den-db && OPENWORK_DEV_MODE=1 tsx watch src/index.ts", + "build": "npm run build:den-db && tsc -p tsconfig.json", + "build:den-db": "npm --prefix ../../packages/den-db run build", + "start": "node dist/index.js", + "db:migrate:sql": "node scripts/run-sql-migrations.mjs", + "test:smoke:daytona": "pnpm build && node scripts/daytona-provisioner-smoke.mjs", + "test:e2e:daytona": "node scripts/e2e-daytona-worker.mjs", + "test:e2e:worker-limit": "node scripts/e2e-worker-limit.mjs", + "db:generate": "drizzle-kit generate", + "db:migrate": "drizzle-kit migrate", + "auth:generate": "npx @better-auth/cli@latest generate --config src/auth.ts --output src/db/better-auth.schema.ts --yes" + }, + "dependencies": { + "@daytonaio/sdk": "^0.150.0", + "better-auth": "^1.4.18", + "cors": "^2.8.5", + "dotenv": "^16.4.5", + "drizzle-orm": "^0.45.1", + "express": "^4.19.2", + "mysql2": "^3.11.3", + "zod": "^4.3.6" + }, + "devDependencies": { + "@types/cors": "^2.8.17", + "@types/express": "^4.17.21", + "@types/node": "^20.11.30", + "drizzle-kit": "^0.31.9", + "tsx": "^4.15.7", + "typescript": "^5.5.4" + } +} diff --git a/services/den-v2/public/index.html b/services/den-v2/public/index.html new file mode 100644 index 00000000..f5cd08dc --- /dev/null +++ b/services/den-v2/public/index.html @@ -0,0 +1,248 @@ + + + + + + Den Control Plane + + + +

Den Control Plane Demo

+

Sign up, verify auth, and launch a cloud worker end-to-end.

+ +
+
+

1) Sign up

+ + + + +
+ +
+

2) Verify auth/session

+ + +

Bearer token comes from sign-up/sign-in response.

+
+ +
+

3) Launch worker

+ + + + +
+
+ +
+

Output

+
ready
+
+ + + + diff --git a/services/den-v2/scripts/daytona-provisioner-smoke.mjs b/services/den-v2/scripts/daytona-provisioner-smoke.mjs new file mode 100644 index 00000000..cefdc139 --- /dev/null +++ b/services/den-v2/scripts/daytona-provisioner-smoke.mjs @@ -0,0 +1,129 @@ +import { randomUUID } from "node:crypto" +import { existsSync } from "node:fs" +import { dirname, join, resolve } from "node:path" +import { fileURLToPath } from "node:url" +import { setTimeout as delay } from "node:timers/promises" +import dotenv from "dotenv" +import { Daytona } from "@daytonaio/sdk" + +const __dirname = dirname(fileURLToPath(import.meta.url)) +const serviceDir = resolve(__dirname, "..") +const repoRoot = resolve(serviceDir, "..", "..") + +function findUpwards(startDir, fileName, maxDepth = 8) { + let current = startDir + for (let depth = 0; depth <= maxDepth; depth += 1) { + const candidate = join(current, fileName) + if (existsSync(candidate)) { + return candidate + } + const parent = dirname(current) + if (parent === current) { + break + } + current = parent + } + return null +} + +const daytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() || findUpwards(repoRoot, ".env.daytona") +if (daytonaEnvPath) { + dotenv.config({ path: daytonaEnvPath, override: false }) +} + +process.env.DATABASE_URL ||= "mysql://unused" +process.env.BETTER_AUTH_SECRET ||= "openwork-daytona-local-secret-000000000" +process.env.BETTER_AUTH_URL ||= "http://127.0.0.1" +process.env.CORS_ORIGINS ||= "http://127.0.0.1" +process.env.PROVISIONER_MODE ||= "daytona" + +function log(message, detail) { + if (detail === undefined) { + console.log(message) + return + } + console.log(message, detail) +} + +function fail(message, detail) { + if (detail !== undefined) { + console.error(message, detail) + } else { + console.error(message) + } + process.exit(1) +} + +async function waitForCleanup(daytona, workerId, attempts = 24) { + for (let index = 0; index < attempts; index += 1) { + const sandboxes = await 
daytona.list( + { + "openwork.den.provider": "daytona", + "openwork.den.worker-id": workerId, + }, + 1, + 20, + ) + if (sandboxes.items.length === 0) { + return + } + await delay(5000) + } + throw new Error(`cleanup_timeout:${workerId}`) +} + +async function main() { + if (!process.env.DAYTONA_API_KEY) { + fail("DAYTONA_API_KEY is required. Add it to .env.daytona or export it before running the smoke test.") + } + + const { provisionWorker, deprovisionWorker } = await import("../dist/workers/provisioner.js") + + const workerId = randomUUID() + const clientToken = randomUUID().replaceAll("-", "") + randomUUID().replaceAll("-", "") + const hostToken = randomUUID().replaceAll("-", "") + randomUUID().replaceAll("-", "") + + const instance = await provisionWorker({ + workerId, + name: "daytona-smoke", + hostToken, + clientToken, + }) + + log("Provisioned Daytona worker", instance) + + const workspacesResponse = await fetch(`${instance.url.replace(/\/$/, "")}/workspaces`, { + headers: { + Accept: "application/json", + Authorization: `Bearer ${clientToken}`, + }, + }) + + const workspacesPayload = await workspacesResponse.text() + if (!workspacesResponse.ok) { + fail("Worker /workspaces check failed", { + status: workspacesResponse.status, + body: workspacesPayload, + }) + } + + log("Worker /workspaces responded", workspacesPayload) + + await deprovisionWorker({ + workerId, + instanceUrl: instance.url, + }) + + const daytona = new Daytona({ + apiKey: process.env.DAYTONA_API_KEY, + apiUrl: process.env.DAYTONA_API_URL, + ...(process.env.DAYTONA_TARGET ? { target: process.env.DAYTONA_TARGET } : {}), + }) + + await waitForCleanup(daytona, workerId) + log("Daytona worker cleanup completed", workerId) +} + +main().catch((error) => { + fail(error instanceof Error ? 
error.message : String(error)) +}) diff --git a/services/den-v2/scripts/e2e-daytona-worker.mjs b/services/den-v2/scripts/e2e-daytona-worker.mjs new file mode 100644 index 00000000..59a60d0d --- /dev/null +++ b/services/den-v2/scripts/e2e-daytona-worker.mjs @@ -0,0 +1,489 @@ +import { randomUUID } from "node:crypto" +import { once } from "node:events" +import { existsSync } from "node:fs" +import net from "node:net" +import { dirname, join, resolve } from "node:path" +import { fileURLToPath } from "node:url" +import { setTimeout as delay } from "node:timers/promises" +import { spawn } from "node:child_process" +import dotenv from "dotenv" +import mysql from "mysql2/promise" +import { Daytona } from "@daytonaio/sdk" + +const __dirname = dirname(fileURLToPath(import.meta.url)) +const serviceDir = resolve(__dirname, "..") +const repoRoot = resolve(serviceDir, "..", "..") + +function log(message) { + process.stdout.write(`${message}\n`) +} + +function fail(message, detail) { + if (detail !== undefined) { + console.error(message, detail) + } else { + console.error(message) + } + process.exit(1) +} + +function findUpwards(startDir, fileName, maxDepth = 8) { + let current = startDir + for (let depth = 0; depth <= maxDepth; depth += 1) { + const candidate = join(current, fileName) + if (existsSync(candidate)) { + return candidate + } + const parent = dirname(current) + if (parent === current) { + break + } + current = parent + } + return null +} + +const daytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() || findUpwards(repoRoot, ".env.daytona") +if (daytonaEnvPath) { + dotenv.config({ path: daytonaEnvPath, override: false }) +} + +function slug(value) { + return value + .toLowerCase() + .replace(/[^a-z0-9-]+/g, "-") + .replace(/-+/g, "-") + .replace(/^-|-$/g, "") +} + +function workerHint(workerId) { + return workerId.replace(/-/g, "").slice(0, 12) +} + +function sandboxLabels(workerId) { + return { + "openwork.den.provider": "daytona", + 
"openwork.den.worker-id": workerId, + } +} + +function workspaceVolumeName(workerId) { + const prefix = process.env.DAYTONA_VOLUME_NAME_PREFIX || "den-daytona-worker" + return slug(`${prefix}-${workerHint(workerId)}-workspace`).slice(0, 63) +} + +function dataVolumeName(workerId) { + const prefix = process.env.DAYTONA_VOLUME_NAME_PREFIX || "den-daytona-worker" + return slug(`${prefix}-${workerHint(workerId)}-data`).slice(0, 63) +} + +async function getFreePort() { + return await new Promise((resolvePort, reject) => { + const server = net.createServer() + server.listen(0, "127.0.0.1", () => { + const address = server.address() + if (!address || typeof address === "string") { + reject(new Error("failed_to_resolve_free_port")) + return + } + server.close((error) => (error ? reject(error) : resolvePort(address.port))) + }) + server.on("error", reject) + }) +} + +function spawnCommand(command, args, options = {}) { + return spawn(command, args, { + cwd: serviceDir, + env: process.env, + stdio: "pipe", + ...options, + }) +} + +async function runCommand(command, args, options = {}) { + const child = spawnCommand(command, args, options) + let stdout = "" + let stderr = "" + child.stdout?.on("data", (chunk) => { + stdout += chunk.toString() + }) + child.stderr?.on("data", (chunk) => { + stderr += chunk.toString() + }) + const [code] = await once(child, "exit") + if (code !== 0) { + throw new Error(`${command} ${args.join(" ")} failed\nSTDOUT:\n${stdout}\nSTDERR:\n${stderr}`) + } + return { stdout, stderr } +} + +async function waitForMysqlConnection(databaseUrl, attempts = 60) { + for (let index = 0; index < attempts; index += 1) { + try { + const connection = await mysql.createConnection(databaseUrl) + await connection.query("SELECT 1") + await connection.end() + return + } catch { + await delay(1000) + } + } + throw new Error("mysql_not_ready") +} + +async function waitForHttp(url, attempts = 60, intervalMs = 500) { + for (let index = 0; index < attempts; index += 1) { + 
try { + const response = await fetch(url) + if (response.ok) { + return response + } + } catch { + // ignore until retries are exhausted + } + await delay(intervalMs) + } + throw new Error(`http_not_ready:${url}`) +} + +async function waitForWorkerReady(baseUrl, workerId, auth, attempts = 180) { + for (let index = 0; index < attempts; index += 1) { + const result = await requestJson(baseUrl, `/v1/workers/${workerId}`, auth) + if (result.response.ok && result.payload?.instance?.url && result.payload?.worker?.status === "healthy") { + return result.payload + } + await delay(5000) + } + throw new Error(`worker_not_ready:${workerId}`) +} + +async function waitForDaytonaCleanup(daytona, workerId, attempts = 60) { + for (let index = 0; index < attempts; index += 1) { + const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20) + const volumes = await daytona.volume.list() + const remainingVolumes = volumes.filter((volume) => + [workspaceVolumeName(workerId), dataVolumeName(workerId)].includes(volume.name), + ) + + if (sandboxes.items.length === 0 && remainingVolumes.length === 0) { + return + } + + await delay(5000) + } + + throw new Error(`daytona_cleanup_incomplete:${workerId}`) +} + +async function forceDeleteDaytonaResources(daytona, workerId) { + const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20) + for (const sandbox of sandboxes.items) { + await sandbox.delete(120).catch(() => {}) + } + + const volumes = await daytona.volume.list() + for (const volumeName of [workspaceVolumeName(workerId), dataVolumeName(workerId)]) { + const volume = volumes.find((entry) => entry.name === volumeName) + if (volume) { + await daytona.volume.delete(volume).catch(() => {}) + } + } +} + +function extractAuthToken(payload) { + if (!payload || typeof payload !== "object") { + return null + } + if (typeof payload.token === "string" && payload.token.trim()) { + return payload.token + } + if (payload.session && typeof payload.session === "object" && typeof 
payload.session.token === "string") { + return payload.session.token + } + return null +} + +async function requestJson(baseUrl, path, { method = "GET", body, token, cookie } = {}) { + const headers = new Headers() + const origin = process.env.DEN_BROWSER_ORIGIN?.trim() || new URL(baseUrl).origin + headers.set("Accept", "application/json") + headers.set("Origin", origin) + headers.set("Referer", `${origin}/`) + if (body !== undefined) { + headers.set("Content-Type", "application/json") + } + if (token) { + headers.set("Authorization", `Bearer ${token}`) + } + if (cookie) { + headers.set("Cookie", cookie) + } + + const response = await fetch(`${baseUrl}${path}`, { + method, + headers, + body: body === undefined ? undefined : JSON.stringify(body), + }) + + const text = await response.text() + let payload = null + if (text) { + try { + payload = JSON.parse(text) + } catch { + payload = text + } + } + + return { + response, + payload, + cookie: response.headers.get("set-cookie"), + } +} + +async function main() { + if (!process.env.DAYTONA_API_KEY) { + fail("DAYTONA_API_KEY is required. Add it to .env.daytona or export it before running the test.") + } + + const existingBaseUrl = process.env.DEN_BASE_URL?.trim() || process.env.DEN_API_URL?.trim() || "" + const mysqlPort = existingBaseUrl ? null : await getFreePort() + const appPort = existingBaseUrl ? null : await getFreePort() + const containerName = existingBaseUrl + ? null + : `openwork-den-daytona-${randomUUID().slice(0, 8)}` + const dbName = "openwork_den_daytona_e2e" + const dbPassword = "openwork-root" + const baseUrl = existingBaseUrl || `http://127.0.0.1:${appPort}` + const databaseUrl = mysqlPort + ? `mysql://root:${dbPassword}@127.0.0.1:${mysqlPort}/${dbName}` + : null + const runtimeEnv = { + ...process.env, + ...(databaseUrl ? { DATABASE_URL: databaseUrl } : {}), + BETTER_AUTH_SECRET: "openwork-den-daytona-secret-0000000000", + BETTER_AUTH_URL: baseUrl, + ...(appPort ? 
{ PORT: String(appPort) } : {}), + CORS_ORIGINS: baseUrl, + PROVISIONER_MODE: "daytona", + POLAR_FEATURE_GATE_ENABLED: "false", + OPENWORK_DAYTONA_ENV_PATH: daytonaEnvPath || process.env.OPENWORK_DAYTONA_ENV_PATH || "", + } + + const daytona = new Daytona({ + apiKey: runtimeEnv.DAYTONA_API_KEY, + apiUrl: runtimeEnv.DAYTONA_API_URL, + ...(runtimeEnv.DAYTONA_TARGET ? { target: runtimeEnv.DAYTONA_TARGET } : {}), + }) + + let serviceProcess = null + let workerId = null + + const cleanup = async () => { + if (workerId) { + try { + await forceDeleteDaytonaResources(daytona, workerId) + } catch { + // cleanup best effort only + } + } + + if (serviceProcess && !serviceProcess.killed) { + serviceProcess.kill("SIGINT") + await once(serviceProcess, "exit").catch(() => {}) + } + + if (containerName) { + await runCommand("docker", ["rm", "-f", containerName], { cwd: serviceDir }).catch(() => {}) + } + } + + process.on("SIGINT", async () => { + await cleanup() + process.exit(130) + }) + + try { + if (containerName && mysqlPort && databaseUrl && appPort) { + log("Starting disposable MySQL container...") + await runCommand("docker", [ + "run", + "-d", + "--rm", + "--name", + containerName, + "-e", + `MYSQL_ROOT_PASSWORD=${dbPassword}`, + "-e", + `MYSQL_DATABASE=${dbName}`, + "-p", + `${mysqlPort}:3306`, + "mysql:8.4", + ]) + + log("Waiting for MySQL...") + await waitForMysqlConnection(databaseUrl) + + log("Running Den migrations...") + await runCommand("pnpm", ["db:migrate"], { cwd: serviceDir, env: runtimeEnv }) + + log("Starting Den service with Daytona provisioner...") + serviceProcess = spawn("pnpm", ["exec", "tsx", "src/index.ts"], { + cwd: serviceDir, + env: runtimeEnv, + stdio: "pipe", + }) + + let serviceOutput = "" + serviceProcess.stdout?.on("data", (chunk) => { + serviceOutput += chunk.toString() + }) + serviceProcess.stderr?.on("data", (chunk) => { + serviceOutput += chunk.toString() + }) + + serviceProcess.on("exit", (code) => { + if (code !== 0) { + 
console.error(serviceOutput) + } + }) + } else { + log(`Using existing Den API at ${baseUrl}`) + } + + await waitForHttp(`${baseUrl}/health`) + + const email = `den-daytona-${Date.now()}@example.com` + const password = "TestPass123!" + + log("Creating account...") + const signup = await requestJson(baseUrl, "/api/auth/sign-up/email", { + method: "POST", + body: { + name: "Den Daytona E2E", + email, + password, + }, + }) + + if (!signup.response.ok) { + fail("Signup failed", signup.payload) + } + + const token = extractAuthToken(signup.payload) + const cookie = signup.cookie + if (!token && !cookie) { + fail("Signup did not return a bearer token or session cookie", signup.payload) + } + + const auth = { token, cookie } + + log("Validating authenticated session...") + const me = await requestJson(baseUrl, "/v1/me", auth) + if (!me.response.ok) { + fail("Session lookup failed", me.payload) + } + + log("Creating Daytona-backed cloud worker...") + const createWorker = await requestJson(baseUrl, "/v1/workers", { + method: "POST", + ...auth, + body: { + name: "daytona-worker", + destination: "cloud", + }, + }) + + if (createWorker.response.status !== 202) { + fail("Worker creation did not return async launch", { + status: createWorker.response.status, + payload: createWorker.payload, + }) + } + + workerId = createWorker.payload?.worker?.id || null + if (!workerId) { + fail("Worker response did not include an id", createWorker.payload) + } + + log("Waiting for worker provisioning to finish...") + const workerPayload = await waitForWorkerReady(baseUrl, workerId, auth) + if (workerPayload.instance.provider !== "daytona") { + fail("Worker instance did not report the Daytona provider", workerPayload) + } + + log("Checking worker health endpoint...") + await waitForHttp(`${workerPayload.instance.url.replace(/\/$/, "")}/health`, 120, 5000) + + log("Checking OpenWork connect metadata...") + const tokensResponse = await requestJson(baseUrl, `/v1/workers/${workerId}/tokens`, { + 
method: "POST", + ...auth, + }) + if (!tokensResponse.response.ok || !tokensResponse.payload?.connect?.openworkUrl) { + fail("Worker tokens/connect payload missing", tokensResponse.payload) + } + + const clientToken = tokensResponse.payload.tokens?.client + if (!clientToken) { + fail("Client token missing from worker token payload", tokensResponse.payload) + } + + const connectHeaders = { + Accept: "application/json", + Authorization: `Bearer ${clientToken}`, + } + const statusResponse = await fetch(`${tokensResponse.payload.connect.openworkUrl}/status`, { + headers: connectHeaders, + }) + if (!statusResponse.ok) { + fail("Connected worker /status failed", await statusResponse.text()) + } + + const capabilitiesResponse = await fetch(`${tokensResponse.payload.connect.openworkUrl}/capabilities`, { + headers: connectHeaders, + }) + if (!capabilitiesResponse.ok) { + fail("Connected worker /capabilities failed", await capabilitiesResponse.text()) + } + + log("Verifying Daytona resources exist...") + const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20) + if (sandboxes.items.length === 0) { + fail("Expected a Daytona sandbox for the worker but none were found") + } + const volumes = await daytona.volume.list() + const expectedVolumeNames = [workspaceVolumeName(workerId), dataVolumeName(workerId)] + const missingVolumes = expectedVolumeNames.filter( + (name) => !volumes.some((volume) => volume.name === name), + ) + if (missingVolumes.length > 0) { + fail("Expected Daytona volumes were not created", missingVolumes) + } + + log("Deleting worker and waiting for Daytona cleanup...") + const deleteResponse = await requestJson(baseUrl, `/v1/workers/${workerId}`, { + method: "DELETE", + ...auth, + }) + if (deleteResponse.response.status !== 204) { + fail("Worker deletion failed", { + status: deleteResponse.response.status, + payload: deleteResponse.payload, + }) + } + + await waitForDaytonaCleanup(daytona, workerId) + workerId = null + + log("Daytona worker flow 
passed.") + } finally { + await cleanup() + } +} + +main().catch((error) => { + fail(error instanceof Error ? error.message : String(error)) +}) diff --git a/services/den-v2/scripts/e2e-worker-limit.mjs b/services/den-v2/scripts/e2e-worker-limit.mjs new file mode 100644 index 00000000..41641d87 --- /dev/null +++ b/services/den-v2/scripts/e2e-worker-limit.mjs @@ -0,0 +1,340 @@ +import { randomUUID } from "node:crypto"; +import { once } from "node:events"; +import net from "node:net"; +import { dirname, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; +import { setTimeout as delay } from "node:timers/promises"; +import { spawn } from "node:child_process"; +import mysql from "mysql2/promise"; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const serviceDir = resolve(__dirname, ".."); + +function log(message) { + process.stdout.write(`${message}\n`); +} + +function fail(message, detail) { + if (detail !== undefined) { + console.error(message, detail); + } else { + console.error(message); + } + process.exit(1); +} + +async function getFreePort() { + return await new Promise((resolvePort, reject) => { + const server = net.createServer(); + server.listen(0, "127.0.0.1", () => { + const address = server.address(); + if (!address || typeof address === "string") { + reject(new Error("failed_to_resolve_free_port")); + return; + } + const { port } = address; + server.close((error) => (error ? 
reject(error) : resolvePort(port))); + }); + server.on("error", reject); + }); +} + +function spawnCommand(command, args, options = {}) { + return spawn(command, args, { + cwd: serviceDir, + env: process.env, + stdio: "pipe", + ...options, + }); +} + +async function runCommand(command, args, options = {}) { + const child = spawnCommand(command, args, options); + let stdout = ""; + let stderr = ""; + child.stdout?.on("data", (chunk) => { + stdout += chunk.toString(); + }); + child.stderr?.on("data", (chunk) => { + stderr += chunk.toString(); + }); + const [code] = await once(child, "exit"); + if (code !== 0) { + throw new Error(`${command} ${args.join(" ")} failed\nSTDOUT:\n${stdout}\nSTDERR:\n${stderr}`); + } + return { stdout, stderr }; +} + +async function waitForMysqlConnection(databaseUrl, attempts = 60) { + for (let index = 0; index < attempts; index += 1) { + try { + const connection = await mysql.createConnection(databaseUrl); + await connection.query("SELECT 1"); + await connection.end(); + return; + } catch { + await delay(1000); + } + } + + throw new Error("mysql_not_ready"); +} + +async function waitForHttp(url, attempts = 60) { + for (let index = 0; index < attempts; index += 1) { + try { + const response = await fetch(url); + if (response.ok) { + return; + } + } catch { + // ignore until retries are exhausted + } + await delay(500); + } + + throw new Error(`http_not_ready:${url}`); +} + +function extractAuthToken(payload) { + if (!payload || typeof payload !== "object") { + return null; + } + + if (typeof payload.token === "string" && payload.token.trim()) { + return payload.token; + } + + if (payload.session && typeof payload.session === "object" && typeof payload.session.token === "string") { + return payload.session.token; + } + + return null; +} + +async function requestJson(baseUrl, path, { method = "GET", body, token, cookie } = {}) { + const headers = new Headers(); + const origin = new URL(baseUrl).origin; + headers.set("Accept", 
"application/json"); + headers.set("Origin", origin); + headers.set("Referer", `${origin}/`); + if (body !== undefined) { + headers.set("Content-Type", "application/json"); + } + if (token) { + headers.set("Authorization", `Bearer ${token}`); + } + if (cookie) { + headers.set("Cookie", cookie); + } + + const response = await fetch(`${baseUrl}${path}`, { + method, + headers, + body: body === undefined ? undefined : JSON.stringify(body), + }); + + const text = await response.text(); + let payload = null; + if (text) { + try { + payload = JSON.parse(text); + } catch { + payload = text; + } + } + + return { + response, + payload, + cookie: response.headers.get("set-cookie"), + }; +} + +async function main() { + const mysqlPort = await getFreePort(); + const appPort = await getFreePort(); + const containerName = `openwork-den-e2e-${randomUUID().slice(0, 8)}`; + const dbName = "openwork_den_e2e"; + const dbPassword = "openwork-root"; + const baseUrl = `http://127.0.0.1:${appPort}`; + const databaseUrl = `mysql://root:${dbPassword}@127.0.0.1:${mysqlPort}/${dbName}`; + const env = { + ...process.env, + DATABASE_URL: databaseUrl, + BETTER_AUTH_SECRET: "openwork-den-e2e-secret-000000000000", + BETTER_AUTH_URL: baseUrl, + PORT: String(appPort), + OPENWORK_DEV_MODE: "1", + CORS_ORIGINS: baseUrl, + PROVISIONER_MODE: "stub", + WORKER_URL_TEMPLATE: "https://workers.example.com/{workerId}", + POLAR_FEATURE_GATE_ENABLED: "false", + }; + + let serviceProcess = null; + + const cleanup = async () => { + if (serviceProcess && !serviceProcess.killed) { + serviceProcess.kill("SIGINT"); + await once(serviceProcess, "exit").catch(() => {}); + } + + await runCommand("docker", ["rm", "-f", containerName], { cwd: serviceDir }).catch(() => {}); + }; + + process.on("SIGINT", async () => { + await cleanup(); + process.exit(130); + }); + + try { + log("Starting disposable MySQL container..."); + await runCommand("docker", [ + "run", + "-d", + "--rm", + "--name", + containerName, + "-e", + 
`MYSQL_ROOT_PASSWORD=${dbPassword}`, + "-e", + `MYSQL_DATABASE=${dbName}`, + "-p", + `${mysqlPort}:3306`, + "mysql:8.4", + ]); + + log("Waiting for MySQL..."); + await waitForMysqlConnection(databaseUrl); + + log("Running Den migrations..."); + await runCommand("pnpm", ["db:migrate"], { cwd: serviceDir, env }); + + log("Starting Den service..."); + serviceProcess = spawn("pnpm", ["exec", "tsx", "src/index.ts"], { + cwd: serviceDir, + env, + stdio: "pipe", + }); + + let serviceOutput = ""; + serviceProcess.stdout?.on("data", (chunk) => { + serviceOutput += chunk.toString(); + }); + serviceProcess.stderr?.on("data", (chunk) => { + serviceOutput += chunk.toString(); + }); + + serviceProcess.on("exit", (code) => { + if (code !== 0) { + console.error(serviceOutput); + } + }); + + await waitForHttp(`${baseUrl}/health`); + + const email = `den-e2e-${Date.now()}@example.com`; + const password = "TestPass123!"; + + log("Creating account..."); + const signup = await requestJson(baseUrl, "/api/auth/sign-up/email", { + method: "POST", + body: { + name: "Den E2E", + email, + password, + }, + }); + + if (!signup.response.ok) { + fail("Signup failed", signup.payload); + } + + const token = extractAuthToken(signup.payload); + const cookie = signup.cookie; + if (!token && !cookie) { + fail("Signup did not return a bearer token or session cookie", signup.payload); + } + + log("Validating authenticated session..."); + const me = await requestJson(baseUrl, "/v1/me", { token, cookie }); + if (!me.response.ok) { + fail("Session lookup failed", me.payload); + } + + log("Checking billing summary is disabled..."); + const billing = await requestJson(baseUrl, "/v1/workers/billing", { token, cookie }); + if (!billing.response.ok) { + fail("Billing summary request failed", billing.payload); + } + + if ( + !billing.payload?.billing || + billing.payload.billing.featureGateEnabled !== false || + billing.payload.billing.checkoutRequired !== false || + billing.payload.billing.checkoutUrl !== null 
+ ) { + fail("Billing summary should be disabled for the experiment", billing.payload); + } + + log("Creating first cloud worker..."); + const firstWorker = await requestJson(baseUrl, "/v1/workers", { + method: "POST", + token, + cookie, + body: { + name: "first-worker", + destination: "cloud", + }, + }); + + if (firstWorker.response.status !== 202) { + fail("First worker did not launch successfully", { + status: firstWorker.response.status, + payload: firstWorker.payload, + }); + } + + log("Attempting second cloud worker..."); + const secondWorker = await requestJson(baseUrl, "/v1/workers", { + method: "POST", + token, + cookie, + body: { + name: "second-worker", + destination: "cloud", + }, + }); + + if (secondWorker.response.status !== 202) { + fail("Second worker should be allowed in dev mode", { + status: secondWorker.response.status, + payload: secondWorker.payload, + }); + } + + if (!secondWorker.payload?.worker?.id) { + fail("Second worker did not return a worker payload", secondWorker.payload); + } + + log("Listing workers..."); + const workers = await requestJson(baseUrl, "/v1/workers?limit=20", { token, cookie }); + if (!workers.response.ok) { + fail("Worker list request failed", workers.payload); + } + + const items = Array.isArray(workers.payload?.workers) ? workers.payload.workers : null; + if (!items || items.length !== 2) { + fail("Expected two cloud workers in dev mode", workers.payload); + } + + log("E2E dev worker limit check passed."); + } finally { + await cleanup(); + } +} + +await main().catch((error) => { + fail(error instanceof Error ? 
error.message : String(error)); +}); diff --git a/services/den-v2/scripts/run-sql-migrations.mjs b/services/den-v2/scripts/run-sql-migrations.mjs new file mode 100644 index 00000000..e0a85cbf --- /dev/null +++ b/services/den-v2/scripts/run-sql-migrations.mjs @@ -0,0 +1,87 @@ +import { readdir, readFile } from "node:fs/promises" +import path from "node:path" +import { fileURLToPath } from "node:url" +import mysql from "mysql2/promise" + +const __dirname = path.dirname(fileURLToPath(import.meta.url)) +const drizzleDir = path.resolve(__dirname, "..", "drizzle") + +function splitStatements(sql) { + return sql + .split(/--> statement-breakpoint/g) + .map((part) => part.trim()) + .filter(Boolean) +} + +async function ensureMigrationsTable(connection) { + await connection.query(` + CREATE TABLE IF NOT EXISTS __den_migrations ( + id varchar(255) NOT NULL PRIMARY KEY, + applied_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP + ) + `) +} + +async function appliedMigrations(connection) { + const [rows] = await connection.query("SELECT id FROM __den_migrations") + return new Set(rows.map((row) => row.id)) +} + +function connectionConfigFromEnv() { + const databaseUrl = process.env.DATABASE_URL?.trim() + if (databaseUrl) { + return databaseUrl + } + + const host = process.env.DATABASE_HOST?.trim() + const user = process.env.DATABASE_USERNAME?.trim() + const password = process.env.DATABASE_PASSWORD ?? 
"" + + if (!host || !user) { + throw new Error("DATABASE_URL or DATABASE_HOST/DATABASE_USERNAME/DATABASE_PASSWORD is required") + } + + return { + host, + user, + password, + ssl: { + rejectUnauthorized: true, + }, + } +} + +async function run() { + const connection = await mysql.createConnection(connectionConfigFromEnv()) + + try { + await ensureMigrationsTable(connection) + const completed = await appliedMigrations(connection) + const files = (await readdir(drizzleDir)) + .filter((file) => file.endsWith(".sql")) + .sort((left, right) => left.localeCompare(right)) + + for (const file of files) { + if (completed.has(file)) { + continue + } + + const sql = await readFile(path.join(drizzleDir, file), "utf8") + const statements = splitStatements(sql) + + for (const statement of statements) { + await connection.query(statement) + } + + await connection.query("INSERT INTO __den_migrations (id) VALUES (?)", [file]) + process.stdout.write(`[den] Applied migration ${file}\n`) + } + } finally { + await connection.end() + } +} + +run().catch((error) => { + console.error(error instanceof Error ? error.stack ?? 
error.message : String(error)) + process.exit(1) +}) diff --git a/services/den-v2/src/admin-allowlist.ts b/services/den-v2/src/admin-allowlist.ts new file mode 100644 index 00000000..d5bd0b4f --- /dev/null +++ b/services/den-v2/src/admin-allowlist.ts @@ -0,0 +1,53 @@ +import { sql } from "./db/drizzle.js" +import { db } from "./db/index.js" +import { AdminAllowlistTable } from "./db/schema.js" +import { createDenTypeId } from "./db/typeid.js" + +const ADMIN_ALLOWLIST_SEEDS = [ + { + email: "ben@openworklabs.com", + note: "Seeded internal admin", + }, + { + email: "jan@openworklabs.com", + note: "Seeded internal admin", + }, + { + email: "omar@openworklabs.com", + note: "Seeded internal admin", + }, + { + email: "berk@openworklabs.com", + note: "Seeded internal admin", + }, +] as const + +let ensureAdminAllowlistSeededPromise: Promise | null = null + +async function seedAdminAllowlist() { + for (const entry of ADMIN_ALLOWLIST_SEEDS) { + await db + .insert(AdminAllowlistTable) + .values({ + id: createDenTypeId("adminAllowlist"), + ...entry, + }) + .onDuplicateKeyUpdate({ + set: { + note: entry.note, + updated_at: sql`CURRENT_TIMESTAMP(3)`, + }, + }) + } +} + +export async function ensureAdminAllowlistSeeded() { + if (!ensureAdminAllowlistSeededPromise) { + ensureAdminAllowlistSeededPromise = seedAdminAllowlist().catch((error) => { + ensureAdminAllowlistSeededPromise = null + throw error + }) + } + + await ensureAdminAllowlistSeededPromise +} diff --git a/services/den-v2/src/auth.ts b/services/den-v2/src/auth.ts new file mode 100644 index 00000000..93917653 --- /dev/null +++ b/services/den-v2/src/auth.ts @@ -0,0 +1,68 @@ +import { betterAuth } from "better-auth" +import { drizzleAdapter } from "better-auth/adapters/drizzle" +import { db } from "./db/index.js" +import * as schema from "./db/schema.js" +import { createDenTypeId, normalizeDenTypeId } from "./db/typeid.js" +import { env } from "./env.js" +import { ensureDefaultOrg } from "./orgs.js" + +const 
socialProviders = { + ...(env.github.clientId && env.github.clientSecret + ? { + github: { + clientId: env.github.clientId, + clientSecret: env.github.clientSecret, + }, + } + : {}), + ...(env.google.clientId && env.google.clientSecret + ? { + google: { + clientId: env.google.clientId, + clientSecret: env.google.clientSecret, + }, + } + : {}), +} + +export const auth = betterAuth({ + baseURL: env.betterAuthUrl, + secret: env.betterAuthSecret, + trustedOrigins: env.betterAuthTrustedOrigins.length > 0 ? env.betterAuthTrustedOrigins : undefined, + socialProviders: Object.keys(socialProviders).length > 0 ? socialProviders : undefined, + database: drizzleAdapter(db, { + provider: "mysql", + schema, + }), + advanced: { + database: { + generateId: (options) => { + switch (options.model) { + case "user": + return createDenTypeId("user") + case "session": + return createDenTypeId("session") + case "account": + return createDenTypeId("account") + case "verification": + return createDenTypeId("verification") + default: + return false + } + }, + }, + }, + emailAndPassword: { + enabled: true, + }, + databaseHooks: { + user: { + create: { + after: async (user) => { + const name = user.name ?? user.email ?? 
"Personal" + await ensureDefaultOrg(normalizeDenTypeId("user", user.id), name) + }, + }, + }, + }, +}) diff --git a/services/den-v2/src/billing/polar.ts b/services/den-v2/src/billing/polar.ts new file mode 100644 index 00000000..81c999f9 --- /dev/null +++ b/services/den-v2/src/billing/polar.ts @@ -0,0 +1,822 @@ +import { env } from "../env.js" + +type PolarCustomerState = { + granted_benefits?: Array<{ + benefit_id?: string + }> +} + +type PolarCheckoutSession = { + url?: string +} + +type PolarCustomerSession = { + customer_portal_url?: string +} + +type PolarCustomer = { + id?: string + email?: string + external_id?: string | null +} + +type PolarListResource = { + items?: T[] +} + +type PolarSubscription = { + id?: string + status?: string + amount?: number + currency?: string + recurring_interval?: string | null + recurring_interval_count?: number | null + current_period_start?: string | null + current_period_end?: string | null + cancel_at_period_end?: boolean + canceled_at?: string | null + ended_at?: string | null +} + +type PolarOrder = { + id?: string + created_at?: string + status?: string + total_amount?: number + net_amount?: number + currency?: string + invoice_number?: string + is_invoice_generated?: boolean +} + +type PolarOrderInvoice = { + url?: string +} + +type PolarProductPrice = { + amount_type?: string + price_currency?: string + price_amount?: number + minimum_amount?: number + preset_amount?: number | null + is_archived?: boolean + seat_tiers?: { + tiers?: Array<{ + price_per_seat?: number + }> + } +} + +type PolarProduct = { + recurring_interval?: string | null + recurring_interval_count?: number | null + prices?: PolarProductPrice[] +} + +export type CloudWorkerAccess = + | { + allowed: true + } + | { + allowed: false + checkoutUrl: string + } + +export type CloudWorkerBillingPrice = { + amount: number | null + currency: string | null + recurringInterval: string | null + recurringIntervalCount: number | null +} + +export type 
CloudWorkerBillingSubscription = { + id: string + status: string + amount: number | null + currency: string | null + recurringInterval: string | null + recurringIntervalCount: number | null + currentPeriodStart: string | null + currentPeriodEnd: string | null + cancelAtPeriodEnd: boolean + canceledAt: string | null + endedAt: string | null +} + +export type CloudWorkerBillingInvoice = { + id: string + createdAt: string | null + status: string + totalAmount: number | null + currency: string | null + invoiceNumber: string | null + invoiceUrl: string | null +} + +export type CloudWorkerBillingStatus = { + featureGateEnabled: boolean + hasActivePlan: boolean + checkoutRequired: boolean + checkoutUrl: string | null + portalUrl: string | null + price: CloudWorkerBillingPrice | null + subscription: CloudWorkerBillingSubscription | null + invoices: CloudWorkerBillingInvoice[] +} + +export type CloudWorkerAdminBillingStatus = { + status: "paid" | "unpaid" | "unavailable" + featureGateEnabled: boolean + subscriptionId: string | null + subscriptionStatus: string | null + currentPeriodEnd: string | null + source: "benefit" | "subscription" | "unavailable" + note: string | null +} + +type CloudAccessInput = { + userId: string + email: string + name: string +} + +type BillingStatusOptions = { + includeCheckoutUrl?: boolean + includePortalUrl?: boolean + includeInvoices?: boolean +} + +function sanitizeApiBase(value: string) { + return value.replace(/\/+$/, "") +} + +function parseJson(text: string): T | null { + if (!text) { + return null + } + + return JSON.parse(text) as T +} + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null +} + +async function polarFetch(path: string, init: RequestInit = {}) { + const headers = new Headers(init.headers) + headers.set("Authorization", `Bearer ${env.polar.accessToken}`) + headers.set("Accept", "application/json") + if (init.body && !headers.has("Content-Type")) { + 
headers.set("Content-Type", "application/json") + } + + return fetch(`${sanitizeApiBase(env.polar.apiBase)}${path}`, { + ...init, + headers, + }) +} + +async function polarFetchJson(path: string, init: RequestInit = {}) { + const response = await polarFetch(path, init) + const text = await response.text() + const payload = parseJson(text) + return { response, text, payload } +} + +function assertPaywallConfig() { + if (!env.polar.accessToken) { + throw new Error("POLAR_ACCESS_TOKEN is required when POLAR_FEATURE_GATE_ENABLED=true") + } + if (!env.polar.productId) { + throw new Error("POLAR_PRODUCT_ID is required when POLAR_FEATURE_GATE_ENABLED=true") + } + if (!env.polar.benefitId) { + throw new Error("POLAR_BENEFIT_ID is required when POLAR_FEATURE_GATE_ENABLED=true") + } + if (!env.polar.successUrl) { + throw new Error("POLAR_SUCCESS_URL is required when POLAR_FEATURE_GATE_ENABLED=true") + } + if (!env.polar.returnUrl) { + throw new Error("POLAR_RETURN_URL is required when POLAR_FEATURE_GATE_ENABLED=true") + } +} + +async function getCustomerStateByExternalId(externalCustomerId: string): Promise { + const encodedExternalId = encodeURIComponent(externalCustomerId) + const { response, payload, text } = await polarFetchJson(`/v1/customers/external/${encodedExternalId}/state`, { + method: "GET", + }) + + if (response.status === 404) { + return null + } + + if (!response.ok) { + throw new Error(`Polar customer state lookup failed (${response.status}): ${text.slice(0, 400)}`) + } + + return payload +} + +async function getCustomerStateById(customerId: string): Promise { + const encodedCustomerId = encodeURIComponent(customerId) + const { response, payload, text } = await polarFetchJson(`/v1/customers/${encodedCustomerId}/state`, { + method: "GET", + }) + + if (response.status === 404) { + return null + } + + if (!response.ok) { + throw new Error(`Polar customer state lookup by ID failed (${response.status}): ${text.slice(0, 400)}`) + } + + return payload +} + +async 
function getCustomerByEmail(email: string): Promise { + const normalizedEmail = email.trim().toLowerCase() + if (!normalizedEmail) { + return null + } + + const encodedEmail = encodeURIComponent(normalizedEmail) + const { response, payload, text } = await polarFetchJson>(`/v1/customers/?email=${encodedEmail}`, { + method: "GET", + }) + + if (!response.ok) { + throw new Error(`Polar customer lookup by email failed (${response.status}): ${text.slice(0, 400)}`) + } + + const customers = payload?.items ?? [] + const exact = customers.find((customer) => customer.email?.trim().toLowerCase() === normalizedEmail) + return exact ?? customers[0] ?? null +} + +async function linkCustomerExternalId(customer: PolarCustomer, externalCustomerId: string): Promise { + if (!customer.id) { + return + } + + if (typeof customer.external_id === "string" && customer.external_id.length > 0) { + return + } + + const encodedCustomerId = encodeURIComponent(customer.id) + await polarFetch(`/v1/customers/${encodedCustomerId}`, { + method: "PATCH", + body: JSON.stringify({ + external_id: externalCustomerId, + }), + }) +} + +function hasRequiredBenefit(state: PolarCustomerState | null) { + if (!state?.granted_benefits || !env.polar.benefitId) { + return false + } + + return state.granted_benefits.some((grant) => grant.benefit_id === env.polar.benefitId) +} + +async function createCheckoutSession(input: CloudAccessInput): Promise { + const payload = { + products: [env.polar.productId], + success_url: env.polar.successUrl, + return_url: env.polar.returnUrl, + external_customer_id: input.userId, + customer_email: input.email, + customer_name: input.name, + } + + const { response, payload: checkout, text } = await polarFetchJson("/v1/checkouts/", { + method: "POST", + body: JSON.stringify(payload), + }) + + if (!response.ok) { + throw new Error(`Polar checkout creation failed (${response.status}): ${text.slice(0, 400)}`) + } + + if (!checkout?.url) { + throw new Error("Polar checkout response 
missing URL") + } + + return checkout.url +} + +type CloudWorkerAccessEvaluation = { + featureGateEnabled: boolean + hasActivePlan: boolean + checkoutUrl: string | null +} + +async function evaluateCloudWorkerAccess( + input: CloudAccessInput, + options: { includeCheckoutUrl?: boolean } = {}, +): Promise { + if (!env.polar.featureGateEnabled) { + return { + featureGateEnabled: false, + hasActivePlan: true, + checkoutUrl: null, + } + } + + assertPaywallConfig() + + const externalState = await getCustomerStateByExternalId(input.userId) + if (hasRequiredBenefit(externalState)) { + return { + featureGateEnabled: true, + hasActivePlan: true, + checkoutUrl: null, + } + } + + const customer = await getCustomerByEmail(input.email) + if (customer?.id) { + const emailState = await getCustomerStateById(customer.id) + if (hasRequiredBenefit(emailState)) { + await linkCustomerExternalId(customer, input.userId).catch(() => undefined) + return { + featureGateEnabled: true, + hasActivePlan: true, + checkoutUrl: null, + } + } + } + + return { + featureGateEnabled: true, + hasActivePlan: false, + checkoutUrl: options.includeCheckoutUrl ? await createCheckoutSession(input) : null, + } +} + +function normalizeRecurringInterval(value: string | null | undefined): string | null { + return typeof value === "string" && value.trim().length > 0 ? value : null +} + +function normalizeRecurringIntervalCount(value: number | null | undefined): number | null { + return typeof value === "number" && Number.isFinite(value) ? value : null +} + +function isActiveSubscriptionStatus(status: string | null | undefined) { + const normalized = typeof status === "string" ? 
status.trim().toLowerCase() : "" + return normalized === "active" || normalized === "trialing" +} + +function toBillingSubscription(subscription: PolarSubscription | null): CloudWorkerBillingSubscription | null { + if (!subscription?.id) { + return null + } + + return { + id: subscription.id, + status: typeof subscription.status === "string" ? subscription.status : "unknown", + amount: typeof subscription.amount === "number" ? subscription.amount : null, + currency: typeof subscription.currency === "string" ? subscription.currency : null, + recurringInterval: normalizeRecurringInterval(subscription.recurring_interval), + recurringIntervalCount: normalizeRecurringIntervalCount(subscription.recurring_interval_count), + currentPeriodStart: typeof subscription.current_period_start === "string" ? subscription.current_period_start : null, + currentPeriodEnd: typeof subscription.current_period_end === "string" ? subscription.current_period_end : null, + cancelAtPeriodEnd: subscription.cancel_at_period_end === true, + canceledAt: typeof subscription.canceled_at === "string" ? subscription.canceled_at : null, + endedAt: typeof subscription.ended_at === "string" ? 
subscription.ended_at : null, + } +} + +function toBillingPriceFromSubscription(subscription: CloudWorkerBillingSubscription | null): CloudWorkerBillingPrice | null { + if (!subscription) { + return null + } + + return { + amount: subscription.amount, + currency: subscription.currency, + recurringInterval: subscription.recurringInterval, + recurringIntervalCount: subscription.recurringIntervalCount, + } +} + +async function getSubscriptionById(subscriptionId: string): Promise { + const encodedId = encodeURIComponent(subscriptionId) + const { response, payload, text } = await polarFetchJson(`/v1/subscriptions/${encodedId}`, { + method: "GET", + }) + + if (response.status === 404) { + return null + } + + if (!response.ok) { + throw new Error(`Polar subscription lookup failed (${response.status}): ${text.slice(0, 400)}`) + } + + return payload +} + +async function listSubscriptionsByExternalCustomer( + externalCustomerId: string, + options: { activeOnly?: boolean; limit?: number } = {}, +): Promise { + const params = new URLSearchParams() + params.set("external_customer_id", externalCustomerId) + if (env.polar.productId) { + params.set("product_id", env.polar.productId) + } + params.set("limit", String(options.limit ?? 
1)) + params.set("sorting", "-started_at") + + if (options.activeOnly === true) { + params.set("active", "true") + } + + const lookup = await polarFetchJson>(`/v1/subscriptions/?${params.toString()}`, { + method: "GET", + }) + let response = lookup.response + let payload = lookup.payload + let text = lookup.text + + if (response.status === 422 && params.has("sorting")) { + params.delete("sorting") + const fallbackLookup = await polarFetchJson>(`/v1/subscriptions/?${params.toString()}`, { + method: "GET", + }) + response = fallbackLookup.response + payload = fallbackLookup.payload + text = fallbackLookup.text + } + + if (!response.ok) { + throw new Error(`Polar subscriptions lookup failed (${response.status}): ${text.slice(0, 400)}`) + } + + return payload?.items ?? [] +} + +async function getPrimarySubscriptionForCustomer(externalCustomerId: string): Promise { + const active = await listSubscriptionsByExternalCustomer(externalCustomerId, { activeOnly: true, limit: 1 }) + if (active[0]) { + return active[0] + } + + const recent = await listSubscriptionsByExternalCustomer(externalCustomerId, { activeOnly: false, limit: 1 }) + return recent[0] ?? null +} + +async function listRecentOrdersByExternalCustomer(externalCustomerId: string, limit = 6): Promise { + const params = new URLSearchParams() + params.set("external_customer_id", externalCustomerId) + if (env.polar.productId) { + params.set("product_id", env.polar.productId) + } + params.set("limit", String(limit)) + params.set("sorting", "-created_at") + + const { response, payload, text } = await polarFetchJson>(`/v1/orders/?${params.toString()}`, { + method: "GET", + }) + + if (!response.ok) { + throw new Error(`Polar orders lookup failed (${response.status}): ${text.slice(0, 400)}`) + } + + return payload?.items ?? 
[] +} + +async function getOrderInvoiceUrl(orderId: string): Promise { + const encodedId = encodeURIComponent(orderId) + const { response, payload, text } = await polarFetchJson(`/v1/orders/${encodedId}/invoice`, { + method: "GET", + }) + + if (response.status === 404) { + return null + } + + if (!response.ok) { + throw new Error(`Polar invoice lookup failed (${response.status}): ${text.slice(0, 400)}`) + } + + return typeof payload?.url === "string" ? payload.url : null +} + +function toBillingInvoice(order: PolarOrder, invoiceUrl: string | null): CloudWorkerBillingInvoice | null { + if (!order.id) { + return null + } + + const totalAmount = + typeof order.total_amount === "number" + ? order.total_amount + : typeof order.net_amount === "number" + ? order.net_amount + : null + + return { + id: order.id, + createdAt: typeof order.created_at === "string" ? order.created_at : null, + status: typeof order.status === "string" ? order.status : "unknown", + totalAmount, + currency: typeof order.currency === "string" ? order.currency : null, + invoiceNumber: typeof order.invoice_number === "string" ? order.invoice_number : null, + invoiceUrl, + } +} + +async function listBillingInvoices(externalCustomerId: string, limit = 6): Promise { + const orders = await listRecentOrdersByExternalCustomer(externalCustomerId, limit) + const invoices = await Promise.all( + orders.map(async (order) => { + const invoiceUrl = order.id && order.is_invoice_generated === true ? await getOrderInvoiceUrl(order.id).catch(() => null) : null + return toBillingInvoice(order, invoiceUrl) + }), + ) + + return invoices.filter((invoice): invoice is CloudWorkerBillingInvoice => invoice !== null) +} + +async function createCustomerPortalUrl(externalCustomerId: string): Promise { + const body = { + external_customer_id: externalCustomerId, + return_url: env.polar.returnUrl ?? env.polar.successUrl ?? 
null, + } + + const { response, payload, text } = await polarFetchJson("/v1/customer-sessions/", { + method: "POST", + body: JSON.stringify(body), + }) + + if (response.status === 404 || response.status === 422) { + return null + } + + if (!response.ok) { + throw new Error(`Polar customer portal session failed (${response.status}): ${text.slice(0, 400)}`) + } + + return typeof payload?.customer_portal_url === "string" ? payload.customer_portal_url : null +} + +function extractAmountFromProductPrice(price: PolarProductPrice): number | null { + if (price.amount_type === "fixed" && typeof price.price_amount === "number") { + return price.price_amount + } + + if (price.amount_type === "seat_based") { + const firstTier = Array.isArray(price.seat_tiers?.tiers) ? price.seat_tiers?.tiers[0] : null + if (firstTier && typeof firstTier.price_per_seat === "number") { + return firstTier.price_per_seat + } + } + + if (price.amount_type === "custom") { + if (typeof price.preset_amount === "number") { + return price.preset_amount + } + if (typeof price.minimum_amount === "number") { + return price.minimum_amount + } + } + + if (price.amount_type === "free") { + return 0 + } + + return null +} + +function extractBillingPriceFromProduct(product: PolarProduct | null): CloudWorkerBillingPrice | null { + if (!product || !Array.isArray(product.prices)) { + return null + } + + for (const price of product.prices) { + if (!isRecord(price) || price.is_archived === true) { + continue + } + + const amount = extractAmountFromProductPrice(price as PolarProductPrice) + if (amount === null) { + continue + } + + const currency = typeof price.price_currency === "string" ? 
price.price_currency : null + return { + amount, + currency, + recurringInterval: normalizeRecurringInterval(product.recurring_interval), + recurringIntervalCount: normalizeRecurringIntervalCount(product.recurring_interval_count), + } + } + + return null +} + +async function getProductBillingPrice(productId: string): Promise { + const encodedId = encodeURIComponent(productId) + const { response, payload, text } = await polarFetchJson(`/v1/products/${encodedId}`, { + method: "GET", + }) + + if (response.status === 404) { + return null + } + + if (!response.ok) { + throw new Error(`Polar product lookup failed (${response.status}): ${text.slice(0, 400)}`) + } + + return extractBillingPriceFromProduct(payload) +} + +export async function requireCloudWorkerAccess(input: CloudAccessInput): Promise { + const evaluation = await evaluateCloudWorkerAccess(input, { includeCheckoutUrl: true }) + if (evaluation.hasActivePlan) { + return { allowed: true } + } + + if (!evaluation.checkoutUrl) { + throw new Error("Polar checkout URL unavailable") + } + + return { + allowed: false, + checkoutUrl: evaluation.checkoutUrl, + } +} + +export async function getCloudWorkerBillingStatus( + input: CloudAccessInput, + options: BillingStatusOptions = {}, +): Promise { + const includePortalUrl = options.includePortalUrl !== false + const includeInvoices = options.includeInvoices !== false + const evaluation = await evaluateCloudWorkerAccess(input, { + includeCheckoutUrl: options.includeCheckoutUrl, + }) + + if (!evaluation.featureGateEnabled) { + return { + featureGateEnabled: false, + hasActivePlan: true, + checkoutRequired: false, + checkoutUrl: null, + portalUrl: null, + price: null, + subscription: null, + invoices: [], + } + } + + let subscription: CloudWorkerBillingSubscription | null = null + let productPrice: CloudWorkerBillingPrice | null = null + let portalUrl: string | null = null + let invoices: CloudWorkerBillingInvoice[] = [] + + const [subscriptionResult, priceResult, 
portalResult, invoicesResult] = await Promise.all([ + getPrimarySubscriptionForCustomer(input.userId).catch(() => null), + env.polar.productId ? getProductBillingPrice(env.polar.productId).catch(() => null) : Promise.resolve(null), + includePortalUrl ? createCustomerPortalUrl(input.userId).catch(() => null) : Promise.resolve(null), + includeInvoices ? listBillingInvoices(input.userId).catch(() => []) : Promise.resolve([]), + ]) + + subscription = toBillingSubscription(subscriptionResult) + productPrice = priceResult + portalUrl = portalResult + invoices = invoicesResult + + return { + featureGateEnabled: evaluation.featureGateEnabled, + hasActivePlan: evaluation.hasActivePlan, + checkoutRequired: evaluation.featureGateEnabled && !evaluation.hasActivePlan, + checkoutUrl: evaluation.checkoutUrl, + portalUrl, + price: productPrice ?? toBillingPriceFromSubscription(subscription), + subscription, + invoices, + } +} + +export async function getCloudWorkerAdminBillingStatus( + input: CloudAccessInput, +): Promise { + if (!env.polar.accessToken) { + return { + status: "unavailable", + featureGateEnabled: env.polar.featureGateEnabled, + subscriptionId: null, + subscriptionStatus: null, + currentPeriodEnd: null, + source: "unavailable", + note: "Polar access token is not configured.", + } + } + + if (!env.polar.benefitId && !env.polar.productId) { + return { + status: "unavailable", + featureGateEnabled: env.polar.featureGateEnabled, + subscriptionId: null, + subscriptionStatus: null, + currentPeriodEnd: null, + source: "unavailable", + note: "Polar product or benefit configuration is missing.", + } + } + + try { + let note: string | null = null + let paidByBenefit = false + + if (env.polar.benefitId) { + const externalState = await getCustomerStateByExternalId(input.userId) + if (hasRequiredBenefit(externalState)) { + paidByBenefit = true + note = "Benefit granted via external customer id." 
+ } else { + const customer = await getCustomerByEmail(input.email) + if (customer?.id) { + const emailState = await getCustomerStateById(customer.id) + if (hasRequiredBenefit(emailState)) { + paidByBenefit = true + note = "Benefit granted via matching customer email." + await linkCustomerExternalId(customer, input.userId).catch(() => undefined) + } + } + } + } + + const subscription = env.polar.productId ? await getPrimarySubscriptionForCustomer(input.userId) : null + const normalizedSubscription = toBillingSubscription(subscription) + const paidBySubscription = isActiveSubscriptionStatus(normalizedSubscription?.status) + + return { + status: paidByBenefit || paidBySubscription ? "paid" : "unpaid", + featureGateEnabled: env.polar.featureGateEnabled, + subscriptionId: normalizedSubscription?.id ?? null, + subscriptionStatus: normalizedSubscription?.status ?? null, + currentPeriodEnd: normalizedSubscription?.currentPeriodEnd ?? null, + source: paidByBenefit ? "benefit" : "subscription", + note: + note ?? + (normalizedSubscription + ? "Subscription status resolved from Polar." + : "No active billing record was found for this user."), + } + } catch (error) { + return { + status: "unavailable", + featureGateEnabled: env.polar.featureGateEnabled, + subscriptionId: null, + subscriptionStatus: null, + currentPeriodEnd: null, + source: "unavailable", + note: error instanceof Error ? 
error.message : "Billing lookup failed.", + } + } +} + +export async function setCloudWorkerSubscriptionCancellation( + input: CloudAccessInput, + cancelAtPeriodEnd: boolean, +): Promise { + if (!env.polar.featureGateEnabled) { + return null + } + + assertPaywallConfig() + + const activeSubscriptions = await listSubscriptionsByExternalCustomer(input.userId, { + activeOnly: true, + limit: 1, + }) + const active = activeSubscriptions[0] + if (!active?.id) { + return null + } + + const encodedId = encodeURIComponent(active.id) + const { response, payload, text } = await polarFetchJson(`/v1/subscriptions/${encodedId}`, { + method: "PATCH", + body: JSON.stringify({ + cancel_at_period_end: cancelAtPeriodEnd, + }), + }) + + if (!response.ok) { + throw new Error(`Polar subscription update failed (${response.status}): ${text.slice(0, 400)}`) + } + + if (payload?.id) { + return toBillingSubscription(payload) + } + + const refreshed = await getSubscriptionById(active.id) + return toBillingSubscription(refreshed) +} diff --git a/services/den-v2/src/db/drizzle.ts b/services/den-v2/src/db/drizzle.ts new file mode 100644 index 00000000..54698bf6 --- /dev/null +++ b/services/den-v2/src/db/drizzle.ts @@ -0,0 +1 @@ +export { and, asc, desc, eq, gt, isNotNull, isNull, sql } from "../../../../packages/den-db/dist/drizzle.js" diff --git a/services/den-v2/src/db/index.ts b/services/den-v2/src/db/index.ts new file mode 100644 index 00000000..a305df7e --- /dev/null +++ b/services/den-v2/src/db/index.ts @@ -0,0 +1,9 @@ +import { createDenDb, isTransientDbConnectionError } from "../../../../packages/den-db/dist/index.js" +import { env } from "../env.js" + +export const { db } = createDenDb({ + databaseUrl: env.databaseUrl, + mode: env.dbMode, + planetscale: env.planetscale, +}) +export { isTransientDbConnectionError } diff --git a/services/den-v2/src/db/schema.ts b/services/den-v2/src/db/schema.ts new file mode 100644 index 00000000..e51175cf --- /dev/null +++ 
b/services/den-v2/src/db/schema.ts @@ -0,0 +1 @@ +export * from "../../../../packages/den-db/dist/schema.js" diff --git a/services/den-v2/src/db/typeid.ts b/services/den-v2/src/db/typeid.ts new file mode 100644 index 00000000..c966ad51 --- /dev/null +++ b/services/den-v2/src/db/typeid.ts @@ -0,0 +1 @@ +export * from "../../../../packages/utils/dist/typeid.js" diff --git a/services/den-v2/src/env.ts b/services/den-v2/src/env.ts new file mode 100644 index 00000000..47aadc16 --- /dev/null +++ b/services/den-v2/src/env.ts @@ -0,0 +1,253 @@ +import { z } from "zod"; + +const schema = z.object({ + DATABASE_URL: z.string().min(1).optional(), + DATABASE_HOST: z.string().min(1).optional(), + DATABASE_USERNAME: z.string().min(1).optional(), + DATABASE_PASSWORD: z.string().optional(), + DB_MODE: z.enum(["mysql", "planetscale"]).optional(), + BETTER_AUTH_SECRET: z.string().min(32), + BETTER_AUTH_URL: z.string().min(1), + DEN_BETTER_AUTH_TRUSTED_ORIGINS: z.string().optional(), + GITHUB_CLIENT_ID: z.string().optional(), + GITHUB_CLIENT_SECRET: z.string().optional(), + GOOGLE_CLIENT_ID: z.string().optional(), + GOOGLE_CLIENT_SECRET: z.string().optional(), + PORT: z.string().optional(), + WORKER_PROXY_PORT: z.string().optional(), + OPENWORK_DEV_MODE: z.string().optional(), + CORS_ORIGINS: z.string().optional(), + PROVISIONER_MODE: z.enum(["stub", "render", "daytona"]).optional(), + WORKER_URL_TEMPLATE: z.string().optional(), + OPENWORK_DAYTONA_ENV_PATH: z.string().optional(), + RENDER_API_BASE: z.string().optional(), + RENDER_API_KEY: z.string().optional(), + RENDER_OWNER_ID: z.string().optional(), + RENDER_WORKER_REPO: z.string().optional(), + RENDER_WORKER_BRANCH: z.string().optional(), + RENDER_WORKER_ROOT_DIR: z.string().optional(), + RENDER_WORKER_PLAN: z.string().optional(), + RENDER_WORKER_REGION: z.string().optional(), + RENDER_WORKER_OPENWORK_VERSION: z.string().optional(), + RENDER_WORKER_NAME_PREFIX: z.string().optional(), + RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX: 
z.string().optional(), + RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS: z.string().optional(), + RENDER_PROVISION_TIMEOUT_MS: z.string().optional(), + RENDER_HEALTHCHECK_TIMEOUT_MS: z.string().optional(), + RENDER_POLL_INTERVAL_MS: z.string().optional(), + VERCEL_API_BASE: z.string().optional(), + VERCEL_TOKEN: z.string().optional(), + VERCEL_TEAM_ID: z.string().optional(), + VERCEL_TEAM_SLUG: z.string().optional(), + VERCEL_DNS_DOMAIN: z.string().optional(), + POLAR_FEATURE_GATE_ENABLED: z.string().optional(), + POLAR_API_BASE: z.string().optional(), + POLAR_ACCESS_TOKEN: z.string().optional(), + POLAR_PRODUCT_ID: z.string().optional(), + POLAR_BENEFIT_ID: z.string().optional(), + POLAR_SUCCESS_URL: z.string().optional(), + POLAR_RETURN_URL: z.string().optional(), + DAYTONA_API_URL: z.string().optional(), + DAYTONA_API_KEY: z.string().optional(), + DAYTONA_TARGET: z.string().optional(), + DAYTONA_SNAPSHOT: z.string().optional(), + DAYTONA_SANDBOX_IMAGE: z.string().optional(), + DAYTONA_SANDBOX_CPU: z.string().optional(), + DAYTONA_SANDBOX_MEMORY: z.string().optional(), + DAYTONA_SANDBOX_DISK: z.string().optional(), + DAYTONA_SANDBOX_PUBLIC: z.string().optional(), + DAYTONA_SANDBOX_AUTO_STOP_INTERVAL: z.string().optional(), + DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL: z.string().optional(), + DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL: z.string().optional(), + DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: z.string().optional(), + DAYTONA_WORKER_PROXY_BASE_URL: z.string().optional(), + DAYTONA_SANDBOX_NAME_PREFIX: z.string().optional(), + DAYTONA_VOLUME_NAME_PREFIX: z.string().optional(), + DAYTONA_WORKSPACE_MOUNT_PATH: z.string().optional(), + DAYTONA_DATA_MOUNT_PATH: z.string().optional(), + DAYTONA_RUNTIME_WORKSPACE_PATH: z.string().optional(), + DAYTONA_RUNTIME_DATA_PATH: z.string().optional(), + DAYTONA_SIDECAR_DIR: z.string().optional(), + DAYTONA_OPENWORK_PORT: z.string().optional(), + DAYTONA_OPENCODE_PORT: z.string().optional(), + DAYTONA_OPENWORK_VERSION: z.string().optional(), + 
DAYTONA_CREATE_TIMEOUT_SECONDS: z.string().optional(), + DAYTONA_DELETE_TIMEOUT_SECONDS: z.string().optional(), + DAYTONA_HEALTHCHECK_TIMEOUT_MS: z.string().optional(), + DAYTONA_POLL_INTERVAL_MS: z.string().optional(), +}).superRefine((value, ctx) => { + const inferredMode = value.DB_MODE ?? (value.DATABASE_URL ? "mysql" : "planetscale") + + if (inferredMode === "mysql" && !value.DATABASE_URL) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "DATABASE_URL is required when using mysql mode", + path: ["DATABASE_URL"], + }) + } + + if (inferredMode === "planetscale") { + for (const key of ["DATABASE_HOST", "DATABASE_USERNAME", "DATABASE_PASSWORD"] as const) { + if (!value[key]) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: `${key} is required when using planetscale mode`, + path: [key], + }) + } + } + } +}); + +const parsed = schema.parse(process.env); + +function optionalString(value: string | undefined): string | undefined { + const trimmed = value?.trim(); + return trimmed ? trimmed : undefined; +} + +function normalizeOrigin(origin: string): string { + const value = origin.trim(); + if (value === "*") { + return value; + } + return value.replace(/\/+$/, ""); +} + +const corsOrigins = parsed.CORS_ORIGINS?.split(",") + .map((origin) => normalizeOrigin(origin)) + .filter(Boolean); + +const betterAuthTrustedOrigins = + parsed.DEN_BETTER_AUTH_TRUSTED_ORIGINS?.split(",") + .map((origin) => normalizeOrigin(origin)) + .filter(Boolean) ?? + corsOrigins ?? + []; + +const polarFeatureGateEnabled = + (parsed.POLAR_FEATURE_GATE_ENABLED ?? "false").toLowerCase() === "true"; + +const daytonaSandboxPublic = + (parsed.DAYTONA_SANDBOX_PUBLIC ?? "false").toLowerCase() === "true"; + +const planetscaleCredentials = + parsed.DATABASE_HOST && parsed.DATABASE_USERNAME && parsed.DATABASE_PASSWORD !== undefined + ? 
{ + host: parsed.DATABASE_HOST, + username: parsed.DATABASE_USERNAME, + password: parsed.DATABASE_PASSWORD, + } + : null + +export const env = { + databaseUrl: parsed.DATABASE_URL, + dbMode: parsed.DB_MODE ?? (parsed.DATABASE_URL ? "mysql" : "planetscale"), + planetscale: planetscaleCredentials, + betterAuthSecret: parsed.BETTER_AUTH_SECRET, + betterAuthUrl: parsed.BETTER_AUTH_URL, + betterAuthTrustedOrigins, + devMode: (parsed.OPENWORK_DEV_MODE ?? "0").trim() === "1", + github: { + clientId: parsed.GITHUB_CLIENT_ID?.trim() || undefined, + clientSecret: parsed.GITHUB_CLIENT_SECRET?.trim() || undefined, + }, + google: { + clientId: parsed.GOOGLE_CLIENT_ID?.trim() || undefined, + clientSecret: parsed.GOOGLE_CLIENT_SECRET?.trim() || undefined, + }, + port: Number(parsed.PORT ?? "8788"), + workerProxyPort: Number(parsed.WORKER_PROXY_PORT ?? "8789"), + corsOrigins: corsOrigins ?? [], + provisionerMode: parsed.PROVISIONER_MODE ?? "daytona", + workerUrlTemplate: parsed.WORKER_URL_TEMPLATE, + render: { + apiBase: parsed.RENDER_API_BASE ?? "https://api.render.com/v1", + apiKey: parsed.RENDER_API_KEY, + ownerId: parsed.RENDER_OWNER_ID, + workerRepo: + parsed.RENDER_WORKER_REPO ?? "https://github.com/different-ai/openwork", + workerBranch: parsed.RENDER_WORKER_BRANCH ?? "dev", + workerRootDir: + parsed.RENDER_WORKER_ROOT_DIR ?? "services/den-worker-runtime", + workerPlan: parsed.RENDER_WORKER_PLAN ?? "standard", + workerRegion: parsed.RENDER_WORKER_REGION ?? "oregon", + workerOpenworkVersion: parsed.RENDER_WORKER_OPENWORK_VERSION, + workerNamePrefix: parsed.RENDER_WORKER_NAME_PREFIX ?? "den-worker", + workerPublicDomainSuffix: parsed.RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX, + customDomainReadyTimeoutMs: Number( + parsed.RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS ?? "240000", + ), + provisionTimeoutMs: Number(parsed.RENDER_PROVISION_TIMEOUT_MS ?? "900000"), + healthcheckTimeoutMs: Number( + parsed.RENDER_HEALTHCHECK_TIMEOUT_MS ?? 
"180000", + ), + pollIntervalMs: Number(parsed.RENDER_POLL_INTERVAL_MS ?? "5000"), + }, + vercel: { + apiBase: parsed.VERCEL_API_BASE ?? "https://api.vercel.com", + token: parsed.VERCEL_TOKEN, + teamId: parsed.VERCEL_TEAM_ID, + teamSlug: parsed.VERCEL_TEAM_SLUG, + dnsDomain: parsed.VERCEL_DNS_DOMAIN, + }, + polar: { + featureGateEnabled: polarFeatureGateEnabled, + apiBase: parsed.POLAR_API_BASE ?? "https://api.polar.sh", + accessToken: parsed.POLAR_ACCESS_TOKEN, + productId: parsed.POLAR_PRODUCT_ID, + benefitId: parsed.POLAR_BENEFIT_ID, + successUrl: parsed.POLAR_SUCCESS_URL, + returnUrl: parsed.POLAR_RETURN_URL, + }, + daytona: { + envPath: optionalString(parsed.OPENWORK_DAYTONA_ENV_PATH), + apiUrl: optionalString(parsed.DAYTONA_API_URL) ?? "https://app.daytona.io/api", + apiKey: optionalString(parsed.DAYTONA_API_KEY), + target: optionalString(parsed.DAYTONA_TARGET), + snapshot: optionalString(parsed.DAYTONA_SNAPSHOT), + image: optionalString(parsed.DAYTONA_SANDBOX_IMAGE) ?? "node:20-bookworm", + resources: { + cpu: Number(parsed.DAYTONA_SANDBOX_CPU ?? "2"), + memory: Number(parsed.DAYTONA_SANDBOX_MEMORY ?? "4"), + disk: Number(parsed.DAYTONA_SANDBOX_DISK ?? "8"), + }, + public: daytonaSandboxPublic, + autoStopInterval: Number(parsed.DAYTONA_SANDBOX_AUTO_STOP_INTERVAL ?? "0"), + autoArchiveInterval: Number( + parsed.DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL ?? "10080", + ), + autoDeleteInterval: Number( + parsed.DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL ?? "-1", + ), + signedPreviewExpiresSeconds: Number( + parsed.DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS ?? "86400", + ), + workerProxyBaseUrl: + optionalString(parsed.DAYTONA_WORKER_PROXY_BASE_URL) ?? "https://workers.den.openworklabs", + sandboxNamePrefix: + optionalString(parsed.DAYTONA_SANDBOX_NAME_PREFIX) ?? "den-daytona-worker", + volumeNamePrefix: + optionalString(parsed.DAYTONA_VOLUME_NAME_PREFIX) ?? "den-daytona-worker", + workspaceMountPath: + optionalString(parsed.DAYTONA_WORKSPACE_MOUNT_PATH) ?? 
"/workspace", + dataMountPath: + optionalString(parsed.DAYTONA_DATA_MOUNT_PATH) ?? "/persist/openwork", + runtimeWorkspacePath: + optionalString(parsed.DAYTONA_RUNTIME_WORKSPACE_PATH) ?? + "/tmp/openwork-workspace", + runtimeDataPath: + optionalString(parsed.DAYTONA_RUNTIME_DATA_PATH) ?? "/tmp/openwork-data", + sidecarDir: + optionalString(parsed.DAYTONA_SIDECAR_DIR) ?? "/tmp/openwork-sidecars", + openworkPort: Number(parsed.DAYTONA_OPENWORK_PORT ?? "8787"), + opencodePort: Number(parsed.DAYTONA_OPENCODE_PORT ?? "4096"), + openworkVersion: optionalString(parsed.DAYTONA_OPENWORK_VERSION), + createTimeoutSeconds: Number(parsed.DAYTONA_CREATE_TIMEOUT_SECONDS ?? "300"), + deleteTimeoutSeconds: Number(parsed.DAYTONA_DELETE_TIMEOUT_SECONDS ?? "120"), + healthcheckTimeoutMs: Number( + parsed.DAYTONA_HEALTHCHECK_TIMEOUT_MS ?? "300000", + ), + pollIntervalMs: Number(parsed.DAYTONA_POLL_INTERVAL_MS ?? "5000"), + }, +}; diff --git a/services/den-v2/src/http/admin.ts b/services/den-v2/src/http/admin.ts new file mode 100644 index 00000000..cfbdcbbb --- /dev/null +++ b/services/den-v2/src/http/admin.ts @@ -0,0 +1,335 @@ +import express from "express" +import { fromNodeHeaders } from "better-auth/node" +import { asc, desc, eq, isNotNull, sql } from "../db/drizzle.js" +import { ensureAdminAllowlistSeeded } from "../admin-allowlist.js" +import { auth } from "../auth.js" +import { getCloudWorkerAdminBillingStatus } from "../billing/polar.js" +import { db } from "../db/index.js" +import { AdminAllowlistTable, AuthAccountTable, AuthSessionTable, AuthUserTable, WorkerTable } from "../db/schema.js" +import { normalizeDenTypeId } from "../db/typeid.js" +import { asyncRoute } from "./errors.js" + +type UserId = typeof AuthUserTable.$inferSelect.id + +function normalizeEmail(value: string | null | undefined) { + return value?.trim().toLowerCase() ?? 
"" +} + +function toNumber(value: unknown) { + if (typeof value === "number" && Number.isFinite(value)) { + return value + } + + const parsed = Number(value) + return Number.isFinite(parsed) ? parsed : 0 +} + +function isWithinDays(value: Date | string | null, days: number) { + if (!value) { + return false + } + + const date = value instanceof Date ? value : new Date(value) + if (Number.isNaN(date.getTime())) { + return false + } + + const windowMs = days * 24 * 60 * 60 * 1000 + return Date.now() - date.getTime() <= windowMs +} + +function normalizeProvider(providerId: string) { + const normalized = providerId.trim().toLowerCase() + if (!normalized) { + return "unknown" + } + + if (normalized === "credential" || normalized === "email-password") { + return "email" + } + + return normalized +} + +function parseBooleanQuery(value: unknown): boolean { + if (Array.isArray(value)) { + return value.some((entry) => parseBooleanQuery(entry)) + } + + if (typeof value !== "string") { + return false + } + + const normalized = value.trim().toLowerCase() + return normalized === "1" || normalized === "true" || normalized === "yes" +} + +async function mapWithConcurrency(items: T[], limit: number, mapper: (item: T) => Promise) { + if (items.length === 0) { + return [] as R[] + } + + const results = new Array(items.length) + let nextIndex = 0 + + async function runWorker() { + while (nextIndex < items.length) { + const currentIndex = nextIndex + nextIndex += 1 + results[currentIndex] = await mapper(items[currentIndex]) + } + } + + const workerCount = Math.max(1, Math.min(limit, items.length)) + await Promise.all(Array.from({ length: workerCount }, () => runWorker())) + return results +} + +async function requireAdminSession(req: express.Request, res: express.Response) { + const session = await auth.api.getSession({ + headers: fromNodeHeaders(req.headers), + }) + + if (!session?.user?.id) { + res.status(401).json({ error: "unauthorized" }) + return null + } + + const userId = 
normalizeDenTypeId("user", session.user.id) + + const email = normalizeEmail(session.user.email) + if (!email) { + res.status(403).json({ error: "admin_email_required" }) + return null + } + + await ensureAdminAllowlistSeeded() + + const allowed = await db + .select({ id: AdminAllowlistTable.id }) + .from(AdminAllowlistTable) + .where(eq(AdminAllowlistTable.email, email)) + .limit(1) + + if (allowed.length === 0) { + res.status(403).json({ error: "forbidden" }) + return null + } + + return { + ...session, + user: { + ...session.user, + id: userId, + }, + } +} + +export const adminRouter = express.Router() + +adminRouter.get("/overview", asyncRoute(async (req, res) => { + const session = await requireAdminSession(req, res) + if (!session) return + const includeBilling = parseBooleanQuery(req.query.includeBilling) + + const [admins, users, workerStatsRows, sessionStatsRows, accountRows] = await Promise.all([ + db + .select({ + email: AdminAllowlistTable.email, + note: AdminAllowlistTable.note, + createdAt: AdminAllowlistTable.created_at, + }) + .from(AdminAllowlistTable) + .orderBy(asc(AdminAllowlistTable.email)), + db.select().from(AuthUserTable).orderBy(desc(AuthUserTable.createdAt)), + db + .select({ + userId: WorkerTable.created_by_user_id, + workerCount: sql`count(*)`, + cloudWorkerCount: sql`sum(case when ${WorkerTable.destination} = 'cloud' then 1 else 0 end)`, + localWorkerCount: sql`sum(case when ${WorkerTable.destination} = 'local' then 1 else 0 end)`, + latestWorkerCreatedAt: sql`max(${WorkerTable.created_at})`, + }) + .from(WorkerTable) + .where(isNotNull(WorkerTable.created_by_user_id)) + .groupBy(WorkerTable.created_by_user_id), + db + .select({ + userId: AuthSessionTable.userId, + sessionCount: sql`count(*)`, + lastSeenAt: sql`max(${AuthSessionTable.updatedAt})`, + }) + .from(AuthSessionTable) + .groupBy(AuthSessionTable.userId), + db + .select({ + userId: AuthAccountTable.userId, + providerId: AuthAccountTable.providerId, + }) + 
.from(AuthAccountTable), + ]) + + const workerStatsByUser = new Map() + + for (const row of workerStatsRows) { + if (!row.userId) { + continue + } + + workerStatsByUser.set(row.userId, { + workerCount: toNumber(row.workerCount), + cloudWorkerCount: toNumber(row.cloudWorkerCount), + localWorkerCount: toNumber(row.localWorkerCount), + latestWorkerCreatedAt: row.latestWorkerCreatedAt, + }) + } + + const sessionStatsByUser = new Map() + + for (const row of sessionStatsRows) { + sessionStatsByUser.set(row.userId, { + sessionCount: toNumber(row.sessionCount), + lastSeenAt: row.lastSeenAt, + }) + } + + const providersByUser = new Map<UserId, Set<string>>() + for (const row of accountRows) { + const providerId = normalizeProvider(row.providerId) + const existing = providersByUser.get(row.userId) ?? new Set<string>() + existing.add(providerId) + providersByUser.set(row.userId, existing) + } + + const defaultBilling = { + status: "unavailable" as const, + featureGateEnabled: false, + subscriptionId: null, + subscriptionStatus: null, + currentPeriodEnd: null, + source: "unavailable" as const, + note: "Billing lookup unavailable.", + } + + const billingRows = includeBilling + ? await mapWithConcurrency(users, 4, async (user) => ({ + userId: user.id, + billing: await getCloudWorkerAdminBillingStatus({ + userId: user.id, + email: user.email, + name: user.name ?? user.email, + }), + })) + : [] + + const billingByUser = new Map(billingRows.map((row) => [row.userId, row.billing])) + + const userRows = users.map((user) => { + const workerStats = workerStatsByUser.get(user.id) ?? { + workerCount: 0, + cloudWorkerCount: 0, + localWorkerCount: 0, + latestWorkerCreatedAt: null, + } + const sessionStats = sessionStatsByUser.get(user.id) ?? { + sessionCount: 0, + lastSeenAt: null, + } + const authProviders = Array.from(providersByUser.get(user.id) ??
[]).sort() + + return { + id: user.id, + name: user.name, + email: user.email, + emailVerified: user.emailVerified, + createdAt: user.createdAt, + updatedAt: user.updatedAt, + lastSeenAt: sessionStats.lastSeenAt, + sessionCount: sessionStats.sessionCount, + authProviders, + workerCount: workerStats.workerCount, + cloudWorkerCount: workerStats.cloudWorkerCount, + localWorkerCount: workerStats.localWorkerCount, + latestWorkerCreatedAt: workerStats.latestWorkerCreatedAt, + billing: includeBilling ? billingByUser.get(user.id) ?? defaultBilling : null, + } + }) + + const summary = userRows.reduce( + (accumulator, user) => { + accumulator.totalUsers += 1 + accumulator.totalWorkers += user.workerCount + accumulator.cloudWorkers += user.cloudWorkerCount + accumulator.localWorkers += user.localWorkerCount + + if (user.emailVerified) { + accumulator.verifiedUsers += 1 + } + + if (user.workerCount > 0) { + accumulator.usersWithWorkers += 1 + } + + if (includeBilling && user.billing) { + if (user.billing.status === "paid") { + accumulator.paidUsers += 1 + } else if (user.billing.status === "unpaid") { + accumulator.unpaidUsers += 1 + } else { + accumulator.billingUnavailableUsers += 1 + } + } + + if (isWithinDays(user.createdAt, 7)) { + accumulator.recentUsers7d += 1 + } + + if (isWithinDays(user.createdAt, 30)) { + accumulator.recentUsers30d += 1 + } + + return accumulator + }, + { + totalUsers: 0, + verifiedUsers: 0, + recentUsers7d: 0, + recentUsers30d: 0, + totalWorkers: 0, + cloudWorkers: 0, + localWorkers: 0, + usersWithWorkers: 0, + paidUsers: 0, + unpaidUsers: 0, + billingUnavailableUsers: 0, + }, + ) + + res.json({ + viewer: { + id: session.user.id, + email: session.user.email, + name: session.user.name, + }, + admins, + summary: { + ...summary, + adminCount: admins.length, + billingLoaded: includeBilling, + paidUsers: includeBilling ? summary.paidUsers : null, + unpaidUsers: includeBilling ? summary.unpaidUsers : null, + billingUnavailableUsers: includeBilling ? 
summary.billingUnavailableUsers : null, + usersWithoutWorkers: summary.totalUsers - summary.usersWithWorkers, + }, + users: userRows, + generatedAt: new Date().toISOString(), + }) +})) diff --git a/services/den-v2/src/http/errors.ts b/services/den-v2/src/http/errors.ts new file mode 100644 index 00000000..c4dd5655 --- /dev/null +++ b/services/den-v2/src/http/errors.ts @@ -0,0 +1,61 @@ +import type { ErrorRequestHandler, NextFunction, Request, RequestHandler, Response } from "express" + +const TRANSIENT_DB_ERROR_CODES = new Set([ + "ECONNRESET", + "EPIPE", + "ETIMEDOUT", + "PROTOCOL_CONNECTION_LOST", + "PROTOCOL_ENQUEUE_AFTER_FATAL_ERROR", +]) + +function isRecord(value: unknown): value is Record<string, unknown> { + return typeof value === "object" && value !== null +} + +function getErrorCode(error: unknown): string | null { + if (!isRecord(error)) { + return null + } + + if (typeof error.code === "string") { + return error.code + } + + return getErrorCode(error.cause) +} + +export function isTransientDbConnectionError(error: unknown): boolean { + const code = getErrorCode(error) + if (!code) { + return false + } + return TRANSIENT_DB_ERROR_CODES.has(code) +} + +export function asyncRoute( + handler: (req: Request, res: Response, next: NextFunction) => Promise<void>, +): RequestHandler { + return (req, res, next) => { + void handler(req, res, next).catch(next) + } +} + +export const errorMiddleware: ErrorRequestHandler = (error, _req, res, _next) => { + if (res.headersSent) { + return + } + + if (isTransientDbConnectionError(error)) { + const message = error instanceof Error ? error.message : "transient database connection failure" + console.warn(`[http] transient db connection error: ${message}`) + res.status(503).json({ + error: "service_unavailable", + message: "Database connection was interrupted. Please retry.", + }) + return + } + + const message = error instanceof Error ? error.stack ??
error.message : String(error) + console.error(`[http] unhandled error: ${message}`) + res.status(500).json({ error: "internal_error" }) +} diff --git a/services/den-v2/src/http/session.ts b/services/den-v2/src/http/session.ts new file mode 100644 index 00000000..587b83bc --- /dev/null +++ b/services/den-v2/src/http/session.ts @@ -0,0 +1,88 @@ +import type express from "express" +import { fromNodeHeaders } from "better-auth/node" +import { and, eq, gt } from "../db/drizzle.js" +import { auth } from "../auth.js" +import { db } from "../db/index.js" +import { AuthSessionTable, AuthUserTable } from "../db/schema.js" +import { normalizeDenTypeId } from "../db/typeid.js" + +type AuthSessionLike = Awaited<ReturnType<typeof auth.api.getSession>> + +function readBearerToken(req: express.Request): string | null { + const header = typeof req.headers.authorization === "string" ? req.headers.authorization.trim() : "" + if (!header) { + return null + } + + const match = header.match(/^Bearer\s+(.+)$/i) + if (!match) { + return null + } + + const token = match[1]?.trim() ??
"" + return token || null +} + +async function getSessionFromBearerToken(token: string): Promise { + const rows = await db + .select({ + session: { + id: AuthSessionTable.id, + token: AuthSessionTable.token, + userId: AuthSessionTable.userId, + expiresAt: AuthSessionTable.expiresAt, + createdAt: AuthSessionTable.createdAt, + updatedAt: AuthSessionTable.updatedAt, + ipAddress: AuthSessionTable.ipAddress, + userAgent: AuthSessionTable.userAgent, + }, + user: { + id: AuthUserTable.id, + name: AuthUserTable.name, + email: AuthUserTable.email, + emailVerified: AuthUserTable.emailVerified, + image: AuthUserTable.image, + createdAt: AuthUserTable.createdAt, + updatedAt: AuthUserTable.updatedAt, + }, + }) + .from(AuthSessionTable) + .innerJoin(AuthUserTable, eq(AuthSessionTable.userId, AuthUserTable.id)) + .where(and(eq(AuthSessionTable.token, token), gt(AuthSessionTable.expiresAt, new Date()))) + .limit(1) + + const row = rows[0] + if (!row) { + return null + } + + return { + session: row.session, + user: { + ...row.user, + id: normalizeDenTypeId("user", row.user.id), + }, + } +} + +export async function getRequestSession(req: express.Request): Promise { + const cookieSession = await auth.api.getSession({ + headers: fromNodeHeaders(req.headers), + }) + if (cookieSession?.user?.id) { + return { + ...cookieSession, + user: { + ...cookieSession.user, + id: normalizeDenTypeId("user", cookieSession.user.id), + }, + } + } + + const bearerToken = readBearerToken(req) + if (!bearerToken) { + return null + } + + return getSessionFromBearerToken(bearerToken) +} diff --git a/services/den-v2/src/http/workers.ts b/services/den-v2/src/http/workers.ts new file mode 100644 index 00000000..017ec8f9 --- /dev/null +++ b/services/den-v2/src/http/workers.ts @@ -0,0 +1,834 @@ +import { randomBytes } from "crypto" +import express from "express" +import { fromNodeHeaders } from "better-auth/node" +import { and, asc, desc, eq, isNull } from "../db/drizzle.js" +import { z } from "zod" +import { 
auth } from "../auth.js" +// Polar billing is temporarily disabled for the one-worker experiment in hosted mode. +// Keep the old billing integration nearby so it can be restored quickly. +// import { getCloudWorkerBillingStatus, setCloudWorkerSubscriptionCancellation } from "../billing/polar.js" +import { db } from "../db/index.js" +import { AuditEventTable, AuthUserTable, DaytonaSandboxTable, OrgMembershipTable, WorkerBundleTable, WorkerInstanceTable, WorkerTable, WorkerTokenTable } from "../db/schema.js" +import { env } from "../env.js" +import { asyncRoute, isTransientDbConnectionError } from "./errors.js" +import { ensureDefaultOrg } from "../orgs.js" +import { deprovisionWorker, provisionWorker } from "../workers/provisioner.js" +import { customDomainForWorker } from "../workers/vanity-domain.js" +import { createDenTypeId, normalizeDenTypeId } from "../db/typeid.js" + +const createSchema = z.object({ + name: z.string().min(1), + description: z.string().optional(), + destination: z.enum(["local", "cloud"]), + workspacePath: z.string().optional(), + sandboxBackend: z.string().optional(), + imageVersion: z.string().optional(), +}) + +const listSchema = z.object({ + limit: z.coerce.number().int().min(1).max(50).default(20), +}) + +const billingSubscriptionSchema = z.object({ + cancelAtPeriodEnd: z.boolean().default(true), +}) + +const token = () => randomBytes(32).toString("hex") + +type WorkerRow = typeof WorkerTable.$inferSelect +type WorkerInstanceRow = typeof WorkerInstanceTable.$inferSelect +type WorkerId = WorkerRow["id"] +type OrgId = typeof OrgMembershipTable.$inferSelect.org_id +type UserId = typeof AuthUserTable.$inferSelect.id + +function parseWorkerIdParam(value: string): WorkerId { + return normalizeDenTypeId("worker", value) +} + +function parseUserId(value: string): UserId { + return normalizeDenTypeId("user", value) +} + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null +} + +function 
normalizeUrl(value: string): string {
  // Trim and drop trailing slashes so URL pieces can be joined safely.
  return value.trim().replace(/\/+$/, "")
}

/**
 * Picks a workspace out of a worker's /workspaces payload and builds the
 * deep-link URL for it. Prefers the advertised activeId, otherwise the
 * first item carrying a non-empty string id. Returns null when the
 * payload is not shaped as expected or no base URL is present.
 */
function parseWorkspaceSelection(payload: unknown): { workspaceId: string; openworkUrl: string } | null {
  if (!isRecord(payload) || !Array.isArray(payload.items)) {
    return null
  }

  let workspaceId = typeof payload.activeId === "string" ? payload.activeId : null

  if (!workspaceId) {
    for (const candidate of payload.items) {
      if (isRecord(candidate) && typeof candidate.id === "string" && candidate.id.trim()) {
        workspaceId = candidate.id
        break
      }
    }
  }

  const baseUrl = typeof payload.baseUrl === "string" ? normalizeUrl(payload.baseUrl) : ""
  if (!workspaceId || !baseUrl) {
    return null
  }

  return {
    workspaceId,
    openworkUrl: `${baseUrl}/w/${encodeURIComponent(workspaceId)}`,
  }
}

/**
 * Asks a worker for its workspace list (authenticated with the client
 * token) and turns the response into a connect URL. Any network, HTTP,
 * or parse failure resolves to null — "no connect URL yet".
 */
async function resolveConnectUrlFromWorker(instanceUrl: string, clientToken: string) {
  const baseUrl = normalizeUrl(instanceUrl)
  const bearer = clientToken.trim()
  if (!baseUrl || !bearer) {
    return null
  }

  try {
    const response = await fetch(`${baseUrl}/workspaces`, {
      method: "GET",
      headers: {
        Accept: "application/json",
        Authorization: `Bearer ${bearer}`,
      },
    })

    if (!response.ok) {
      return null
    }

    const payload = (await response.json()) as unknown
    return parseWorkspaceSelection({
      ...(isRecord(payload) ? payload : {}),
      baseUrl,
    })
  } catch {
    return null
  }
}

/**
 * Candidate base URLs for reaching a worker: the vanity domain first
 * (when a public domain suffix is configured), then the provisioned
 * instance URL. Duplicates are collapsed.
 */
function getConnectUrlCandidates(workerId: WorkerId, instanceUrl: string | null) {
  const candidates: string[] = []

  const vanityHostname = customDomainForWorker(workerId, env.render.workerPublicDomainSuffix)
  if (vanityHostname) {
    candidates.push(`https://${vanityHostname}`)
  }

  if (instanceUrl) {
    const normalized = normalizeUrl(instanceUrl)
    if (normalized && !candidates.includes(normalized)) {
      candidates.push(normalized)
    }
  }

  return candidates
}

// True when a query-string value (or any element of a repeated value)
// spells a truthy flag: "1", "true", or "yes", case-insensitively.
function queryIncludesFlag(value: unknown): boolean {
  if (Array.isArray(value)) {
    return value.some((entry) => queryIncludesFlag(entry))
  }

  if (typeof value !== "string") {
    return false
  }

  const normalized = value.trim().toLowerCase()
  return normalized === "1" || normalized === "true" || normalized === "yes"
}

// First candidate URL that yields a workspace selection wins.
async function resolveConnectUrlFromCandidates(workerId: WorkerId, instanceUrl: string | null, clientToken: string) {
  for (const candidate of getConnectUrlCandidates(workerId, instanceUrl)) {
    const resolved = await resolveConnectUrlFromWorker(candidate, clientToken)
    if (resolved) {
      return resolved
    }
  }
  return null
}

/**
 * Loads everything needed to call into a worker's runtime: its latest
 * instance row, the unrevoked host-scope token, and the candidate base
 * URLs. Returns null until both an instance URL and a host token exist.
 */
async function getWorkerRuntimeAccess(workerId: WorkerId) {
  const instance = await getLatestWorkerInstance(workerId)
  const tokenRows = await db
    .select()
    .from(WorkerTokenTable)
    .where(and(eq(WorkerTokenTable.worker_id, workerId), isNull(WorkerTokenTable.revoked_at)))
    .orderBy(asc(WorkerTokenTable.created_at))

  const hostToken = tokenRows.find((entry) => entry.scope === "host")?.token ?? null
  if (!instance?.url || !hostToken) {
    return null
  }

  return {
    instance,
    hostToken,
    candidates: getConnectUrlCandidates(workerId, instance.url),
  }
}

/**
 * Proxies a JSON request to the worker runtime, trying each candidate
 * base URL in order and returning the first 2xx response. When every
 * candidate fails, the last status/payload seen is surfaced so callers
 * can forward it to the client.
 */
async function fetchWorkerRuntimeJson(input: {
  workerId: WorkerId
  path: string
  method?: "GET" | "POST"
  body?: unknown
}) {
  const access = await getWorkerRuntimeAccess(input.workerId)
  if (!access) {
    return {
      ok: false as const,
      status: 409,
      payload: {
        error: "worker_runtime_unavailable",
        message: "Worker runtime access is not ready yet. Wait for provisioning to finish and try again.",
      },
    }
  }

  let lastPayload: unknown = null
  let lastStatus = 502

  for (const base of access.candidates) {
    try {
      const response = await fetch(`${normalizeUrl(base)}${input.path}`, {
        method: input.method ?? "GET",
        headers: {
          Accept: "application/json",
          "Content-Type": "application/json",
          "X-OpenWork-Host-Token": access.hostToken,
        },
        body: input.body === undefined ? undefined : JSON.stringify(input.body),
      })

      const raw = await response.text()
      lastStatus = response.status
      try {
        lastPayload = raw ? JSON.parse(raw) : null
      } catch {
        lastPayload = raw ? { message: raw } : null
      }

      if (response.ok) {
        return { ok: true as const, status: response.status, payload: lastPayload }
      }
    } catch (error) {
      lastPayload = { message: error instanceof Error ? error.message : "worker_request_failed" }
    }
  }

  return { ok: false as const, status: lastStatus, payload: lastPayload }
}

/**
 * Cookie-session guard for worker routes; writes the 401 itself and
 * returns null when the request is unauthenticated.
 * NOTE(review): unlike getRequestSession in session.ts this accepts
 * cookie sessions only (no bearer tokens) — confirm that is intentional.
 */
async function requireSession(req: express.Request, res: express.Response) {
  const session = await auth.api.getSession({
    headers: fromNodeHeaders(req.headers),
  })
  if (!session?.user?.id) {
    res.status(401).json({ error: "unauthorized" })
    return null
  }
  return {
    ...session,
    user: {
      ...session.user,
      id: parseUserId(session.user.id),
    },
  }
}

// First org membership for the user, or null when they have none yet.
async function getOrgId(userId: UserId): Promise<OrgId | null> {
  const membership = await db
    .select()
    .from(OrgMembershipTable)
    .where(eq(OrgMembershipTable.user_id, userId))
    .limit(1)

  return membership.length > 0 ? membership[0].org_id : null
}

// Counts (capped at 2) the cloud workers this user created; the cap is
// enough to answer "zero or at least one?" cheaply.
async function countUserCloudWorkers(userId: UserId) {
  const rows = await db
    .select({ id: WorkerTable.id })
    .from(WorkerTable)
    .where(and(eq(WorkerTable.created_by_user_id, userId), eq(WorkerTable.destination, "cloud")))
    .limit(2)

  return rows.length
}

// Static "billing off" summary returned while Polar is disabled for the
// one-worker experiment.
function getExperimentBillingSummary() {
  return {
    featureGateEnabled: false,
    hasActivePlan: false,
    checkoutRequired: false,
    checkoutUrl: null,
    portalUrl: null,
    price: null,
    subscription: null,
    invoices: [],
    productId: env.polar.productId,
    benefitId: env.polar.benefitId,
  }
}

// Newest instance row for a worker; retries once on a transient db
// connection error (tail of this function continues below).
async function getLatestWorkerInstance(workerId: WorkerId) {
  for (let attempt = 0; attempt < 2; attempt += 1) {
    try {
      const rows = await db
        .select()
        .from(WorkerInstanceTable)
        .where(eq(WorkerInstanceTable.worker_id, workerId))
        .orderBy(desc(WorkerInstanceTable.created_at))
        .limit(1)

      return rows[0] ??
null
    } catch (error) {
      if (!isTransientDbConnectionError(error)) {
        throw error
      }

      if (attempt === 0) {
        console.warn(`[workers] transient db error reading instance for ${workerId}; retrying`)
        continue
      }

      // Second transient failure: degrade gracefully to "no instance".
      console.warn(`[workers] transient db error reading instance for ${workerId}; returning null instance`)
      return null
    }
  }

  return null
}

// API shape for a worker instance row (null-safe for workers that have
// never been provisioned).
function toInstanceResponse(instance: WorkerInstanceRow | null) {
  if (!instance) {
    return null
  }

  return {
    provider: instance.provider,
    region: instance.region,
    url: instance.url,
    status: instance.status,
    createdAt: instance.created_at,
    updatedAt: instance.updated_at,
  }
}

// API shape for a worker row; isMine lets the UI distinguish the
// caller's own workers from the rest of the org.
function toWorkerResponse(row: WorkerRow, userId: string) {
  return {
    id: row.id,
    orgId: row.org_id,
    createdByUserId: row.created_by_user_id,
    isMine: row.created_by_user_id === userId,
    name: row.name,
    description: row.description,
    destination: row.destination,
    status: row.status,
    imageVersion: row.image_version,
    workspacePath: row.workspace_path,
    sandboxBackend: row.sandbox_backend,
    createdAt: row.created_at,
    updatedAt: row.updated_at,
  }
}

/**
 * Background continuation of a cloud worker launch: provisions the
 * runtime, then records the resulting status and instance row. On
 * failure the worker is marked "failed" and the error is logged —
 * nothing is thrown, since the HTTP response was already sent.
 */
async function continueCloudProvisioning(input: { workerId: WorkerId; name: string; hostToken: string; clientToken: string }) {
  try {
    const provisioned = await provisionWorker({
      workerId: input.workerId,
      name: input.name,
      hostToken: input.hostToken,
      clientToken: input.clientToken,
    })

    await db
      .update(WorkerTable)
      .set({ status: provisioned.status })
      .where(eq(WorkerTable.id, input.workerId))

    await db.insert(WorkerInstanceTable).values({
      id: createDenTypeId("workerInstance"),
      worker_id: input.workerId,
      provider: provisioned.provider,
      region: provisioned.region,
      url: provisioned.url,
      status: provisioned.status,
    })
  } catch (error) {
    await db
      .update(WorkerTable)
      .set({ status: "failed" })
      .where(eq(WorkerTable.id, input.workerId))

    const message = error instanceof Error ? error.message : "provisioning_failed"
    console.error(`[workers] provisioning failed for ${input.workerId}: ${message}`)
  }
}

export const workersRouter = express.Router()

// GET /v1/workers — list the caller's org workers, newest first.
workersRouter.get("/", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.json({ workers: [] })
    return
  }

  const parsed = listSchema.safeParse({ limit: req.query.limit })
  if (!parsed.success) {
    res.status(400).json({ error: "invalid_request", details: parsed.error.flatten() })
    return
  }

  const rows = await db
    .select()
    .from(WorkerTable)
    .where(eq(WorkerTable.org_id, orgId))
    .orderBy(desc(WorkerTable.created_at))
    .limit(parsed.data.limit)

  // One instance lookup per worker; bounded by the <=50 page size.
  const workers = await Promise.all(
    rows.map(async (row) => {
      const instance = await getLatestWorkerInstance(row.id)
      return {
        ...toWorkerResponse(row, session.user.id),
        instance: toInstanceResponse(instance),
      }
    }),
  )

  res.json({ workers })
}))

// POST /v1/workers — create a worker. Local workers are "healthy"
// immediately; cloud workers return 202 and provision asynchronously.
workersRouter.post("/", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  const parsed = createSchema.safeParse(req.body)
  if (!parsed.success) {
    res.status(400).json({ error: "invalid_request", details: parsed.error.flatten() })
    return
  }

  if (parsed.data.destination === "local" && !parsed.data.workspacePath) {
    res.status(400).json({ error: "workspace_path_required" })
    return
  }

  if (parsed.data.destination === "cloud" && !env.devMode && (await countUserCloudWorkers(session.user.id)) > 0) {
    // Polar is temporarily disabled for this experiment.
    // Keep the previous paywall block nearby so it can be restored quickly.
    //
    // const access = await requireCloudWorkerAccess({
    //   userId: session.user.id,
    //   email: session.user.email ?? `${session.user.id}@placeholder.local`,
    //   name: session.user.name ?? session.user.email ?? "OpenWork User",
    // })
    // if (!access.allowed) {
    //   res.status(402).json({
    //     error: "payment_required",
    //     message: "Additional cloud workers require an active Den Cloud plan.",
    //     polar: {
    //       checkoutUrl: access.checkoutUrl,
    //       productId: env.polar.productId,
    //       benefitId: env.polar.benefitId,
    //     },
    //   })
    //   return
    // }

    res.status(409).json({
      error: "worker_limit_reached",
      message: "You can only create one cloud worker during this experiment.",
    })
    return
  }

  const orgId =
    (await getOrgId(session.user.id)) ?? (await ensureDefaultOrg(session.user.id, session.user.name ?? session.user.email ?? "Personal"))
  const workerId = createDenTypeId("worker")
  // Never reassigned, so const (was let).
  const workerStatus: WorkerRow["status"] = parsed.data.destination === "cloud" ? "provisioning" : "healthy"

  await db.insert(WorkerTable).values({
    id: workerId,
    org_id: orgId,
    created_by_user_id: session.user.id,
    name: parsed.data.name,
    description: parsed.data.description,
    destination: parsed.data.destination,
    status: workerStatus,
    image_version: parsed.data.imageVersion,
    workspace_path: parsed.data.workspacePath,
    sandbox_backend: parsed.data.sandboxBackend,
  })

  // Mint the host/client token pair; both are returned exactly once in
  // this response.
  const hostToken = token()
  const clientToken = token()
  await db.insert(WorkerTokenTable).values([
    {
      id: createDenTypeId("workerToken"),
      worker_id: workerId,
      scope: "host",
      token: hostToken,
    },
    {
      id: createDenTypeId("workerToken"),
      worker_id: workerId,
      scope: "client",
      token: clientToken,
    },
  ])

  if (parsed.data.destination === "cloud") {
    // Fire-and-forget: continueCloudProvisioning records success or
    // failure on the worker row itself and never throws.
    void continueCloudProvisioning({
      workerId,
      name: parsed.data.name,
      hostToken,
      clientToken,
    })
  }

  res.status(parsed.data.destination === "cloud" ? 202 : 201).json({
    worker: toWorkerResponse(
      {
        id: workerId,
        org_id: orgId,
        created_by_user_id: session.user.id,
        name: parsed.data.name,
        description: parsed.data.description ?? null,
        destination: parsed.data.destination,
        status: workerStatus,
        image_version: parsed.data.imageVersion ?? null,
        workspace_path: parsed.data.workspacePath ?? null,
        sandbox_backend: parsed.data.sandboxBackend ?? null,
        created_at: new Date(),
        updated_at: new Date(),
      },
      session.user.id,
    ),
    tokens: {
      host: hostToken,
      client: clientToken,
    },
    instance: null,
    launch: parsed.data.destination === "cloud" ? { mode: "async", pollAfterMs: 5000 } : { mode: "instant", pollAfterMs: 0 },
  })
}))

// GET /v1/workers/billing — static summary while Polar is disabled.
workersRouter.get("/billing", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  res.json({
    billing: getExperimentBillingSummary(),
  })

  // Polar billing is temporarily disabled for the one-worker experiment.
  // const includeCheckoutUrl = queryIncludesFlag(req.query.includeCheckout)
  // const includePortalUrl = !queryIncludesFlag(req.query.excludePortal)
  // const includeInvoices = !queryIncludesFlag(req.query.excludeInvoices)
  //
  // const billingInput = {
  //   userId: session.user.id,
  //   email: session.user.email ?? `${session.user.id}@placeholder.local`,
  //   name: session.user.name ?? session.user.email ?? "OpenWork User",
  // }
  //
  // const billing = await getCloudWorkerBillingStatus(
  //   billingInput,
  //   {
  //     includeCheckoutUrl,
  //     includePortalUrl,
  //     includeInvoices,
  //   },
  // )
  //
  // res.json({
  //   billing: {
  //     ...billing,
  //     productId: env.polar.productId,
  //     benefitId: env.polar.benefitId,
  //   },
  // })
}))

// POST /v1/workers/billing/subscription — no-op while Polar is disabled.
workersRouter.post("/billing/subscription", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  const parsed = billingSubscriptionSchema.safeParse(req.body ??
{})
  if (!parsed.success) {
    res.status(400).json({ error: "invalid_request", details: parsed.error.flatten() })
    return
  }

  res.json({
    subscription: null,
    billing: getExperimentBillingSummary(),
  })

  // Polar billing is temporarily disabled for the one-worker experiment.
  // const billingInput = {
  //   userId: session.user.id,
  //   email: session.user.email ?? `${session.user.id}@placeholder.local`,
  //   name: session.user.name ?? session.user.email ?? "OpenWork User",
  // }
  //
  // const subscription = await setCloudWorkerSubscriptionCancellation(billingInput, parsed.data.cancelAtPeriodEnd)
  // const billing = await getCloudWorkerBillingStatus(billingInput, {
  //   includeCheckoutUrl: false,
  //   includePortalUrl: true,
  //   includeInvoices: true,
  // })
  //
  // res.json({
  //   subscription,
  //   billing: {
  //     ...billing,
  //     productId: env.polar.productId,
  //     benefitId: env.polar.benefitId,
  //   },
  // })
}))

// GET /v1/workers/:id — fetch one worker (scoped to the caller's org)
// plus its latest instance. Unknown/foreign ids answer 404.
workersRouter.get("/:id", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  // Malformed ids are indistinguishable from missing workers (404).
  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const rows = await db
    .select()
    .from(WorkerTable)
    .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
    .limit(1)

  if (rows.length === 0) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const instance = await getLatestWorkerInstance(rows[0].id)

  res.json({
    worker: toWorkerResponse(rows[0], session.user.id),
    instance: toInstanceResponse(instance),
  })
}))

// POST /v1/workers/:id/tokens — return the live host/client token pair
// and, when reachable, a resolved connect URL for the worker.
workersRouter.post("/:id/tokens", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const rows = await db
    .select()
    .from(WorkerTable)
    .where(eq(WorkerTable.id, workerId))
    .limit(1)

  // Org mismatch is reported as not-found, same as a missing row.
  if (rows.length === 0 || rows[0].org_id !== orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  // Only unrevoked tokens count; both scopes must be present.
  const tokenRows = await db
    .select()
    .from(WorkerTokenTable)
    .where(and(eq(WorkerTokenTable.worker_id, rows[0].id), isNull(WorkerTokenTable.revoked_at)))
    .orderBy(asc(WorkerTokenTable.created_at))

  const hostToken = tokenRows.find((entry) => entry.scope === "host")?.token ?? null
  const clientToken = tokenRows.find((entry) => entry.scope === "client")?.token ?? null

  if (!hostToken || !clientToken) {
    res.status(409).json({
      error: "worker_tokens_unavailable",
      message: "Worker tokens are missing for this worker. Launch a new worker and try again.",
    })
    return
  }

  const instance = await getLatestWorkerInstance(rows[0].id)
  const connect = await resolveConnectUrlFromCandidates(rows[0].id, instance?.url ?? null, clientToken)

  // Fall back to the bare instance URL when no workspace was resolved.
  res.json({
    tokens: {
      host: hostToken,
      client: clientToken,
    },
    connect: connect ?? (instance?.url ?
{ openworkUrl: instance.url, workspaceId: null } : null),
  })
}))

// GET /v1/workers/:id/runtime — proxy the worker's /runtime/versions
// endpoint; status and payload are forwarded as-is.
workersRouter.get("/:id/runtime", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const rows = await db
    .select()
    .from(WorkerTable)
    .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
    .limit(1)

  if (rows.length === 0) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const runtime = await fetchWorkerRuntimeJson({
    workerId: rows[0].id,
    path: "/runtime/versions",
  })

  res.status(runtime.status).json(runtime.payload)
}))

// POST /v1/workers/:id/runtime/upgrade — proxy an upgrade request to the
// worker runtime, forwarding the request body verbatim.
workersRouter.post("/:id/runtime/upgrade", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const rows = await db
    .select()
    .from(WorkerTable)
    .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
    .limit(1)

  if (rows.length === 0) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const runtime = await fetchWorkerRuntimeJson({
    workerId: rows[0].id,
    path: "/runtime/upgrade",
    method: "POST",
    body: req.body ??
{},
  })

  res.status(runtime.status).json(runtime.payload)
}))

// DELETE /v1/workers/:id — tear down a worker. Cloud workers are
// deprovisioned best-effort first (failures are logged, not fatal).
workersRouter.delete("/:id", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return

  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const rows = await db
    .select()
    .from(WorkerTable)
    .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
    .limit(1)

  if (rows.length === 0) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }

  const worker = rows[0]
  const instance = await getLatestWorkerInstance(worker.id)

  if (worker.destination === "cloud") {
    try {
      await deprovisionWorker({
        workerId: worker.id,
        instanceUrl: instance?.url ?? null,
      })
    } catch (error) {
      const message = error instanceof Error ?
error.message : "deprovision_failed"
      console.warn(`[workers] deprovision warning for ${worker.id}: ${message}`)
    }
  }

  // Delete dependent rows before the worker itself; all deletes ride one
  // transaction so a failure leaves the worker intact.
  await db.transaction(async (tx) => {
    await tx.delete(WorkerTokenTable).where(eq(WorkerTokenTable.worker_id, worker.id))
    await tx.delete(DaytonaSandboxTable).where(eq(DaytonaSandboxTable.worker_id, worker.id))
    await tx.delete(WorkerInstanceTable).where(eq(WorkerInstanceTable.worker_id, worker.id))
    await tx.delete(WorkerBundleTable).where(eq(WorkerBundleTable.worker_id, worker.id))
    await tx.delete(AuditEventTable).where(eq(AuditEventTable.worker_id, worker.id))
    await tx.delete(WorkerTable).where(eq(WorkerTable.id, worker.id))
  })

  res.status(204).end()
}))
diff --git a/services/den-v2/src/index.ts b/services/den-v2/src/index.ts
new file mode 100644
index 00000000..1ec9f30d
--- /dev/null
+++ b/services/den-v2/src/index.ts
@@ -0,0 +1,67 @@
import "./load-env.js"
import cors from "cors"
import express from "express"
import path from "node:path"
import { fileURLToPath } from "node:url"
import { toNodeHandler } from "better-auth/node"
import { auth } from "./auth.js"
import { env } from "./env.js"
import { adminRouter } from "./http/admin.js"
import { asyncRoute, errorMiddleware } from "./http/errors.js"
import { getRequestSession } from "./http/session.js"
import { workersRouter } from "./http/workers.js"
import { normalizeDenTypeId } from "./db/typeid.js"
import { listUserOrgs } from "./orgs.js"

const app = express()
const currentFile = fileURLToPath(import.meta.url)
const publicDir = path.resolve(path.dirname(currentFile), "../public")

if (env.corsOrigins.length > 0) {
  app.use(
    cors({
      origin: env.corsOrigins,
      credentials: true,
      methods: ["GET", "POST", "PATCH", "DELETE"],
    }),
  )
}

// FIX: mount the better-auth handler BEFORE express.json(). The JSON
// body parser consumes the request stream, and better-auth needs the
// raw body on its POST endpoints (per the better-auth Express guide).
// Previously express.json() was registered first, breaking auth POSTs.
app.all("/api/auth/*", toNodeHandler(auth))
app.use(express.json())
app.use(express.static(publicDir))

app.get("/health", (_, res) => {
  res.json({ ok: true })
})

// Current session (cookie or bearer), 401 when unauthenticated.
app.get("/v1/me", asyncRoute(async (req, res) => {
  const session = await getRequestSession(req)
  if (!session?.user?.id) {
    res.status(401).json({ error: "unauthorized" })
    return
  }
  res.json(session)
}))

// Orgs the caller belongs to; the first one doubles as the default.
app.get("/v1/me/orgs", asyncRoute(async (req, res) => {
  const session = await getRequestSession(req)
  if (!session?.user?.id) {
    res.status(401).json({ error: "unauthorized" })
    return
  }

  const orgs = await listUserOrgs(normalizeDenTypeId("user", session.user.id))
  res.json({
    orgs,
    defaultOrgId: orgs[0]?.id ?? null,
  })
}))

app.use("/v1/admin", adminRouter)
app.use("/v1/workers", workersRouter)
// Error middleware must be registered last so it sees router errors.
app.use(errorMiddleware)

app.listen(env.port, () => {
  console.log(`den listening on ${env.port} (provisioner=${env.provisionerMode})`)
})
diff --git a/services/den-v2/src/load-env.ts b/services/den-v2/src/load-env.ts
new file mode 100644
index 00000000..ef46b39f
--- /dev/null
+++ b/services/den-v2/src/load-env.ts
@@ -0,0 +1,45 @@
import { existsSync } from "node:fs"
import path from "node:path"
import { fileURLToPath } from "node:url"
import dotenv from "dotenv"

// Walk parent directories (up to maxDepth levels) looking for fileName;
// returns the first hit or null when the filesystem root is reached.
function findUpwards(startDir: string, fileName: string, maxDepth = 8) {
  let current = startDir

  for (let depth = 0; depth <= maxDepth; depth += 1) {
    const candidate = path.join(current, fileName)
    if (existsSync(candidate)) {
      return candidate
    }

    const parent = path.dirname(current)
    if (parent === current) {
      break
    }
    current = parent
  }

  return null
}

const srcDir = path.dirname(fileURLToPath(import.meta.url))
const serviceDir = path.resolve(srcDir, "..")

// Service-local env files first; override:false keeps values already in
// the process environment authoritative.
for (const filePath of [
  path.join(serviceDir, ".env.local"),
  path.join(serviceDir, ".env"),
]) {
  if (existsSync(filePath)) {
    dotenv.config({ path: filePath, override: false })
  }
}

// An explicit OPENWORK_DAYTONA_ENV_PATH wins over upward auto-detection.
const explicitDaytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim()
const detectedDaytonaEnvPath = findUpwards(path.resolve(serviceDir, "..", ".."), ".env.daytona")
const daytonaEnvPath = explicitDaytonaEnvPath ||
detectedDaytonaEnvPath

if (daytonaEnvPath && existsSync(daytonaEnvPath)) {
  dotenv.config({ path: daytonaEnvPath, override: false })
}

// Finally let a plain process-cwd .env fill any remaining gaps.
dotenv.config({ override: false })
diff --git a/services/den-v2/src/orgs.ts b/services/den-v2/src/orgs.ts
new file mode 100644
index 00000000..845884d6
--- /dev/null
+++ b/services/den-v2/src/orgs.ts
@@ -0,0 +1,65 @@
import { eq } from "./db/drizzle.js"
import { db } from "./db/index.js"
import { AuthUserTable, OrgMembershipTable, OrgTable } from "./db/schema.js"
import { createDenTypeId } from "./db/typeid.js"

type UserId = typeof AuthUserTable.$inferSelect.id
type OrgId = typeof OrgTable.$inferSelect.id

/**
 * Returns the user's org id, creating a personal org plus an "owner"
 * membership on first use.
 * NOTE(review): check-then-insert is not concurrency-safe; two parallel
 * requests could each create an org — confirm a unique constraint
 * covers this at the database level.
 */
export async function ensureDefaultOrg(userId: UserId, name: string): Promise<OrgId> {
  const [membership] = await db
    .select()
    .from(OrgMembershipTable)
    .where(eq(OrgMembershipTable.user_id, userId))
    .limit(1)

  if (membership) {
    return membership.org_id
  }

  const orgId = createDenTypeId("org")
  const slug = `personal-${orgId.slice(0, 8)}`
  await db.insert(OrgTable).values({
    id: orgId,
    name,
    slug,
    owner_user_id: userId,
  })
  await db.insert(OrgMembershipTable).values({
    id: createDenTypeId("orgMembership"),
    org_id: orgId,
    user_id: userId,
    role: "owner",
  })
  return orgId
}

/**
 * All orgs the user is a member of, flattened to an API-friendly shape
 * that carries the membership role alongside each org.
 */
export async function listUserOrgs(userId: UserId) {
  const memberships = await db
    .select({
      membershipId: OrgMembershipTable.id,
      role: OrgMembershipTable.role,
      org: {
        id: OrgTable.id,
        name: OrgTable.name,
        slug: OrgTable.slug,
        ownerUserId: OrgTable.owner_user_id,
        createdAt: OrgTable.created_at,
        updatedAt: OrgTable.updated_at,
      },
    })
    .from(OrgMembershipTable)
    .innerJoin(OrgTable, eq(OrgMembershipTable.org_id, OrgTable.id))
    .where(eq(OrgMembershipTable.user_id, userId))

  return memberships.map((entry) => ({
    id: entry.org.id,
    name: entry.org.name,
    slug: entry.org.slug,
    ownerUserId: entry.org.ownerUserId,
    role: entry.role,
    membershipId: entry.membershipId,
    createdAt: entry.org.createdAt,
    updatedAt: entry.org.updatedAt,
  }))
}
diff --git a/services/den-v2/src/workers/daytona.ts b/services/den-v2/src/workers/daytona.ts
new file mode 100644
index 00000000..f97db0ab
--- /dev/null
+++ b/services/den-v2/src/workers/daytona.ts
@@ -0,0 +1,484 @@
import { Daytona, type Sandbox } from "@daytonaio/sdk"
import { eq } from "../db/drizzle.js"
import { db } from "../db/index.js"
import { DaytonaSandboxTable } from "../db/schema.js"
import { createDenTypeId } from "../db/typeid.js"
import { env } from "../env.js"

type WorkerId = typeof DaytonaSandboxTable.$inferSelect.worker_id

type ProvisionInput = {
  workerId: WorkerId
  name: string
  hostToken: string
  clientToken: string
}

type ProvisionedInstance = {
  provider: string
  url: string
  status: "provisioning" | "healthy"
  region?: string
}

const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
// Signed preview links are capped at 24h here; refresh five minutes
// before expiry so proxied requests never see a stale URL.
const maxSignedPreviewExpirySeconds = 60 * 60 * 24
const signedPreviewRefreshLeadMs = 5 * 60 * 1000

// Lowercase, dash-separated identifier safe for resource names.
const slug = (value: string) =>
  value
    .toLowerCase()
    .replace(/[^a-z0-9-]+/g, "-")
    .replace(/-+/g, "-")
    .replace(/^-|-$/g, "")

// Single-quote a value for POSIX sh, escaping embedded single quotes.
function shellQuote(value: string) {
  return `'${value.replace(/'/g, `'"'"'`)}'`
}

function createDaytonaClient() {
  return new Daytona({
    apiKey: env.daytona.apiKey,
    apiUrl: env.daytona.apiUrl,
    ...(env.daytona.target ?
{ target: env.daytona.target } : {}), + }) +} + +function normalizedSignedPreviewExpirySeconds() { + return Math.max( + 1, + Math.min(env.daytona.signedPreviewExpiresSeconds, maxSignedPreviewExpirySeconds), + ) +} + +function signedPreviewRefreshAt(expiresInSeconds: number) { + return new Date( + Date.now() + Math.max(0, expiresInSeconds * 1000 - signedPreviewRefreshLeadMs), + ) +} + +function workerProxyUrl(workerId: WorkerId) { + return `${env.daytona.workerProxyBaseUrl.replace(/\/+$/, "")}/${encodeURIComponent(workerId)}` +} + +function assertDaytonaConfig() { + if (!env.daytona.apiKey) { + throw new Error("DAYTONA_API_KEY is required for daytona provisioner") + } +} + +function workerHint(workerId: WorkerId) { + return workerId.replace(/-/g, "").slice(0, 12) +} + +function sandboxLabels(workerId: WorkerId) { + return { + "openwork.den.provider": "daytona", + "openwork.den.worker-id": workerId, + } +} + +function sandboxName(input: ProvisionInput) { + return slug( + `${env.daytona.sandboxNamePrefix}-${input.name}-${workerHint(input.workerId)}`, + ).slice(0, 63) +} + +function workspaceVolumeName(workerId: WorkerId) { + return slug(`${env.daytona.volumeNamePrefix}-${workerHint(workerId)}-workspace`).slice(0, 63) +} + +function dataVolumeName(workerId: WorkerId) { + return slug(`${env.daytona.volumeNamePrefix}-${workerHint(workerId)}-data`).slice(0, 63) +} + +function buildOpenWorkStartCommand(input: ProvisionInput) { + const orchestratorPackage = env.daytona.openworkVersion?.trim() + ? `openwork-orchestrator@${env.daytona.openworkVersion.trim()}` + : "openwork-orchestrator" + const installStep = [ + `if ! command -v openwork >/dev/null 2>&1; then npm install -g ${shellQuote(orchestratorPackage)}; fi`, + "if ! 
command -v opencode >/dev/null 2>&1; then echo 'opencode binary missing from Daytona runtime; bake it into the snapshot image and expose it on PATH' >&2; exit 1; fi", + ].join("; ") + const openworkServe = [ + "OPENWORK_DATA_DIR=", + shellQuote(env.daytona.runtimeDataPath), + " OPENWORK_SIDECAR_DIR=", + shellQuote(env.daytona.sidecarDir), + " OPENWORK_TOKEN=", + shellQuote(input.clientToken), + " OPENWORK_HOST_TOKEN=", + shellQuote(input.hostToken), + " openwork serve", + ` --workspace ${shellQuote(env.daytona.runtimeWorkspacePath)}`, + ` --openwork-host 0.0.0.0`, + ` --openwork-port ${env.daytona.openworkPort}`, + ` --opencode-host 127.0.0.1`, + ` --opencode-port ${env.daytona.opencodePort}`, + ` --connect-host 127.0.0.1`, + ` --cors '*'`, + ` --approval manual`, + ` --allow-external`, + ` --opencode-source external`, + ` --opencode-bin $(command -v opencode)`, + ` --no-opencode-router`, + ` --verbose`, + ].join("") + + const script = ` +set -u +mkdir -p ${shellQuote(env.daytona.workspaceMountPath)} ${shellQuote(env.daytona.dataMountPath)} ${shellQuote(env.daytona.runtimeWorkspacePath)} ${shellQuote(env.daytona.runtimeDataPath)} ${shellQuote(env.daytona.sidecarDir)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes`)} +ln -sfn ${shellQuote(env.daytona.workspaceMountPath)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes/workspace`) } +ln -sfn ${shellQuote(env.daytona.dataMountPath)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes/data`) } +${installStep} +attempt=0 +while [ "$attempt" -lt 3 ]; do + attempt=$((attempt + 1)) + if ${openworkServe}; then + exit 0 + fi + status=$? 
+ echo "openwork serve failed (attempt $attempt, exit $status); retrying in 3s" + sleep 3 +done +exit 1 +`.trim() + + return `sh -lc ${shellQuote(script)}` +} + +async function waitForVolumeReady(daytona: Daytona, name: string, timeoutMs: number) { + const startedAt = Date.now() + + while (Date.now() - startedAt < timeoutMs) { + const volume = await daytona.volume.get(name) + if (volume.state === "ready") { + return volume + } + await sleep(env.daytona.pollIntervalMs) + } + + throw new Error(`Timed out waiting for Daytona volume ${name} to become ready`) +} + +async function waitForHealth(url: string, timeoutMs: number, sandbox: Sandbox, sessionId: string, commandId: string) { + const startedAt = Date.now() + + while (Date.now() - startedAt < timeoutMs) { + try { + const response = await fetch(`${url.replace(/\/$/, "")}/health`, { method: "GET" }) + if (response.ok) { + return + } + } catch { + // ignore transient startup failures + } + + try { + const command = await sandbox.process.getSessionCommand(sessionId, commandId) + if (typeof command.exitCode === "number" && command.exitCode !== 0) { + const logs = await sandbox.process.getSessionCommandLogs(sessionId, commandId) + throw new Error( + [ + `openwork session exited with ${command.exitCode}`, + logs.stdout?.trim() ? `stdout:\n${logs.stdout.trim().slice(-4000)}` : "", + logs.stderr?.trim() ? `stderr:\n${logs.stderr.trim().slice(-4000)}` : "", + ] + .filter(Boolean) + .join("\n\n"), + ) + } + } catch (error) { + if (error instanceof Error && error.message.startsWith("openwork session exited")) { + throw error + } + } + + await sleep(env.daytona.pollIntervalMs) + } + + const logs = await sandbox.process.getSessionCommandLogs(sessionId, commandId).catch( + () => null, + ) + throw new Error( + [ + `Timed out waiting for Daytona worker health at ${url.replace(/\/$/, "")}/health`, + logs?.stdout?.trim() ? `stdout:\n${logs.stdout.trim().slice(-4000)}` : "", + logs?.stderr?.trim() ? 
`stderr:\n${logs.stderr.trim().slice(-4000)}` : "", + ] + .filter(Boolean) + .join("\n\n"), + ) +} + +async function upsertDaytonaSandbox(input: { + workerId: WorkerId + sandboxId: string + workspaceVolumeId: string + dataVolumeId: string + signedPreviewUrl: string + signedPreviewUrlExpiresAt: Date + region: string | null +}) { + const existing = await db + .select({ id: DaytonaSandboxTable.id }) + .from(DaytonaSandboxTable) + .where(eq(DaytonaSandboxTable.worker_id, input.workerId)) + .limit(1) + + if (existing.length > 0) { + await db + .update(DaytonaSandboxTable) + .set({ + sandbox_id: input.sandboxId, + workspace_volume_id: input.workspaceVolumeId, + data_volume_id: input.dataVolumeId, + signed_preview_url: input.signedPreviewUrl, + signed_preview_url_expires_at: input.signedPreviewUrlExpiresAt, + region: input.region, + }) + .where(eq(DaytonaSandboxTable.worker_id, input.workerId)) + return + } + + await db.insert(DaytonaSandboxTable).values({ + id: createDenTypeId("daytonaSandbox"), + worker_id: input.workerId, + sandbox_id: input.sandboxId, + workspace_volume_id: input.workspaceVolumeId, + data_volume_id: input.dataVolumeId, + signed_preview_url: input.signedPreviewUrl, + signed_preview_url_expires_at: input.signedPreviewUrlExpiresAt, + region: input.region, + }) +} + +export async function getDaytonaSandboxRecord(workerId: WorkerId) { + const rows = await db + .select() + .from(DaytonaSandboxTable) + .where(eq(DaytonaSandboxTable.worker_id, workerId)) + .limit(1) + + return rows[0] ?? 
null
+}
+
+// Mint a fresh Daytona signed preview URL for the worker's sandbox and
+// persist it on the daytona_sandbox row together with its refresh deadline
+// and the sandbox region. Returns the record merged with the new URL fields,
+// or null when the worker has no sandbox record. Requires Daytona config
+// (assertDaytonaConfig throws otherwise).
+export async function refreshDaytonaSignedPreview(workerId: WorkerId) {
+ assertDaytonaConfig()
+
+ const record = await getDaytonaSandboxRecord(workerId)
+ if (!record) {
+ return null
+ }
+
+ const daytona = createDaytonaClient()
+ const sandbox = await daytona.get(record.sandbox_id)
+ // Re-sync cached sandbox metadata before reading sandbox.target below.
+ await sandbox.refreshData()
+
+ const expiresInSeconds = normalizedSignedPreviewExpirySeconds()
+ const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds)
+ const expiresAt = signedPreviewRefreshAt(expiresInSeconds)
+
+ await db
+ .update(DaytonaSandboxTable)
+ .set({
+ signed_preview_url: preview.url,
+ signed_preview_url_expires_at: expiresAt,
+ region: sandbox.target,
+ })
+ .where(eq(DaytonaSandboxTable.worker_id, workerId))
+
+ // Return the updated view without re-querying the row.
+ return {
+ ...record,
+ signed_preview_url: preview.url,
+ signed_preview_url_expires_at: expiresAt,
+ region: sandbox.target,
+ }
+}
+
+// Resolve a signed preview URL for the proxy: reuse the stored URL while its
+// stored deadline has not passed, otherwise mint and persist a new one.
+// Returns null when the worker has no sandbox record.
+export async function getDaytonaSignedPreviewForProxy(workerId: WorkerId) {
+ const record = await getDaytonaSandboxRecord(workerId)
+ if (!record) {
+ return null
+ }
+
+ if (record.signed_preview_url_expires_at.getTime() > Date.now()) {
+ return record.signed_preview_url
+ }
+
+ const refreshed = await refreshDaytonaSignedPreview(workerId)
+ return refreshed?.signed_preview_url ?? 
null +} + +export async function provisionWorkerOnDaytona( + input: ProvisionInput, +): Promise { + assertDaytonaConfig() + + const daytona = createDaytonaClient() + const labels = sandboxLabels(input.workerId) + const workspaceVolumeNameValue = workspaceVolumeName(input.workerId) + const dataVolumeNameValue = dataVolumeName(input.workerId) + await daytona.volume.get(workspaceVolumeNameValue, true) + await daytona.volume.get(dataVolumeNameValue, true) + const workspaceVolume = await waitForVolumeReady( + daytona, + workspaceVolumeNameValue, + env.daytona.createTimeoutSeconds * 1000, + ) + const dataVolume = await waitForVolumeReady( + daytona, + dataVolumeNameValue, + env.daytona.createTimeoutSeconds * 1000, + ) + let sandbox: Awaited> | null = null + + try { + sandbox = env.daytona.snapshot + ? await daytona.create( + { + name: sandboxName(input), + snapshot: env.daytona.snapshot, + autoStopInterval: env.daytona.autoStopInterval, + autoArchiveInterval: env.daytona.autoArchiveInterval, + autoDeleteInterval: env.daytona.autoDeleteInterval, + public: env.daytona.public, + labels, + envVars: { + DEN_WORKER_ID: input.workerId, + }, + volumes: [ + { + volumeId: workspaceVolume.id, + mountPath: env.daytona.workspaceMountPath, + }, + { + volumeId: dataVolume.id, + mountPath: env.daytona.dataMountPath, + }, + ], + }, + { timeout: env.daytona.createTimeoutSeconds }, + ) + : await daytona.create( + { + name: sandboxName(input), + image: env.daytona.image, + autoStopInterval: env.daytona.autoStopInterval, + autoArchiveInterval: env.daytona.autoArchiveInterval, + autoDeleteInterval: env.daytona.autoDeleteInterval, + public: env.daytona.public, + labels, + envVars: { + DEN_WORKER_ID: input.workerId, + }, + resources: { + cpu: env.daytona.resources.cpu, + memory: env.daytona.resources.memory, + disk: env.daytona.resources.disk, + }, + volumes: [ + { + volumeId: workspaceVolume.id, + mountPath: env.daytona.workspaceMountPath, + }, + { + volumeId: dataVolume.id, + mountPath: 
env.daytona.dataMountPath, + }, + ], + }, + { timeout: env.daytona.createTimeoutSeconds }, + ) + + const sessionId = `openwork-${workerHint(input.workerId)}` + await sandbox.process.createSession(sessionId) + const command = await sandbox.process.executeSessionCommand( + sessionId, + { + command: buildOpenWorkStartCommand(input), + runAsync: true, + }, + 0, + ) + + const expiresInSeconds = normalizedSignedPreviewExpirySeconds() + const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds) + await waitForHealth(preview.url, env.daytona.healthcheckTimeoutMs, sandbox, sessionId, command.cmdId) + await upsertDaytonaSandbox({ + workerId: input.workerId, + sandboxId: sandbox.id, + workspaceVolumeId: workspaceVolume.id, + dataVolumeId: dataVolume.id, + signedPreviewUrl: preview.url, + signedPreviewUrlExpiresAt: signedPreviewRefreshAt(expiresInSeconds), + region: sandbox.target ?? null, + }) + + return { + provider: "daytona", + url: workerProxyUrl(input.workerId), + status: "healthy", + region: sandbox.target, + } + } catch (error) { + if (sandbox) { + await sandbox.delete(env.daytona.deleteTimeoutSeconds).catch(() => {}) + } + await daytona.volume.delete(workspaceVolume).catch(() => {}) + await daytona.volume.delete(dataVolume).catch(() => {}) + throw error + } +} + +export async function deprovisionWorkerOnDaytona(workerId: WorkerId) { + assertDaytonaConfig() + + const daytona = createDaytonaClient() + const record = await getDaytonaSandboxRecord(workerId) + + if (record) { + try { + const sandbox = await daytona.get(record.sandbox_id) + await sandbox.delete(env.daytona.deleteTimeoutSeconds) + } catch (error) { + const message = error instanceof Error ? 
error.message : "unknown_error" + console.warn(`[provisioner] failed to delete Daytona sandbox ${record.sandbox_id}: ${message}`) + } + + const volumes = await daytona.volume.list().catch(() => []) + for (const volumeId of [record.workspace_volume_id, record.data_volume_id]) { + const volume = volumes.find((entry) => entry.id === volumeId) + if (!volume) { + continue + } + await daytona.volume.delete(volume).catch((error) => { + const message = error instanceof Error ? error.message : "unknown_error" + console.warn(`[provisioner] failed to delete Daytona volume ${volumeId}: ${message}`) + }) + } + + return + } + + const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20) + + for (const sandbox of sandboxes.items) { + await sandbox.delete(env.daytona.deleteTimeoutSeconds).catch((error) => { + const message = error instanceof Error ? error.message : "unknown_error" + console.warn(`[provisioner] failed to delete Daytona sandbox ${sandbox.id}: ${message}`) + }) + } + + const volumes = await daytona.volume.list() + for (const name of [workspaceVolumeName(workerId), dataVolumeName(workerId)]) { + const volume = volumes.find((entry) => entry.name === name) + if (!volume) { + continue + } + await daytona.volume.delete(volume).catch((error) => { + const message = error instanceof Error ? 
error.message : "unknown_error" + console.warn(`[provisioner] failed to delete Daytona volume ${name}: ${message}`) + }) + } +} diff --git a/services/den-v2/src/workers/provisioner.ts b/services/den-v2/src/workers/provisioner.ts new file mode 100644 index 00000000..166839db --- /dev/null +++ b/services/den-v2/src/workers/provisioner.ts @@ -0,0 +1,405 @@ +import { env } from "../env.js"; +import { WorkerTable } from "../db/schema.js"; +import { + deprovisionWorkerOnDaytona, + provisionWorkerOnDaytona, +} from "./daytona.js"; +import { + customDomainForWorker, + ensureVercelDnsRecord, +} from "./vanity-domain.js"; + +type WorkerId = typeof WorkerTable.$inferSelect.id; + +export type ProvisionInput = { + workerId: WorkerId; + name: string; + hostToken: string; + clientToken: string; +}; + +export type ProvisionedInstance = { + provider: string; + url: string; + status: "provisioning" | "healthy"; + region?: string; +}; + +type RenderService = { + id: string; + name?: string; + slug?: string; + serviceDetails?: { + url?: string; + region?: string; + }; +}; + +type RenderServiceListRow = { + cursor?: string; + service?: RenderService; +}; + +type RenderDeploy = { + id: string; + status: string; +}; + +const terminalDeployStates = new Set([ + "live", + "update_failed", + "build_failed", + "canceled", +]); + +const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); + +const slug = (value: string) => + value + .toLowerCase() + .replace(/[^a-z0-9-]+/g, "-") + .replace(/-+/g, "-") + .replace(/^-|-$/g, ""); + +const hostFromUrl = (value: string | null | undefined) => { + if (!value) { + return ""; + } + + try { + return new URL(value).host.toLowerCase(); + } catch { + return ""; + } +}; + +async function renderRequest( + path: string, + init: RequestInit = {}, +): Promise { + const headers = new Headers(init.headers); + headers.set("Authorization", `Bearer ${env.render.apiKey}`); + headers.set("Accept", "application/json"); + + if (init.body && 
!headers.has("Content-Type")) { + headers.set("Content-Type", "application/json"); + } + + const response = await fetch(`${env.render.apiBase}${path}`, { + ...init, + headers, + }); + const text = await response.text(); + + if (!response.ok) { + throw new Error( + `Render API ${path} failed (${response.status}): ${text.slice(0, 400)}`, + ); + } + + if (!text) { + return null as T; + } + + return JSON.parse(text) as T; +} + +async function waitForDeployLive(serviceId: string) { + const startedAt = Date.now(); + let latest: RenderDeploy | null = null; + + while (Date.now() - startedAt < env.render.provisionTimeoutMs) { + const rows = await renderRequest>( + `/services/${serviceId}/deploys?limit=1`, + ); + latest = rows[0]?.deploy ?? null; + + if (latest && terminalDeployStates.has(latest.status)) { + if (latest.status !== "live") { + throw new Error( + `Render deploy ${latest.id} ended with ${latest.status}`, + ); + } + return latest; + } + + await sleep(env.render.pollIntervalMs); + } + + throw new Error( + `Timed out waiting for Render deploy for service ${serviceId}`, + ); +} + +async function waitForHealth( + url: string, + timeoutMs = env.render.healthcheckTimeoutMs, +) { + const healthUrl = `${url.replace(/\/$/, "")}/health`; + const startedAt = Date.now(); + + while (Date.now() - startedAt < timeoutMs) { + try { + const response = await fetch(healthUrl, { method: "GET" }); + if (response.ok) { + return; + } + } catch { + // ignore transient network failures while the instance boots + } + await sleep(env.render.pollIntervalMs); + } + + throw new Error(`Timed out waiting for worker health endpoint ${healthUrl}`); +} + +async function listRenderServices(limit = 200) { + const rows: RenderService[] = []; + let cursor: string | undefined; + + while (rows.length < limit) { + const query = new URLSearchParams({ limit: "100" }); + if (cursor) { + query.set("cursor", cursor); + } + + const page = await renderRequest( + `/services?${query.toString()}`, + ); + if 
(page.length === 0) { + break; + } + + rows.push( + ...page + .map((entry) => entry.service) + .filter((entry): entry is RenderService => Boolean(entry?.id)), + ); + + const nextCursor = page[page.length - 1]?.cursor; + if (!nextCursor || nextCursor === cursor) { + break; + } + + cursor = nextCursor; + } + + return rows.slice(0, limit); +} + +async function attachRenderCustomDomain( + serviceId: string, + workerId: string, + renderUrl: string, +) { + const hostname = customDomainForWorker( + workerId, + env.render.workerPublicDomainSuffix, + ); + if (!hostname) { + return null; + } + + try { + await renderRequest(`/services/${serviceId}/custom-domains`, { + method: "POST", + body: JSON.stringify({ + name: hostname, + }), + }); + + const dnsReady = await ensureVercelDnsRecord({ + hostname, + targetUrl: renderUrl, + domain: env.vercel.dnsDomain ?? env.render.workerPublicDomainSuffix, + apiBase: env.vercel.apiBase, + token: env.vercel.token, + teamId: env.vercel.teamId, + teamSlug: env.vercel.teamSlug, + }); + + if (!dnsReady) { + console.warn( + `[provisioner] vanity dns upsert skipped or failed for ${hostname}; using Render URL fallback`, + ); + return null; + } + + return `https://${hostname}`; + } catch (error) { + const message = error instanceof Error ? error.message : "unknown_error"; + console.warn( + `[provisioner] custom domain attach failed for ${serviceId}: ${message}`, + ); + return null; + } +} + +function assertRenderConfig() { + if (!env.render.apiKey) { + throw new Error("RENDER_API_KEY is required for render provisioner"); + } + if (!env.render.ownerId) { + throw new Error("RENDER_OWNER_ID is required for render provisioner"); + } +} + +async function provisionWorkerOnRender( + input: ProvisionInput, +): Promise { + assertRenderConfig(); + + const serviceName = slug( + `${env.render.workerNamePrefix}-${input.name}-${input.workerId.slice(0, 8)}`, + ).slice(0, 62); + const orchestratorPackage = env.render.workerOpenworkVersion?.trim() + ? 
`openwork-orchestrator@${env.render.workerOpenworkVersion.trim()}` + : "openwork-orchestrator"; + const buildCommand = [ + `npm install -g ${orchestratorPackage}`, + "node ./scripts/install-opencode.mjs", + ].join(" && "); + const startCommand = [ + "mkdir -p /tmp/workspace", + "attempt=0; while [ $attempt -lt 3 ]; do attempt=$((attempt + 1)); openwork serve --workspace /tmp/workspace --openwork-host 0.0.0.0 --openwork-port ${PORT:-10000} --opencode-host 127.0.0.1 --opencode-port 4096 --connect-host 127.0.0.1 --cors '*' --approval manual --allow-external --opencode-source external --opencode-bin ./bin/opencode --no-opencode-router --verbose && exit 0; echo \"openwork serve failed (attempt $attempt); retrying in 3s\"; sleep 3; done; exit 1", + ].join(" && "); + + const payload = { + type: "web_service", + name: serviceName, + ownerId: env.render.ownerId, + repo: env.render.workerRepo, + branch: env.render.workerBranch, + autoDeploy: "no", + rootDir: env.render.workerRootDir, + envVars: [ + { key: "OPENWORK_TOKEN", value: input.clientToken }, + { key: "OPENWORK_HOST_TOKEN", value: input.hostToken }, + { key: "DEN_WORKER_ID", value: input.workerId }, + ], + serviceDetails: { + runtime: "node", + plan: env.render.workerPlan, + region: env.render.workerRegion, + healthCheckPath: "/health", + envSpecificDetails: { + buildCommand, + startCommand, + }, + }, + }; + + const created = await renderRequest<{ service: RenderService }>("/services", { + method: "POST", + body: JSON.stringify(payload), + }); + + const serviceId = created.service.id; + await waitForDeployLive(serviceId); + const service = await renderRequest(`/services/${serviceId}`); + const renderUrl = service.serviceDetails?.url; + + if (!renderUrl) { + throw new Error(`Render service ${serviceId} has no public URL`); + } + + await waitForHealth(renderUrl); + + const customUrl = await attachRenderCustomDomain( + serviceId, + input.workerId, + renderUrl, + ); + let url = renderUrl; + + if (customUrl) { + try { + 
await waitForHealth(customUrl, env.render.customDomainReadyTimeoutMs); + url = customUrl; + } catch { + console.warn( + `[provisioner] vanity domain not ready yet for ${input.workerId}; returning Render URL fallback`, + ); + } + } + + return { + provider: "render", + url, + status: "healthy", + region: service.serviceDetails?.region ?? env.render.workerRegion, + }; +} + +export async function provisionWorker( + input: ProvisionInput, +): Promise { + if (env.provisionerMode === "render") { + return provisionWorkerOnRender(input); + } + + if (env.provisionerMode === "daytona") { + return provisionWorkerOnDaytona(input); + } + + const template = env.workerUrlTemplate ?? "https://workers.local/{workerId}"; + const url = template.replace("{workerId}", input.workerId); + return { + provider: "stub", + url, + status: "provisioning", + }; +} + +export async function deprovisionWorker(input: { + workerId: WorkerId; + instanceUrl: string | null; +}) { + if (env.provisionerMode === "daytona") { + await deprovisionWorkerOnDaytona(input.workerId); + return; + } + + if (env.provisionerMode !== "render") { + return; + } + + assertRenderConfig(); + + const targetHost = hostFromUrl(input.instanceUrl); + const workerHint = input.workerId.slice(0, 8).toLowerCase(); + + const services = await listRenderServices(); + + const target = + services.find((service) => { + if (service.name?.toLowerCase().includes(workerHint)) { + return true; + } + + if ( + targetHost && + hostFromUrl(service.serviceDetails?.url) === targetHost + ) { + return true; + } + + return false; + }) ?? null; + + if (!target) { + return; + } + + try { + await renderRequest(`/services/${target.id}/suspend`, { + method: "POST", + body: JSON.stringify({}), + }); + } catch (error) { + const message = error instanceof Error ? 
error.message : "unknown_error"; + console.warn( + `[provisioner] failed to suspend Render service ${target.id}: ${message}`, + ); + } +} diff --git a/services/den-v2/src/workers/vanity-domain.ts b/services/den-v2/src/workers/vanity-domain.ts new file mode 100644 index 00000000..13e675f4 --- /dev/null +++ b/services/den-v2/src/workers/vanity-domain.ts @@ -0,0 +1,183 @@ +function normalizeUrl(value: string): string { + return value.trim().replace(/\/+$/, "") +} + +function slug(value: string) { + return value + .toLowerCase() + .replace(/[^a-z0-9-]+/g, "-") + .replace(/-+/g, "-") + .replace(/^-|-$/g, "") +} + +function splitHostname(hostname: string, domain: string): string | null { + const normalizedHost = hostname.trim().toLowerCase() + const normalizedDomain = domain.trim().toLowerCase() + if (!normalizedHost || !normalizedDomain) { + return null + } + + if (normalizedHost === normalizedDomain) { + return "" + } + + if (!normalizedHost.endsWith(`.${normalizedDomain}`)) { + return null + } + + return normalizedHost.slice(0, -(normalizedDomain.length + 1)) +} + +function hostFromUrl(value: string): string | null { + try { + return new URL(normalizeUrl(value)).host.toLowerCase() + } catch { + return null + } +} + +function withVercelScope(url: URL, teamId?: string, teamSlug?: string) { + if (teamId?.trim()) { + url.searchParams.set("teamId", teamId.trim()) + } else if (teamSlug?.trim()) { + url.searchParams.set("slug", teamSlug.trim()) + } + return url +} + +type VercelDnsRecord = { + id: string + type?: string + name?: string + value?: string +} + +async function vercelRequest(input: { + apiBase: string + token: string + path: string + teamId?: string + teamSlug?: string + method?: "GET" | "POST" | "PATCH" + body?: unknown +}): Promise { + const base = normalizeUrl(input.apiBase || "https://api.vercel.com") + const url = withVercelScope(new URL(`${base}${input.path}`), input.teamId, input.teamSlug) + const headers = new Headers({ + Authorization: `Bearer 
${input.token}`, + Accept: "application/json", + }) + + const init: RequestInit = { + method: input.method ?? "GET", + headers, + } + + if (typeof input.body !== "undefined") { + headers.set("Content-Type", "application/json") + init.body = JSON.stringify(input.body) + } + + const response = await fetch(url, init) + const text = await response.text() + + if (!response.ok) { + throw new Error(`Vercel API ${input.path} failed (${response.status}): ${text.slice(0, 300)}`) + } + + if (!text) { + return null as T + } + + return JSON.parse(text) as T +} + +export function customDomainForWorker(workerId: string, suffix: string | null | undefined): string | null { + const normalizedSuffix = suffix?.trim().toLowerCase() + if (!normalizedSuffix) { + return null + } + + const label = slug(workerId).slice(0, 32) + if (!label) { + return null + } + + return `${label}.${normalizedSuffix}` +} + +export async function ensureVercelDnsRecord(input: { + hostname: string + targetUrl: string + domain: string | null | undefined + apiBase?: string + token?: string + teamId?: string + teamSlug?: string +}): Promise { + const domain = input.domain?.trim().toLowerCase() + const token = input.token?.trim() + if (!domain || !token) { + return false + } + + const name = splitHostname(input.hostname, domain) + const targetHost = hostFromUrl(input.targetUrl) + if (name === null || !targetHost) { + return false + } + + const list = await vercelRequest<{ records?: VercelDnsRecord[] }>({ + apiBase: input.apiBase ?? "https://api.vercel.com", + token, + teamId: input.teamId, + teamSlug: input.teamSlug, + path: `/v4/domains/${encodeURIComponent(domain)}/records`, + }) + + const records = Array.isArray(list.records) ? list.records : [] + const current = records.find((record) => { + if (!record?.id) { + return false + } + if ((record.type ?? "").toUpperCase() !== "CNAME") { + return false + } + return (record.name ?? "") === name + }) + + if (current && (current.value ?? 
"").toLowerCase() === targetHost.toLowerCase()) { + return true + } + + const payload = { + name, + type: "CNAME", + value: targetHost, + } + + if (current?.id) { + await vercelRequest({ + apiBase: input.apiBase ?? "https://api.vercel.com", + token, + teamId: input.teamId, + teamSlug: input.teamSlug, + method: "PATCH", + path: `/v4/domains/${encodeURIComponent(domain)}/records/${encodeURIComponent(current.id)}`, + body: payload, + }) + return true + } + + await vercelRequest({ + apiBase: input.apiBase ?? "https://api.vercel.com", + token, + teamId: input.teamId, + teamSlug: input.teamSlug, + method: "POST", + path: `/v4/domains/${encodeURIComponent(domain)}/records`, + body: payload, + }) + + return true +} diff --git a/services/den-v2/tsconfig.json b/services/den-v2/tsconfig.json new file mode 100644 index 00000000..21f5aeef --- /dev/null +++ b/services/den-v2/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "moduleResolution": "Bundler", + "rootDir": "src", + "outDir": "dist", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "resolveJsonModule": true + }, + "include": ["src"] +} diff --git a/services/den-worker-proxy/.env.example b/services/den-worker-proxy/.env.example new file mode 100644 index 00000000..79113fab --- /dev/null +++ b/services/den-worker-proxy/.env.example @@ -0,0 +1,12 @@ +DATABASE_URL= +DATABASE_HOST= +DATABASE_USERNAME= +DATABASE_PASSWORD= +DB_MODE= +PORT=8789 +OPENWORK_DAYTONA_ENV_PATH= +DAYTONA_API_URL=https://app.daytona.io/api +DAYTONA_API_KEY= +DAYTONA_TARGET= +DAYTONA_OPENWORK_PORT=8787 +DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS=86400 diff --git a/services/den-worker-proxy/package.json b/services/den-worker-proxy/package.json new file mode 100644 index 00000000..ccac5b9a --- /dev/null +++ b/services/den-worker-proxy/package.json @@ -0,0 +1,23 @@ +{ + "name": "@openwork/den-worker-proxy", + "private": true, + "type": "module", + "scripts": { + "dev": "npm run 
build:den-db && OPENWORK_DEV_MODE=1 tsx watch src/server.ts", + "build": "npm run build:den-db && tsc -p tsconfig.json", + "build:den-db": "npm --prefix ../../packages/den-db run build", + "start": "node dist/server.js" + }, + "dependencies": { + "@daytonaio/sdk": "^0.150.0", + "@hono/node-server": "^1.13.8", + "dotenv": "^16.4.5", + "hono": "^4.7.2", + "zod": "^4.3.6" + }, + "devDependencies": { + "@types/node": "^20.11.30", + "tsx": "^4.15.7", + "typescript": "^5.5.4" + } +} diff --git a/services/den-worker-proxy/src/app.ts b/services/den-worker-proxy/src/app.ts new file mode 100644 index 00000000..8a726f08 --- /dev/null +++ b/services/den-worker-proxy/src/app.ts @@ -0,0 +1,178 @@ +import "./load-env.js" +import { Daytona } from "@daytonaio/sdk" +import { Hono } from "hono" +import { eq } from "../../../packages/den-db/dist/drizzle.js" +import { createDenDb, DaytonaSandboxTable } from "../../../packages/den-db/dist/index.js" +import { normalizeDenTypeId } from "../../../packages/utils/dist/typeid.js" +import { env } from "./env.js" + +const { db } = createDenDb({ + databaseUrl: env.databaseUrl, + mode: env.dbMode, + planetscale: env.planetscale, +}) +const app = new Hono() +const maxSignedPreviewExpirySeconds = 60 * 60 * 24 +const signedPreviewRefreshLeadMs = 5 * 60 * 1000 +type WorkerId = typeof DaytonaSandboxTable.$inferSelect.worker_id + +function assertDaytonaConfig() { + if (!env.daytona.apiKey) { + throw new Error("DAYTONA_API_KEY is required for worker proxy") + } +} + +function createDaytonaClient() { + assertDaytonaConfig() + return new Daytona({ + apiKey: env.daytona.apiKey, + apiUrl: env.daytona.apiUrl, + ...(env.daytona.target ? 
{ target: env.daytona.target } : {}), + }) +} + +function normalizedSignedPreviewExpirySeconds() { + return Math.max(1, Math.min(env.daytona.signedPreviewExpiresSeconds, maxSignedPreviewExpirySeconds)) +} + +function signedPreviewRefreshAt(expiresInSeconds: number) { + return new Date(Date.now() + Math.max(0, expiresInSeconds * 1000 - signedPreviewRefreshLeadMs)) +} + +function noCacheHeaders(headers: Headers) { + headers.set("Cache-Control", "no-store, no-cache, must-revalidate, proxy-revalidate") + headers.set("Pragma", "no-cache") + headers.set("Expires", "0") + headers.set("Surrogate-Control", "no-store") +} + +function stripProxyHeaders(input: Headers) { + const headers = new Headers(input) + headers.delete("host") + headers.delete("content-length") + headers.delete("connection") + return headers +} + +function targetUrl(baseUrl: string, requestUrl: string, workerId: WorkerId) { + const current = new URL(requestUrl) + const suffix = current.pathname.slice(`/${encodeURIComponent(workerId)}`.length) || "/" + return `${baseUrl.replace(/\/+$/, "")}${suffix}${current.search}` +} + +async function getSignedPreviewUrl(workerId: WorkerId) { + const rows = await db + .select() + .from(DaytonaSandboxTable) + .where(eq(DaytonaSandboxTable.worker_id, workerId)) + .limit(1) + + const record = rows[0] ?? 
null + if (!record) { + return null + } + + if (record.signed_preview_url_expires_at.getTime() > Date.now()) { + return record.signed_preview_url + } + + const daytona = createDaytonaClient() + const sandbox = await daytona.get(record.sandbox_id) + await sandbox.refreshData() + + const expiresInSeconds = normalizedSignedPreviewExpirySeconds() + const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds) + + await db + .update(DaytonaSandboxTable) + .set({ + signed_preview_url: preview.url, + signed_preview_url_expires_at: signedPreviewRefreshAt(expiresInSeconds), + region: sandbox.target, + }) + .where(eq(DaytonaSandboxTable.worker_id, workerId)) + + return preview.url +} + +async function proxyRequest(workerId: WorkerId, request: Request) { + let baseUrl: string | null = null + + try { + baseUrl = await getSignedPreviewUrl(workerId) + } catch (error) { + const headers = new Headers({ "Content-Type": "application/json" }) + noCacheHeaders(headers) + return new Response(JSON.stringify({ + error: "worker_proxy_refresh_failed", + message: error instanceof Error ? error.message : "unknown_error", + }), { status: 502, headers }) + } + + if (!baseUrl) { + const headers = new Headers({ "Content-Type": "application/json" }) + noCacheHeaders(headers) + return new Response(JSON.stringify({ error: "worker_proxy_unavailable" }), { + status: 404, + headers, + }) + } + + let upstream: Response + try { + upstream = await fetch(targetUrl(baseUrl, request.url, workerId), { + method: request.method, + headers: stripProxyHeaders(request.headers), + body: request.method === "GET" || request.method === "HEAD" ? undefined : await request.arrayBuffer(), + redirect: "manual", + }) + } catch (error) { + const headers = new Headers({ "Content-Type": "application/json" }) + noCacheHeaders(headers) + return new Response(JSON.stringify({ + error: "worker_proxy_upstream_failed", + message: error instanceof Error ? 
error.message : "unknown_error", + }), { status: 502, headers }) + } + + const headers = new Headers(upstream.headers) + headers.delete("content-length") + noCacheHeaders(headers) + + return new Response(upstream.body, { + status: upstream.status, + headers, + }) +} + +app.all("*", async (c) => { + const requestUrl = new URL(c.req.url) + if (requestUrl.pathname === "/") { + return Response.redirect("https://openworklabs.com", 302) + } + + const segments = requestUrl.pathname.split("/").filter(Boolean) + const workerId = segments[0]?.trim() + + if (!workerId) { + const headers = new Headers({ "Content-Type": "application/json" }) + noCacheHeaders(headers) + return new Response(JSON.stringify({ error: "worker_id_required" }), { + status: 400, + headers, + }) + } + + try { + return proxyRequest(normalizeDenTypeId("worker", workerId), c.req.raw) + } catch { + const headers = new Headers({ "Content-Type": "application/json" }) + noCacheHeaders(headers) + return new Response(JSON.stringify({ error: "worker_not_found" }), { + status: 404, + headers, + }) + } +}) + +export default app diff --git a/services/den-worker-proxy/src/env.ts b/services/den-worker-proxy/src/env.ts new file mode 100644 index 00000000..e6ea967d --- /dev/null +++ b/services/den-worker-proxy/src/env.ts @@ -0,0 +1,67 @@ +import { z } from "zod" + +const EnvSchema = z.object({ + DATABASE_URL: z.string().min(1).optional(), + DATABASE_HOST: z.string().min(1).optional(), + DATABASE_USERNAME: z.string().min(1).optional(), + DATABASE_PASSWORD: z.string().optional(), + DB_MODE: z.enum(["mysql", "planetscale"]).optional(), + PORT: z.string().optional(), + DAYTONA_API_URL: z.string().optional(), + DAYTONA_API_KEY: z.string().optional(), + DAYTONA_TARGET: z.string().optional(), + DAYTONA_OPENWORK_PORT: z.string().optional(), + DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: z.string().optional(), +}).superRefine((value, ctx) => { + const inferredMode = value.DB_MODE ?? (value.DATABASE_URL ? 
"mysql" : "planetscale") + + if (inferredMode === "mysql" && !value.DATABASE_URL) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "DATABASE_URL is required when using mysql mode", + path: ["DATABASE_URL"], + }) + } + + if (inferredMode === "planetscale") { + for (const key of ["DATABASE_HOST", "DATABASE_USERNAME", "DATABASE_PASSWORD"] as const) { + if (!value[key]) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: `${key} is required when using planetscale mode`, + path: [key], + }) + } + } + } +}) + +const parsed = EnvSchema.parse(process.env) + +const planetscaleCredentials = + parsed.DATABASE_HOST && parsed.DATABASE_USERNAME && parsed.DATABASE_PASSWORD !== undefined + ? { + host: parsed.DATABASE_HOST, + username: parsed.DATABASE_USERNAME, + password: parsed.DATABASE_PASSWORD, + } + : null + +function optionalString(value: string | undefined) { + const trimmed = value?.trim() + return trimmed ? trimmed : undefined +} + +export const env = { + databaseUrl: parsed.DATABASE_URL, + dbMode: parsed.DB_MODE ?? (parsed.DATABASE_URL ? "mysql" : "planetscale"), + planetscale: planetscaleCredentials, + port: Number(parsed.PORT ?? "8789"), + daytona: { + apiUrl: optionalString(parsed.DAYTONA_API_URL) ?? "https://app.daytona.io/api", + apiKey: optionalString(parsed.DAYTONA_API_KEY), + target: optionalString(parsed.DAYTONA_TARGET), + openworkPort: Number(parsed.DAYTONA_OPENWORK_PORT ?? "8787"), + signedPreviewExpiresSeconds: Number(parsed.DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS ?? 
"86400"), + }, +} diff --git a/services/den-worker-proxy/src/index.ts b/services/den-worker-proxy/src/index.ts new file mode 100644 index 00000000..1bd62f63 --- /dev/null +++ b/services/den-worker-proxy/src/index.ts @@ -0,0 +1,3 @@ +import app from "./app.js" + +export default app diff --git a/services/den-worker-proxy/src/load-env.ts b/services/den-worker-proxy/src/load-env.ts new file mode 100644 index 00000000..24fe2b65 --- /dev/null +++ b/services/den-worker-proxy/src/load-env.ts @@ -0,0 +1,42 @@ +import { existsSync } from "node:fs" +import path from "node:path" +import { fileURLToPath } from "node:url" +import dotenv from "dotenv" + +function findUpwards(startDir: string, fileName: string, maxDepth = 8) { + let current = startDir + + for (let depth = 0; depth <= maxDepth; depth += 1) { + const candidate = path.join(current, fileName) + if (existsSync(candidate)) { + return candidate + } + + const parent = path.dirname(current) + if (parent === current) { + break + } + current = parent + } + + return null +} + +const srcDir = path.dirname(fileURLToPath(import.meta.url)) +const serviceDir = path.resolve(srcDir, "..") + +for (const filePath of [path.join(serviceDir, ".env.local"), path.join(serviceDir, ".env")]) { + if (existsSync(filePath)) { + dotenv.config({ path: filePath, override: false }) + } +} + +const explicitDaytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() +const detectedDaytonaEnvPath = findUpwards(path.resolve(serviceDir, "..", ".."), ".env.daytona") +const daytonaEnvPath = explicitDaytonaEnvPath || detectedDaytonaEnvPath + +if (daytonaEnvPath && existsSync(daytonaEnvPath)) { + dotenv.config({ path: daytonaEnvPath, override: false }) +} + +dotenv.config({ override: false }) diff --git a/services/den-worker-proxy/src/server.ts b/services/den-worker-proxy/src/server.ts new file mode 100644 index 00000000..b1359b5e --- /dev/null +++ b/services/den-worker-proxy/src/server.ts @@ -0,0 +1,7 @@ +import { serve } from "@hono/node-server" 
+import app from "./app.js" +import { env } from "./env.js" + +serve({ fetch: app.fetch, port: env.port }, (info) => { + console.log(`worker proxy listening on ${info.port}`) +}) diff --git a/services/den-worker-proxy/tsconfig.json b/services/den-worker-proxy/tsconfig.json new file mode 100644 index 00000000..21f5aeef --- /dev/null +++ b/services/den-worker-proxy/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "moduleResolution": "Bundler", + "rootDir": "src", + "outDir": "dist", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "resolveJsonModule": true + }, + "include": ["src"] +} diff --git a/services/den-worker-runtime/Dockerfile.daytona-snapshot b/services/den-worker-runtime/Dockerfile.daytona-snapshot new file mode 100644 index 00000000..8f437112 --- /dev/null +++ b/services/den-worker-runtime/Dockerfile.daytona-snapshot @@ -0,0 +1,34 @@ +FROM node:22-bookworm-slim + +ARG OPENWORK_ORCHESTRATOR_VERSION=0.11.151 +ARG OPENCODE_VERSION=1.2.6 +ARG OPENCODE_DOWNLOAD_URL= + +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates curl tar unzip \ + && rm -rf /var/lib/apt/lists/* + +RUN npm install -g "openwork-orchestrator@${OPENWORK_ORCHESTRATOR_VERSION}" + +RUN set -eux; \ + arch="$(dpkg --print-architecture)"; \ + case "$arch" in \ + amd64) asset="opencode-linux-x64-baseline.tar.gz" ;; \ + arm64) asset="opencode-linux-arm64.tar.gz" ;; \ + *) echo "unsupported architecture: $arch" >&2; exit 1 ;; \ + esac; \ + url="$OPENCODE_DOWNLOAD_URL"; \ + if [ -z "$url" ]; then \ + url="https://github.com/anomalyco/opencode/releases/download/v${OPENCODE_VERSION}/${asset}"; \ + fi; \ + tmpdir="$(mktemp -d)"; \ + curl -fsSL "$url" -o "$tmpdir/$asset"; \ + tar -xzf "$tmpdir/$asset" -C "$tmpdir"; \ + binary="$(find "$tmpdir" -type f -name opencode | head -n 1)"; \ + test -n "$binary"; \ + install -m 0755 "$binary" /usr/local/bin/opencode; \ + rm -rf "$tmpdir" + +RUN openwork 
--version && opencode --version + +CMD ["sleep", "infinity"] diff --git a/services/den/.env.example b/services/den/.env.example index 3ec62a1b..6beb8722 100644 --- a/services/den/.env.example +++ b/services/den/.env.example @@ -1,5 +1,9 @@ -DATABASE_URL=mysql://root:password@127.0.0.1:3306/openwork_den -BETTER_AUTH_SECRET=local-dev-secret-not-for-production-use!! +DATABASE_URL= +DATABASE_HOST= +DATABASE_USERNAME= +DATABASE_PASSWORD= +DB_MODE= +BETTER_AUTH_SECRET= BETTER_AUTH_URL=http://localhost:8788 DEN_BETTER_AUTH_TRUSTED_ORIGINS=http://localhost:3005,http://localhost:5173 GITHUB_CLIENT_ID= @@ -7,8 +11,10 @@ GITHUB_CLIENT_SECRET= GOOGLE_CLIENT_ID= GOOGLE_CLIENT_SECRET= PORT=8788 +WORKER_PROXY_PORT=8789 CORS_ORIGINS=http://localhost:3005,http://localhost:5173 PROVISIONER_MODE=stub +OPENWORK_DAYTONA_ENV_PATH= WORKER_URL_TEMPLATE=https://workers.example.com/{workerId} RENDER_API_BASE=https://api.render.com/v1 RENDER_API_KEY= @@ -37,3 +43,31 @@ POLAR_PRODUCT_ID= POLAR_BENEFIT_ID= POLAR_SUCCESS_URL=http://localhost:8788 POLAR_RETURN_URL=http://localhost:8788 +DAYTONA_API_URL=https://app.daytona.io/api +DAYTONA_API_KEY= +DAYTONA_TARGET= +DAYTONA_SNAPSHOT= +DAYTONA_SANDBOX_IMAGE=node:20-bookworm +DAYTONA_SANDBOX_CPU=2 +DAYTONA_SANDBOX_MEMORY=4 +DAYTONA_SANDBOX_DISK=8 +DAYTONA_SANDBOX_PUBLIC=false +DAYTONA_SANDBOX_AUTO_STOP_INTERVAL=0 +DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL=10080 +DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL=-1 +DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS=86400 +DAYTONA_WORKER_PROXY_BASE_URL=https://workers.den.openworklabs +DAYTONA_SANDBOX_NAME_PREFIX=den-daytona-worker +DAYTONA_VOLUME_NAME_PREFIX=den-daytona-worker +DAYTONA_WORKSPACE_MOUNT_PATH=/workspace +DAYTONA_DATA_MOUNT_PATH=/persist/openwork +DAYTONA_RUNTIME_WORKSPACE_PATH=/tmp/openwork-workspace +DAYTONA_RUNTIME_DATA_PATH=/tmp/openwork-data +DAYTONA_SIDECAR_DIR=/tmp/openwork-sidecars +DAYTONA_OPENWORK_PORT=8787 +DAYTONA_OPENCODE_PORT=4096 +DAYTONA_OPENWORK_VERSION= +DAYTONA_CREATE_TIMEOUT_SECONDS=300 
+DAYTONA_DELETE_TIMEOUT_SECONDS=120 +DAYTONA_HEALTHCHECK_TIMEOUT_MS=300000 +DAYTONA_POLL_INTERVAL_MS=5000 diff --git a/services/den/AGENT_FOCUS.md b/services/den/AGENT_FOCUS.md index 14df5afb..48265d89 100644 --- a/services/den/AGENT_FOCUS.md +++ b/services/den/AGENT_FOCUS.md @@ -5,7 +5,7 @@ This guide explains how agents should operate, test, and troubleshoot the Den se ## What this service does - Handles auth (`/api/auth/*`) and session lookup (`/v1/me`). -- Creates workers (`/v1/workers`) and provisions cloud workers on Render. +- Creates workers (`/v1/workers`) and provisions cloud workers on Render or Daytona. - Optionally enforces a Polar paywall for cloud worker creation. ## Core flows to test @@ -23,10 +23,10 @@ Expected: all succeed with `200`. Set `POLAR_FEATURE_GATE_ENABLED=false`. 1. `POST /v1/workers` with `destination="cloud"` -2. Confirm `instance.provider="render"` +2. Confirm `instance.provider` matches the configured cloud provisioner (`render` or `daytona`) 3. Poll `instance.url + "/health"` -Expected: worker creation `201`, worker health `200`. +Expected: worker creation `202`, worker health `200` after async provisioning finishes. ### 3) Cloud worker flow (paywall enabled) @@ -45,13 +45,14 @@ For an entitled user (has the required Polar benefit): 1. `POST /v1/workers` with `destination="cloud"` -Expected: worker creation `201` with Render-backed instance. +Expected: worker creation `202` with a healthy cloud-backed instance once provisioning completes. 
## Required env vars (summary) - Base: `DATABASE_URL`, `BETTER_AUTH_SECRET`, `BETTER_AUTH_URL` - Optional social auth: `GITHUB_CLIENT_ID`, `GITHUB_CLIENT_SECRET`, `GOOGLE_CLIENT_ID`, `GOOGLE_CLIENT_SECRET` - Render: `PROVISIONER_MODE=render`, `RENDER_API_KEY`, `RENDER_OWNER_ID`, and `RENDER_WORKER_*` +- Daytona: `PROVISIONER_MODE=daytona`, `DAYTONA_API_KEY`, and optional `DAYTONA_*` sizing/mount settings - Polar gate: - `POLAR_FEATURE_GATE_ENABLED` - `POLAR_ACCESS_TOKEN` @@ -66,10 +67,10 @@ Expected: worker creation `201` with Render-backed instance. - `.github/workflows/deploy-den.yml` -It updates Render env vars and triggers a deploy for the configured service ID. +It updates Render env vars and triggers a deploy for the configured service ID. Daytona is intended for local/dev worker testing unless you build a separate hosted Den deployment path for it. ## Common failure modes -- `provisioning_failed`: Render deploy failed or health check timed out. +- `provisioning_failed`: Render deploy failed, Daytona sandbox boot failed, or worker health check timed out. - `payment_required`: Polar gate is enabled and user does not have the required benefit. - startup error: paywall enabled but missing Polar env vars. diff --git a/services/den/README.md b/services/den/README.md index bd13881a..210be46c 100644 --- a/services/den/README.md +++ b/services/den/README.md @@ -36,8 +36,10 @@ The script prints the exact URLs and `docker compose ... 
down` command to use fo - `GOOGLE_CLIENT_ID` optional OAuth app client ID for Google sign-in - `GOOGLE_CLIENT_SECRET` optional OAuth app client secret for Google sign-in - `PORT` server port -`CORS_ORIGINS` comma-separated list of trusted browser origins for Express CORS -`PROVISIONER_MODE` `stub` or `render` +- `CORS_ORIGINS` comma-separated list of trusted browser origins (used for Better Auth origin validation + Express CORS) +- `PROVISIONER_MODE` `stub`, `render`, or `daytona` +- `OPENWORK_DAYTONA_ENV_PATH` optional path to a shared `.env.daytona` file; when unset, Den searches upwards from the repo for `.env.daytona` - `WORKER_URL_TEMPLATE` template string with `{workerId}` - `RENDER_API_BASE` Render API base URL (default `https://api.render.com/v1`) - `RENDER_API_KEY` Render API key (required for `PROVISIONER_MODE=render`) @@ -66,6 +68,79 @@ The script prints the exact URLs and `docker compose ... down` command to use fo - `POLAR_BENEFIT_ID` Polar benefit ID required to unlock cloud workers (required when paywall enabled) - `POLAR_SUCCESS_URL` redirect URL after successful checkout (required when paywall enabled) - `POLAR_RETURN_URL` return URL shown in checkout (required when paywall enabled) +- Daytona: + - `DAYTONA_API_KEY` API key used to create sandboxes and volumes + - `DAYTONA_API_URL` Daytona API base URL (default `https://app.daytona.io/api`) + - `DAYTONA_TARGET` optional Daytona region/target + - `DAYTONA_SNAPSHOT` optional snapshot name; if omitted Den creates workers from `DAYTONA_SANDBOX_IMAGE` + - `DAYTONA_SANDBOX_IMAGE` sandbox base image when no snapshot is provided (default `node:20-bookworm`) + - `DAYTONA_SANDBOX_CPU`, `DAYTONA_SANDBOX_MEMORY`, `DAYTONA_SANDBOX_DISK` resource sizing when image-backed sandboxes are used + - `DAYTONA_SANDBOX_AUTO_STOP_INTERVAL`, `DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL`, `DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL` lifecycle controls + - `DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS` TTL for the signed
OpenWork preview URL returned to Den clients (Daytona currently caps this at 24 hours) + - `DAYTONA_SANDBOX_NAME_PREFIX`, `DAYTONA_VOLUME_NAME_PREFIX` resource naming prefixes + - `DAYTONA_WORKSPACE_MOUNT_PATH`, `DAYTONA_DATA_MOUNT_PATH` volume mount paths inside the sandbox + - `DAYTONA_RUNTIME_WORKSPACE_PATH`, `DAYTONA_RUNTIME_DATA_PATH`, `DAYTONA_SIDECAR_DIR` local sandbox paths used for the live OpenWork runtime; the mounted Daytona volumes are linked into the runtime workspace under `volumes/` + - `DAYTONA_OPENWORK_PORT`, `DAYTONA_OPENCODE_PORT` ports used when launching `openwork serve` + - `DAYTONA_OPENWORK_VERSION` optional npm version to install instead of latest `openwork-orchestrator` + - `DAYTONA_CREATE_TIMEOUT_SECONDS`, `DAYTONA_DELETE_TIMEOUT_SECONDS`, `DAYTONA_HEALTHCHECK_TIMEOUT_MS`, `DAYTONA_POLL_INTERVAL_MS` provisioning timeouts + +For local Daytona development, place your Daytona API credentials in `/_repos/openwork/.env.daytona` and Den will pick them up automatically, including from task worktrees. + +## Building a Daytona snapshot + +If you want Daytona workers to start from a prebuilt runtime instead of a generic base image, create a snapshot and point Den at it. 
+ +The snapshot builder for this repo lives at: + +- `scripts/create-daytona-openwork-snapshot.sh` +- `services/den-worker-runtime/Dockerfile.daytona-snapshot` + +It builds a Linux image with: + +- `openwork-orchestrator` +- `opencode` + +Prerequisites: + +- Docker running locally +- Daytona CLI installed and logged in +- a valid `.env.daytona` with at least `DAYTONA_API_KEY` + +From the OpenWork repo root: + +```bash +./scripts/create-daytona-openwork-snapshot.sh +``` + +To publish a custom-named snapshot: + +```bash +./scripts/create-daytona-openwork-snapshot.sh openwork-runtime +``` + +Useful optional overrides: + +- `DAYTONA_SNAPSHOT_NAME` +- `DAYTONA_SNAPSHOT_REGION` +- `DAYTONA_SNAPSHOT_CPU` +- `DAYTONA_SNAPSHOT_MEMORY` +- `DAYTONA_SNAPSHOT_DISK` +- `OPENWORK_ORCHESTRATOR_VERSION` +- `OPENCODE_VERSION` + +After the snapshot is pushed, set it in `.env.daytona`: + +```env +DAYTONA_SNAPSHOT=openwork-runtime +``` + +Then start Den in Daytona mode: + +```bash +DEN_PROVISIONER_MODE=daytona packaging/docker/den-dev-up.sh +``` + +If you do not set `DAYTONA_SNAPSHOT`, Den falls back to `DAYTONA_SANDBOX_IMAGE` and installs runtime dependencies at sandbox startup. ## Auth setup (Better Auth) @@ -80,6 +155,9 @@ Apply migrations: ```bash pnpm db:generate pnpm db:migrate + +# or use the SQL migration runner used by Docker +pnpm db:migrate:sql ``` ## API @@ -96,7 +174,7 @@ pnpm db:migrate - Includes latest instance metadata when available. - `POST /v1/workers/:id/tokens` - `DELETE /v1/workers/:id` - - Deletes worker records and attempts to suspend the backing cloud service when destination is `cloud`. + - Deletes worker records and attempts to tear down the backing cloud runtime when destination is `cloud`. 
## CI deployment (dev == prod) diff --git a/services/den/drizzle.config.ts b/services/den/drizzle.config.ts index cc69e4c9..4824f8c6 100644 --- a/services/den/drizzle.config.ts +++ b/services/den/drizzle.config.ts @@ -1,4 +1,4 @@ -import "dotenv/config" +import "./src/load-env.ts" import { defineConfig } from "drizzle-kit" export default defineConfig({ diff --git a/services/den/drizzle/0000_tense_lilandra.sql b/services/den/drizzle/0000_baseline.sql similarity index 63% rename from services/den/drizzle/0000_tense_lilandra.sql rename to services/den/drizzle/0000_baseline.sql index 38fd03ba..2d1db5c8 100644 --- a/services/den/drizzle/0000_tense_lilandra.sql +++ b/services/den/drizzle/0000_baseline.sql @@ -1,3 +1,13 @@ +CREATE TABLE `admin_allowlist` ( + `id` varchar(64) NOT NULL, + `email` varchar(255) NOT NULL, + `note` varchar(255), + `created_at` timestamp(3) NOT NULL DEFAULT (now()), + `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + CONSTRAINT `admin_allowlist_id` PRIMARY KEY(`id`), + CONSTRAINT `admin_allowlist_email` UNIQUE(`email`) +); +--> statement-breakpoint CREATE TABLE `audit_event` ( `id` varchar(64) NOT NULL, `org_id` varchar(64) NOT NULL, @@ -11,30 +21,30 @@ CREATE TABLE `audit_event` ( --> statement-breakpoint CREATE TABLE `account` ( `id` varchar(64) NOT NULL, - `userId` varchar(64) NOT NULL, - `accountId` varchar(255) NOT NULL, - `providerId` varchar(255) NOT NULL, - `accessToken` text, - `refreshToken` text, - `accessTokenExpiresAt` timestamp(3), - `refreshTokenExpiresAt` timestamp(3), - `scope` varchar(1024), - `idToken` text, - `password` varchar(512), - `createdAt` timestamp(3) NOT NULL DEFAULT (now()), - `updatedAt` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + `user_id` varchar(64) NOT NULL, + `account_id` text NOT NULL, + `provider_id` text NOT NULL, + `access_token` text, + `refresh_token` text, + `access_token_expires_at` timestamp(3), + 
`refresh_token_expires_at` timestamp(3), + `scope` text, + `id_token` text, + `password` text, + `created_at` timestamp(3) NOT NULL DEFAULT (now()), + `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), CONSTRAINT `account_id` PRIMARY KEY(`id`) ); --> statement-breakpoint CREATE TABLE `session` ( `id` varchar(64) NOT NULL, - `userId` varchar(64) NOT NULL, + `user_id` varchar(64) NOT NULL, `token` varchar(255) NOT NULL, - `expiresAt` timestamp(3) NOT NULL, - `ipAddress` varchar(255), - `userAgent` varchar(1024), - `createdAt` timestamp(3) NOT NULL DEFAULT (now()), - `updatedAt` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + `expires_at` timestamp(3) NOT NULL, + `ip_address` text, + `user_agent` text, + `created_at` timestamp(3) NOT NULL DEFAULT (now()), + `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), CONSTRAINT `session_id` PRIMARY KEY(`id`), CONSTRAINT `session_token` UNIQUE(`token`) ); @@ -43,10 +53,10 @@ CREATE TABLE `user` ( `id` varchar(64) NOT NULL, `name` varchar(255) NOT NULL, `email` varchar(255) NOT NULL, - `emailVerified` boolean NOT NULL DEFAULT false, - `image` varchar(2048), - `createdAt` timestamp(3) NOT NULL DEFAULT (now()), - `updatedAt` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + `email_verified` boolean NOT NULL DEFAULT false, + `image` text, + `created_at` timestamp(3) NOT NULL DEFAULT (now()), + `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), CONSTRAINT `user_id` PRIMARY KEY(`id`), CONSTRAINT `user_email` UNIQUE(`email`) ); @@ -54,13 +64,29 @@ CREATE TABLE `user` ( CREATE TABLE `verification` ( `id` varchar(64) NOT NULL, `identifier` varchar(255) NOT NULL, - `value` varchar(1024) NOT NULL, - `expiresAt` timestamp(3) NOT NULL, - `createdAt` timestamp(3) NOT NULL DEFAULT (now()), - `updatedAt` timestamp(3) NOT NULL DEFAULT 
CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + `value` text NOT NULL, + `expires_at` timestamp(3) NOT NULL, + `created_at` timestamp(3) NOT NULL DEFAULT (now()), + `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), CONSTRAINT `verification_id` PRIMARY KEY(`id`) ); --> statement-breakpoint +CREATE TABLE `daytona_sandbox` ( + `id` varchar(64) NOT NULL, + `worker_id` varchar(64) NOT NULL, + `sandbox_id` varchar(128) NOT NULL, + `workspace_volume_id` varchar(128) NOT NULL, + `data_volume_id` varchar(128) NOT NULL, + `signed_preview_url` varchar(2048) NOT NULL, + `signed_preview_url_expires_at` timestamp(3) NOT NULL, + `region` varchar(64), + `created_at` timestamp(3) NOT NULL DEFAULT (now()), + `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + CONSTRAINT `daytona_sandbox_id` PRIMARY KEY(`id`), + CONSTRAINT `daytona_sandbox_worker_id` UNIQUE(`worker_id`), + CONSTRAINT `daytona_sandbox_sandbox_id` UNIQUE(`sandbox_id`) +); +--> statement-breakpoint CREATE TABLE `org_membership` ( `id` varchar(64) NOT NULL, `org_id` varchar(64) NOT NULL, @@ -105,6 +131,7 @@ CREATE TABLE `worker_instance` ( CREATE TABLE `worker` ( `id` varchar(64) NOT NULL, `org_id` varchar(64) NOT NULL, + `created_by_user_id` varchar(64), `name` varchar(255) NOT NULL, `description` varchar(1024), `destination` enum('local','cloud') NOT NULL, @@ -130,10 +157,8 @@ CREATE TABLE `worker_token` ( --> statement-breakpoint CREATE INDEX `audit_event_org_id` ON `audit_event` (`org_id`);--> statement-breakpoint CREATE INDEX `audit_event_worker_id` ON `audit_event` (`worker_id`);--> statement-breakpoint -CREATE INDEX `account_user_id` ON `account` (`userId`);--> statement-breakpoint -CREATE INDEX `account_provider_id` ON `account` (`providerId`);--> statement-breakpoint -CREATE INDEX `account_account_id` ON `account` (`accountId`);--> statement-breakpoint -CREATE INDEX `session_user_id` ON `session` (`userId`);--> 
statement-breakpoint +CREATE INDEX `account_user_id` ON `account` (`user_id`);--> statement-breakpoint +CREATE INDEX `session_user_id` ON `session` (`user_id`);--> statement-breakpoint CREATE INDEX `verification_identifier` ON `verification` (`identifier`);--> statement-breakpoint CREATE INDEX `org_membership_org_id` ON `org_membership` (`org_id`);--> statement-breakpoint CREATE INDEX `org_membership_user_id` ON `org_membership` (`user_id`);--> statement-breakpoint @@ -141,5 +166,6 @@ CREATE INDEX `org_owner_user_id` ON `org` (`owner_user_id`);--> statement-breakp CREATE INDEX `worker_bundle_worker_id` ON `worker_bundle` (`worker_id`);--> statement-breakpoint CREATE INDEX `worker_instance_worker_id` ON `worker_instance` (`worker_id`);--> statement-breakpoint CREATE INDEX `worker_org_id` ON `worker` (`org_id`);--> statement-breakpoint +CREATE INDEX `worker_created_by_user_id` ON `worker` (`created_by_user_id`);--> statement-breakpoint CREATE INDEX `worker_status` ON `worker` (`status`);--> statement-breakpoint -CREATE INDEX `worker_token_worker_id` ON `worker_token` (`worker_id`); +CREATE INDEX `worker_token_worker_id` ON `worker_token` (`worker_id`); \ No newline at end of file diff --git a/services/den/drizzle/0001_auth_columns_fix.sql b/services/den/drizzle/0001_auth_columns_fix.sql deleted file mode 100644 index 0111bbec..00000000 --- a/services/den/drizzle/0001_auth_columns_fix.sql +++ /dev/null @@ -1,65 +0,0 @@ -DROP TABLE IF EXISTS `account`; ---> statement-breakpoint -DROP TABLE IF EXISTS `session`; ---> statement-breakpoint -DROP TABLE IF EXISTS `verification`; ---> statement-breakpoint -DROP TABLE IF EXISTS `user`; ---> statement-breakpoint -CREATE TABLE `user` ( - `id` varchar(36) NOT NULL, - `name` varchar(255) NOT NULL, - `email` varchar(255) NOT NULL, - `email_verified` boolean NOT NULL DEFAULT false, - `image` text, - `created_at` timestamp(3) NOT NULL DEFAULT (now()), - `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE 
CURRENT_TIMESTAMP(3), - CONSTRAINT `user_id` PRIMARY KEY(`id`), - CONSTRAINT `user_email` UNIQUE(`email`) -); ---> statement-breakpoint -CREATE TABLE `session` ( - `id` varchar(36) NOT NULL, - `expires_at` timestamp(3) NOT NULL, - `token` varchar(255) NOT NULL, - `created_at` timestamp(3) NOT NULL DEFAULT (now()), - `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), - `ip_address` text, - `user_agent` text, - `user_id` varchar(36) NOT NULL, - CONSTRAINT `session_id` PRIMARY KEY(`id`), - CONSTRAINT `session_token` UNIQUE(`token`) -); ---> statement-breakpoint -CREATE INDEX `session_user_id` ON `session` (`user_id`); ---> statement-breakpoint -CREATE TABLE `account` ( - `id` varchar(36) NOT NULL, - `account_id` text NOT NULL, - `provider_id` text NOT NULL, - `user_id` varchar(36) NOT NULL, - `access_token` text, - `refresh_token` text, - `id_token` text, - `access_token_expires_at` timestamp(3), - `refresh_token_expires_at` timestamp(3), - `scope` text, - `password` text, - `created_at` timestamp(3) NOT NULL DEFAULT (now()), - `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), - CONSTRAINT `account_id` PRIMARY KEY(`id`) -); ---> statement-breakpoint -CREATE INDEX `account_user_id` ON `account` (`user_id`); ---> statement-breakpoint -CREATE TABLE `verification` ( - `id` varchar(36) NOT NULL, - `identifier` varchar(255) NOT NULL, - `value` text NOT NULL, - `expires_at` timestamp(3) NOT NULL, - `created_at` timestamp(3) NOT NULL DEFAULT (now()), - `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), - CONSTRAINT `verification_id` PRIMARY KEY(`id`) -); ---> statement-breakpoint -CREATE INDEX `verification_identifier` ON `verification` (`identifier`); diff --git a/services/den/drizzle/0002_worker_created_by.sql b/services/den/drizzle/0002_worker_created_by.sql deleted file mode 100644 index 03f47ea2..00000000 --- 
a/services/den/drizzle/0002_worker_created_by.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE `worker` ADD `created_by_user_id` varchar(64); ---> statement-breakpoint -CREATE INDEX `worker_created_by_user_id` ON `worker` (`created_by_user_id`); diff --git a/services/den/drizzle/0003_admin_allowlist.sql b/services/den/drizzle/0003_admin_allowlist.sql deleted file mode 100644 index b799d763..00000000 --- a/services/den/drizzle/0003_admin_allowlist.sql +++ /dev/null @@ -1,9 +0,0 @@ -CREATE TABLE `admin_allowlist` ( - `id` varchar(64) NOT NULL, - `email` varchar(255) NOT NULL, - `note` varchar(255), - `created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3), - `updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), - CONSTRAINT `admin_allowlist_id` PRIMARY KEY(`id`), - CONSTRAINT `admin_allowlist_email` UNIQUE(`email`) -); diff --git a/services/den/drizzle/meta/0000_snapshot.json b/services/den/drizzle/meta/0000_snapshot.json new file mode 100644 index 00000000..8db5a005 --- /dev/null +++ b/services/den/drizzle/meta/0000_snapshot.json @@ -0,0 +1,1108 @@ +{ + "version": "5", + "dialect": "mysql", + "id": "ebdc03dc-42bb-4f46-ab85-8096284f5ba1", + "prevId": "00000000-0000-0000-0000-000000000000", + "tables": { + "admin_allowlist": { + "name": "admin_allowlist", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "email": { + "name": "email", + "type": "varchar(255)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "note": { + "name": "note", + "type": "varchar(255)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + 
"autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "admin_allowlist_email": { + "name": "admin_allowlist_email", + "columns": [ + "email" + ], + "isUnique": true + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "admin_allowlist_id": { + "name": "admin_allowlist_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "audit_event": { + "name": "audit_event", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "org_id": { + "name": "org_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "worker_id": { + "name": "worker_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "actor_user_id": { + "name": "actor_user_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "action": { + "name": "action", + "type": "varchar(128)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "payload": { + "name": "payload", + "type": "json", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + } + }, + "indexes": { + "audit_event_org_id": { + "name": "audit_event_org_id", + "columns": [ + "org_id" + ], + "isUnique": false + }, + "audit_event_worker_id": { + "name": "audit_event_worker_id", + "columns": [ + "worker_id" + ], + "isUnique": false + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "audit_event_id": { + "name": "audit_event_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "account": { + "name": "account", + "columns": { + "id": { + "name": "id", + "type": 
"varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "user_id": { + "name": "user_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "account_id": { + "name": "account_id", + "type": "text", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "access_token": { + "name": "access_token", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "refresh_token": { + "name": "refresh_token", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "access_token_expires_at": { + "name": "access_token_expires_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "refresh_token_expires_at": { + "name": "refresh_token_expires_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "scope": { + "name": "scope", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "id_token": { + "name": "id_token", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "account_user_id": { + "name": "account_user_id", + "columns": [ + "user_id" + ], + "isUnique": false + } + }, + "foreignKeys": 
{}, + "compositePrimaryKeys": { + "account_id": { + "name": "account_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "session": { + "name": "session", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "user_id": { + "name": "user_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "token": { + "name": "token", + "type": "varchar(255)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "ip_address": { + "name": "ip_address", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "user_agent": { + "name": "user_agent", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "session_token": { + "name": "session_token", + "columns": [ + "token" + ], + "isUnique": true + }, + "session_user_id": { + "name": "session_user_id", + "columns": [ + "user_id" + ], + "isUnique": false + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "session_id": { + "name": "session_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "user": { + "name": "user", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "name": { + "name": "name", + 
"type": "varchar(255)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "email": { + "name": "email", + "type": "varchar(255)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "email_verified": { + "name": "email_verified", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": false + }, + "image": { + "name": "image", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "user_email": { + "name": "user_email", + "columns": [ + "email" + ], + "isUnique": true + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "user_id": { + "name": "user_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "verification": { + "name": "verification", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "identifier": { + "name": "identifier", + "type": "varchar(255)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "value": { + "name": "value", + "type": "text", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + 
"primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "verification_identifier": { + "name": "verification_identifier", + "columns": [ + "identifier" + ], + "isUnique": false + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "verification_id": { + "name": "verification_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "daytona_sandbox": { + "name": "daytona_sandbox", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "worker_id": { + "name": "worker_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "sandbox_id": { + "name": "sandbox_id", + "type": "varchar(128)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "workspace_volume_id": { + "name": "workspace_volume_id", + "type": "varchar(128)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "data_volume_id": { + "name": "data_volume_id", + "type": "varchar(128)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "signed_preview_url": { + "name": "signed_preview_url", + "type": "varchar(2048)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "signed_preview_url_expires_at": { + "name": "signed_preview_url_expires_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "region": { + "name": "region", + "type": "varchar(64)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + 
"autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "daytona_sandbox_worker_id": { + "name": "daytona_sandbox_worker_id", + "columns": [ + "worker_id" + ], + "isUnique": true + }, + "daytona_sandbox_sandbox_id": { + "name": "daytona_sandbox_sandbox_id", + "columns": [ + "sandbox_id" + ], + "isUnique": true + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "daytona_sandbox_id": { + "name": "daytona_sandbox_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "org_membership": { + "name": "org_membership", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "org_id": { + "name": "org_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "user_id": { + "name": "user_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "role": { + "name": "role", + "type": "enum('owner','member')", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + } + }, + "indexes": { + "org_membership_org_id": { + "name": "org_membership_org_id", + "columns": [ + "org_id" + ], + "isUnique": false + }, + "org_membership_user_id": { + "name": "org_membership_user_id", + "columns": [ + "user_id" + ], + "isUnique": false + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "org_membership_id": { + "name": "org_membership_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "org": { + "name": "org", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "name": { + "name": "name", + 
"type": "varchar(255)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "slug": { + "name": "slug", + "type": "varchar(255)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "owner_user_id": { + "name": "owner_user_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "org_slug": { + "name": "org_slug", + "columns": [ + "slug" + ], + "isUnique": true + }, + "org_owner_user_id": { + "name": "org_owner_user_id", + "columns": [ + "owner_user_id" + ], + "isUnique": false + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "org_id": { + "name": "org_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "worker_bundle": { + "name": "worker_bundle", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "worker_id": { + "name": "worker_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "storage_url": { + "name": "storage_url", + "type": "varchar(2048)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "status": { + "name": "status", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + } + }, + "indexes": { + "worker_bundle_worker_id": { + "name": "worker_bundle_worker_id", + 
"columns": [ + "worker_id" + ], + "isUnique": false + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "worker_bundle_id": { + "name": "worker_bundle_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "worker_instance": { + "name": "worker_instance", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "worker_id": { + "name": "worker_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "provider": { + "name": "provider", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "region": { + "name": "region", + "type": "varchar(64)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "url": { + "name": "url", + "type": "varchar(2048)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "status": { + "name": "status", + "type": "enum('provisioning','healthy','failed','stopped')", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "worker_instance_worker_id": { + "name": "worker_instance_worker_id", + "columns": [ + "worker_id" + ], + "isUnique": false + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "worker_instance_id": { + "name": "worker_instance_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "worker": { + "name": "worker", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + 
"notNull": true, + "autoincrement": false + }, + "org_id": { + "name": "org_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "created_by_user_id": { + "name": "created_by_user_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "name": { + "name": "name", + "type": "varchar(255)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "description": { + "name": "description", + "type": "varchar(1024)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "destination": { + "name": "destination", + "type": "enum('local','cloud')", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "status": { + "name": "status", + "type": "enum('provisioning','healthy','failed','stopped')", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "image_version": { + "name": "image_version", + "type": "varchar(128)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "workspace_path": { + "name": "workspace_path", + "type": "varchar(1024)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "sandbox_backend": { + "name": "sandbox_backend", + "type": "varchar(64)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)" + } + }, + "indexes": { + "worker_org_id": { + "name": "worker_org_id", + "columns": [ + "org_id" + ], + "isUnique": false + }, + "worker_created_by_user_id": { + "name": "worker_created_by_user_id", + "columns": [ + "created_by_user_id" + ], + "isUnique": 
false + }, + "worker_status": { + "name": "worker_status", + "columns": [ + "status" + ], + "isUnique": false + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "worker_id": { + "name": "worker_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + }, + "worker_token": { + "name": "worker_token", + "columns": { + "id": { + "name": "id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "worker_id": { + "name": "worker_id", + "type": "varchar(64)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "scope": { + "name": "scope", + "type": "enum('client','host')", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "token": { + "name": "token", + "type": "varchar(128)", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": true, + "autoincrement": false, + "default": "(now())" + }, + "revoked_at": { + "name": "revoked_at", + "type": "timestamp(3)", + "primaryKey": false, + "notNull": false, + "autoincrement": false + } + }, + "indexes": { + "worker_token_worker_id": { + "name": "worker_token_worker_id", + "columns": [ + "worker_id" + ], + "isUnique": false + }, + "worker_token_token": { + "name": "worker_token_token", + "columns": [ + "token" + ], + "isUnique": true + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": { + "worker_token_id": { + "name": "worker_token_id", + "columns": [ + "id" + ] + } + }, + "uniqueConstraints": {}, + "checkConstraint": {} + } + }, + "views": {}, + "_meta": { + "schemas": {}, + "tables": {}, + "columns": {} + }, + "internal": { + "tables": {}, + "indexes": {} + } +} \ No newline at end of file diff --git a/services/den/drizzle/meta/_journal.json b/services/den/drizzle/meta/_journal.json index 491f85be..a96c4657 100644 --- a/services/den/drizzle/meta/_journal.json +++ 
b/services/den/drizzle/meta/_journal.json @@ -5,29 +5,8 @@ { "idx": 0, "version": "5", - "when": 1771638056482, - "tag": "0000_tense_lilandra", - "breakpoints": true - }, - { - "idx": 1, - "version": "5", - "when": 1771639607782, - "tag": "0001_auth_columns_fix", - "breakpoints": true - }, - { - "idx": 2, - "version": "5", - "when": 1771741800000, - "tag": "0002_worker_created_by", - "breakpoints": true - }, - { - "idx": 3, - "version": "5", - "when": 1773353100000, - "tag": "0003_admin_allowlist", + "when": 1773705583301, + "tag": "0000_baseline", "breakpoints": true }, { @@ -38,4 +17,4 @@ "breakpoints": true } ] -} +} \ No newline at end of file diff --git a/services/den/package.json b/services/den/package.json index cbda6bf2..71274f31 100644 --- a/services/den/package.json +++ b/services/den/package.json @@ -3,15 +3,20 @@ "private": true, "type": "module", "scripts": { - "dev": "OPENWORK_DEV_MODE=1 tsx watch src/index.ts", - "build": "tsc -p tsconfig.json", + "dev": "npm run build:den-db && OPENWORK_DEV_MODE=1 tsx watch src/index.ts", + "build": "npm run build:den-db && tsc -p tsconfig.json", + "build:den-db": "npm --prefix ../../packages/den-db run build", "start": "node dist/index.js", + "db:migrate:sql": "node scripts/run-sql-migrations.mjs", + "test:smoke:daytona": "pnpm build && node scripts/daytona-provisioner-smoke.mjs", + "test:e2e:daytona": "node scripts/e2e-daytona-worker.mjs", "test:e2e:worker-limit": "node scripts/e2e-worker-limit.mjs", "db:generate": "drizzle-kit generate", "db:migrate": "drizzle-kit migrate", "auth:generate": "npx @better-auth/cli@latest generate --config src/auth.ts --output src/db/better-auth.schema.ts --yes" }, "dependencies": { + "@daytonaio/sdk": "^0.150.0", "better-auth": "^1.4.18", "cors": "^2.8.5", "dotenv": "^16.4.5", diff --git a/services/den/scripts/daytona-provisioner-smoke.mjs b/services/den/scripts/daytona-provisioner-smoke.mjs new file mode 100644 index 00000000..cefdc139 --- /dev/null +++ 
b/services/den/scripts/daytona-provisioner-smoke.mjs @@ -0,0 +1,129 @@ +import { randomUUID } from "node:crypto" +import { existsSync } from "node:fs" +import { dirname, join, resolve } from "node:path" +import { fileURLToPath } from "node:url" +import { setTimeout as delay } from "node:timers/promises" +import dotenv from "dotenv" +import { Daytona } from "@daytonaio/sdk" + +const __dirname = dirname(fileURLToPath(import.meta.url)) +const serviceDir = resolve(__dirname, "..") +const repoRoot = resolve(serviceDir, "..", "..") + +function findUpwards(startDir, fileName, maxDepth = 8) { + let current = startDir + for (let depth = 0; depth <= maxDepth; depth += 1) { + const candidate = join(current, fileName) + if (existsSync(candidate)) { + return candidate + } + const parent = dirname(current) + if (parent === current) { + break + } + current = parent + } + return null +} + +const daytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() || findUpwards(repoRoot, ".env.daytona") +if (daytonaEnvPath) { + dotenv.config({ path: daytonaEnvPath, override: false }) +} + +process.env.DATABASE_URL ||= "mysql://unused" +process.env.BETTER_AUTH_SECRET ||= "openwork-daytona-local-secret-000000000" +process.env.BETTER_AUTH_URL ||= "http://127.0.0.1" +process.env.CORS_ORIGINS ||= "http://127.0.0.1" +process.env.PROVISIONER_MODE ||= "daytona" + +function log(message, detail) { + if (detail === undefined) { + console.log(message) + return + } + console.log(message, detail) +} + +function fail(message, detail) { + if (detail !== undefined) { + console.error(message, detail) + } else { + console.error(message) + } + process.exit(1) +} + +async function waitForCleanup(daytona, workerId, attempts = 24) { + for (let index = 0; index < attempts; index += 1) { + const sandboxes = await daytona.list( + { + "openwork.den.provider": "daytona", + "openwork.den.worker-id": workerId, + }, + 1, + 20, + ) + if (sandboxes.items.length === 0) { + return + } + await delay(5000) + } + throw new 
Error(`cleanup_timeout:${workerId}`) +} + +async function main() { + if (!process.env.DAYTONA_API_KEY) { + fail("DAYTONA_API_KEY is required. Add it to .env.daytona or export it before running the smoke test.") + } + + const { provisionWorker, deprovisionWorker } = await import("../dist/workers/provisioner.js") + + const workerId = randomUUID() + const clientToken = randomUUID().replaceAll("-", "") + randomUUID().replaceAll("-", "") + const hostToken = randomUUID().replaceAll("-", "") + randomUUID().replaceAll("-", "") + + const instance = await provisionWorker({ + workerId, + name: "daytona-smoke", + hostToken, + clientToken, + }) + + log("Provisioned Daytona worker", instance) + + const workspacesResponse = await fetch(`${instance.url.replace(/\/$/, "")}/workspaces`, { + headers: { + Accept: "application/json", + Authorization: `Bearer ${clientToken}`, + }, + }) + + const workspacesPayload = await workspacesResponse.text() + if (!workspacesResponse.ok) { + fail("Worker /workspaces check failed", { + status: workspacesResponse.status, + body: workspacesPayload, + }) + } + + log("Worker /workspaces responded", workspacesPayload) + + await deprovisionWorker({ + workerId, + instanceUrl: instance.url, + }) + + const daytona = new Daytona({ + apiKey: process.env.DAYTONA_API_KEY, + apiUrl: process.env.DAYTONA_API_URL, + ...(process.env.DAYTONA_TARGET ? { target: process.env.DAYTONA_TARGET } : {}), + }) + + await waitForCleanup(daytona, workerId) + log("Daytona worker cleanup completed", workerId) +} + +main().catch((error) => { + fail(error instanceof Error ? 
error.message : String(error)) +}) diff --git a/services/den/scripts/e2e-daytona-worker.mjs b/services/den/scripts/e2e-daytona-worker.mjs new file mode 100644 index 00000000..59a60d0d --- /dev/null +++ b/services/den/scripts/e2e-daytona-worker.mjs @@ -0,0 +1,489 @@ +import { randomUUID } from "node:crypto" +import { once } from "node:events" +import { existsSync } from "node:fs" +import net from "node:net" +import { dirname, join, resolve } from "node:path" +import { fileURLToPath } from "node:url" +import { setTimeout as delay } from "node:timers/promises" +import { spawn } from "node:child_process" +import dotenv from "dotenv" +import mysql from "mysql2/promise" +import { Daytona } from "@daytonaio/sdk" + +const __dirname = dirname(fileURLToPath(import.meta.url)) +const serviceDir = resolve(__dirname, "..") +const repoRoot = resolve(serviceDir, "..", "..") + +function log(message) { + process.stdout.write(`${message}\n`) +} + +function fail(message, detail) { + if (detail !== undefined) { + console.error(message, detail) + } else { + console.error(message) + } + process.exit(1) +} + +function findUpwards(startDir, fileName, maxDepth = 8) { + let current = startDir + for (let depth = 0; depth <= maxDepth; depth += 1) { + const candidate = join(current, fileName) + if (existsSync(candidate)) { + return candidate + } + const parent = dirname(current) + if (parent === current) { + break + } + current = parent + } + return null +} + +const daytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() || findUpwards(repoRoot, ".env.daytona") +if (daytonaEnvPath) { + dotenv.config({ path: daytonaEnvPath, override: false }) +} + +function slug(value) { + return value + .toLowerCase() + .replace(/[^a-z0-9-]+/g, "-") + .replace(/-+/g, "-") + .replace(/^-|-$/g, "") +} + +function workerHint(workerId) { + return workerId.replace(/-/g, "").slice(0, 12) +} + +function sandboxLabels(workerId) { + return { + "openwork.den.provider": "daytona", + "openwork.den.worker-id": 
workerId, + } +} + +function workspaceVolumeName(workerId) { + const prefix = process.env.DAYTONA_VOLUME_NAME_PREFIX || "den-daytona-worker" + return slug(`${prefix}-${workerHint(workerId)}-workspace`).slice(0, 63) +} + +function dataVolumeName(workerId) { + const prefix = process.env.DAYTONA_VOLUME_NAME_PREFIX || "den-daytona-worker" + return slug(`${prefix}-${workerHint(workerId)}-data`).slice(0, 63) +} + +async function getFreePort() { + return await new Promise((resolvePort, reject) => { + const server = net.createServer() + server.listen(0, "127.0.0.1", () => { + const address = server.address() + if (!address || typeof address === "string") { + reject(new Error("failed_to_resolve_free_port")) + return + } + server.close((error) => (error ? reject(error) : resolvePort(address.port))) + }) + server.on("error", reject) + }) +} + +function spawnCommand(command, args, options = {}) { + return spawn(command, args, { + cwd: serviceDir, + env: process.env, + stdio: "pipe", + ...options, + }) +} + +async function runCommand(command, args, options = {}) { + const child = spawnCommand(command, args, options) + let stdout = "" + let stderr = "" + child.stdout?.on("data", (chunk) => { + stdout += chunk.toString() + }) + child.stderr?.on("data", (chunk) => { + stderr += chunk.toString() + }) + const [code] = await once(child, "exit") + if (code !== 0) { + throw new Error(`${command} ${args.join(" ")} failed\nSTDOUT:\n${stdout}\nSTDERR:\n${stderr}`) + } + return { stdout, stderr } +} + +async function waitForMysqlConnection(databaseUrl, attempts = 60) { + for (let index = 0; index < attempts; index += 1) { + try { + const connection = await mysql.createConnection(databaseUrl) + await connection.query("SELECT 1") + await connection.end() + return + } catch { + await delay(1000) + } + } + throw new Error("mysql_not_ready") +} + +async function waitForHttp(url, attempts = 60, intervalMs = 500) { + for (let index = 0; index < attempts; index += 1) { + try { + const response = 
await fetch(url) + if (response.ok) { + return response + } + } catch { + // ignore until retries are exhausted + } + await delay(intervalMs) + } + throw new Error(`http_not_ready:${url}`) +} + +async function waitForWorkerReady(baseUrl, workerId, auth, attempts = 180) { + for (let index = 0; index < attempts; index += 1) { + const result = await requestJson(baseUrl, `/v1/workers/${workerId}`, auth) + if (result.response.ok && result.payload?.instance?.url && result.payload?.worker?.status === "healthy") { + return result.payload + } + await delay(5000) + } + throw new Error(`worker_not_ready:${workerId}`) +} + +async function waitForDaytonaCleanup(daytona, workerId, attempts = 60) { + for (let index = 0; index < attempts; index += 1) { + const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20) + const volumes = await daytona.volume.list() + const remainingVolumes = volumes.filter((volume) => + [workspaceVolumeName(workerId), dataVolumeName(workerId)].includes(volume.name), + ) + + if (sandboxes.items.length === 0 && remainingVolumes.length === 0) { + return + } + + await delay(5000) + } + + throw new Error(`daytona_cleanup_incomplete:${workerId}`) +} + +async function forceDeleteDaytonaResources(daytona, workerId) { + const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20) + for (const sandbox of sandboxes.items) { + await sandbox.delete(120).catch(() => {}) + } + + const volumes = await daytona.volume.list() + for (const volumeName of [workspaceVolumeName(workerId), dataVolumeName(workerId)]) { + const volume = volumes.find((entry) => entry.name === volumeName) + if (volume) { + await daytona.volume.delete(volume).catch(() => {}) + } + } +} + +function extractAuthToken(payload) { + if (!payload || typeof payload !== "object") { + return null + } + if (typeof payload.token === "string" && payload.token.trim()) { + return payload.token + } + if (payload.session && typeof payload.session === "object" && typeof payload.session.token === 
"string") { + return payload.session.token + } + return null +} + +async function requestJson(baseUrl, path, { method = "GET", body, token, cookie } = {}) { + const headers = new Headers() + const origin = process.env.DEN_BROWSER_ORIGIN?.trim() || new URL(baseUrl).origin + headers.set("Accept", "application/json") + headers.set("Origin", origin) + headers.set("Referer", `${origin}/`) + if (body !== undefined) { + headers.set("Content-Type", "application/json") + } + if (token) { + headers.set("Authorization", `Bearer ${token}`) + } + if (cookie) { + headers.set("Cookie", cookie) + } + + const response = await fetch(`${baseUrl}${path}`, { + method, + headers, + body: body === undefined ? undefined : JSON.stringify(body), + }) + + const text = await response.text() + let payload = null + if (text) { + try { + payload = JSON.parse(text) + } catch { + payload = text + } + } + + return { + response, + payload, + cookie: response.headers.get("set-cookie"), + } +} + +async function main() { + if (!process.env.DAYTONA_API_KEY) { + fail("DAYTONA_API_KEY is required. Add it to .env.daytona or export it before running the test.") + } + + const existingBaseUrl = process.env.DEN_BASE_URL?.trim() || process.env.DEN_API_URL?.trim() || "" + const mysqlPort = existingBaseUrl ? null : await getFreePort() + const appPort = existingBaseUrl ? null : await getFreePort() + const containerName = existingBaseUrl + ? null + : `openwork-den-daytona-${randomUUID().slice(0, 8)}` + const dbName = "openwork_den_daytona_e2e" + const dbPassword = "openwork-root" + const baseUrl = existingBaseUrl || `http://127.0.0.1:${appPort}` + const databaseUrl = mysqlPort + ? `mysql://root:${dbPassword}@127.0.0.1:${mysqlPort}/${dbName}` + : null + const runtimeEnv = { + ...process.env, + ...(databaseUrl ? { DATABASE_URL: databaseUrl } : {}), + BETTER_AUTH_SECRET: "openwork-den-daytona-secret-0000000000", + BETTER_AUTH_URL: baseUrl, + ...(appPort ? 
{ PORT: String(appPort) } : {}), + CORS_ORIGINS: baseUrl, + PROVISIONER_MODE: "daytona", + POLAR_FEATURE_GATE_ENABLED: "false", + OPENWORK_DAYTONA_ENV_PATH: daytonaEnvPath || process.env.OPENWORK_DAYTONA_ENV_PATH || "", + } + + const daytona = new Daytona({ + apiKey: runtimeEnv.DAYTONA_API_KEY, + apiUrl: runtimeEnv.DAYTONA_API_URL, + ...(runtimeEnv.DAYTONA_TARGET ? { target: runtimeEnv.DAYTONA_TARGET } : {}), + }) + + let serviceProcess = null + let workerId = null + + const cleanup = async () => { + if (workerId) { + try { + await forceDeleteDaytonaResources(daytona, workerId) + } catch { + // cleanup best effort only + } + } + + if (serviceProcess && !serviceProcess.killed) { + serviceProcess.kill("SIGINT") + await once(serviceProcess, "exit").catch(() => {}) + } + + if (containerName) { + await runCommand("docker", ["rm", "-f", containerName], { cwd: serviceDir }).catch(() => {}) + } + } + + process.on("SIGINT", async () => { + await cleanup() + process.exit(130) + }) + + try { + if (containerName && mysqlPort && databaseUrl && appPort) { + log("Starting disposable MySQL container...") + await runCommand("docker", [ + "run", + "-d", + "--rm", + "--name", + containerName, + "-e", + `MYSQL_ROOT_PASSWORD=${dbPassword}`, + "-e", + `MYSQL_DATABASE=${dbName}`, + "-p", + `${mysqlPort}:3306`, + "mysql:8.4", + ]) + + log("Waiting for MySQL...") + await waitForMysqlConnection(databaseUrl) + + log("Running Den migrations...") + await runCommand("pnpm", ["db:migrate"], { cwd: serviceDir, env: runtimeEnv }) + + log("Starting Den service with Daytona provisioner...") + serviceProcess = spawn("pnpm", ["exec", "tsx", "src/index.ts"], { + cwd: serviceDir, + env: runtimeEnv, + stdio: "pipe", + }) + + let serviceOutput = "" + serviceProcess.stdout?.on("data", (chunk) => { + serviceOutput += chunk.toString() + }) + serviceProcess.stderr?.on("data", (chunk) => { + serviceOutput += chunk.toString() + }) + + serviceProcess.on("exit", (code) => { + if (code !== 0) { + 
console.error(serviceOutput) + } + }) + } else { + log(`Using existing Den API at ${baseUrl}`) + } + + await waitForHttp(`${baseUrl}/health`) + + const email = `den-daytona-${Date.now()}@example.com` + const password = "TestPass123!" + + log("Creating account...") + const signup = await requestJson(baseUrl, "/api/auth/sign-up/email", { + method: "POST", + body: { + name: "Den Daytona E2E", + email, + password, + }, + }) + + if (!signup.response.ok) { + fail("Signup failed", signup.payload) + } + + const token = extractAuthToken(signup.payload) + const cookie = signup.cookie + if (!token && !cookie) { + fail("Signup did not return a bearer token or session cookie", signup.payload) + } + + const auth = { token, cookie } + + log("Validating authenticated session...") + const me = await requestJson(baseUrl, "/v1/me", auth) + if (!me.response.ok) { + fail("Session lookup failed", me.payload) + } + + log("Creating Daytona-backed cloud worker...") + const createWorker = await requestJson(baseUrl, "/v1/workers", { + method: "POST", + ...auth, + body: { + name: "daytona-worker", + destination: "cloud", + }, + }) + + if (createWorker.response.status !== 202) { + fail("Worker creation did not return async launch", { + status: createWorker.response.status, + payload: createWorker.payload, + }) + } + + workerId = createWorker.payload?.worker?.id || null + if (!workerId) { + fail("Worker response did not include an id", createWorker.payload) + } + + log("Waiting for worker provisioning to finish...") + const workerPayload = await waitForWorkerReady(baseUrl, workerId, auth) + if (workerPayload.instance.provider !== "daytona") { + fail("Worker instance did not report the Daytona provider", workerPayload) + } + + log("Checking worker health endpoint...") + await waitForHttp(`${workerPayload.instance.url.replace(/\/$/, "")}/health`, 120, 5000) + + log("Checking OpenWork connect metadata...") + const tokensResponse = await requestJson(baseUrl, `/v1/workers/${workerId}/tokens`, { + 
method: "POST", + ...auth, + }) + if (!tokensResponse.response.ok || !tokensResponse.payload?.connect?.openworkUrl) { + fail("Worker tokens/connect payload missing", tokensResponse.payload) + } + + const clientToken = tokensResponse.payload.tokens?.client + if (!clientToken) { + fail("Client token missing from worker token payload", tokensResponse.payload) + } + + const connectHeaders = { + Accept: "application/json", + Authorization: `Bearer ${clientToken}`, + } + const statusResponse = await fetch(`${tokensResponse.payload.connect.openworkUrl}/status`, { + headers: connectHeaders, + }) + if (!statusResponse.ok) { + fail("Connected worker /status failed", await statusResponse.text()) + } + + const capabilitiesResponse = await fetch(`${tokensResponse.payload.connect.openworkUrl}/capabilities`, { + headers: connectHeaders, + }) + if (!capabilitiesResponse.ok) { + fail("Connected worker /capabilities failed", await capabilitiesResponse.text()) + } + + log("Verifying Daytona resources exist...") + const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20) + if (sandboxes.items.length === 0) { + fail("Expected a Daytona sandbox for the worker but none were found") + } + const volumes = await daytona.volume.list() + const expectedVolumeNames = [workspaceVolumeName(workerId), dataVolumeName(workerId)] + const missingVolumes = expectedVolumeNames.filter( + (name) => !volumes.some((volume) => volume.name === name), + ) + if (missingVolumes.length > 0) { + fail("Expected Daytona volumes were not created", missingVolumes) + } + + log("Deleting worker and waiting for Daytona cleanup...") + const deleteResponse = await requestJson(baseUrl, `/v1/workers/${workerId}`, { + method: "DELETE", + ...auth, + }) + if (deleteResponse.response.status !== 204) { + fail("Worker deletion failed", { + status: deleteResponse.response.status, + payload: deleteResponse.payload, + }) + } + + await waitForDaytonaCleanup(daytona, workerId) + workerId = null + + log("Daytona worker flow 
passed.") + } finally { + await cleanup() + } +} + +main().catch((error) => { + fail(error instanceof Error ? error.message : String(error)) +}) diff --git a/services/den/scripts/e2e-worker-limit.mjs b/services/den/scripts/e2e-worker-limit.mjs index 9380fefd..41641d87 100644 --- a/services/den/scripts/e2e-worker-limit.mjs +++ b/services/den/scripts/e2e-worker-limit.mjs @@ -165,6 +165,7 @@ async function main() { BETTER_AUTH_SECRET: "openwork-den-e2e-secret-000000000000", BETTER_AUTH_URL: baseUrl, PORT: String(appPort), + OPENWORK_DEV_MODE: "1", CORS_ORIGINS: baseUrl, PROVISIONER_MODE: "stub", WORKER_URL_TEMPLATE: "https://workers.example.com/{workerId}", @@ -306,15 +307,15 @@ async function main() { }, }); - if (secondWorker.response.status !== 409) { - fail("Second worker was not blocked by the one-worker limit", { + if (secondWorker.response.status !== 202) { + fail("Second worker should be allowed in dev mode", { status: secondWorker.response.status, payload: secondWorker.payload, }); } - if (!secondWorker.payload || secondWorker.payload.error !== "worker_limit_reached") { - fail("Second worker returned the wrong error payload", secondWorker.payload); + if (!secondWorker.payload?.worker?.id) { + fail("Second worker did not return a worker payload", secondWorker.payload); } log("Listing workers..."); @@ -324,11 +325,11 @@ async function main() { } const items = Array.isArray(workers.payload?.workers) ? 
workers.payload.workers : null; - if (!items || items.length !== 1) { - fail("Expected exactly one worker after limit enforcement", workers.payload); + if (!items || items.length !== 2) { + fail("Expected two cloud workers in dev mode", workers.payload); } - log("E2E worker limit check passed."); + log("E2E dev worker limit check passed."); } finally { await cleanup(); } diff --git a/services/den/scripts/run-sql-migrations.mjs b/services/den/scripts/run-sql-migrations.mjs new file mode 100644 index 00000000..e0a85cbf --- /dev/null +++ b/services/den/scripts/run-sql-migrations.mjs @@ -0,0 +1,87 @@ +import { readdir, readFile } from "node:fs/promises" +import path from "node:path" +import { fileURLToPath } from "node:url" +import mysql from "mysql2/promise" + +const __dirname = path.dirname(fileURLToPath(import.meta.url)) +const drizzleDir = path.resolve(__dirname, "..", "drizzle") + +function splitStatements(sql) { + return sql + .split(/--> statement-breakpoint/g) + .map((part) => part.trim()) + .filter(Boolean) +} + +async function ensureMigrationsTable(connection) { + await connection.query(` + CREATE TABLE IF NOT EXISTS __den_migrations ( + id varchar(255) NOT NULL PRIMARY KEY, + applied_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP + ) + `) +} + +async function appliedMigrations(connection) { + const [rows] = await connection.query("SELECT id FROM __den_migrations") + return new Set(rows.map((row) => row.id)) +} + +function connectionConfigFromEnv() { + const databaseUrl = process.env.DATABASE_URL?.trim() + if (databaseUrl) { + return databaseUrl + } + + const host = process.env.DATABASE_HOST?.trim() + const user = process.env.DATABASE_USERNAME?.trim() + const password = process.env.DATABASE_PASSWORD ?? 
"" + + if (!host || !user) { + throw new Error("DATABASE_URL or DATABASE_HOST/DATABASE_USERNAME/DATABASE_PASSWORD is required") + } + + return { + host, + user, + password, + ssl: { + rejectUnauthorized: true, + }, + } +} + +async function run() { + const connection = await mysql.createConnection(connectionConfigFromEnv()) + + try { + await ensureMigrationsTable(connection) + const completed = await appliedMigrations(connection) + const files = (await readdir(drizzleDir)) + .filter((file) => file.endsWith(".sql")) + .sort((left, right) => left.localeCompare(right)) + + for (const file of files) { + if (completed.has(file)) { + continue + } + + const sql = await readFile(path.join(drizzleDir, file), "utf8") + const statements = splitStatements(sql) + + for (const statement of statements) { + await connection.query(statement) + } + + await connection.query("INSERT INTO __den_migrations (id) VALUES (?)", [file]) + process.stdout.write(`[den] Applied migration ${file}\n`) + } + } finally { + await connection.end() + } +} + +run().catch((error) => { + console.error(error instanceof Error ? error.stack ?? 
error.message : String(error)) + process.exit(1) +}) diff --git a/services/den/src/admin-allowlist.ts b/services/den/src/admin-allowlist.ts index 40d238b3..d5bd0b4f 100644 --- a/services/den/src/admin-allowlist.ts +++ b/services/den/src/admin-allowlist.ts @@ -1,34 +1,37 @@ -import { inArray, sql } from "drizzle-orm" +import { sql } from "./db/drizzle.js" import { db } from "./db/index.js" import { AdminAllowlistTable } from "./db/schema.js" +import { createDenTypeId } from "./db/typeid.js" const ADMIN_ALLOWLIST_SEEDS = [ { - id: "admin-ben-openworklabs-com", email: "ben@openworklabs.com", note: "Seeded internal admin", }, { - id: "admin-berk-openworklabs-com", + email: "jan@openworklabs.com", + note: "Seeded internal admin", + }, + { + email: "omar@openworklabs.com", + note: "Seeded internal admin", + }, + { email: "berk@openworklabs.com", note: "Seeded internal admin", }, ] as const -const MANAGED_ADMIN_ALLOWLIST_IDS = [ - "admin-ben-openworklabs-com", - "admin-jan-openworklabs-com", - "admin-omar-openworklabs-com", - "admin-berk-openworklabs-com", -] as const - let ensureAdminAllowlistSeededPromise: Promise | null = null async function seedAdminAllowlist() { for (const entry of ADMIN_ALLOWLIST_SEEDS) { await db .insert(AdminAllowlistTable) - .values(entry) + .values({ + id: createDenTypeId("adminAllowlist"), + ...entry, + }) .onDuplicateKeyUpdate({ set: { note: entry.note, @@ -36,13 +39,6 @@ async function seedAdminAllowlist() { }, }) } - - const activeSeedIds = new Set(ADMIN_ALLOWLIST_SEEDS.map((entry) => entry.id)) - const staleSeedIds = MANAGED_ADMIN_ALLOWLIST_IDS.filter((id) => !activeSeedIds.has(id)) - - if (staleSeedIds.length > 0) { - await db.delete(AdminAllowlistTable).where(inArray(AdminAllowlistTable.id, staleSeedIds)) - } } export async function ensureAdminAllowlistSeeded() { diff --git a/services/den/src/auth.ts b/services/den/src/auth.ts index efc3b3c8..93917653 100644 --- a/services/den/src/auth.ts +++ b/services/den/src/auth.ts @@ -2,6 +2,7 @@ 
import { betterAuth } from "better-auth" import { drizzleAdapter } from "better-auth/adapters/drizzle" import { db } from "./db/index.js" import * as schema from "./db/schema.js" +import { createDenTypeId, normalizeDenTypeId } from "./db/typeid.js" import { env } from "./env.js" import { ensureDefaultOrg } from "./orgs.js" @@ -33,6 +34,24 @@ export const auth = betterAuth({ provider: "mysql", schema, }), + advanced: { + database: { + generateId: (options) => { + switch (options.model) { + case "user": + return createDenTypeId("user") + case "session": + return createDenTypeId("session") + case "account": + return createDenTypeId("account") + case "verification": + return createDenTypeId("verification") + default: + return false + } + }, + }, + }, emailAndPassword: { enabled: true, }, @@ -41,7 +60,7 @@ export const auth = betterAuth({ create: { after: async (user) => { const name = user.name ?? user.email ?? "Personal" - await ensureDefaultOrg(user.id, name) + await ensureDefaultOrg(normalizeDenTypeId("user", user.id), name) }, }, }, diff --git a/services/den/src/db/drizzle.ts b/services/den/src/db/drizzle.ts new file mode 100644 index 00000000..54698bf6 --- /dev/null +++ b/services/den/src/db/drizzle.ts @@ -0,0 +1 @@ +export { and, asc, desc, eq, gt, isNotNull, isNull, sql } from "../../../../packages/den-db/dist/drizzle.js" diff --git a/services/den/src/db/index.ts b/services/den/src/db/index.ts index 86437537..a305df7e 100644 --- a/services/den/src/db/index.ts +++ b/services/den/src/db/index.ts @@ -1,129 +1,9 @@ -import { drizzle } from "drizzle-orm/mysql2" -import type { FieldPacket, QueryOptions, QueryResult } from "mysql2" -import mysql from "mysql2/promise" +import { createDenDb, isTransientDbConnectionError } from "../../../../packages/den-db/dist/index.js" import { env } from "../env.js" -import * as schema from "./schema.js" -const TRANSIENT_DB_ERROR_CODES = new Set([ - "ECONNRESET", - "EPIPE", - "ETIMEDOUT", - "PROTOCOL_CONNECTION_LOST", - 
"PROTOCOL_ENQUEUE_AFTER_FATAL_ERROR", -]) - -const RETRYABLE_QUERY_PREFIXES = ["select", "show", "describe", "explain"] - -function isRecord(value: unknown): value is Record { - return typeof value === "object" && value !== null -} - -function getErrorCode(error: unknown): string | null { - if (!isRecord(error)) { - return null - } - - if (typeof error.code === "string") { - return error.code - } - - return getErrorCode(error.cause) -} - -function isTransientDbConnectionError(error: unknown): boolean { - const code = getErrorCode(error) - if (!code) { - return false - } - return TRANSIENT_DB_ERROR_CODES.has(code) -} - -function extractSql(value: unknown): string | null { - if (typeof value === "string") { - return value - } - - if (!isRecord(value)) { - return null - } - - if (typeof value.sql === "string") { - return value.sql - } - - return null -} - -function isRetryableReadQuery(sql: string | null): boolean { - if (!sql) { - return false - } - - const normalized = sql.trimStart().toLowerCase() - return RETRYABLE_QUERY_PREFIXES.some((prefix) => normalized.startsWith(prefix)) -} - -async function retryReadQuery(label: "query" | "execute", sql: string | null, run: () => Promise): Promise { - try { - return await run() - } catch (error) { - if (!isRetryableReadQuery(sql) || !isTransientDbConnectionError(error)) { - throw error - } - - const queryType = sql?.trimStart().split(/\s+/, 1)[0]?.toUpperCase() ?? 
"QUERY" - console.warn(`[db] transient mysql error on ${label} (${queryType}); retrying once`) - return run() - } -} - -const client = mysql.createPool({ - uri: env.databaseUrl, - waitForConnections: true, - connectionLimit: 10, - maxIdle: 10, - idleTimeout: 60_000, - queueLimit: 0, - enableKeepAlive: true, - keepAliveInitialDelay: 0, +export const { db } = createDenDb({ + databaseUrl: env.databaseUrl, + mode: env.dbMode, + planetscale: env.planetscale, }) - -const query = client.query.bind(client) - -async function retryingQuery(sql: string): Promise<[T, FieldPacket[]]> -async function retryingQuery(sql: string, values: unknown): Promise<[T, FieldPacket[]]> -async function retryingQuery(options: QueryOptions): Promise<[T, FieldPacket[]]> -async function retryingQuery( - options: QueryOptions, - values: unknown, -): Promise<[T, FieldPacket[]]> -async function retryingQuery( - sqlOrOptions: string | QueryOptions, - values?: unknown, -): Promise<[T, FieldPacket[]]> { - const sql = extractSql(sqlOrOptions) - return retryReadQuery("query", sql, () => query(sqlOrOptions as never, values as never)) -} - -client.query = retryingQuery - -const execute = client.execute.bind(client) - -async function retryingExecute(sql: string): Promise<[T, FieldPacket[]]> -async function retryingExecute(sql: string, values: unknown): Promise<[T, FieldPacket[]]> -async function retryingExecute(options: QueryOptions): Promise<[T, FieldPacket[]]> -async function retryingExecute( - options: QueryOptions, - values: unknown, -): Promise<[T, FieldPacket[]]> -async function retryingExecute( - sqlOrOptions: string | QueryOptions, - values?: unknown, -): Promise<[T, FieldPacket[]]> { - const sql = extractSql(sqlOrOptions) - return retryReadQuery("execute", sql, () => execute(sqlOrOptions as never, values as never)) -} - -client.execute = retryingExecute - -export const db = drizzle(client, { schema, mode: "default" }) +export { isTransientDbConnectionError } diff --git 
a/services/den/src/db/schema.ts b/services/den/src/db/schema.ts index e95652ea..e51175cf 100644 --- a/services/den/src/db/schema.ts +++ b/services/den/src/db/schema.ts @@ -1,230 +1 @@ -import { sql } from "drizzle-orm" -import { - boolean, - index, - json, - mysqlEnum, - mysqlTable, - text, - timestamp, - uniqueIndex, - varchar, -} from "drizzle-orm/mysql-core" - -const id = () => varchar("id", { length: 64 }).notNull() - -const timestamps = { - created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - updated_at: timestamp("updated_at", { fsp: 3 }) - .notNull() - .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), -} - -export const OrgRole = ["owner", "member"] as const -export const WorkerDestination = ["local", "cloud"] as const -export const WorkerStatus = ["provisioning", "healthy", "failed", "stopped"] as const -export const TokenScope = ["client", "host"] as const - -export const AuthUserTable = mysqlTable( - "user", - { - id: varchar("id", { length: 36 }).notNull().primaryKey(), - name: varchar("name", { length: 255 }).notNull(), - email: varchar("email", { length: 255 }).notNull(), - emailVerified: boolean("email_verified").notNull().default(false), - image: text("image"), - createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - updatedAt: timestamp("updated_at", { fsp: 3 }) - .notNull() - .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), - }, - (table) => [uniqueIndex("user_email").on(table.email)], -) - -export const AuthSessionTable = mysqlTable( - "session", - { - id: varchar("id", { length: 36 }).notNull().primaryKey(), - userId: varchar("user_id", { length: 36 }).notNull(), - token: varchar("token", { length: 255 }).notNull(), - expiresAt: timestamp("expires_at", { fsp: 3 }).notNull(), - ipAddress: text("ip_address"), - userAgent: text("user_agent"), - createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - updatedAt: timestamp("updated_at", { fsp: 3 }) - .notNull() 
- .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), - }, - (table) => [ - uniqueIndex("session_token").on(table.token), - index("session_user_id").on(table.userId), - ], -) - -export const AuthAccountTable = mysqlTable( - "account", - { - id: varchar("id", { length: 36 }).notNull().primaryKey(), - userId: varchar("user_id", { length: 36 }).notNull(), - accountId: text("account_id").notNull(), - providerId: text("provider_id").notNull(), - accessToken: text("access_token"), - refreshToken: text("refresh_token"), - accessTokenExpiresAt: timestamp("access_token_expires_at", { fsp: 3 }), - refreshTokenExpiresAt: timestamp("refresh_token_expires_at", { fsp: 3 }), - scope: text("scope"), - idToken: text("id_token"), - password: text("password"), - createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - updatedAt: timestamp("updated_at", { fsp: 3 }) - .notNull() - .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), - }, - (table) => [index("account_user_id").on(table.userId)], -) - -export const AuthVerificationTable = mysqlTable( - "verification", - { - id: varchar("id", { length: 36 }).notNull().primaryKey(), - identifier: varchar("identifier", { length: 255 }).notNull(), - value: text("value").notNull(), - expiresAt: timestamp("expires_at", { fsp: 3 }).notNull(), - createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - updatedAt: timestamp("updated_at", { fsp: 3 }) - .notNull() - .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`), - }, - (table) => [index("verification_identifier").on(table.identifier)], -) - -export const user = AuthUserTable -export const session = AuthSessionTable -export const account = AuthAccountTable -export const verification = AuthVerificationTable - -export const DesktopHandoffGrantTable = mysqlTable( - "desktop_handoff_grant", - { - id: id().primaryKey(), - user_id: varchar("user_id", { length: 64 }).notNull(), - session_token: 
text("session_token").notNull(), - expires_at: timestamp("expires_at", { fsp: 3 }).notNull(), - consumed_at: timestamp("consumed_at", { fsp: 3 }), - created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - }, - (table) => [index("desktop_handoff_grant_user_id").on(table.user_id), index("desktop_handoff_grant_expires_at").on(table.expires_at)], -) - -export const OrgTable = mysqlTable( - "org", - { - id: id().primaryKey(), - name: varchar("name", { length: 255 }).notNull(), - slug: varchar("slug", { length: 255 }).notNull(), - owner_user_id: varchar("owner_user_id", { length: 64 }).notNull(), - ...timestamps, - }, - (table) => [uniqueIndex("org_slug").on(table.slug), index("org_owner_user_id").on(table.owner_user_id)], -) - -export const OrgMembershipTable = mysqlTable( - "org_membership", - { - id: id().primaryKey(), - org_id: varchar("org_id", { length: 64 }).notNull(), - user_id: varchar("user_id", { length: 64 }).notNull(), - role: mysqlEnum("role", OrgRole).notNull(), - created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - }, - (table) => [index("org_membership_org_id").on(table.org_id), index("org_membership_user_id").on(table.user_id)], -) - -export const AdminAllowlistTable = mysqlTable( - "admin_allowlist", - { - id: id().primaryKey(), - email: varchar("email", { length: 255 }).notNull(), - note: varchar("note", { length: 255 }), - ...timestamps, - }, - (table) => [uniqueIndex("admin_allowlist_email").on(table.email)], -) - -export const WorkerTable = mysqlTable( - "worker", - { - id: id().primaryKey(), - org_id: varchar("org_id", { length: 64 }).notNull(), - created_by_user_id: varchar("created_by_user_id", { length: 64 }), - name: varchar("name", { length: 255 }).notNull(), - description: varchar("description", { length: 1024 }), - destination: mysqlEnum("destination", WorkerDestination).notNull(), - status: mysqlEnum("status", WorkerStatus).notNull(), - image_version: varchar("image_version", { length: 128 }), - 
workspace_path: varchar("workspace_path", { length: 1024 }), - sandbox_backend: varchar("sandbox_backend", { length: 64 }), - ...timestamps, - }, - (table) => [ - index("worker_org_id").on(table.org_id), - index("worker_created_by_user_id").on(table.created_by_user_id), - index("worker_status").on(table.status), - ], -) - -export const WorkerInstanceTable = mysqlTable( - "worker_instance", - { - id: id().primaryKey(), - worker_id: varchar("worker_id", { length: 64 }).notNull(), - provider: varchar("provider", { length: 64 }).notNull(), - region: varchar("region", { length: 64 }), - url: varchar("url", { length: 2048 }).notNull(), - status: mysqlEnum("status", WorkerStatus).notNull(), - ...timestamps, - }, - (table) => [index("worker_instance_worker_id").on(table.worker_id)], -) - -export const WorkerTokenTable = mysqlTable( - "worker_token", - { - id: id().primaryKey(), - worker_id: varchar("worker_id", { length: 64 }).notNull(), - scope: mysqlEnum("scope", TokenScope).notNull(), - token: varchar("token", { length: 128 }).notNull(), - created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - revoked_at: timestamp("revoked_at", { fsp: 3 }), - }, - (table) => [ - index("worker_token_worker_id").on(table.worker_id), - uniqueIndex("worker_token_token").on(table.token), - ], -) - -export const WorkerBundleTable = mysqlTable( - "worker_bundle", - { - id: id().primaryKey(), - worker_id: varchar("worker_id", { length: 64 }).notNull(), - storage_url: varchar("storage_url", { length: 2048 }).notNull(), - status: varchar("status", { length: 64 }).notNull(), - created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - }, - (table) => [index("worker_bundle_worker_id").on(table.worker_id)], -) - -export const AuditEventTable = mysqlTable( - "audit_event", - { - id: id().primaryKey(), - org_id: varchar("org_id", { length: 64 }).notNull(), - worker_id: varchar("worker_id", { length: 64 }), - actor_user_id: varchar("actor_user_id", { length: 64 
}).notNull(), - action: varchar("action", { length: 128 }).notNull(), - payload: json("payload"), - created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(), - }, - (table) => [index("audit_event_org_id").on(table.org_id), index("audit_event_worker_id").on(table.worker_id)], -) +export * from "../../../../packages/den-db/dist/schema.js" diff --git a/services/den/src/db/typeid.ts b/services/den/src/db/typeid.ts new file mode 100644 index 00000000..c966ad51 --- /dev/null +++ b/services/den/src/db/typeid.ts @@ -0,0 +1 @@ +export * from "../../../../packages/utils/dist/typeid.js" diff --git a/services/den/src/env.ts b/services/den/src/env.ts index 46c4b170..a9f40a51 100644 --- a/services/den/src/env.ts +++ b/services/den/src/env.ts @@ -1,7 +1,11 @@ import { z } from "zod"; const schema = z.object({ - DATABASE_URL: z.string().min(1), + DATABASE_URL: z.string().min(1).optional(), + DATABASE_HOST: z.string().min(1).optional(), + DATABASE_USERNAME: z.string().min(1).optional(), + DATABASE_PASSWORD: z.string().optional(), + DB_MODE: z.enum(["mysql", "planetscale"]).optional(), BETTER_AUTH_SECRET: z.string().min(32), BETTER_AUTH_URL: z.string().min(1), DEN_BETTER_AUTH_TRUSTED_ORIGINS: z.string().optional(), @@ -10,9 +14,12 @@ const schema = z.object({ GOOGLE_CLIENT_ID: z.string().optional(), GOOGLE_CLIENT_SECRET: z.string().optional(), PORT: z.string().optional(), + WORKER_PROXY_PORT: z.string().optional(), + OPENWORK_DEV_MODE: z.string().optional(), CORS_ORIGINS: z.string().optional(), - PROVISIONER_MODE: z.enum(["stub", "render"]).optional(), + PROVISIONER_MODE: z.enum(["stub", "render", "daytona"]).optional(), WORKER_URL_TEMPLATE: z.string().optional(), + OPENWORK_DAYTONA_ENV_PATH: z.string().optional(), RENDER_API_BASE: z.string().optional(), RENDER_API_KEY: z.string().optional(), RENDER_OWNER_ID: z.string().optional(), @@ -40,10 +47,65 @@ const schema = z.object({ POLAR_BENEFIT_ID: z.string().optional(), POLAR_SUCCESS_URL: z.string().optional(), 
POLAR_RETURN_URL: z.string().optional(), + DAYTONA_API_URL: z.string().optional(), + DAYTONA_API_KEY: z.string().optional(), + DAYTONA_TARGET: z.string().optional(), + DAYTONA_SNAPSHOT: z.string().optional(), + DAYTONA_SANDBOX_IMAGE: z.string().optional(), + DAYTONA_SANDBOX_CPU: z.string().optional(), + DAYTONA_SANDBOX_MEMORY: z.string().optional(), + DAYTONA_SANDBOX_DISK: z.string().optional(), + DAYTONA_SANDBOX_PUBLIC: z.string().optional(), + DAYTONA_SANDBOX_AUTO_STOP_INTERVAL: z.string().optional(), + DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL: z.string().optional(), + DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL: z.string().optional(), + DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: z.string().optional(), + DAYTONA_WORKER_PROXY_BASE_URL: z.string().optional(), + DAYTONA_SANDBOX_NAME_PREFIX: z.string().optional(), + DAYTONA_VOLUME_NAME_PREFIX: z.string().optional(), + DAYTONA_WORKSPACE_MOUNT_PATH: z.string().optional(), + DAYTONA_DATA_MOUNT_PATH: z.string().optional(), + DAYTONA_RUNTIME_WORKSPACE_PATH: z.string().optional(), + DAYTONA_RUNTIME_DATA_PATH: z.string().optional(), + DAYTONA_SIDECAR_DIR: z.string().optional(), + DAYTONA_OPENWORK_PORT: z.string().optional(), + DAYTONA_OPENCODE_PORT: z.string().optional(), + DAYTONA_OPENWORK_VERSION: z.string().optional(), + DAYTONA_CREATE_TIMEOUT_SECONDS: z.string().optional(), + DAYTONA_DELETE_TIMEOUT_SECONDS: z.string().optional(), + DAYTONA_HEALTHCHECK_TIMEOUT_MS: z.string().optional(), + DAYTONA_POLL_INTERVAL_MS: z.string().optional(), +}).superRefine((value, ctx) => { + const inferredMode = value.DB_MODE ?? (value.DATABASE_URL ? 
"mysql" : "planetscale") + + if (inferredMode === "mysql" && !value.DATABASE_URL) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "DATABASE_URL is required when using mysql mode", + path: ["DATABASE_URL"], + }) + } + + if (inferredMode === "planetscale") { + for (const key of ["DATABASE_HOST", "DATABASE_USERNAME", "DATABASE_PASSWORD"] as const) { + if (!value[key]) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: `${key} is required when using planetscale mode`, + path: [key], + }) + } + } + } }); const parsed = schema.parse(process.env); +function optionalString(value: string | undefined): string | undefined { + const trimmed = value?.trim(); + return trimmed ? trimmed : undefined; +} + function normalizeOrigin(origin: string): string { const value = origin.trim(); if (value === "*") { @@ -66,11 +128,26 @@ const betterAuthTrustedOrigins = const polarFeatureGateEnabled = (parsed.POLAR_FEATURE_GATE_ENABLED ?? "false").toLowerCase() === "true"; +const daytonaSandboxPublic = + (parsed.DAYTONA_SANDBOX_PUBLIC ?? "false").toLowerCase() === "true"; + +const planetscaleCredentials = + parsed.DATABASE_HOST && parsed.DATABASE_USERNAME && parsed.DATABASE_PASSWORD !== undefined + ? { + host: parsed.DATABASE_HOST, + username: parsed.DATABASE_USERNAME, + password: parsed.DATABASE_PASSWORD, + } + : null + export const env = { databaseUrl: parsed.DATABASE_URL, + dbMode: parsed.DB_MODE ?? (parsed.DATABASE_URL ? "mysql" : "planetscale"), + planetscale: planetscaleCredentials, betterAuthSecret: parsed.BETTER_AUTH_SECRET, betterAuthUrl: parsed.BETTER_AUTH_URL, betterAuthTrustedOrigins, + devMode: (parsed.OPENWORK_DEV_MODE ?? "0").trim() === "1", github: { clientId: parsed.GITHUB_CLIENT_ID?.trim() || undefined, clientSecret: parsed.GITHUB_CLIENT_SECRET?.trim() || undefined, @@ -80,6 +157,7 @@ export const env = { clientSecret: parsed.GOOGLE_CLIENT_SECRET?.trim() || undefined, }, port: Number(parsed.PORT ?? 
"8788"), + workerProxyPort: Number(parsed.WORKER_PROXY_PORT ?? "8789"), corsOrigins: corsOrigins ?? [], provisionerMode: parsed.PROVISIONER_MODE ?? "stub", workerUrlTemplate: parsed.WORKER_URL_TEMPLATE, @@ -122,4 +200,54 @@ export const env = { successUrl: parsed.POLAR_SUCCESS_URL, returnUrl: parsed.POLAR_RETURN_URL, }, + daytona: { + envPath: optionalString(parsed.OPENWORK_DAYTONA_ENV_PATH), + apiUrl: optionalString(parsed.DAYTONA_API_URL) ?? "https://app.daytona.io/api", + apiKey: optionalString(parsed.DAYTONA_API_KEY), + target: optionalString(parsed.DAYTONA_TARGET), + snapshot: optionalString(parsed.DAYTONA_SNAPSHOT), + image: optionalString(parsed.DAYTONA_SANDBOX_IMAGE) ?? "node:20-bookworm", + resources: { + cpu: Number(parsed.DAYTONA_SANDBOX_CPU ?? "2"), + memory: Number(parsed.DAYTONA_SANDBOX_MEMORY ?? "4"), + disk: Number(parsed.DAYTONA_SANDBOX_DISK ?? "8"), + }, + public: daytonaSandboxPublic, + autoStopInterval: Number(parsed.DAYTONA_SANDBOX_AUTO_STOP_INTERVAL ?? "0"), + autoArchiveInterval: Number( + parsed.DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL ?? "10080", + ), + autoDeleteInterval: Number( + parsed.DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL ?? "-1", + ), + signedPreviewExpiresSeconds: Number( + parsed.DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS ?? "86400", + ), + workerProxyBaseUrl: + optionalString(parsed.DAYTONA_WORKER_PROXY_BASE_URL) ?? "https://workers.den.openworklabs", + sandboxNamePrefix: + optionalString(parsed.DAYTONA_SANDBOX_NAME_PREFIX) ?? "den-daytona-worker", + volumeNamePrefix: + optionalString(parsed.DAYTONA_VOLUME_NAME_PREFIX) ?? "den-daytona-worker", + workspaceMountPath: + optionalString(parsed.DAYTONA_WORKSPACE_MOUNT_PATH) ?? "/workspace", + dataMountPath: + optionalString(parsed.DAYTONA_DATA_MOUNT_PATH) ?? "/persist/openwork", + runtimeWorkspacePath: + optionalString(parsed.DAYTONA_RUNTIME_WORKSPACE_PATH) ?? + "/tmp/openwork-workspace", + runtimeDataPath: + optionalString(parsed.DAYTONA_RUNTIME_DATA_PATH) ?? 
"/tmp/openwork-data", + sidecarDir: + optionalString(parsed.DAYTONA_SIDECAR_DIR) ?? "/tmp/openwork-sidecars", + openworkPort: Number(parsed.DAYTONA_OPENWORK_PORT ?? "8787"), + opencodePort: Number(parsed.DAYTONA_OPENCODE_PORT ?? "4096"), + openworkVersion: optionalString(parsed.DAYTONA_OPENWORK_VERSION), + createTimeoutSeconds: Number(parsed.DAYTONA_CREATE_TIMEOUT_SECONDS ?? "300"), + deleteTimeoutSeconds: Number(parsed.DAYTONA_DELETE_TIMEOUT_SECONDS ?? "120"), + healthcheckTimeoutMs: Number( + parsed.DAYTONA_HEALTHCHECK_TIMEOUT_MS ?? "300000", + ), + pollIntervalMs: Number(parsed.DAYTONA_POLL_INTERVAL_MS ?? "5000"), + }, }; diff --git a/services/den/src/http/admin.ts b/services/den/src/http/admin.ts index f3ab7c65..cfbdcbbb 100644 --- a/services/den/src/http/admin.ts +++ b/services/den/src/http/admin.ts @@ -1,11 +1,15 @@ import express from "express" -import { asc, desc, eq, isNotNull, sql } from "drizzle-orm" +import { fromNodeHeaders } from "better-auth/node" +import { asc, desc, eq, isNotNull, sql } from "../db/drizzle.js" import { ensureAdminAllowlistSeeded } from "../admin-allowlist.js" +import { auth } from "../auth.js" import { getCloudWorkerAdminBillingStatus } from "../billing/polar.js" import { db } from "../db/index.js" import { AdminAllowlistTable, AuthAccountTable, AuthSessionTable, AuthUserTable, WorkerTable } from "../db/schema.js" +import { normalizeDenTypeId } from "../db/typeid.js" import { asyncRoute } from "./errors.js" -import { getRequestSession } from "./session.js" + +type UserId = typeof AuthUserTable.$inferSelect.id function normalizeEmail(value: string | null | undefined) { return value?.trim().toLowerCase() ?? 
"" @@ -82,13 +86,17 @@ async function mapWithConcurrency(items: T[], limit: number, mapper: (item } async function requireAdminSession(req: express.Request, res: express.Response) { - const session = await getRequestSession(req) + const session = await auth.api.getSession({ + headers: fromNodeHeaders(req.headers), + }) if (!session?.user?.id) { res.status(401).json({ error: "unauthorized" }) return null } + const userId = normalizeDenTypeId("user", session.user.id) + const email = normalizeEmail(session.user.email) if (!email) { res.status(403).json({ error: "admin_email_required" }) @@ -108,7 +116,13 @@ async function requireAdminSession(req: express.Request, res: express.Response) return null } - return session + return { + ...session, + user: { + ...session.user, + id: userId, + }, + } } export const adminRouter = express.Router() @@ -155,7 +169,7 @@ adminRouter.get("/overview", asyncRoute(async (req, res) => { .from(AuthAccountTable), ]) - const workerStatsByUser = new Map { }) } - const sessionStatsByUser = new Map() @@ -187,7 +201,7 @@ adminRouter.get("/overview", asyncRoute(async (req, res) => { }) } - const providersByUser = new Map>() + const providersByUser = new Map>() for (const row of accountRows) { const providerId = normalizeProvider(row.providerId) const existing = providersByUser.get(row.userId) ?? 
new Set() diff --git a/services/den/src/http/session.ts b/services/den/src/http/session.ts index e70e64fc..587b83bc 100644 --- a/services/den/src/http/session.ts +++ b/services/den/src/http/session.ts @@ -1,9 +1,10 @@ import type express from "express" import { fromNodeHeaders } from "better-auth/node" -import { and, eq, gt } from "drizzle-orm" +import { and, eq, gt } from "../db/drizzle.js" import { auth } from "../auth.js" import { db } from "../db/index.js" import { AuthSessionTable, AuthUserTable } from "../db/schema.js" +import { normalizeDenTypeId } from "../db/typeid.js" type AuthSessionLike = Awaited> @@ -57,7 +58,10 @@ async function getSessionFromBearerToken(token: string): Promise randomBytes(32).toString("hex") type WorkerRow = typeof WorkerTable.$inferSelect type WorkerInstanceRow = typeof WorkerInstanceTable.$inferSelect +type WorkerId = WorkerRow["id"] +type OrgId = typeof OrgMembershipTable.$inferSelect.org_id +type UserId = typeof AuthUserTable.$inferSelect.id + +function parseWorkerIdParam(value: string): WorkerId { + return normalizeDenTypeId("worker", value) +} + +function parseUserId(value: string): UserId { + return normalizeDenTypeId("user", value) +} function isRecord(value: unknown): value is Record { return typeof value === "object" && value !== null @@ -70,14 +85,6 @@ function parseWorkspaceSelection(payload: unknown): { workspaceId: string; openw } } -function parseIssuedToken(payload: unknown): string | null { - if (!isRecord(payload)) { - return null - } - const token = typeof payload.token === "string" ? 
payload.token.trim() : "" - return token || null -} - async function resolveConnectUrlFromWorker(instanceUrl: string, clientToken: string) { const baseUrl = normalizeUrl(instanceUrl) if (!baseUrl || !clientToken.trim()) { @@ -108,7 +115,7 @@ async function resolveConnectUrlFromWorker(instanceUrl: string, clientToken: str } } -function getConnectUrlCandidates(workerId: string, instanceUrl: string | null) { +function getConnectUrlCandidates(workerId: WorkerId, instanceUrl: string | null) { const candidates: string[] = [] const vanityHostname = customDomainForWorker(workerId, env.render.workerPublicDomainSuffix) if (vanityHostname) { @@ -138,7 +145,7 @@ function queryIncludesFlag(value: unknown): boolean { return false } -async function resolveConnectUrlFromCandidates(workerId: string, instanceUrl: string | null, clientToken: string) { +async function resolveConnectUrlFromCandidates(workerId: WorkerId, instanceUrl: string | null, clientToken: string) { const candidates = getConnectUrlCandidates(workerId, instanceUrl) for (const candidate of candidates) { const resolved = await resolveConnectUrlFromWorker(candidate, clientToken) @@ -149,7 +156,7 @@ async function resolveConnectUrlFromCandidates(workerId: string, instanceUrl: st return null } -async function getWorkerRuntimeAccess(workerId: string) { +async function getWorkerRuntimeAccess(workerId: WorkerId) { const instance = await getLatestWorkerInstance(workerId) const tokenRows = await db .select() @@ -170,7 +177,7 @@ async function getWorkerRuntimeAccess(workerId: string) { } async function fetchWorkerRuntimeJson(input: { - workerId: string + workerId: WorkerId path: string method?: "GET" | "POST" body?: unknown @@ -221,73 +228,36 @@ async function fetchWorkerRuntimeJson(input: { return { ok: false as const, status: lastStatus, payload: lastPayload } } -async function issueWorkerOwnerToken(workerId: string): Promise { - const result = await fetchWorkerRuntimeJson({ - workerId, - path: "/tokens", - method: "POST", - 
body: { scope: "owner", label: "Den owner token" }, - }) - - const token = parseIssuedToken(result.payload) - if (result.ok && token) { - return token - } - - const message = - isRecord(result.payload) && typeof result.payload.message === "string" - ? result.payload.message - : `Owner token request failed with ${result.status}.` - throw new Error(message) -} - async function requireSession(req: express.Request, res: express.Response) { - const session = await getRequestSession(req) + const session = await auth.api.getSession({ + headers: fromNodeHeaders(req.headers), + }) if (!session?.user?.id) { res.status(401).json({ error: "unauthorized" }) return null } - return session + return { + ...session, + user: { + ...session.user, + id: parseUserId(session.user.id), + }, + } } -function readRequestedOrgId(req: express.Request): string | null { - const queryValue = typeof req.query.orgId === "string" ? req.query.orgId : "" - if (queryValue.trim()) { - return queryValue.trim() +async function getOrgId(userId: UserId): Promise { + const membership = await db + .select() + .from(OrgMembershipTable) + .where(eq(OrgMembershipTable.user_id, userId)) + .limit(1) + if (membership.length === 0) { + return null } - - if (isRecord(req.body) && typeof req.body.orgId === "string" && req.body.orgId.trim()) { - return req.body.orgId.trim() - } - - return null + return membership[0].org_id } -async function requireOrgContext(req: express.Request, res: express.Response, userId: string) { - const requestedOrgId = readRequestedOrgId(req) - const org = await resolveUserOrg(userId, requestedOrgId) - - if (!org) { - const memberships = await listUserOrgs(userId) - if (memberships.length === 0) { - return null - } - - if (requestedOrgId) { - res.status(403).json({ - error: "org_forbidden", - message: "You do not have access to that org.", - }) - return undefined - } - - return memberships[0] - } - - return org -} - -async function countUserCloudWorkers(userId: string) { +async function 
countUserCloudWorkers(userId: UserId) { const rows = await db .select({ id: WorkerTable.id }) .from(WorkerTable) @@ -297,7 +267,22 @@ async function countUserCloudWorkers(userId: string) { return rows.length } -async function getLatestWorkerInstance(workerId: string) { +function getExperimentBillingSummary() { + return { + featureGateEnabled: false, + hasActivePlan: false, + checkoutRequired: false, + checkoutUrl: null, + portalUrl: null, + price: null, + subscription: null, + invoices: [], + productId: env.polar.productId, + benefitId: env.polar.benefitId, + } +} + +async function getLatestWorkerInstance(workerId: WorkerId) { for (let attempt = 0; attempt < 2; attempt += 1) { try { const rows = await db @@ -359,7 +344,7 @@ function toWorkerResponse(row: WorkerRow, userId: string) { } } -async function continueCloudProvisioning(input: { workerId: string; name: string; hostToken: string; clientToken: string }) { +async function continueCloudProvisioning(input: { workerId: WorkerId; name: string; hostToken: string; clientToken: string }) { try { const provisioned = await provisionWorker({ workerId: input.workerId, @@ -374,7 +359,7 @@ async function continueCloudProvisioning(input: { workerId: string; name: string .where(eq(WorkerTable.id, input.workerId)) await db.insert(WorkerInstanceTable).values({ - id: randomUUID(), + id: createDenTypeId("workerInstance"), worker_id: input.workerId, provider: provisioned.provider, region: provisioned.region, @@ -398,11 +383,8 @@ workersRouter.get("/", asyncRoute(async (req, res) => { const session = await requireSession(req, res) if (!session) return - const org = await requireOrgContext(req, res, session.user.id) - if (org === undefined) { - return - } - if (!org) { + const orgId = await getOrgId(session.user.id) + if (!orgId) { res.json({ workers: [] }) return } @@ -416,7 +398,7 @@ workersRouter.get("/", asyncRoute(async (req, res) => { const rows = await db .select() .from(WorkerTable) - .where(eq(WorkerTable.org_id, org.id)) 
+ .where(eq(WorkerTable.org_id, orgId)) .orderBy(desc(WorkerTable.created_at)) .limit(parsed.data.limit) @@ -448,44 +430,38 @@ workersRouter.post("/", asyncRoute(async (req, res) => { return } - if (parsed.data.destination === "cloud" && (await countUserCloudWorkers(session.user.id)) > 0) { - const access = await requireCloudWorkerAccess({ - userId: session.user.id, - email: session.user.email ?? `${session.user.id}@placeholder.local`, - name: session.user.name ?? session.user.email ?? "OpenWork User" + if (parsed.data.destination === "cloud" && !env.devMode && (await countUserCloudWorkers(session.user.id)) > 0) { + // Polar is temporarily disabled for this experiment. + // Keep the previous paywall block nearby so it can be restored quickly. + // + // const access = await requireCloudWorkerAccess({ + // userId: session.user.id, + // email: session.user.email ?? `${session.user.id}@placeholder.local`, + // name: session.user.name ?? session.user.email ?? "OpenWork User", + // }) + // if (!access.allowed) { + // res.status(402).json({ + // error: "payment_required", + // message: "Additional cloud workers require an active Den Cloud plan.", + // polar: { + // checkoutUrl: access.checkoutUrl, + // productId: env.polar.productId, + // benefitId: env.polar.benefitId, + // }, + // }) + // return + // } + + res.status(409).json({ + error: "worker_limit_reached", + message: "You can only create one cloud worker during this experiment.", }) - if (!access.allowed) { - res.status(402).json({ - error: "payment_required", - message: "Additional cloud workers require an active Den Cloud plan.", - polar: { - checkoutUrl: access.checkoutUrl, - productId: env.polar.productId, - benefitId: env.polar.benefitId - } - }) - return - } + return } - const requestedOrgId = readRequestedOrgId(req) - let orgId = requestedOrgId - if (requestedOrgId) { - const org = await requireOrgContext(req, res, session.user.id) - if (org === undefined) { - return - } - if (!org) { - 
res.status(404).json({ error: "org_not_found" }) - return - } - orgId = org.id - } - - if (!orgId) { - orgId = (await ensureDefaultOrg(session.user.id, session.user.name ?? session.user.email ?? "Personal")) - } - const workerId = randomUUID() + const orgId = + (await getOrgId(session.user.id)) ?? (await ensureDefaultOrg(session.user.id, session.user.name ?? session.user.email ?? "Personal")) + const workerId = createDenTypeId("worker") let workerStatus: WorkerRow["status"] = parsed.data.destination === "cloud" ? "provisioning" : "healthy" await db.insert(WorkerTable).values({ @@ -505,13 +481,13 @@ workersRouter.post("/", asyncRoute(async (req, res) => { const clientToken = token() await db.insert(WorkerTokenTable).values([ { - id: randomUUID(), + id: createDenTypeId("workerToken"), worker_id: workerId, scope: "host", token: hostToken, }, { - id: randomUUID(), + id: createDenTypeId("workerToken"), worker_id: workerId, scope: "client", token: clientToken, @@ -558,32 +534,37 @@ workersRouter.get("/billing", asyncRoute(async (req, res) => { const session = await requireSession(req, res) if (!session) return - const includeCheckoutUrl = queryIncludesFlag(req.query.includeCheckout) - const includePortalUrl = !queryIncludesFlag(req.query.excludePortal) - const includeInvoices = !queryIncludesFlag(req.query.excludeInvoices) - - const billingInput = { - userId: session.user.id, - email: session.user.email ?? `${session.user.id}@placeholder.local`, - name: session.user.name ?? session.user.email ?? "OpenWork User" - } - - const billing = await getCloudWorkerBillingStatus( - billingInput, - { - includeCheckoutUrl, - includePortalUrl, - includeInvoices - } - ) - res.json({ - billing: { - ...billing, - productId: env.polar.productId, - benefitId: env.polar.benefitId - } + billing: getExperimentBillingSummary(), }) + + // Polar billing is temporarily disabled for the one-worker experiment. 
+ // const includeCheckoutUrl = queryIncludesFlag(req.query.includeCheckout) + // const includePortalUrl = !queryIncludesFlag(req.query.excludePortal) + // const includeInvoices = !queryIncludesFlag(req.query.excludeInvoices) + // + // const billingInput = { + // userId: session.user.id, + // email: session.user.email ?? `${session.user.id}@placeholder.local`, + // name: session.user.name ?? session.user.email ?? "OpenWork User", + // } + // + // const billing = await getCloudWorkerBillingStatus( + // billingInput, + // { + // includeCheckoutUrl, + // includePortalUrl, + // includeInvoices, + // }, + // ) + // + // res.json({ + // billing: { + // ...billing, + // productId: env.polar.productId, + // benefitId: env.polar.benefitId, + // }, + // }) })) workersRouter.post("/billing/subscription", asyncRoute(async (req, res) => { @@ -596,38 +577,49 @@ workersRouter.post("/billing/subscription", asyncRoute(async (req, res) => { return } - const billingInput = { - userId: session.user.id, - email: session.user.email ?? `${session.user.id}@placeholder.local`, - name: session.user.name ?? session.user.email ?? "OpenWork User" - } - - const subscription = await setCloudWorkerSubscriptionCancellation(billingInput, parsed.data.cancelAtPeriodEnd) - const billing = await getCloudWorkerBillingStatus(billingInput, { - includeCheckoutUrl: false, - includePortalUrl: true, - includeInvoices: true - }) - res.json({ - subscription, - billing: { - ...billing, - productId: env.polar.productId, - benefitId: env.polar.benefitId - } + subscription: null, + billing: getExperimentBillingSummary(), }) + + // Polar billing is temporarily disabled for the one-worker experiment. + // const billingInput = { + // userId: session.user.id, + // email: session.user.email ?? `${session.user.id}@placeholder.local`, + // name: session.user.name ?? session.user.email ?? 
"OpenWork User", + // } + // + // const subscription = await setCloudWorkerSubscriptionCancellation(billingInput, parsed.data.cancelAtPeriodEnd) + // const billing = await getCloudWorkerBillingStatus(billingInput, { + // includeCheckoutUrl: false, + // includePortalUrl: true, + // includeInvoices: true, + // }) + // + // res.json({ + // subscription, + // billing: { + // ...billing, + // productId: env.polar.productId, + // benefitId: env.polar.benefitId, + // }, + // }) })) workersRouter.get("/:id", asyncRoute(async (req, res) => { const session = await requireSession(req, res) if (!session) return - const org = await requireOrgContext(req, res, session.user.id) - if (org === undefined) { + const orgId = await getOrgId(session.user.id) + if (!orgId) { + res.status(404).json({ error: "worker_not_found" }) return } - if (!org) { + + let workerId: WorkerId + try { + workerId = parseWorkerIdParam(req.params.id) + } catch { res.status(404).json({ error: "worker_not_found" }) return } @@ -635,7 +627,7 @@ workersRouter.get("/:id", asyncRoute(async (req, res) => { const rows = await db .select() .from(WorkerTable) - .where(and(eq(WorkerTable.id, req.params.id), eq(WorkerTable.org_id, org.id))) + .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId))) .limit(1) if (rows.length === 0) { @@ -655,11 +647,16 @@ workersRouter.post("/:id/tokens", asyncRoute(async (req, res) => { const session = await requireSession(req, res) if (!session) return - const org = await requireOrgContext(req, res, session.user.id) - if (org === undefined) { + const orgId = await getOrgId(session.user.id) + if (!orgId) { + res.status(404).json({ error: "worker_not_found" }) return } - if (!org) { + + let workerId: WorkerId + try { + workerId = parseWorkerIdParam(req.params.id) + } catch { res.status(404).json({ error: "worker_not_found" }) return } @@ -667,10 +664,10 @@ workersRouter.post("/:id/tokens", asyncRoute(async (req, res) => { const rows = await db .select() 
.from(WorkerTable) - .where(eq(WorkerTable.id, req.params.id)) + .where(eq(WorkerTable.id, workerId)) .limit(1) - if (rows.length === 0 || rows[0].org_id !== org.id) { + if (rows.length === 0 || rows[0].org_id !== orgId) { res.status(404).json({ error: "worker_not_found" }) return } @@ -694,24 +691,11 @@ workersRouter.post("/:id/tokens", asyncRoute(async (req, res) => { const instance = await getLatestWorkerInstance(rows[0].id) const connect = await resolveConnectUrlFromCandidates(rows[0].id, instance?.url ?? null, clientToken) - let ownerToken: string - - try { - ownerToken = await issueWorkerOwnerToken(rows[0].id) - } catch (error) { - res.status(502).json({ - error: "worker_owner_token_unavailable", - message: error instanceof Error ? error.message : "Could not mint an owner token for this worker.", - }) - return - } res.json({ tokens: { host: hostToken, client: clientToken, - collaborator: clientToken, - owner: ownerToken, }, connect: connect ?? (instance?.url ? { openworkUrl: instance.url, workspaceId: null } : null), }) @@ -721,11 +705,16 @@ workersRouter.get("/:id/runtime", asyncRoute(async (req, res) => { const session = await requireSession(req, res) if (!session) return - const org = await requireOrgContext(req, res, session.user.id) - if (org === undefined) { + const orgId = await getOrgId(session.user.id) + if (!orgId) { + res.status(404).json({ error: "worker_not_found" }) return } - if (!org) { + + let workerId: WorkerId + try { + workerId = parseWorkerIdParam(req.params.id) + } catch { res.status(404).json({ error: "worker_not_found" }) return } @@ -733,7 +722,7 @@ workersRouter.get("/:id/runtime", asyncRoute(async (req, res) => { const rows = await db .select() .from(WorkerTable) - .where(and(eq(WorkerTable.id, req.params.id), eq(WorkerTable.org_id, org.id))) + .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId))) .limit(1) if (rows.length === 0) { @@ -753,11 +742,16 @@ workersRouter.post("/:id/runtime/upgrade", asyncRoute(async 
(req, res) => { const session = await requireSession(req, res) if (!session) return - const org = await requireOrgContext(req, res, session.user.id) - if (org === undefined) { + const orgId = await getOrgId(session.user.id) + if (!orgId) { + res.status(404).json({ error: "worker_not_found" }) return } - if (!org) { + + let workerId: WorkerId + try { + workerId = parseWorkerIdParam(req.params.id) + } catch { res.status(404).json({ error: "worker_not_found" }) return } @@ -765,7 +759,7 @@ workersRouter.post("/:id/runtime/upgrade", asyncRoute(async (req, res) => { const rows = await db .select() .from(WorkerTable) - .where(and(eq(WorkerTable.id, req.params.id), eq(WorkerTable.org_id, org.id))) + .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId))) .limit(1) if (rows.length === 0) { @@ -787,11 +781,16 @@ workersRouter.delete("/:id", asyncRoute(async (req, res) => { const session = await requireSession(req, res) if (!session) return - const org = await requireOrgContext(req, res, session.user.id) - if (org === undefined) { + const orgId = await getOrgId(session.user.id) + if (!orgId) { + res.status(404).json({ error: "worker_not_found" }) return } - if (!org) { + + let workerId: WorkerId + try { + workerId = parseWorkerIdParam(req.params.id) + } catch { res.status(404).json({ error: "worker_not_found" }) return } @@ -799,7 +798,7 @@ workersRouter.delete("/:id", asyncRoute(async (req, res) => { const rows = await db .select() .from(WorkerTable) - .where(and(eq(WorkerTable.id, req.params.id), eq(WorkerTable.org_id, org.id))) + .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId))) .limit(1) if (rows.length === 0) { @@ -824,6 +823,7 @@ workersRouter.delete("/:id", asyncRoute(async (req, res) => { await db.transaction(async (tx) => { await tx.delete(WorkerTokenTable).where(eq(WorkerTokenTable.worker_id, worker.id)) + await tx.delete(DaytonaSandboxTable).where(eq(DaytonaSandboxTable.worker_id, worker.id)) await 
tx.delete(WorkerInstanceTable).where(eq(WorkerInstanceTable.worker_id, worker.id)) await tx.delete(WorkerBundleTable).where(eq(WorkerBundleTable.worker_id, worker.id)) await tx.delete(AuditEventTable).where(eq(AuditEventTable.worker_id, worker.id)) diff --git a/services/den/src/index.ts b/services/den/src/index.ts index 11ed6761..d6298885 100644 --- a/services/den/src/index.ts +++ b/services/den/src/index.ts @@ -1,4 +1,4 @@ -import "dotenv/config" +import "./load-env.js" import cors from "cors" import express from "express" import path from "node:path" @@ -11,6 +11,7 @@ import { desktopAuthRouter } from "./http/desktop-auth.js" import { asyncRoute, errorMiddleware } from "./http/errors.js" import { getRequestSession } from "./http/session.js" import { workersRouter } from "./http/workers.js" +import { normalizeDenTypeId } from "./db/typeid.js" import { listUserOrgs } from "./orgs.js" const app = express() @@ -51,7 +52,7 @@ app.get("/v1/me/orgs", asyncRoute(async (req, res) => { return } - const orgs = await listUserOrgs(session.user.id) + const orgs = await listUserOrgs(normalizeDenTypeId("user", session.user.id)) res.json({ orgs, defaultOrgId: orgs[0]?.id ?? 
null, diff --git a/services/den/src/load-env.ts b/services/den/src/load-env.ts new file mode 100644 index 00000000..ef46b39f --- /dev/null +++ b/services/den/src/load-env.ts @@ -0,0 +1,45 @@ +import { existsSync } from "node:fs" +import path from "node:path" +import { fileURLToPath } from "node:url" +import dotenv from "dotenv" + +function findUpwards(startDir: string, fileName: string, maxDepth = 8) { + let current = startDir + + for (let depth = 0; depth <= maxDepth; depth += 1) { + const candidate = path.join(current, fileName) + if (existsSync(candidate)) { + return candidate + } + + const parent = path.dirname(current) + if (parent === current) { + break + } + current = parent + } + + return null +} + +const srcDir = path.dirname(fileURLToPath(import.meta.url)) +const serviceDir = path.resolve(srcDir, "..") + +for (const filePath of [ + path.join(serviceDir, ".env.local"), + path.join(serviceDir, ".env"), +]) { + if (existsSync(filePath)) { + dotenv.config({ path: filePath, override: false }) + } +} + +const explicitDaytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() +const detectedDaytonaEnvPath = findUpwards(path.resolve(serviceDir, "..", ".."), ".env.daytona") +const daytonaEnvPath = explicitDaytonaEnvPath || detectedDaytonaEnvPath + +if (daytonaEnvPath && existsSync(daytonaEnvPath)) { + dotenv.config({ path: daytonaEnvPath, override: false }) +} + +dotenv.config({ override: false }) diff --git a/services/den/src/orgs.ts b/services/den/src/orgs.ts index 5940eb42..845884d6 100644 --- a/services/den/src/orgs.ts +++ b/services/den/src/orgs.ts @@ -1,16 +1,12 @@ -import { randomUUID } from "crypto" -import { asc, eq } from "drizzle-orm" +import { eq } from "./db/drizzle.js" import { db } from "./db/index.js" -import { OrgMembershipTable, OrgTable } from "./db/schema.js" +import { AuthUserTable, OrgMembershipTable, OrgTable } from "./db/schema.js" +import { createDenTypeId } from "./db/typeid.js" -export type OrgSummary = { - id: string - name: string 
- slug: string - role: "owner" | "member" -} +type UserId = typeof AuthUserTable.$inferSelect.id +type OrgId = typeof OrgTable.$inferSelect.id -export async function ensureDefaultOrg(userId: string, name: string) { +export async function ensureDefaultOrg(userId: UserId, name: string): Promise { const existing = await db .select() .from(OrgMembershipTable) @@ -21,7 +17,7 @@ export async function ensureDefaultOrg(userId: string, name: string) { return existing[0].org_id } - const orgId = randomUUID() + const orgId = createDenTypeId("org") const slug = `personal-${orgId.slice(0, 8)}` await db.insert(OrgTable).values({ id: orgId, @@ -30,7 +26,7 @@ export async function ensureDefaultOrg(userId: string, name: string) { owner_user_id: userId, }) await db.insert(OrgMembershipTable).values({ - id: randomUUID(), + id: createDenTypeId("orgMembership"), org_id: orgId, user_id: userId, role: "owner", @@ -38,38 +34,32 @@ export async function ensureDefaultOrg(userId: string, name: string) { return orgId } -export async function listUserOrgs(userId: string): Promise { - const rows = await db +export async function listUserOrgs(userId: UserId) { + const memberships = await db .select({ - id: OrgTable.id, - name: OrgTable.name, - slug: OrgTable.slug, + membershipId: OrgMembershipTable.id, role: OrgMembershipTable.role, - createdAt: OrgTable.created_at, + org: { + id: OrgTable.id, + name: OrgTable.name, + slug: OrgTable.slug, + ownerUserId: OrgTable.owner_user_id, + createdAt: OrgTable.created_at, + updatedAt: OrgTable.updated_at, + }, }) .from(OrgMembershipTable) .innerJoin(OrgTable, eq(OrgMembershipTable.org_id, OrgTable.id)) .where(eq(OrgMembershipTable.user_id, userId)) - .orderBy(asc(OrgTable.created_at)) - return rows.map((row) => ({ - id: row.id, - name: row.name, - slug: row.slug, + return memberships.map((row) => ({ + id: row.org.id, + name: row.org.name, + slug: row.org.slug, + ownerUserId: row.org.ownerUserId, role: row.role, + membershipId: row.membershipId, + createdAt: 
row.org.createdAt, + updatedAt: row.org.updatedAt, })) } - -export async function resolveUserOrg(userId: string, requestedOrgId?: string | null): Promise { - const orgs = await listUserOrgs(userId) - if (orgs.length === 0) { - return null - } - - const requested = requestedOrgId?.trim() ?? "" - if (!requested) { - return orgs[0] - } - - return orgs.find((org) => org.id === requested) ?? null -} diff --git a/services/den/src/workers/daytona.ts b/services/den/src/workers/daytona.ts new file mode 100644 index 00000000..f97db0ab --- /dev/null +++ b/services/den/src/workers/daytona.ts @@ -0,0 +1,484 @@ +import { Daytona, type Sandbox } from "@daytonaio/sdk" +import { eq } from "../db/drizzle.js" +import { db } from "../db/index.js" +import { DaytonaSandboxTable } from "../db/schema.js" +import { createDenTypeId } from "../db/typeid.js" +import { env } from "../env.js" + +type WorkerId = typeof DaytonaSandboxTable.$inferSelect.worker_id + +type ProvisionInput = { + workerId: WorkerId + name: string + hostToken: string + clientToken: string +} + +type ProvisionedInstance = { + provider: string + url: string + status: "provisioning" | "healthy" + region?: string +} + +const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)) +const maxSignedPreviewExpirySeconds = 60 * 60 * 24 +const signedPreviewRefreshLeadMs = 5 * 60 * 1000 + +const slug = (value: string) => + value + .toLowerCase() + .replace(/[^a-z0-9-]+/g, "-") + .replace(/-+/g, "-") + .replace(/^-|-$/g, "") + +function shellQuote(value: string) { + return `'${value.replace(/'/g, `'"'"'`)}'` +} + +function createDaytonaClient() { + return new Daytona({ + apiKey: env.daytona.apiKey, + apiUrl: env.daytona.apiUrl, + ...(env.daytona.target ? 
{ target: env.daytona.target } : {}), + }) +} + +function normalizedSignedPreviewExpirySeconds() { + return Math.max( + 1, + Math.min(env.daytona.signedPreviewExpiresSeconds, maxSignedPreviewExpirySeconds), + ) +} + +function signedPreviewRefreshAt(expiresInSeconds: number) { + return new Date( + Date.now() + Math.max(0, expiresInSeconds * 1000 - signedPreviewRefreshLeadMs), + ) +} + +function workerProxyUrl(workerId: WorkerId) { + return `${env.daytona.workerProxyBaseUrl.replace(/\/+$/, "")}/${encodeURIComponent(workerId)}` +} + +function assertDaytonaConfig() { + if (!env.daytona.apiKey) { + throw new Error("DAYTONA_API_KEY is required for daytona provisioner") + } +} + +function workerHint(workerId: WorkerId) { + return workerId.replace(/-/g, "").slice(0, 12) +} + +function sandboxLabels(workerId: WorkerId) { + return { + "openwork.den.provider": "daytona", + "openwork.den.worker-id": workerId, + } +} + +function sandboxName(input: ProvisionInput) { + return slug( + `${env.daytona.sandboxNamePrefix}-${input.name}-${workerHint(input.workerId)}`, + ).slice(0, 63) +} + +function workspaceVolumeName(workerId: WorkerId) { + return slug(`${env.daytona.volumeNamePrefix}-${workerHint(workerId)}-workspace`).slice(0, 63) +} + +function dataVolumeName(workerId: WorkerId) { + return slug(`${env.daytona.volumeNamePrefix}-${workerHint(workerId)}-data`).slice(0, 63) +} + +function buildOpenWorkStartCommand(input: ProvisionInput) { + const orchestratorPackage = env.daytona.openworkVersion?.trim() + ? `openwork-orchestrator@${env.daytona.openworkVersion.trim()}` + : "openwork-orchestrator" + const installStep = [ + `if ! command -v openwork >/dev/null 2>&1; then npm install -g ${shellQuote(orchestratorPackage)}; fi`, + "if ! 
command -v opencode >/dev/null 2>&1; then echo 'opencode binary missing from Daytona runtime; bake it into the snapshot image and expose it on PATH' >&2; exit 1; fi", + ].join("; ") + const openworkServe = [ + "OPENWORK_DATA_DIR=", + shellQuote(env.daytona.runtimeDataPath), + " OPENWORK_SIDECAR_DIR=", + shellQuote(env.daytona.sidecarDir), + " OPENWORK_TOKEN=", + shellQuote(input.clientToken), + " OPENWORK_HOST_TOKEN=", + shellQuote(input.hostToken), + " openwork serve", + ` --workspace ${shellQuote(env.daytona.runtimeWorkspacePath)}`, + ` --openwork-host 0.0.0.0`, + ` --openwork-port ${env.daytona.openworkPort}`, + ` --opencode-host 127.0.0.1`, + ` --opencode-port ${env.daytona.opencodePort}`, + ` --connect-host 127.0.0.1`, + ` --cors '*'`, + ` --approval manual`, + ` --allow-external`, + ` --opencode-source external`, + ` --opencode-bin $(command -v opencode)`, + ` --no-opencode-router`, + ` --verbose`, + ].join("") + + const script = ` +set -u +mkdir -p ${shellQuote(env.daytona.workspaceMountPath)} ${shellQuote(env.daytona.dataMountPath)} ${shellQuote(env.daytona.runtimeWorkspacePath)} ${shellQuote(env.daytona.runtimeDataPath)} ${shellQuote(env.daytona.sidecarDir)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes`)} +ln -sfn ${shellQuote(env.daytona.workspaceMountPath)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes/workspace`) } +ln -sfn ${shellQuote(env.daytona.dataMountPath)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes/data`) } +${installStep} +attempt=0 +while [ "$attempt" -lt 3 ]; do + attempt=$((attempt + 1)) + if ${openworkServe}; then + exit 0 + fi + status=$? 
+ echo "openwork serve failed (attempt $attempt, exit $status); retrying in 3s" + sleep 3 +done +exit 1 +`.trim() + + return `sh -lc ${shellQuote(script)}` +} + +async function waitForVolumeReady(daytona: Daytona, name: string, timeoutMs: number) { + const startedAt = Date.now() + + while (Date.now() - startedAt < timeoutMs) { + const volume = await daytona.volume.get(name) + if (volume.state === "ready") { + return volume + } + await sleep(env.daytona.pollIntervalMs) + } + + throw new Error(`Timed out waiting for Daytona volume ${name} to become ready`) +} + +async function waitForHealth(url: string, timeoutMs: number, sandbox: Sandbox, sessionId: string, commandId: string) { + const startedAt = Date.now() + + while (Date.now() - startedAt < timeoutMs) { + try { + const response = await fetch(`${url.replace(/\/$/, "")}/health`, { method: "GET" }) + if (response.ok) { + return + } + } catch { + // ignore transient startup failures + } + + try { + const command = await sandbox.process.getSessionCommand(sessionId, commandId) + if (typeof command.exitCode === "number" && command.exitCode !== 0) { + const logs = await sandbox.process.getSessionCommandLogs(sessionId, commandId) + throw new Error( + [ + `openwork session exited with ${command.exitCode}`, + logs.stdout?.trim() ? `stdout:\n${logs.stdout.trim().slice(-4000)}` : "", + logs.stderr?.trim() ? `stderr:\n${logs.stderr.trim().slice(-4000)}` : "", + ] + .filter(Boolean) + .join("\n\n"), + ) + } + } catch (error) { + if (error instanceof Error && error.message.startsWith("openwork session exited")) { + throw error + } + } + + await sleep(env.daytona.pollIntervalMs) + } + + const logs = await sandbox.process.getSessionCommandLogs(sessionId, commandId).catch( + () => null, + ) + throw new Error( + [ + `Timed out waiting for Daytona worker health at ${url.replace(/\/$/, "")}/health`, + logs?.stdout?.trim() ? `stdout:\n${logs.stdout.trim().slice(-4000)}` : "", + logs?.stderr?.trim() ? 
`stderr:\n${logs.stderr.trim().slice(-4000)}` : "", + ] + .filter(Boolean) + .join("\n\n"), + ) +} + +async function upsertDaytonaSandbox(input: { + workerId: WorkerId + sandboxId: string + workspaceVolumeId: string + dataVolumeId: string + signedPreviewUrl: string + signedPreviewUrlExpiresAt: Date + region: string | null +}) { + const existing = await db + .select({ id: DaytonaSandboxTable.id }) + .from(DaytonaSandboxTable) + .where(eq(DaytonaSandboxTable.worker_id, input.workerId)) + .limit(1) + + if (existing.length > 0) { + await db + .update(DaytonaSandboxTable) + .set({ + sandbox_id: input.sandboxId, + workspace_volume_id: input.workspaceVolumeId, + data_volume_id: input.dataVolumeId, + signed_preview_url: input.signedPreviewUrl, + signed_preview_url_expires_at: input.signedPreviewUrlExpiresAt, + region: input.region, + }) + .where(eq(DaytonaSandboxTable.worker_id, input.workerId)) + return + } + + await db.insert(DaytonaSandboxTable).values({ + id: createDenTypeId("daytonaSandbox"), + worker_id: input.workerId, + sandbox_id: input.sandboxId, + workspace_volume_id: input.workspaceVolumeId, + data_volume_id: input.dataVolumeId, + signed_preview_url: input.signedPreviewUrl, + signed_preview_url_expires_at: input.signedPreviewUrlExpiresAt, + region: input.region, + }) +} + +export async function getDaytonaSandboxRecord(workerId: WorkerId) { + const rows = await db + .select() + .from(DaytonaSandboxTable) + .where(eq(DaytonaSandboxTable.worker_id, workerId)) + .limit(1) + + return rows[0] ?? 
null +} + +export async function refreshDaytonaSignedPreview(workerId: WorkerId) { + assertDaytonaConfig() + + const record = await getDaytonaSandboxRecord(workerId) + if (!record) { + return null + } + + const daytona = createDaytonaClient() + const sandbox = await daytona.get(record.sandbox_id) + await sandbox.refreshData() + + const expiresInSeconds = normalizedSignedPreviewExpirySeconds() + const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds) + const expiresAt = signedPreviewRefreshAt(expiresInSeconds) + + await db + .update(DaytonaSandboxTable) + .set({ + signed_preview_url: preview.url, + signed_preview_url_expires_at: expiresAt, + region: sandbox.target, + }) + .where(eq(DaytonaSandboxTable.worker_id, workerId)) + + return { + ...record, + signed_preview_url: preview.url, + signed_preview_url_expires_at: expiresAt, + region: sandbox.target, + } +} + +export async function getDaytonaSignedPreviewForProxy(workerId: WorkerId) { + const record = await getDaytonaSandboxRecord(workerId) + if (!record) { + return null + } + + if (record.signed_preview_url_expires_at.getTime() > Date.now()) { + return record.signed_preview_url + } + + const refreshed = await refreshDaytonaSignedPreview(workerId) + return refreshed?.signed_preview_url ?? 
null +} + +export async function provisionWorkerOnDaytona( + input: ProvisionInput, +): Promise { + assertDaytonaConfig() + + const daytona = createDaytonaClient() + const labels = sandboxLabels(input.workerId) + const workspaceVolumeNameValue = workspaceVolumeName(input.workerId) + const dataVolumeNameValue = dataVolumeName(input.workerId) + await daytona.volume.get(workspaceVolumeNameValue, true) + await daytona.volume.get(dataVolumeNameValue, true) + const workspaceVolume = await waitForVolumeReady( + daytona, + workspaceVolumeNameValue, + env.daytona.createTimeoutSeconds * 1000, + ) + const dataVolume = await waitForVolumeReady( + daytona, + dataVolumeNameValue, + env.daytona.createTimeoutSeconds * 1000, + ) + let sandbox: Awaited> | null = null + + try { + sandbox = env.daytona.snapshot + ? await daytona.create( + { + name: sandboxName(input), + snapshot: env.daytona.snapshot, + autoStopInterval: env.daytona.autoStopInterval, + autoArchiveInterval: env.daytona.autoArchiveInterval, + autoDeleteInterval: env.daytona.autoDeleteInterval, + public: env.daytona.public, + labels, + envVars: { + DEN_WORKER_ID: input.workerId, + }, + volumes: [ + { + volumeId: workspaceVolume.id, + mountPath: env.daytona.workspaceMountPath, + }, + { + volumeId: dataVolume.id, + mountPath: env.daytona.dataMountPath, + }, + ], + }, + { timeout: env.daytona.createTimeoutSeconds }, + ) + : await daytona.create( + { + name: sandboxName(input), + image: env.daytona.image, + autoStopInterval: env.daytona.autoStopInterval, + autoArchiveInterval: env.daytona.autoArchiveInterval, + autoDeleteInterval: env.daytona.autoDeleteInterval, + public: env.daytona.public, + labels, + envVars: { + DEN_WORKER_ID: input.workerId, + }, + resources: { + cpu: env.daytona.resources.cpu, + memory: env.daytona.resources.memory, + disk: env.daytona.resources.disk, + }, + volumes: [ + { + volumeId: workspaceVolume.id, + mountPath: env.daytona.workspaceMountPath, + }, + { + volumeId: dataVolume.id, + mountPath: 
env.daytona.dataMountPath, + }, + ], + }, + { timeout: env.daytona.createTimeoutSeconds }, + ) + + const sessionId = `openwork-${workerHint(input.workerId)}` + await sandbox.process.createSession(sessionId) + const command = await sandbox.process.executeSessionCommand( + sessionId, + { + command: buildOpenWorkStartCommand(input), + runAsync: true, + }, + 0, + ) + + const expiresInSeconds = normalizedSignedPreviewExpirySeconds() + const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds) + await waitForHealth(preview.url, env.daytona.healthcheckTimeoutMs, sandbox, sessionId, command.cmdId) + await upsertDaytonaSandbox({ + workerId: input.workerId, + sandboxId: sandbox.id, + workspaceVolumeId: workspaceVolume.id, + dataVolumeId: dataVolume.id, + signedPreviewUrl: preview.url, + signedPreviewUrlExpiresAt: signedPreviewRefreshAt(expiresInSeconds), + region: sandbox.target ?? null, + }) + + return { + provider: "daytona", + url: workerProxyUrl(input.workerId), + status: "healthy", + region: sandbox.target, + } + } catch (error) { + if (sandbox) { + await sandbox.delete(env.daytona.deleteTimeoutSeconds).catch(() => {}) + } + await daytona.volume.delete(workspaceVolume).catch(() => {}) + await daytona.volume.delete(dataVolume).catch(() => {}) + throw error + } +} + +export async function deprovisionWorkerOnDaytona(workerId: WorkerId) { + assertDaytonaConfig() + + const daytona = createDaytonaClient() + const record = await getDaytonaSandboxRecord(workerId) + + if (record) { + try { + const sandbox = await daytona.get(record.sandbox_id) + await sandbox.delete(env.daytona.deleteTimeoutSeconds) + } catch (error) { + const message = error instanceof Error ? 
error.message : "unknown_error" + console.warn(`[provisioner] failed to delete Daytona sandbox ${record.sandbox_id}: ${message}`) + } + + const volumes = await daytona.volume.list().catch(() => []) + for (const volumeId of [record.workspace_volume_id, record.data_volume_id]) { + const volume = volumes.find((entry) => entry.id === volumeId) + if (!volume) { + continue + } + await daytona.volume.delete(volume).catch((error) => { + const message = error instanceof Error ? error.message : "unknown_error" + console.warn(`[provisioner] failed to delete Daytona volume ${volumeId}: ${message}`) + }) + } + + return + } + + const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20) + + for (const sandbox of sandboxes.items) { + await sandbox.delete(env.daytona.deleteTimeoutSeconds).catch((error) => { + const message = error instanceof Error ? error.message : "unknown_error" + console.warn(`[provisioner] failed to delete Daytona sandbox ${sandbox.id}: ${message}`) + }) + } + + const volumes = await daytona.volume.list() + for (const name of [workspaceVolumeName(workerId), dataVolumeName(workerId)]) { + const volume = volumes.find((entry) => entry.name === name) + if (!volume) { + continue + } + await daytona.volume.delete(volume).catch((error) => { + const message = error instanceof Error ? 
error.message : "unknown_error" + console.warn(`[provisioner] failed to delete Daytona volume ${name}: ${message}`) + }) + } +} diff --git a/services/den/src/workers/provisioner.ts b/services/den/src/workers/provisioner.ts index 26fe1f3a..166839db 100644 --- a/services/den/src/workers/provisioner.ts +++ b/services/den/src/workers/provisioner.ts @@ -1,11 +1,18 @@ import { env } from "../env.js"; +import { WorkerTable } from "../db/schema.js"; +import { + deprovisionWorkerOnDaytona, + provisionWorkerOnDaytona, +} from "./daytona.js"; import { customDomainForWorker, ensureVercelDnsRecord, } from "./vanity-domain.js"; +type WorkerId = typeof WorkerTable.$inferSelect.id; + export type ProvisionInput = { - workerId: string; + workerId: WorkerId; name: string; hostToken: string; clientToken: string; @@ -331,6 +338,10 @@ export async function provisionWorker( return provisionWorkerOnRender(input); } + if (env.provisionerMode === "daytona") { + return provisionWorkerOnDaytona(input); + } + const template = env.workerUrlTemplate ?? "https://workers.local/{workerId}"; const url = template.replace("{workerId}", input.workerId); return { @@ -341,9 +352,14 @@ export async function provisionWorker( } export async function deprovisionWorker(input: { - workerId: string; + workerId: WorkerId; instanceUrl: string | null; }) { + if (env.provisionerMode === "daytona") { + await deprovisionWorkerOnDaytona(input.workerId); + return; + } + if (env.provisionerMode !== "render") { return; }