feat(den): add daytona-backed docker dev flow (#918)

* feat(den): add daytona-backed docker dev flow

* fix(den): allow multiple cloud workers in dev

* fix(den): use Daytona snapshots for sandbox runtime

Use a prebuilt Daytona snapshot for the dev worker runtime so sandboxes start with openwork and opencode already installed. Pass the snapshot through the local Docker flow and add a helper to build the snapshot image for repeatable setup.

* chore(den): lower Daytona snapshot defaults

Reduce the default snapshot footprint to 1 CPU and 2GB RAM so local Daytona worker testing fits smaller org limits more easily.

* fix(den): stabilize Daytona-backed cloud workers

Make Daytona-backed cloud workers stable enough to reconnect through a dedicated proxy instead of persisting expiring signed preview URLs. Split the proxy into its own deployable service, share Den schema access through a common package, and fix the web badge so healthy workers show ready.

* chore(den-db): add Drizzle package scripts

Move the shared schema package toward owning its own migration workflow by adding generate and migrate commands plus a local Drizzle config.

* chore: update lockfile

Refresh the workspace lockfile so the new den-db Drizzle tooling is captured in pnpm-lock.yaml.

* feat(den-worker-proxy): make Vercel deployment-ready

Align the proxy service with Vercel's Hono runtime entry pattern and keep a separate Node server entry for Docker/local runs. Also scaffold the Vercel project/env setup and wire Render deploy sync to pass Daytona variables needed for daytona mode.

* feat(den-db): add db mode switch for PlanetScale

Support DB_MODE=planetscale with Drizzle's PlanetScale serverless driver while keeping mysql2 as the local default. This lets Vercel-hosted services use HTTP database access without changing local development workflows.

* refactor(den-db): adopt shared TypeID ids

Move the Den TypeID system into a shared utils package and use it across auth, org, worker, and sandbox records so fresh databases get one consistent internal ID format. Wire Better Auth into the same generator and update Den request boundaries to normalize typed ids cleanly.

* fix(den): restore docker dev stack after refactor

Include the shared utils package in the Den Docker images, expose MySQL to the host for local inspection, and fix the remaining Den build/runtime issues surfaced by the Docker path after the shared package and TypeID changes.

* docs(den): document Daytona snapshot setup

Add README guidance for building and publishing the prebuilt Daytona runtime snapshot, including the helper script, required env, and how to point Den at the snapshot for local Daytona mode.

* refactor(den-db): reset migrations and load env files

Replace the old Den SQL migration history with a fresh baseline for the current schema, and let Drizzle commands load database credentials from env files. Default to mysql when DATABASE_URL is present and otherwise use PlanetScale credentials so local Docker and hosted environments can share the same DB package cleanly.

* fix(den): prepare manual PlanetScale deploys

Update the Render workflow and Docker build path for the shared workspace packages, support PlanetScale credentials in the manual SQL migration runner, and stop auto-running DB migrations on Den startup so schema changes stay manual.

* feat(den-v2): add Daytona-first control plane

Create a new den-v2 service from the current Daytona-enabled control plane, default it to Daytona provisioning, and add a dedicated Render deployment workflow targeting the new v2 Render service.

* feat(den-worker-proxy): redirect root to landing

Send root proxy traffic to openworklabs.com so direct visits to the worker proxy domain do not hit worker-resolution errors.

---------

Co-authored-by: OmarMcAdam <gh@mcadam.io>
This commit is contained in:
ben
2026-03-16 21:20:26 -07:00
committed by GitHub
parent 65a5046e71
commit 0e88389849
93 changed files with 12478 additions and 931 deletions

10
.dockerignore Normal file
View File

@@ -0,0 +1,10 @@
# Keep VCS metadata, CI/tooling dirs, dependencies, build output, and env
# files (secrets) out of the Docker build context.
.git
.github
.opencode
node_modules
**/node_modules
tmp
dist
**/dist
.env
.env.*

394
.github/workflows/deploy-den-v2.yml vendored Normal file
View File

@@ -0,0 +1,394 @@
name: Deploy Den v2
# Deploy on dev pushes that touch den-v2 or its shared workspace packages,
# or manually via workflow_dispatch (optionally overriding the Render service).
on:
push:
branches:
- dev
paths:
- "services/den-v2/**"
- "packages/den-db/**"
- "packages/utils/**"
- ".github/workflows/deploy-den-v2.yml"
workflow_dispatch:
inputs:
render_service_id:
description: "Optional Render service id override for test/staging deploys"
required: false
type: string
permissions:
contents: read
# One deploy per ref at a time; a newer run cancels an in-flight one.
concurrency:
group: deploy-den-v2-${{ github.ref }}
cancel-in-progress: true
jobs:
deploy:
runs-on: ubuntu-latest
if: github.repository == 'different-ai/openwork'
steps:
# Fail fast with explicit ::error messages before touching Render.
- name: Validate required secrets
env:
RENDER_API_KEY: ${{ secrets.RENDER_API_KEY }}
RENDER_DEN_CONTROL_PLANE_SERVICE_ID: ${{ inputs.render_service_id || 'srv-d6sajsua2pns7383mis0' }}
RENDER_OWNER_ID: ${{ secrets.RENDER_OWNER_ID }}
DEN_DATABASE_URL: ${{ secrets.DEN_DATABASE_URL || secrets.DATABASE_URL }}
DEN_DATABASE_HOST: ${{ secrets.DEN_DATABASE_HOST || secrets.DATABASE_HOST }}
DEN_DATABASE_USERNAME: ${{ secrets.DEN_DATABASE_USERNAME || secrets.DATABASE_USERNAME }}
DEN_DATABASE_PASSWORD: ${{ secrets.DEN_DATABASE_PASSWORD || secrets.DATABASE_PASSWORD }}
DEN_BETTER_AUTH_SECRET: ${{ secrets.DEN_BETTER_AUTH_SECRET }}
DEN_GITHUB_CLIENT_ID: ${{ secrets.DEN_GITHUB_CLIENT_ID }}
DEN_GITHUB_CLIENT_SECRET: ${{ secrets.DEN_GITHUB_CLIENT_SECRET }}
DEN_GOOGLE_CLIENT_ID: ${{ secrets.DEN_GOOGLE_CLIENT_ID }}
DEN_GOOGLE_CLIENT_SECRET: ${{ secrets.DEN_GOOGLE_CLIENT_SECRET }}
DAYTONA_API_KEY: ${{ secrets.DAYTONA_API_KEY }}
POLAR_ACCESS_TOKEN: ${{ secrets.POLAR_ACCESS_TOKEN }}
POLAR_PRODUCT_ID: ${{ secrets.POLAR_PRODUCT_ID }}
POLAR_BENEFIT_ID: ${{ secrets.POLAR_BENEFIT_ID }}
VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }}
DEN_PROVISIONER_MODE: ${{ vars.DEN_PROVISIONER_MODE }}
DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX: ${{ vars.DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX }}
DEN_DAYTONA_WORKER_PROXY_BASE_URL: ${{ vars.DEN_DAYTONA_WORKER_PROXY_BASE_URL }}
DEN_POLAR_FEATURE_GATE_ENABLED: ${{ vars.DEN_POLAR_FEATURE_GATE_ENABLED }}
run: |
missing=0
# Unconditionally required Render/auth secrets.
for key in RENDER_API_KEY RENDER_DEN_CONTROL_PLANE_SERVICE_ID RENDER_OWNER_ID DEN_BETTER_AUTH_SECRET; do
if [ -z "${!key}" ]; then
echo "::error::Missing required secret: $key"
missing=1
fi
done
# Either DATABASE_URL or the discrete PlanetScale-style credentials must be set.
if [ -z "$DEN_DATABASE_URL" ]; then
for key in DEN_DATABASE_HOST DEN_DATABASE_USERNAME DEN_DATABASE_PASSWORD; do
if [ -z "${!key}" ]; then
echo "::error::Missing required database secret: $key (required when DEN_DATABASE_URL is not set)"
missing=1
fi
done
fi
# Vanity worker domains are managed through Vercel, so a token is needed.
vanity_suffix="${DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX:-openwork.studio}"
if [ -n "$vanity_suffix" ] && [ -z "$VERCEL_TOKEN" ]; then
echo "::error::Missing required secret: VERCEL_TOKEN (required when vanity domains are enabled)"
missing=1
fi
# Normalize toggles to lowercase before comparing.
feature_enabled="${DEN_POLAR_FEATURE_GATE_ENABLED:-false}"
feature_enabled="$(echo "$feature_enabled" | tr '[:upper:]' '[:lower:]')"
provisioner_mode="${DEN_PROVISIONER_MODE:-daytona}"
provisioner_mode="$(echo "$provisioner_mode" | tr '[:upper:]' '[:lower:]')"
# Daytona provisioning requires the API key and the worker proxy base URL.
if [ "$provisioner_mode" = "daytona" ]; then
if [ -z "$DAYTONA_API_KEY" ]; then
echo "::error::Missing required secret: DAYTONA_API_KEY (required when DEN_PROVISIONER_MODE=daytona)"
missing=1
fi
if [ -z "$DEN_DAYTONA_WORKER_PROXY_BASE_URL" ]; then
echo "::error::Missing required variable: DEN_DAYTONA_WORKER_PROXY_BASE_URL (required when DEN_PROVISIONER_MODE=daytona)"
missing=1
fi
fi
# Polar paywall, when enabled, needs its full credential set.
if [ "$feature_enabled" = "true" ]; then
for key in POLAR_ACCESS_TOKEN POLAR_PRODUCT_ID POLAR_BENEFIT_ID; do
if [ -z "${!key}" ]; then
echo "::error::Missing required paywall secret: $key"
missing=1
fi
done
fi
# OAuth client id/secret must be provided in pairs (GitHub and Google).
if [ -n "$DEN_GITHUB_CLIENT_ID" ] && [ -z "$DEN_GITHUB_CLIENT_SECRET" ]; then
echo "::error::Missing required secret: DEN_GITHUB_CLIENT_SECRET (required when DEN_GITHUB_CLIENT_ID is set)"
missing=1
fi
if [ -n "$DEN_GITHUB_CLIENT_SECRET" ] && [ -z "$DEN_GITHUB_CLIENT_ID" ]; then
echo "::error::Missing required secret: DEN_GITHUB_CLIENT_ID (required when DEN_GITHUB_CLIENT_SECRET is set)"
missing=1
fi
if [ -n "$DEN_GOOGLE_CLIENT_ID" ] && [ -z "$DEN_GOOGLE_CLIENT_SECRET" ]; then
echo "::error::Missing required secret: DEN_GOOGLE_CLIENT_SECRET (required when DEN_GOOGLE_CLIENT_ID is set)"
missing=1
fi
if [ -n "$DEN_GOOGLE_CLIENT_SECRET" ] && [ -z "$DEN_GOOGLE_CLIENT_ID" ]; then
echo "::error::Missing required secret: DEN_GOOGLE_CLIENT_ID (required when DEN_GOOGLE_CLIENT_SECRET is set)"
missing=1
fi
if [ "$missing" -ne 0 ]; then
exit 1
fi
# Push the full env-var set to the Render service, kick a deploy, and poll it.
- name: Sync Render env vars and deploy latest commit
env:
RENDER_API_KEY: ${{ secrets.RENDER_API_KEY }}
RENDER_DEN_CONTROL_PLANE_SERVICE_ID: ${{ inputs.render_service_id || 'srv-d6sajsua2pns7383mis0' }}
RENDER_OWNER_ID: ${{ secrets.RENDER_OWNER_ID }}
DEN_DATABASE_URL: ${{ secrets.DEN_DATABASE_URL || secrets.DATABASE_URL }}
DEN_DATABASE_HOST: ${{ secrets.DEN_DATABASE_HOST || secrets.DATABASE_HOST }}
DEN_DATABASE_USERNAME: ${{ secrets.DEN_DATABASE_USERNAME || secrets.DATABASE_USERNAME }}
DEN_DATABASE_PASSWORD: ${{ secrets.DEN_DATABASE_PASSWORD || secrets.DATABASE_PASSWORD }}
DEN_BETTER_AUTH_SECRET: ${{ secrets.DEN_BETTER_AUTH_SECRET }}
DEN_GITHUB_CLIENT_ID: ${{ secrets.DEN_GITHUB_CLIENT_ID }}
DEN_GITHUB_CLIENT_SECRET: ${{ secrets.DEN_GITHUB_CLIENT_SECRET }}
DEN_GOOGLE_CLIENT_ID: ${{ secrets.DEN_GOOGLE_CLIENT_ID }}
DEN_GOOGLE_CLIENT_SECRET: ${{ secrets.DEN_GOOGLE_CLIENT_SECRET }}
DAYTONA_API_KEY: ${{ secrets.DAYTONA_API_KEY }}
DEN_BETTER_AUTH_URL: ${{ vars.DEN_BETTER_AUTH_URL }}
DEN_PROVISIONER_MODE: ${{ vars.DEN_PROVISIONER_MODE }}
DEN_RENDER_WORKER_PLAN: ${{ vars.DEN_RENDER_WORKER_PLAN }}
DEN_RENDER_WORKER_OPENWORK_VERSION: ${{ vars.DEN_RENDER_WORKER_OPENWORK_VERSION }}
DEN_CORS_ORIGINS: ${{ vars.DEN_CORS_ORIGINS }}
DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX: ${{ vars.DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX }}
DEN_RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS: ${{ vars.DEN_RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS }}
DEN_VERCEL_API_BASE: ${{ vars.DEN_VERCEL_API_BASE }}
DEN_VERCEL_TEAM_ID: ${{ vars.DEN_VERCEL_TEAM_ID }}
DEN_VERCEL_TEAM_SLUG: ${{ vars.DEN_VERCEL_TEAM_SLUG }}
DEN_VERCEL_DNS_DOMAIN: ${{ vars.DEN_VERCEL_DNS_DOMAIN }}
DEN_DAYTONA_API_URL: ${{ vars.DEN_DAYTONA_API_URL }}
DEN_DAYTONA_TARGET: ${{ vars.DEN_DAYTONA_TARGET }}
DEN_DAYTONA_SNAPSHOT: ${{ vars.DEN_DAYTONA_SNAPSHOT }}
DEN_DAYTONA_WORKER_PROXY_BASE_URL: ${{ vars.DEN_DAYTONA_WORKER_PROXY_BASE_URL }}
DEN_DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: ${{ vars.DEN_DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS }}
DEN_DAYTONA_OPENWORK_VERSION: ${{ vars.DEN_DAYTONA_OPENWORK_VERSION }}
DEN_POLAR_FEATURE_GATE_ENABLED: ${{ vars.DEN_POLAR_FEATURE_GATE_ENABLED }}
DEN_POLAR_API_BASE: ${{ vars.DEN_POLAR_API_BASE }}
DEN_POLAR_SUCCESS_URL: ${{ vars.DEN_POLAR_SUCCESS_URL }}
DEN_POLAR_RETURN_URL: ${{ vars.DEN_POLAR_RETURN_URL }}
VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }}
POLAR_ACCESS_TOKEN: ${{ secrets.POLAR_ACCESS_TOKEN }}
POLAR_PRODUCT_ID: ${{ secrets.POLAR_PRODUCT_ID }}
POLAR_BENEFIT_ID: ${{ secrets.POLAR_BENEFIT_ID }}
run: |
python3 <<'PY'
import json
import os
import time
import urllib.error
import urllib.parse
import urllib.request
# Read required identifiers up front; KeyError here fails the step loudly.
api_key = os.environ["RENDER_API_KEY"]
service_id = os.environ["RENDER_DEN_CONTROL_PLANE_SERVICE_ID"]
owner_id = os.environ["RENDER_OWNER_ID"]
openwork_version = os.environ.get("DEN_RENDER_WORKER_OPENWORK_VERSION")
worker_plan = os.environ.get("DEN_RENDER_WORKER_PLAN") or "standard"
# Trailing "or" re-applies the default if the variable was whitespace-only.
provisioner_mode = (os.environ.get("DEN_PROVISIONER_MODE") or "daytona").strip().lower() or "daytona"
configured_cors_origins = os.environ.get("DEN_CORS_ORIGINS") or ""
worker_public_domain_suffix = os.environ.get("DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX") or "openwork.studio"
custom_domain_ready_timeout_ms = os.environ.get("DEN_RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS") or "240000"
vercel_api_base = os.environ.get("DEN_VERCEL_API_BASE") or "https://api.vercel.com"
vercel_team_id = os.environ.get("DEN_VERCEL_TEAM_ID") or ""
vercel_team_slug = os.environ.get("DEN_VERCEL_TEAM_SLUG") or "prologe"
vercel_dns_domain = os.environ.get("DEN_VERCEL_DNS_DOMAIN") or worker_public_domain_suffix
vercel_token = os.environ.get("VERCEL_TOKEN") or ""
daytona_api_url = os.environ.get("DEN_DAYTONA_API_URL") or "https://app.daytona.io/api"
daytona_api_key = os.environ.get("DAYTONA_API_KEY") or ""
daytona_target = os.environ.get("DEN_DAYTONA_TARGET") or ""
daytona_snapshot = os.environ.get("DEN_DAYTONA_SNAPSHOT") or ""
daytona_worker_proxy_base_url = os.environ.get("DEN_DAYTONA_WORKER_PROXY_BASE_URL") or ""
daytona_signed_preview_expires_seconds = os.environ.get("DEN_DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS") or "86400"
daytona_openwork_version = os.environ.get("DEN_DAYTONA_OPENWORK_VERSION") or ""
paywall_enabled = (os.environ.get("DEN_POLAR_FEATURE_GATE_ENABLED") or "false").lower() == "true"
polar_api_base = os.environ.get("DEN_POLAR_API_BASE") or "https://api.polar.sh"
polar_success_url = os.environ.get("DEN_POLAR_SUCCESS_URL") or "https://app.openwork.software"
polar_return_url = os.environ.get("DEN_POLAR_RETURN_URL") or polar_success_url
polar_access_token = os.environ.get("POLAR_ACCESS_TOKEN") or ""
polar_product_id = os.environ.get("POLAR_PRODUCT_ID") or ""
polar_benefit_id = os.environ.get("POLAR_BENEFIT_ID") or ""
github_client_id = os.environ.get("DEN_GITHUB_CLIENT_ID") or ""
github_client_secret = os.environ.get("DEN_GITHUB_CLIENT_SECRET") or ""
google_client_id = os.environ.get("DEN_GOOGLE_CLIENT_ID") or ""
google_client_secret = os.environ.get("DEN_GOOGLE_CLIENT_SECRET") or ""
better_auth_url = os.environ.get("DEN_BETTER_AUTH_URL") or "https://app.openwork.software"
# OAuth client id/secret must be provided as pairs (mirrors the bash validation step).
if bool(github_client_id) != bool(github_client_secret):
raise RuntimeError(
"DEN_GITHUB_CLIENT_ID and DEN_GITHUB_CLIENT_SECRET must either both be set or both be empty"
)
if bool(google_client_id) != bool(google_client_secret):
raise RuntimeError(
"DEN_GOOGLE_CLIENT_ID and DEN_GOOGLE_CLIENT_SECRET must either both be set or both be empty"
)
def validate_redirect_url(name: str, value: str):
parsed = urllib.parse.urlparse(value)
if parsed.scheme not in {"http", "https"} or not parsed.netloc:
raise RuntimeError(f"{name} must be an absolute http(s) URL, got: {value}")
validate_redirect_url("DEN_POLAR_SUCCESS_URL", polar_success_url)
validate_redirect_url("DEN_POLAR_RETURN_URL", polar_return_url)
validate_redirect_url("DEN_BETTER_AUTH_URL", better_auth_url)
if provisioner_mode == "daytona":
if not daytona_api_key:
raise RuntimeError("DEN_PROVISIONER_MODE=daytona requires DAYTONA_API_KEY")
if not daytona_worker_proxy_base_url:
raise RuntimeError("DEN_PROVISIONER_MODE=daytona requires DEN_DAYTONA_WORKER_PROXY_BASE_URL")
validate_redirect_url("DEN_DAYTONA_WORKER_PROXY_BASE_URL", daytona_worker_proxy_base_url)
if paywall_enabled and (not polar_access_token or not polar_product_id or not polar_benefit_id):
raise RuntimeError(
"DEN_POLAR_FEATURE_GATE_ENABLED=true requires POLAR_ACCESS_TOKEN, POLAR_PRODUCT_ID, and POLAR_BENEFIT_ID"
)
# Origins compare without trailing slashes; "*" passes through untouched.
def normalize_origin(value: str) -> str:
trimmed = value.strip()
if trimmed == "*":
return trimmed
return trimmed.rstrip("/")
# Merge configured origins with defaults, preserving order and deduping.
def build_cors_origins(raw: str, defaults: list[str]) -> str:
candidates: list[str] = []
if raw.strip():
candidates.extend(raw.split(","))
candidates.extend(defaults)
seen = set()
normalized = []
for value in candidates:
origin = normalize_origin(value)
if not origin or origin in seen:
continue
seen.add(origin)
normalized.append(origin)
if not normalized:
raise RuntimeError("Unable to derive CORS_ORIGINS for Den deployment")
return ",".join(normalized)
headers = {
"Authorization": f"Bearer {api_key}",
"Accept": "application/json",
"Content-Type": "application/json",
}
# Minimal Render REST helper; raises with a response snippet on HTTP errors.
def request(method: str, path: str, body=None):
url = f"https://api.render.com/v1{path}"
data = None
if body is not None:
data = json.dumps(body).encode("utf-8")
req = urllib.request.Request(url, data=data, method=method, headers=headers)
try:
with urllib.request.urlopen(req, timeout=60) as resp:
text = resp.read().decode("utf-8")
return resp.status, json.loads(text) if text else None
except urllib.error.HTTPError as err:
text = err.read().decode("utf-8", "replace")
raise RuntimeError(f"{method} {path} failed ({err.code}): {text[:600]}")
# The service's own public URL is always allowed as a CORS origin.
_, service = request("GET", f"/services/{service_id}")
service_url = (service.get("serviceDetails") or {}).get("url")
if not service_url:
raise RuntimeError(f"Render service {service_id} has no public URL")
cors_origins = build_cors_origins(
configured_cors_origins,
[
"https://app.openwork.software",
"https://api.openwork.software",
service_url,
],
)
# Full env-var set pushed to Render (the PUT below replaces existing vars).
env_vars = [
{"key": "BETTER_AUTH_SECRET", "value": os.environ["DEN_BETTER_AUTH_SECRET"]},
{"key": "BETTER_AUTH_URL", "value": better_auth_url},
{"key": "GITHUB_CLIENT_ID", "value": github_client_id},
{"key": "GITHUB_CLIENT_SECRET", "value": github_client_secret},
{"key": "GOOGLE_CLIENT_ID", "value": google_client_id},
{"key": "GOOGLE_CLIENT_SECRET", "value": google_client_secret},
{"key": "CORS_ORIGINS", "value": cors_origins},
{"key": "PROVISIONER_MODE", "value": provisioner_mode},
{"key": "RENDER_API_BASE", "value": "https://api.render.com/v1"},
{"key": "RENDER_API_KEY", "value": api_key},
{"key": "RENDER_OWNER_ID", "value": owner_id},
{"key": "RENDER_WORKER_REPO", "value": "https://github.com/different-ai/openwork"},
{"key": "RENDER_WORKER_BRANCH", "value": "dev"},
{"key": "RENDER_WORKER_ROOT_DIR", "value": "services/den-worker-runtime"},
{"key": "RENDER_WORKER_PLAN", "value": worker_plan},
{"key": "RENDER_WORKER_REGION", "value": "oregon"},
{"key": "RENDER_WORKER_NAME_PREFIX", "value": "den-worker-openwork"},
{"key": "RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX", "value": worker_public_domain_suffix},
{"key": "RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS", "value": custom_domain_ready_timeout_ms},
{"key": "RENDER_PROVISION_TIMEOUT_MS", "value": "900000"},
{"key": "RENDER_HEALTHCHECK_TIMEOUT_MS", "value": "180000"},
{"key": "RENDER_POLL_INTERVAL_MS", "value": "5000"},
{"key": "VERCEL_API_BASE", "value": vercel_api_base},
{"key": "VERCEL_TOKEN", "value": vercel_token},
{"key": "VERCEL_TEAM_ID", "value": vercel_team_id},
{"key": "VERCEL_TEAM_SLUG", "value": vercel_team_slug},
{"key": "VERCEL_DNS_DOMAIN", "value": vercel_dns_domain},
{"key": "POLAR_FEATURE_GATE_ENABLED", "value": "true" if paywall_enabled else "false"},
{"key": "POLAR_API_BASE", "value": polar_api_base},
{"key": "POLAR_ACCESS_TOKEN", "value": polar_access_token},
{"key": "POLAR_PRODUCT_ID", "value": polar_product_id},
{"key": "POLAR_BENEFIT_ID", "value": polar_benefit_id},
{"key": "POLAR_SUCCESS_URL", "value": polar_success_url},
{"key": "POLAR_RETURN_URL", "value": polar_return_url},
]
# DATABASE_URL wins; otherwise pass the discrete PlanetScale-style credentials.
database_url = os.environ.get("DEN_DATABASE_URL") or ""
database_host = os.environ.get("DEN_DATABASE_HOST") or ""
database_username = os.environ.get("DEN_DATABASE_USERNAME") or ""
database_password = os.environ.get("DEN_DATABASE_PASSWORD") or ""
if database_url:
env_vars.append({"key": "DATABASE_URL", "value": database_url})
else:
env_vars.extend(
[
{"key": "DATABASE_HOST", "value": database_host},
{"key": "DATABASE_USERNAME", "value": database_username},
{"key": "DATABASE_PASSWORD", "value": database_password},
]
)
if provisioner_mode == "daytona":
env_vars.extend(
[
{"key": "DAYTONA_API_URL", "value": daytona_api_url},
{"key": "DAYTONA_API_KEY", "value": daytona_api_key},
{"key": "DAYTONA_TARGET", "value": daytona_target},
{"key": "DAYTONA_SNAPSHOT", "value": daytona_snapshot},
{"key": "DAYTONA_WORKER_PROXY_BASE_URL", "value": daytona_worker_proxy_base_url},
{"key": "DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS", "value": daytona_signed_preview_expires_seconds},
]
)
if daytona_openwork_version:
env_vars.append({"key": "DAYTONA_OPENWORK_VERSION", "value": daytona_openwork_version})
if openwork_version:
env_vars.append({"key": "RENDER_WORKER_OPENWORK_VERSION", "value": openwork_version})
request("PUT", f"/services/{service_id}/env-vars", env_vars)
# Trigger a deploy, then poll (up to 30 minutes) for a terminal status.
_, deploy = request("POST", f"/services/{service_id}/deploys", {})
deploy_id = deploy.get("id") or (deploy.get("deploy") or {}).get("id")
if not deploy_id:
raise RuntimeError(f"Unexpected deploy response: {deploy}")
terminal = {"live", "update_failed", "build_failed", "canceled"}
started = time.time()
while time.time() - started < 1800:
_, deploys = request("GET", f"/services/{service_id}/deploys?limit=1")
latest = deploys[0]["deploy"] if deploys else None
if latest and latest.get("id") == deploy_id and latest.get("status") in terminal:
status = latest.get("status")
if status != "live":
raise RuntimeError(f"Render deploy {deploy_id} ended with {status}")
print(f"Render deploy {deploy_id} is live at {service_url}")
break
time.sleep(10)
else:
raise RuntimeError(f"Timed out waiting for deploy {deploy_id}")
PY

17
.github/workflows/deploy-den.yml vendored Normal file
View File

@@ -0,0 +1,17 @@
# Tombstone workflow: the legacy Den Render deploy is superseded by
# deploy-den-v2.yml. The `if: false` guard keeps it from ever running.
name: Deploy Den (disabled)
on:
workflow_dispatch:
jobs:
disabled:
if: false
runs-on: ubuntu-latest
steps:
- name: Disabled
run: |
echo "deploy-den.yml is intentionally disabled"
# Historical workflow intentionally commented out.
# The previous Render deployment automation for the legacy Den service
# should not run anymore.

View File

@@ -0,0 +1,10 @@
# MySQL mode: if DATABASE_URL is set, den-db uses mysql/mysql2.
DATABASE_URL=
# PlanetScale mode: used when DATABASE_URL is not set.
DATABASE_HOST=
DATABASE_USERNAME=
DATABASE_PASSWORD=
# Optional explicit env file path for Drizzle commands.
# OPENWORK_DEN_DB_ENV_PATH=/absolute/path/to/.env.production

View File

@@ -0,0 +1,36 @@
import "./src/load-env.ts"
import path from "node:path"
import { fileURLToPath } from "node:url"
import { defineConfig } from "drizzle-kit"
import { parseMySqlConnectionConfig } from "./src/mysql-config.ts"

// Directory containing this config file (ESM modules have no __dirname).
const currentDir = path.dirname(fileURLToPath(import.meta.url))
const databaseUrl = process.env.DATABASE_URL?.trim()

/**
 * Resolve credentials for drizzle-kit: a DATABASE_URL selects mysql mode;
 * otherwise the discrete PlanetScale-style variables must supply host/user.
 */
function resolveDrizzleDbCredentials() {
  if (databaseUrl) {
    return parseMySqlConnectionConfig(databaseUrl)
  }
  const host = process.env.DATABASE_HOST?.trim()
  const user = process.env.DATABASE_USERNAME?.trim()
  const password = process.env.DATABASE_PASSWORD ?? ""
  if (!host || !user) {
    throw new Error("Provide DATABASE_URL for mysql or DATABASE_HOST/DATABASE_USERNAME/DATABASE_PASSWORD for planetscale")
  }
  return { host, user, password }
}

// Migration SQL is written into services/den/drizzle, so the service owns the
// migration history while this package owns the schema source.
export default defineConfig({
  dialect: "mysql",
  schema: path.join(currentDir, "src", "schema.ts"),
  out: path.join(currentDir, "..", "..", "services", "den", "drizzle"),
  dbCredentials: resolveDrizzleDbCredentials(),
})

View File

@@ -0,0 +1,31 @@
{
"name": "@openwork/den-db",
"private": true,
"type": "module",
"main": "./dist/index.js",
"types": "./dist/index.d.ts",
"exports": {
".": "./dist/index.js",
"./drizzle": "./dist/drizzle.js",
"./typeid": "./dist/typeid.js"
},
"scripts": {
"build": "pnpm run build:utils && tsc -p tsconfig.json",
"build:utils": "pnpm --dir ../utils run build",
"db:generate": "pnpm run build && node --import tsx ./node_modules/drizzle-kit/bin.cjs generate --config drizzle.config.ts",
"db:migrate": "pnpm run build && node --import tsx ./node_modules/drizzle-kit/bin.cjs migrate --config drizzle.config.ts",
"db:push": "pnpm run build && node --import tsx ./node_modules/drizzle-kit/bin.cjs push --config drizzle.config.ts"
},
"dependencies": {
"@different-ai/openwork-utils": "workspace:*",
"@planetscale/database": "^1.19.0",
"drizzle-orm": "^0.45.1",
"mysql2": "^3.11.3"
},
"devDependencies": {
"@types/node": "^20.11.30",
"drizzle-kit": "^0.31.9",
"tsx": "^4.21.0",
"typescript": "^5.5.4"
}
}

View File

@@ -0,0 +1,182 @@
import { Client } from "@planetscale/database"
import { drizzle } from "drizzle-orm/mysql2"
import { drizzle as drizzlePlanetScale } from "drizzle-orm/planetscale-serverless"
import type { FieldPacket, QueryOptions, QueryResult } from "mysql2"
import mysql from "mysql2/promise"
import { parseMySqlConnectionConfig } from "./mysql-config.js"
import * as schema from "./schema.js"
// Which driver backs the db handle: "mysql" (mysql2 pool) or "planetscale"
// (PlanetScale serverless HTTP client).
export type DenDbMode = "mysql" | "planetscale"
// One shared handle type for both modes; the mysql2-backed handle is cast to
// this shape so callers see a single db type regardless of driver.
type DenDb = ReturnType<typeof drizzlePlanetScale>
// Credentials for the PlanetScale serverless HTTP driver.
export type PlanetScaleCredentials = {
host: string
username: string
password: string
}
// mysql2 error codes that indicate a dropped/broken connection rather than a
// problem with the statement itself.
const TRANSIENT_DB_ERROR_CODES = new Set([
  "ECONNRESET",
  "EPIPE",
  "ETIMEDOUT",
  "PROTOCOL_CONNECTION_LOST",
  "PROTOCOL_ENQUEUE_AFTER_FATAL_ERROR",
])

// Only read-style statements are safe to replay without risking duplicate writes.
const RETRYABLE_QUERY_PREFIXES = ["select", "show", "describe", "explain"]

/** Narrow an unknown to a plain indexable object. */
function isRecord(value: unknown): value is Record<string, unknown> {
  return typeof value === "object" && value !== null
}

/** Find the first string `code` on the error or anywhere down its `cause` chain. */
function getErrorCode(error: unknown): string | null {
  if (!isRecord(error)) {
    return null
  }
  const { code, cause } = error
  return typeof code === "string" ? code : getErrorCode(cause)
}

/** True when the error (or a nested cause) carries a known transient connection code. */
export function isTransientDbConnectionError(error: unknown): boolean {
  const code = getErrorCode(error)
  return code != null && code !== "" && TRANSIENT_DB_ERROR_CODES.has(code)
}

/** Pull the SQL text out of a raw string or a mysql2 options object; null otherwise. */
function extractSql(value: unknown): string | null {
  if (typeof value === "string") {
    return value
  }
  if (isRecord(value) && typeof value.sql === "string") {
    return value.sql
  }
  return null
}

/** A statement is retryable only if it starts with a read keyword. */
function isRetryableReadQuery(sql: string | null): boolean {
  if (!sql) {
    return false
  }
  const lowered = sql.trimStart().toLowerCase()
  return RETRYABLE_QUERY_PREFIXES.some((prefix) => lowered.startsWith(prefix))
}

/**
 * Run a DB call and retry it exactly once when BOTH hold: the statement is a
 * read (select/show/describe/explain) and the failure was a transient
 * connection error. Any other failure is rethrown untouched.
 */
async function retryReadQuery<T>(label: "query" | "execute", sql: string | null, run: () => Promise<T>): Promise<T> {
  try {
    return await run()
  } catch (error) {
    const shouldRetry = isRetryableReadQuery(sql) && isTransientDbConnectionError(error)
    if (!shouldRetry) {
      throw error
    }
    const queryType = sql?.trimStart().split(/\s+/, 1)[0]?.toUpperCase() ?? "QUERY"
    console.warn(`[db] transient mysql error on ${label} (${queryType}); retrying once`)
    return run()
  }
}
/**
 * Derive PlanetScale HTTP-driver credentials from a mysql-style DATABASE_URL.
 * Throws when the URL lacks a host or a username.
 */
function parsePlanetScaleConfigFromDatabaseUrl(databaseUrl: string): PlanetScaleCredentials {
  const url = new URL(databaseUrl)
  if (!url.hostname || !url.username) {
    throw new Error("DATABASE_URL must include host and username when DB_MODE=planetscale")
  }
  return {
    host: url.hostname,
    username: decodeURIComponent(url.username),
    password: decodeURIComponent(url.password),
  }
}

/**
 * Explicit mode wins; otherwise a present DATABASE_URL implies "mysql" and
 * its absence implies "planetscale".
 */
function resolveDbMode(input: { mode?: DenDbMode; databaseUrl?: string | null }): DenDbMode {
  return input.mode || (input.databaseUrl ? "mysql" : "planetscale")
}
/**
 * Build the Den database handle plus its underlying client.
 * Mode selection: input.mode wins; otherwise a databaseUrl implies "mysql"
 * and its absence implies "planetscale" (see resolveDbMode).
 * In mysql mode, pool.query/pool.execute are wrapped so read statements get
 * one retry on transient connection errors (see retryReadQuery).
 */
export function createDenDb(input: {
databaseUrl?: string | null
mode?: DenDbMode
planetscale?: PlanetScaleCredentials | null
}) {
const mode = resolveDbMode(input)
if (mode === "planetscale") {
// Explicit credentials win; otherwise derive them from DATABASE_URL.
const credentials = input.planetscale ?? (input.databaseUrl ? parsePlanetScaleConfigFromDatabaseUrl(input.databaseUrl) : null)
if (!credentials) {
throw new Error("PlanetScale mode requires DATABASE_HOST, DATABASE_USERNAME, and DATABASE_PASSWORD")
}
const client = new Client(credentials)
return {
client,
// Cast unifies both drizzle driver types under the single DenDb shape.
db: drizzlePlanetScale(client, { schema }) as unknown as DenDb,
}
}
if (!input.databaseUrl) {
throw new Error("MySQL mode requires DATABASE_URL")
}
// Pooled mysql2 connection; keep-alive reduces transient connection drops.
const client = mysql.createPool({
...parseMySqlConnectionConfig(input.databaseUrl),
waitForConnections: true,
connectionLimit: 10,
maxIdle: 10,
idleTimeout: 60_000,
queueLimit: 0,
enableKeepAlive: true,
keepAliveInitialDelay: 0,
})
// Wrap pool.query with the read-retry logic. The overload declarations
// mirror mysql2's own query signatures so callers type-check unchanged.
const query = client.query.bind(client)
async function retryingQuery<T extends QueryResult>(sql: string): Promise<[T, FieldPacket[]]>
async function retryingQuery<T extends QueryResult>(sql: string, values: unknown): Promise<[T, FieldPacket[]]>
async function retryingQuery<T extends QueryResult>(options: QueryOptions): Promise<[T, FieldPacket[]]>
async function retryingQuery<T extends QueryResult>(options: QueryOptions, values: unknown): Promise<[T, FieldPacket[]]>
async function retryingQuery<T extends QueryResult>(
sqlOrOptions: string | QueryOptions,
values?: unknown,
): Promise<[T, FieldPacket[]]> {
const sql = extractSql(sqlOrOptions)
return retryReadQuery("query", sql, () => query<T>(sqlOrOptions as never, values as never))
}
client.query = retryingQuery
// Same wrapping for pool.execute (prepared statements).
const execute = client.execute.bind(client)
async function retryingExecute<T extends QueryResult>(sql: string): Promise<[T, FieldPacket[]]>
async function retryingExecute<T extends QueryResult>(sql: string, values: unknown): Promise<[T, FieldPacket[]]>
async function retryingExecute<T extends QueryResult>(options: QueryOptions): Promise<[T, FieldPacket[]]>
async function retryingExecute<T extends QueryResult>(options: QueryOptions, values: unknown): Promise<[T, FieldPacket[]]>
async function retryingExecute<T extends QueryResult>(
sqlOrOptions: string | QueryOptions,
values?: unknown,
): Promise<[T, FieldPacket[]]> {
const sql = extractSql(sqlOrOptions)
return retryReadQuery("execute", sql, () => execute<T>(sqlOrOptions as never, values as never))
}
client.execute = retryingExecute
return {
client,
// mode: "default" = plain MySQL semantics (not PlanetScale emulation).
db: drizzle(client, { schema, mode: "default" }) as unknown as DenDb,
}
}

View File

@@ -0,0 +1,28 @@
import { customType, varchar } from "drizzle-orm/mysql-core"
import {
type DenTypeId,
type DenTypeIdName,
normalizeDenTypeId,
} from "@different-ai/openwork-utils/typeid"
// Maximum stored length for internal TypeID strings.
const INTERNAL_ID_LENGTH = 64
// Length for ids issued by external auth providers (36 = UUID-shaped; assumes
// Better Auth emits UUID-format external ids — TODO confirm).
const AUTH_EXTERNAL_ID_LENGTH = 36
// Plain varchar column for externally-issued auth ids (no TypeID normalization).
export const authExternalIdColumn = (columnName: string) =>
varchar(columnName, { length: AUTH_EXTERNAL_ID_LENGTH })
// Custom varchar column bound to one TypeID name: values are normalized via
// normalizeDenTypeId on both write (toDriver) and read (fromDriver), so rows
// cannot carry a malformed or wrong-prefix id.
export const denTypeIdColumn = <TName extends DenTypeIdName>(
name: TName,
columnName: string,
) =>
customType<{ data: DenTypeId<TName>; driverData: string }>({
dataType() {
return `varchar(${INTERNAL_ID_LENGTH})`
},
toDriver(value) {
return normalizeDenTypeId(name, value)
},
fromDriver(value) {
return normalizeDenTypeId(name, value)
},
})(columnName)

View File

@@ -0,0 +1 @@
// Re-export the drizzle-orm operators Den services use, so app code can import
// them from this package instead of depending on drizzle-orm directly.
export { and, asc, desc, eq, gt, isNotNull, isNull, sql } from "drizzle-orm"

View File

@@ -0,0 +1,5 @@
// Public surface of the den-db package: db client factory, typed-id column
// helpers, mysql URL parsing, the Drizzle schema, and TypeID re-exports.
export * from "./client.js"
export * from "./columns.js"
export * from "./mysql-config.js"
export * from "./schema.js"
export * from "./typeid.js"

View File

@@ -0,0 +1,75 @@
import { existsSync, readFileSync } from "node:fs"
import path from "node:path"
import { fileURLToPath } from "node:url"
/**
 * Walk up from startDir looking for fileName, checking at most maxDepth + 1
 * directories (or stopping early at the filesystem root). Returns the full
 * path of the first hit, or null when nothing is found.
 */
function findUpwards(startDir: string, fileName: string, maxDepth = 6) {
  let dir = startDir
  let remaining = maxDepth
  while (true) {
    const candidate = path.join(dir, fileName)
    if (existsSync(candidate)) {
      return candidate
    }
    const parent = path.dirname(dir)
    // Stop at the root (dirname is a fixed point there) or when out of budget.
    if (parent === dir || remaining === 0) {
      return null
    }
    dir = parent
    remaining -= 1
  }
}
/**
 * Minimal .env parser: KEY=VALUE lines, '#' comment lines, and one optional
 * pair of surrounding single or double quotes around the value. Keys already
 * present in process.env are never overwritten, so real environment variables
 * and earlier-loaded files take precedence.
 */
function parseEnvFile(contents: string) {
  for (const rawLine of contents.split(/\r?\n/)) {
    const line = rawLine.trim()
    if (!line || line.startsWith("#")) {
      continue
    }
    const match = line.match(/^([A-Za-z_][A-Za-z0-9_]*)\s*=\s*(.*)$/)
    if (!match) {
      continue
    }
    const key = match[1]
    let value = match[2] ?? ""
    // Strip one matching pair of surrounding quotes. The length guard keeps a
    // value that is a single quote character (e.g. KEY=") from collapsing to
    // an empty string — previously startsWith/endsWith both matched the same
    // character and slice(1, -1) erased it.
    if (
      value.length >= 2 &&
      ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'")))
    ) {
      value = value.slice(1, -1)
    }
    if (process.env[key] === undefined) {
      process.env[key] = value
    }
  }
}
/** Parse and apply an env file if it exists; silently a no-op otherwise. */
function loadEnvFile(filePath: string) {
  if (!existsSync(filePath)) {
    return
  }
  parseEnvFile(readFileSync(filePath, "utf8"))
}
// Resolve this package's directory from the module URL (ESM has no __dirname).
const currentDir = path.dirname(fileURLToPath(import.meta.url))
const packageDir = path.resolve(currentDir, "..")
// Package-local env files load first; since parseEnvFile never overwrites,
// values set here win over anything loaded later.
for (const filePath of [
path.join(packageDir, ".env.local"),
path.join(packageDir, ".env"),
]) {
loadEnvFile(filePath)
}
// An explicitly configured env-file path beats the auto-detected one found by
// walking upwards from two levels above the package (presumably the repo root).
const explicitEnvPath =
process.env.OPENWORK_DEN_DB_ENV_PATH?.trim() ||
process.env.DATABASE_ENV_FILE?.trim()
const detectedRootEnvPath = findUpwards(path.resolve(packageDir, "..", ".."), ".env")
const envPath = explicitEnvPath || detectedRootEnvPath
if (envPath) {
loadEnvFile(envPath)
}

View File

@@ -0,0 +1,48 @@
// Shape handed to mysql2's pool factory.
type ParsedMySqlConfig = {
  host: string
  port: number
  user: string
  password: string
  database: string
  ssl?: {
    rejectUnauthorized: boolean
  }
}

/**
 * Translate sslaccept/sslmode/ssl-mode query params into a mysql2 ssl option.
 * Returns undefined when no ssl parameter is present.
 * NOTE(review): any non-empty sslmode — including "disable" — currently yields
 * an ssl object (with rejectUnauthorized=false), i.e. TLS stays on; confirm
 * that is intended.
 */
function readSslSettings(parsed: URL) {
  const params = parsed.searchParams
  const sslAccept = params.get("sslaccept")?.trim().toLowerCase()
  const sslMode =
    params.get("sslmode")?.trim().toLowerCase() ??
    params.get("ssl-mode")?.trim().toLowerCase()
  if (!sslAccept && !sslMode) {
    return undefined
  }
  const strictModes = ["verify-ca", "verify-full", "require"]
  const rejectUnauthorized =
    sslAccept === "strict" || (sslMode !== undefined && strictModes.includes(sslMode))
  return { rejectUnauthorized }
}

/**
 * Parse a mysql:// DATABASE_URL into discrete pool options.
 * Throws when host, username, or database name is missing; the port defaults
 * to 3306 and percent-encoded credentials are decoded.
 */
export function parseMySqlConnectionConfig(databaseUrl: string): ParsedMySqlConfig {
  const url = new URL(databaseUrl)
  const database = url.pathname.replace(/^\//, "")
  if (!url.hostname || !url.username || !database) {
    throw new Error("DATABASE_URL must include host, username, and database for mysql mode")
  }
  return {
    host: url.hostname,
    port: url.port ? Number(url.port) : 3306,
    user: decodeURIComponent(url.username),
    password: decodeURIComponent(url.password),
    database,
    ssl: readSslSettings(url),
  }
}

View File

@@ -0,0 +1,235 @@
import { sql } from "drizzle-orm"
import {
boolean,
index,
json,
mysqlEnum,
mysqlTable,
text,
timestamp,
uniqueIndex,
varchar,
} from "drizzle-orm/mysql-core"
import { denTypeIdColumn } from "./columns.js"
// Shared created_at/updated_at column pair with millisecond precision.
// updated_at is maintained by MySQL itself via ON UPDATE CURRENT_TIMESTAMP(3).
const timestamps = {
  created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
  updated_at: timestamp("updated_at", { fsp: 3 })
    .notNull()
    .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
}
// Enum value lists shared between the schema (mysqlEnum) and application code.
export const OrgRole = ["owner", "member"] as const
export const WorkerDestination = ["local", "cloud"] as const
export const WorkerStatus = ["provisioning", "healthy", "failed", "stopped"] as const
export const TokenScope = ["client", "host"] as const
// Auth tables (user/session/account/verification). Per the commit history,
// Better Auth is wired to this schema; camelCase properties map to
// snake_case columns.
export const AuthUserTable = mysqlTable(
  "user",
  {
    id: denTypeIdColumn("user", "id").notNull().primaryKey(),
    name: varchar("name", { length: 255 }).notNull(),
    email: varchar("email", { length: 255 }).notNull(),
    emailVerified: boolean("email_verified").notNull().default(false),
    image: text("image"),
    createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
    updatedAt: timestamp("updated_at", { fsp: 3 })
      .notNull()
      .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
  },
  // One account per email address.
  (table) => [uniqueIndex("user_email").on(table.email)],
)
// Login sessions; token is the unique session identifier handed to clients.
export const AuthSessionTable = mysqlTable(
  "session",
  {
    id: denTypeIdColumn("session", "id").notNull().primaryKey(),
    userId: denTypeIdColumn("user", "user_id").notNull(),
    token: varchar("token", { length: 255 }).notNull(),
    expiresAt: timestamp("expires_at", { fsp: 3 }).notNull(),
    ipAddress: text("ip_address"),
    userAgent: text("user_agent"),
    createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
    updatedAt: timestamp("updated_at", { fsp: 3 })
      .notNull()
      .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
  },
  (table) => [
    uniqueIndex("session_token").on(table.token),
    index("session_user_id").on(table.userId),
  ],
)
// Linked auth providers (OAuth tokens or password credential) per user.
export const AuthAccountTable = mysqlTable(
  "account",
  {
    id: denTypeIdColumn("account", "id").notNull().primaryKey(),
    userId: denTypeIdColumn("user", "user_id").notNull(),
    accountId: text("account_id").notNull(),
    providerId: text("provider_id").notNull(),
    accessToken: text("access_token"),
    refreshToken: text("refresh_token"),
    accessTokenExpiresAt: timestamp("access_token_expires_at", { fsp: 3 }),
    refreshTokenExpiresAt: timestamp("refresh_token_expires_at", { fsp: 3 }),
    scope: text("scope"),
    idToken: text("id_token"),
    password: text("password"),
    createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
    updatedAt: timestamp("updated_at", { fsp: 3 })
      .notNull()
      .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
  },
  (table) => [index("account_user_id").on(table.userId)],
)
// Pending verification values (e.g. email verification) keyed by identifier.
export const AuthVerificationTable = mysqlTable(
  "verification",
  {
    id: denTypeIdColumn("verification", "id").notNull().primaryKey(),
    identifier: varchar("identifier", { length: 255 }).notNull(),
    value: text("value").notNull(),
    expiresAt: timestamp("expires_at", { fsp: 3 }).notNull(),
    createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
    updatedAt: timestamp("updated_at", { fsp: 3 })
      .notNull()
      .default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
  },
  (table) => [index("verification_identifier").on(table.identifier)],
)
// Lowercase aliases — presumably so the auth adapter can resolve models by
// its default names; confirm against the Better Auth drizzle adapter config.
export const user = AuthUserTable
export const session = AuthSessionTable
export const account = AuthAccountTable
export const verification = AuthVerificationTable
// Organizations; slug is the unique human-readable handle.
export const OrgTable = mysqlTable(
  "org",
  {
    id: denTypeIdColumn("org", "id").notNull().primaryKey(),
    name: varchar("name", { length: 255 }).notNull(),
    slug: varchar("slug", { length: 255 }).notNull(),
    owner_user_id: denTypeIdColumn("user", "owner_user_id").notNull(),
    ...timestamps,
  },
  (table) => [uniqueIndex("org_slug").on(table.slug), index("org_owner_user_id").on(table.owner_user_id)],
)
// User <-> org membership with an owner/member role.
// Only created_at here — memberships are not updated in place.
export const OrgMembershipTable = mysqlTable(
  "org_membership",
  {
    id: denTypeIdColumn("orgMembership", "id").notNull().primaryKey(),
    org_id: denTypeIdColumn("org", "org_id").notNull(),
    user_id: denTypeIdColumn("user", "user_id").notNull(),
    role: mysqlEnum("role", OrgRole).notNull(),
    created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
  },
  (table) => [index("org_membership_org_id").on(table.org_id), index("org_membership_user_id").on(table.user_id)],
)
// Email allowlist for admin access; note is a free-form operator annotation.
export const AdminAllowlistTable = mysqlTable(
  "admin_allowlist",
  {
    id: denTypeIdColumn("adminAllowlist", "id").notNull().primaryKey(),
    email: varchar("email", { length: 255 }).notNull(),
    note: varchar("note", { length: 255 }),
    ...timestamps,
  },
  (table) => [uniqueIndex("admin_allowlist_email").on(table.email)],
)
// A worker owned by an org. destination distinguishes local vs cloud;
// created_by_user_id is nullable (e.g. system-created records — confirm).
export const WorkerTable = mysqlTable(
  "worker",
  {
    id: denTypeIdColumn("worker", "id").notNull().primaryKey(),
    org_id: denTypeIdColumn("org", "org_id").notNull(),
    created_by_user_id: denTypeIdColumn("user", "created_by_user_id"),
    name: varchar("name", { length: 255 }).notNull(),
    description: varchar("description", { length: 1024 }),
    destination: mysqlEnum("destination", WorkerDestination).notNull(),
    status: mysqlEnum("status", WorkerStatus).notNull(),
    image_version: varchar("image_version", { length: 128 }),
    workspace_path: varchar("workspace_path", { length: 1024 }),
    sandbox_backend: varchar("sandbox_backend", { length: 64 }),
    ...timestamps,
  },
  (table) => [
    index("worker_org_id").on(table.org_id),
    index("worker_created_by_user_id").on(table.created_by_user_id),
    index("worker_status").on(table.status),
  ],
)
// A running (or last-known) instance of a worker at a provider, with its
// reachable URL and instance-level status.
export const WorkerInstanceTable = mysqlTable(
  "worker_instance",
  {
    id: denTypeIdColumn("workerInstance", "id").notNull().primaryKey(),
    worker_id: denTypeIdColumn("worker", "worker_id").notNull(),
    provider: varchar("provider", { length: 64 }).notNull(),
    region: varchar("region", { length: 64 }),
    url: varchar("url", { length: 2048 }).notNull(),
    status: mysqlEnum("status", WorkerStatus).notNull(),
    ...timestamps,
  },
  (table) => [index("worker_instance_worker_id").on(table.worker_id)],
)
// Daytona sandbox backing a cloud worker. Both worker_id and sandbox_id are
// unique, so this is a strict 1:1 mapping. Stores volume ids plus the signed
// preview URL and its expiry (superseded at runtime by the worker proxy per
// the commit history, but still persisted here).
export const DaytonaSandboxTable = mysqlTable(
  "daytona_sandbox",
  {
    id: denTypeIdColumn("daytonaSandbox", "id").notNull().primaryKey(),
    worker_id: denTypeIdColumn("worker", "worker_id").notNull(),
    sandbox_id: varchar("sandbox_id", { length: 128 }).notNull(),
    workspace_volume_id: varchar("workspace_volume_id", { length: 128 }).notNull(),
    data_volume_id: varchar("data_volume_id", { length: 128 }).notNull(),
    signed_preview_url: varchar("signed_preview_url", { length: 2048 }).notNull(),
    signed_preview_url_expires_at: timestamp("signed_preview_url_expires_at", { fsp: 3 }).notNull(),
    region: varchar("region", { length: 64 }),
    ...timestamps,
  },
  (table) => [
    uniqueIndex("daytona_sandbox_worker_id").on(table.worker_id),
    uniqueIndex("daytona_sandbox_sandbox_id").on(table.sandbox_id),
  ],
)
// Per-worker access tokens, scoped client or host; revocation is recorded
// via revoked_at rather than deleting the row.
export const WorkerTokenTable = mysqlTable(
  "worker_token",
  {
    id: denTypeIdColumn("workerToken", "id").notNull().primaryKey(),
    worker_id: denTypeIdColumn("worker", "worker_id").notNull(),
    scope: mysqlEnum("scope", TokenScope).notNull(),
    token: varchar("token", { length: 128 }).notNull(),
    created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
    revoked_at: timestamp("revoked_at", { fsp: 3 }),
  },
  (table) => [
    index("worker_token_worker_id").on(table.worker_id),
    uniqueIndex("worker_token_token").on(table.token),
  ],
)
// Uploaded bundle for a worker; status is a free-form string, not an enum.
export const WorkerBundleTable = mysqlTable(
  "worker_bundle",
  {
    id: denTypeIdColumn("workerBundle", "id").notNull().primaryKey(),
    worker_id: denTypeIdColumn("worker", "worker_id").notNull(),
    storage_url: varchar("storage_url", { length: 2048 }).notNull(),
    status: varchar("status", { length: 64 }).notNull(),
    created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
  },
  (table) => [index("worker_bundle_worker_id").on(table.worker_id)],
)
// Org-scoped audit trail; worker_id is optional for org-level actions and
// payload carries arbitrary JSON detail.
export const AuditEventTable = mysqlTable(
  "audit_event",
  {
    id: denTypeIdColumn("auditEvent", "id").notNull().primaryKey(),
    org_id: denTypeIdColumn("org", "org_id").notNull(),
    worker_id: denTypeIdColumn("worker", "worker_id"),
    actor_user_id: denTypeIdColumn("user", "actor_user_id").notNull(),
    action: varchar("action", { length: 128 }).notNull(),
    payload: json("payload"),
    created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
  },
  (table) => [index("audit_event_org_id").on(table.org_id), index("audit_event_worker_id").on(table.worker_id)],
)

View File

@@ -0,0 +1 @@
// Re-export the shared TypeID helpers so existing den-db import paths keep
// working after the move to the shared utils package.
export * from "@different-ai/openwork-utils/typeid"

View File

@@ -0,0 +1,15 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "Bundler",
"rootDir": "src",
"outDir": "dist",
"declaration": true,
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"resolveJsonModule": true
},
"include": ["src"]
}

View File

@@ -0,0 +1,21 @@
{
"name": "@different-ai/openwork-utils",
"private": true,
"type": "module",
"main": "./dist/index.js",
"types": "./dist/index.d.ts",
"exports": {
".": "./dist/index.js",
"./typeid": "./dist/typeid.js"
},
"scripts": {
"build": "tsc -p tsconfig.json"
},
"dependencies": {
"typeid-js": "^1.2.0"
},
"devDependencies": {
"@types/node": "^20.11.30",
"typescript": "^5.5.4"
}
}

View File

@@ -0,0 +1 @@
// Package barrel: currently only the TypeID helpers.
export * from "./typeid.js"

View File

@@ -0,0 +1,55 @@
import { fromString, getType, typeid } from "typeid-js"
// Logical record name -> TypeID prefix. The prefix is embedded in every
// generated id (e.g. "usr_..."), so changing a value here breaks ids
// already persisted for that record kind.
export const denTypeIdPrefixes = {
  user: "usr",
  session: "ses",
  account: "acc",
  verification: "ver",
  org: "org",
  orgMembership: "om",
  adminAllowlist: "aal",
  worker: "wrk",
  workerInstance: "wki",
  daytonaSandbox: "dts",
  workerToken: "wkt",
  workerBundle: "wkb",
  auditEvent: "aev",
} as const
// Union of the logical record names above.
export type DenTypeIdName = keyof typeof denTypeIdPrefixes
// Literal prefix type registered for a given record name.
export type DenTypeIdPrefix<TName extends DenTypeIdName> = (typeof denTypeIdPrefixes)[TName]
// Fully-formed id string type, e.g. DenTypeId<"user"> = `usr_${string}`.
export type DenTypeId<TName extends DenTypeIdName> = `${DenTypeIdPrefix<TName>}_${string}`
/** Generates a fresh TypeID string for the given record kind, e.g. "usr_...". */
export function createDenTypeId<TName extends DenTypeIdName>(name: TName): DenTypeId<TName> {
  return typeid(denTypeIdPrefixes[name]).toString() as DenTypeId<TName>
}
/**
 * Validates that `value` parses as a TypeID whose prefix matches the one
 * registered for `name`, returning it typed as that id.
 * Throws (from fromString on malformed input, or explicitly on a prefix
 * mismatch) when validation fails.
 */
export function normalizeDenTypeId<TName extends DenTypeIdName>(
  name: TName,
  value: string,
): DenTypeId<TName> {
  const parsed = fromString(value)
  const actualPrefix = getType(parsed)
  const expectedPrefix = denTypeIdPrefixes[name]
  if (actualPrefix !== expectedPrefix) {
    throw new Error(`invalid_den_typeid_prefix:${name}:${actualPrefix}`)
  }
  return parsed as DenTypeId<TName>
}
/**
 * Non-throwing type guard: true when `value` is a string that parses as a
 * TypeID carrying the prefix registered for `name`.
 */
export function isDenTypeId<TName extends DenTypeIdName>(
  name: TName,
  value: unknown,
): value is DenTypeId<TName> {
  if (typeof value !== "string") {
    return false
  }
  try {
    normalizeDenTypeId(name, value)
  } catch {
    return false
  }
  return true
}

View File

@@ -0,0 +1,15 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "Bundler",
"rootDir": "src",
"outDir": "dist",
"declaration": true,
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"resolveJsonModule": true
},
"include": ["src"]
}

View File

@@ -463,7 +463,7 @@ function getWorker(payload: unknown): WorkerLaunch | null {
return {
workerId: worker.id,
workerName: worker.name,
status: typeof worker.status === "string" ? worker.status : "unknown",
status: getEffectiveWorkerStatus(worker.status, instance),
provider: instance && typeof instance.provider === "string" ? instance.provider : null,
instanceUrl: instance && typeof instance.url === "string" ? instance.url : null,
openworkUrl: instance && typeof instance.url === "string" ? instance.url : null,
@@ -489,7 +489,7 @@ function getWorkerSummary(payload: unknown): WorkerSummary | null {
return {
workerId: worker.id,
workerName: worker.name,
status: typeof worker.status === "string" ? worker.status : "unknown",
status: getEffectiveWorkerStatus(worker.status, instance),
instanceUrl: instance && typeof instance.url === "string" ? instance.url : null,
provider: instance && typeof instance.provider === "string" ? instance.provider : null,
isMine: worker.isMine === true
@@ -667,7 +667,7 @@ function parseWorkerListItem(value: unknown): WorkerListItem | null {
return {
workerId,
workerName,
status: typeof value.status === "string" ? value.status : "unknown",
status: getEffectiveWorkerStatus(value.status, instance),
instanceUrl: instance && typeof instance.url === "string" ? instance.url : null,
provider: instance && typeof instance.provider === "string" ? instance.provider : null,
isMine: value.isMine === true,
@@ -728,6 +728,22 @@ function getWorkerStatusCopy(status: string): string {
}
}
/**
 * Resolves the status to display for a worker: while the worker record
 * still reads provisioning/starting, the live instance status (trimmed,
 * lowercased) wins; otherwise the worker's own status is kept as-is.
 * Non-string worker statuses fall back to "unknown".
 */
function getEffectiveWorkerStatus(workerStatus: unknown, instance: Record<string, unknown> | null): string {
  const workerStatusText = typeof workerStatus === "string" ? workerStatus : "unknown";
  const rawInstanceStatus = instance?.status;
  const instanceStatus =
    typeof rawInstanceStatus === "string" ? rawInstanceStatus.trim().toLowerCase() : null;
  if (instanceStatus === null) {
    return workerStatusText;
  }
  const workerStatusKey = workerStatusText.trim().toLowerCase();
  return workerStatusKey === "provisioning" || workerStatusKey === "starting"
    ? instanceStatus
    : workerStatusText;
}
function isWorkerLaunch(value: unknown): value is WorkerLaunch {
if (!isRecord(value)) {
return false;

View File

@@ -0,0 +1,26 @@
# Den control plane image: pnpm workspace build of packages/utils,
# packages/den-db, and services/den on Node 22 (slim Debian).
FROM node:22-bookworm-slim
# corepack supplies the pnpm version pinned by the workspace.
RUN corepack enable
WORKDIR /app
# Copy manifests first so the dependency-install layer is cached
# independently of source changes.
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml /app/
COPY .npmrc /app/.npmrc
COPY patches /app/patches
COPY packages/utils/package.json /app/packages/utils/package.json
COPY packages/den-db/package.json /app/packages/den-db/package.json
COPY services/den/package.json /app/services/den/package.json
RUN pnpm install --frozen-lockfile
# Copy sources and build in dependency order (utils -> den-db -> den).
COPY packages/utils /app/packages/utils
COPY packages/den-db /app/packages/den-db
COPY services/den /app/services/den
RUN pnpm --dir /app/packages/utils run build
RUN pnpm --dir /app/packages/den-db run build
RUN pnpm --dir /app/services/den run build
EXPOSE 8788
# NOTE(review): this resolves to /app/dist/index.js (WORKDIR is /app), but the
# build above runs inside services/den — confirm the build emits to /app/dist,
# or that compose overrides the command; otherwise the path needs services/den/.
CMD ["sh", "-lc", "node dist/index.js"]

View File

@@ -0,0 +1,13 @@
# Dev image for the cloud web app: deps installed at build time with npm,
# served via the package's dev script.
FROM node:22-bookworm-slim
WORKDIR /app/packages/web
# Install from the manifest alone so this layer caches across source edits.
COPY packages/web/package.json /app/packages/web/package.json
RUN npm install --no-package-lock --no-fund --no-audit
COPY packages/web /app/packages/web
EXPOSE 3005
CMD ["npm", "run", "dev"]

View File

@@ -0,0 +1,26 @@
# Worker proxy image: pnpm workspace build of packages/utils,
# packages/den-db, and services/den-worker-proxy on Node 22 (slim Debian).
FROM node:22-bookworm-slim
# corepack supplies the pnpm version pinned by the workspace.
RUN corepack enable
WORKDIR /app
# Manifests first so dependency install is cached independently of sources.
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml /app/
COPY .npmrc /app/.npmrc
COPY patches /app/patches
COPY packages/utils/package.json /app/packages/utils/package.json
COPY packages/den-db/package.json /app/packages/den-db/package.json
COPY services/den-worker-proxy/package.json /app/services/den-worker-proxy/package.json
RUN pnpm install --frozen-lockfile
# Copy sources and build in dependency order.
COPY packages/utils /app/packages/utils
COPY packages/den-db /app/packages/den-db
COPY services/den-worker-proxy /app/services/den-worker-proxy
RUN pnpm --dir /app/packages/utils run build
RUN pnpm --dir /app/packages/den-db run build
RUN pnpm --dir /app/services/den-worker-proxy run build
EXPOSE 8789
# NOTE(review): resolves to /app/dist/server.js (WORKDIR is /app) while the
# build runs inside services/den-worker-proxy — confirm the output path or
# whether compose overrides this command.
CMD ["sh", "-lc", "node dist/server.js"]

View File

@@ -14,6 +14,7 @@ set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
COMPOSE_FILE="$ROOT_DIR/packaging/docker/docker-compose.den-dev.yml"
RUNTIME_DIR="$ROOT_DIR/tmp/docker-den-dev"
DAYTONA_ENV_FILE="${DAYTONA_ENV_FILE:-$ROOT_DIR/.env.daytona}"
if ! command -v docker >/dev/null 2>&1; then
echo "docker is required" >&2
@@ -37,72 +38,42 @@ random_hex() {
node -e "console.log(require('crypto').randomBytes(${bytes}).toString('hex'))"
}
# Prints the host to advertise in browser-facing URLs.
# Precedence: explicit DEN_PUBLIC_HOST override, then the machine's short
# hostname (newlines and spaces stripped), then "localhost".
detect_public_host() {
  if [ -n "${DEN_PUBLIC_HOST:-}" ]; then
    printf '%s\n' "$DEN_PUBLIC_HOST"
    return
  fi
  local host
  # Prefer the short name; fall back to the full hostname; never fail.
  host="$(hostname -s 2>/dev/null || hostname 2>/dev/null || true)"
  host="${host//$'\n'/}"
  host="${host// /}"
  if [ -n "$host" ]; then
    printf '%s\n' "$host"
    return
  fi
  printf '%s\n' "localhost"
}
# Prints the first non-internal, non-loopback IPv4 address, or nothing when
# no candidate interface exists. Uses node (already a prerequisite of this
# script) for portable interface enumeration.
detect_lan_ipv4() {
  node -e '
    const os = require("os");
    const nets = os.networkInterfaces();
    for (const entries of Object.values(nets)) {
      for (const entry of entries || []) {
        if (!entry || entry.internal || entry.family !== "IPv4") continue;
        if (entry.address.startsWith("127.")) continue;
        process.stdout.write(entry.address);
        process.exit(0);
      }
    }
  '
}
# Joins its arguments into a comma-separated list, dropping empty entries
# and duplicates while preserving first-seen order.
join_csv_unique() {
  printf "%s\n" "$@" | awk 'NF && !seen[$0]++' | paste -sd, -
}
DEV_ID="$(node -e "console.log(require('crypto').randomUUID().slice(0, 8))")"
PROJECT="openwork-den-dev-$DEV_ID"
DEN_API_PORT="${DEN_API_PORT:-$(pick_port)}"
DEN_WEB_PORT="${DEN_WEB_PORT:-$(pick_port)}"
DEN_WORKER_PROXY_PORT="${DEN_WORKER_PROXY_PORT:-$(pick_port)}"
DEN_MYSQL_PORT="${DEN_MYSQL_PORT:-$(pick_port)}"
if [ "$DEN_WEB_PORT" = "$DEN_API_PORT" ]; then
DEN_WEB_PORT="$(pick_port)"
fi
PUBLIC_HOST="$(detect_public_host)"
LAN_IPV4="$(detect_lan_ipv4 || true)"
DEN_BETTER_AUTH_SECRET="${DEN_BETTER_AUTH_SECRET:-$(random_hex 32)}"
DEN_BETTER_AUTH_URL="${DEN_BETTER_AUTH_URL:-http://$PUBLIC_HOST:$DEN_WEB_PORT}"
DEN_PROVISIONER_MODE="${DEN_PROVISIONER_MODE:-stub}"
DEN_WORKER_URL_TEMPLATE="${DEN_WORKER_URL_TEMPLATE:-https://workers.local/{workerId}}"
if [ -z "${DEN_CORS_ORIGINS:-}" ]; then
DEN_CORS_ORIGINS="$(join_csv_unique \
"http://$PUBLIC_HOST:$DEN_WEB_PORT" \
"http://$PUBLIC_HOST:$DEN_API_PORT" \
"http://localhost:$DEN_WEB_PORT" \
"http://127.0.0.1:$DEN_WEB_PORT" \
"http://localhost:$DEN_API_PORT" \
"http://127.0.0.1:$DEN_API_PORT" \
"${LAN_IPV4:+http://$LAN_IPV4:$DEN_WEB_PORT}" \
"${LAN_IPV4:+http://$LAN_IPV4:$DEN_API_PORT}")"
if [ "$DEN_WORKER_PROXY_PORT" = "$DEN_API_PORT" ] || [ "$DEN_WORKER_PROXY_PORT" = "$DEN_WEB_PORT" ]; then
DEN_WORKER_PROXY_PORT="$(pick_port)"
fi
if [ "$DEN_MYSQL_PORT" = "$DEN_API_PORT" ] || [ "$DEN_MYSQL_PORT" = "$DEN_WEB_PORT" ] || [ "$DEN_MYSQL_PORT" = "$DEN_WORKER_PROXY_PORT" ]; then
DEN_MYSQL_PORT="$(pick_port)"
fi
DEN_BETTER_AUTH_TRUSTED_ORIGINS="${DEN_BETTER_AUTH_TRUSTED_ORIGINS:-$DEN_CORS_ORIGINS}"
DEN_BETTER_AUTH_SECRET="${DEN_BETTER_AUTH_SECRET:-$(random_hex 32)}"
DEN_BETTER_AUTH_URL="${DEN_BETTER_AUTH_URL:-http://localhost:$DEN_WEB_PORT}"
DEN_PROVISIONER_MODE="${DEN_PROVISIONER_MODE:-stub}"
DEN_WORKER_URL_TEMPLATE="${DEN_WORKER_URL_TEMPLATE:-https://workers.local/{workerId}}"
DEN_DAYTONA_WORKER_PROXY_BASE_URL="${DEN_DAYTONA_WORKER_PROXY_BASE_URL:-http://localhost:$DEN_WORKER_PROXY_PORT}"
DEN_CORS_ORIGINS="${DEN_CORS_ORIGINS:-http://localhost:$DEN_WEB_PORT,http://127.0.0.1:$DEN_WEB_PORT,http://localhost:$DEN_API_PORT,http://127.0.0.1:$DEN_API_PORT}"
if [ "$DEN_PROVISIONER_MODE" = "daytona" ] && [ -f "$DAYTONA_ENV_FILE" ]; then
set -a
# shellcheck disable=SC1090
source "$DAYTONA_ENV_FILE"
set +a
fi
if [ "$DEN_PROVISIONER_MODE" = "daytona" ] && [ -z "${DAYTONA_API_KEY:-}" ]; then
echo "DAYTONA_API_KEY is required when DEN_PROVISIONER_MODE=daytona" >&2
echo "Set DAYTONA_ENV_FILE to your .env.daytona path or export DAYTONA_API_KEY before running den-dev-up.sh" >&2
exit 1
fi
mkdir -p "$RUNTIME_DIR"
RUNTIME_FILE="$ROOT_DIR/tmp/.den-dev-env-$DEV_ID"
@@ -111,9 +82,12 @@ cat > "$RUNTIME_FILE" <<EOF
PROJECT=$PROJECT
DEN_API_PORT=$DEN_API_PORT
DEN_WEB_PORT=$DEN_WEB_PORT
DEN_WORKER_PROXY_PORT=$DEN_WORKER_PROXY_PORT
DEN_MYSQL_PORT=$DEN_MYSQL_PORT
DEN_API_URL=http://localhost:$DEN_API_PORT
DEN_WEB_URL=http://localhost:$DEN_WEB_PORT
DEN_WEB_URL_PUBLIC=http://$PUBLIC_HOST:$DEN_WEB_PORT
DEN_WORKER_PROXY_URL=http://localhost:$DEN_WORKER_PROXY_PORT
DEN_MYSQL_URL=mysql://root:password@127.0.0.1:$DEN_MYSQL_PORT/openwork_den
DEN_BETTER_AUTH_URL=$DEN_BETTER_AUTH_URL
COMPOSE_FILE=$COMPOSE_FILE
EOF
@@ -121,20 +95,33 @@ EOF
echo "Starting Docker Compose project: $PROJECT" >&2
echo "- DEN_API_PORT=$DEN_API_PORT" >&2
echo "- DEN_WEB_PORT=$DEN_WEB_PORT" >&2
echo "- DEN_WORKER_PROXY_PORT=$DEN_WORKER_PROXY_PORT" >&2
echo "- DEN_MYSQL_PORT=$DEN_MYSQL_PORT" >&2
echo "- DEN_BETTER_AUTH_URL=$DEN_BETTER_AUTH_URL" >&2
echo "- DEN_BETTER_AUTH_TRUSTED_ORIGINS=$DEN_BETTER_AUTH_TRUSTED_ORIGINS" >&2
echo "- DEN_CORS_ORIGINS=$DEN_CORS_ORIGINS" >&2
echo "- DEN_PROVISIONER_MODE=$DEN_PROVISIONER_MODE" >&2
if [ "$DEN_PROVISIONER_MODE" = "daytona" ]; then
echo "- DAYTONA_API_URL=${DAYTONA_API_URL:-https://app.daytona.io/api}" >&2
if [ -n "${DAYTONA_TARGET:-}" ]; then
echo "- DAYTONA_TARGET=$DAYTONA_TARGET" >&2
fi
fi
if ! DEN_API_PORT="$DEN_API_PORT" \
DEN_WEB_PORT="$DEN_WEB_PORT" \
DEN_WORKER_PROXY_PORT="$DEN_WORKER_PROXY_PORT" \
DEN_MYSQL_PORT="$DEN_MYSQL_PORT" \
DEN_BETTER_AUTH_SECRET="$DEN_BETTER_AUTH_SECRET" \
DEN_BETTER_AUTH_URL="$DEN_BETTER_AUTH_URL" \
DEN_BETTER_AUTH_TRUSTED_ORIGINS="$DEN_BETTER_AUTH_TRUSTED_ORIGINS" \
DEN_CORS_ORIGINS="$DEN_CORS_ORIGINS" \
DEN_PROVISIONER_MODE="$DEN_PROVISIONER_MODE" \
DEN_WORKER_URL_TEMPLATE="$DEN_WORKER_URL_TEMPLATE" \
docker compose -p "$PROJECT" -f "$COMPOSE_FILE" up -d --wait; then
DEN_DAYTONA_WORKER_PROXY_BASE_URL="$DEN_DAYTONA_WORKER_PROXY_BASE_URL" \
DAYTONA_API_URL="${DAYTONA_API_URL:-}" \
DAYTONA_API_KEY="${DAYTONA_API_KEY:-}" \
DAYTONA_TARGET="${DAYTONA_TARGET:-}" \
DAYTONA_SNAPSHOT="${DAYTONA_SNAPSHOT:-}" \
DAYTONA_OPENWORK_VERSION="${DAYTONA_OPENWORK_VERSION:-}" \
docker compose -p "$PROJECT" -f "$COMPOSE_FILE" up -d --build --wait; then
echo "Den Docker stack failed to start. Recent logs:" >&2
docker compose -p "$PROJECT" -f "$COMPOSE_FILE" logs --tail=200 >&2 || true
exit 1
@@ -142,15 +129,9 @@ fi
echo "" >&2
echo "OpenWork Cloud web UI: http://localhost:$DEN_WEB_PORT" >&2
echo "OpenWork Cloud web UI (LAN/public): http://$PUBLIC_HOST:$DEN_WEB_PORT" >&2
if [ -n "$LAN_IPV4" ]; then
echo "OpenWork Cloud web UI (LAN IP): http://$LAN_IPV4:$DEN_WEB_PORT" >&2
fi
echo "Den demo/API: http://localhost:$DEN_API_PORT" >&2
echo "Den demo/API (LAN/public): http://$PUBLIC_HOST:$DEN_API_PORT" >&2
if [ -n "$LAN_IPV4" ]; then
echo "Den demo/API (LAN IP): http://$LAN_IPV4:$DEN_API_PORT" >&2
fi
echo "Worker proxy: http://localhost:$DEN_WORKER_PROXY_PORT" >&2
echo "MySQL: mysql://root:password@127.0.0.1:$DEN_MYSQL_PORT/openwork_den" >&2
echo "Health check: http://localhost:$DEN_API_PORT/health" >&2
echo "Runtime env file: $RUNTIME_FILE" >&2
echo "" >&2

View File

@@ -8,80 +8,54 @@
# Env overrides (optional, via export or .env):
# DEN_API_PORT — host port to map to Den control plane :8788
# DEN_WEB_PORT — host port to map to the cloud web app :3005
# DEN_WORKER_PROXY_PORT — host port to map to the worker proxy :8789
# DEN_MYSQL_PORT — host port to map to MySQL :3306
# DEN_BETTER_AUTH_SECRET — Better Auth secret (auto-generated by den-dev-up.sh)
# DEN_PUBLIC_HOST — host used to build default auth origin + LAN/public URLs
# DEN_BETTER_AUTH_URL — browser-facing auth origin (default: http://<DEN_PUBLIC_HOST>:<DEN_WEB_PORT>)
# DEN_BETTER_AUTH_TRUSTED_ORIGINS — Better Auth trusted origins (defaults to DEN_CORS_ORIGINS)
# DEN_CORS_ORIGINS — comma-separated trusted origins for Express CORS (auto-populated by den-dev-up.sh)
# DEN_PROVISIONER_MODE — stub or render (default: stub)
# DEN_BETTER_AUTH_URL — browser-facing auth origin (default: http://localhost:<DEN_WEB_PORT>)
# DEN_CORS_ORIGINS — comma-separated trusted origins for Better Auth + CORS
# DEN_PROVISIONER_MODE — stub, render, or daytona (default: stub)
# DEN_WORKER_URL_TEMPLATE — worker URL template used by stub provisioning
# DAYTONA_API_URL / DAYTONA_API_KEY / DAYTONA_TARGET / DAYTONA_SNAPSHOT / DAYTONA_OPENWORK_VERSION
# — optional Daytona passthrough vars when DEN_PROVISIONER_MODE=daytona
x-shared: &shared
image: node:22-bookworm-slim
working_dir: /app
volumes:
- ../../:/app
- pnpm-store:/root/.local/share/pnpm/store
restart: unless-stopped
services:
mysql:
image: mysql:8.4
command:
- --performance_schema=OFF
- --innodb_buffer_pool_size=128M
- --innodb_log_buffer_size=8M
- --max_connections=30
- --table_open_cache=128
- --tmp_table_size=16M
- --max_heap_table_size=16M
- --innodb-buffer-pool-size=64M
- --innodb-log-buffer-size=8M
- --tmp-table-size=16M
- --max-heap-table-size=16M
environment:
MYSQL_ROOT_PASSWORD: password
MYSQL_DATABASE: openwork_den
MYSQL_INITDB_SKIP_TZINFO: "1"
healthcheck:
test: ["CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -ppassword --silent"]
interval: 5s
timeout: 5s
retries: 30
start_period: 10s
ports:
- "${DEN_MYSQL_PORT:-3306}:3306"
volumes:
- den-mysql-data:/var/lib/mysql
den:
<<: *shared
build:
context: ../../
dockerfile: packaging/docker/Dockerfile.den
depends_on:
mysql:
condition: service_healthy
entrypoint: ["/bin/sh", "-c"]
command:
- |
set -e
apt-get update -qq && apt-get install -y -qq --no-install-recommends \
curl ca-certificates >/dev/null 2>&1
corepack enable && corepack prepare pnpm@10.27.0 --activate
echo "[den] Installing dependencies..."
pnpm install --no-frozen-lockfile --network-concurrency 1 --child-concurrency 1
echo "[den] Running migrations..."
pnpm --filter @openwork/den db:migrate
echo ""
echo "============================================"
echo " Den control plane"
echo " Demo/API: http://localhost:${DEN_API_PORT:-8788}"
echo " Health: http://localhost:${DEN_API_PORT:-8788}/health"
echo " Auth URL: ${DEN_BETTER_AUTH_URL:-http://localhost:3005}"
echo "============================================"
echo ""
exec pnpm --filter @openwork/den dev
ports:
- "${DEN_API_PORT:-8788}:8788"
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:8788/health || exit 1"]
test: ["CMD", "node", "-e", "fetch('http://127.0.0.1:8788/health').then((res)=>process.exit(res.ok?0:1)).catch(()=>process.exit(1))"]
interval: 5s
timeout: 5s
retries: 30
@@ -92,44 +66,57 @@ services:
DATABASE_URL: mysql://root:password@mysql:3306/openwork_den
BETTER_AUTH_SECRET: ${DEN_BETTER_AUTH_SECRET:-dev-den-local-auth-secret-please-override-1234567890}
BETTER_AUTH_URL: ${DEN_BETTER_AUTH_URL:-http://localhost:3005}
DEN_BETTER_AUTH_TRUSTED_ORIGINS: ${DEN_BETTER_AUTH_TRUSTED_ORIGINS:-${DEN_CORS_ORIGINS:-http://localhost:3005,http://127.0.0.1:3005,http://localhost:8788,http://127.0.0.1:8788}}
PORT: "8788"
CORS_ORIGINS: ${DEN_CORS_ORIGINS:-http://localhost:3005,http://127.0.0.1:3005,http://localhost:8788,http://127.0.0.1:8788}
PROVISIONER_MODE: ${DEN_PROVISIONER_MODE:-stub}
WORKER_URL_TEMPLATE: ${DEN_WORKER_URL_TEMPLATE:-}
POLAR_FEATURE_GATE_ENABLED: "false"
DAYTONA_API_URL: ${DAYTONA_API_URL:-}
DAYTONA_API_KEY: ${DAYTONA_API_KEY:-}
DAYTONA_TARGET: ${DAYTONA_TARGET:-}
DAYTONA_SNAPSHOT: ${DAYTONA_SNAPSHOT:-}
DAYTONA_OPENWORK_VERSION: ${DAYTONA_OPENWORK_VERSION:-}
DAYTONA_WORKER_PROXY_BASE_URL: ${DEN_DAYTONA_WORKER_PROXY_BASE_URL:-http://localhost:8789}
worker-proxy:
<<: *shared
build:
context: ../../
dockerfile: packaging/docker/Dockerfile.den-worker-proxy
depends_on:
mysql:
condition: service_healthy
ports:
- "${DEN_WORKER_PROXY_PORT:-8789}:8789"
healthcheck:
test: ["CMD", "node", "-e", "fetch('http://127.0.0.1:8789/unknown').then((res)=>process.exit([404,502].includes(res.status)?0:1)).catch(()=>process.exit(1))"]
interval: 5s
timeout: 5s
retries: 30
start_period: 90s
environment:
CI: "true"
DATABASE_URL: mysql://root:password@mysql:3306/openwork_den
PORT: "8789"
OPENWORK_DAYTONA_ENV_PATH: ${OPENWORK_DAYTONA_ENV_PATH:-}
DAYTONA_API_URL: ${DAYTONA_API_URL:-}
DAYTONA_API_KEY: ${DAYTONA_API_KEY:-}
DAYTONA_TARGET: ${DAYTONA_TARGET:-}
DAYTONA_OPENWORK_PORT: ${DAYTONA_OPENWORK_PORT:-8787}
DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: ${DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS:-86400}
web:
<<: *shared
build:
context: ../../
dockerfile: packaging/docker/Dockerfile.den-web
depends_on:
den:
condition: service_healthy
entrypoint: ["/bin/sh", "-c"]
command:
- |
set -e
apt-get update -qq && apt-get install -y -qq --no-install-recommends \
curl ca-certificates >/dev/null 2>&1
corepack enable && corepack prepare pnpm@10.27.0 --activate
echo "[den-web] Installing dependencies..."
pnpm install --no-frozen-lockfile --network-concurrency 1 --child-concurrency 1
echo ""
echo "============================================"
echo " OpenWork Cloud web app"
echo " URL: http://localhost:${DEN_WEB_PORT:-3005}"
echo " Den API: http://localhost:${DEN_API_PORT:-8788}"
echo "============================================"
echo ""
exec pnpm --filter @different-ai/openwork-web dev
ports:
- "${DEN_WEB_PORT:-3005}:3005"
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:3005/api/den/health || exit 1"]
test: ["CMD", "node", "-e", "fetch('http://127.0.0.1:3005/api/den/health').then((res)=>process.exit(res.ok?0:1)).catch(()=>process.exit(1))"]
interval: 5s
timeout: 10s
retries: 30
@@ -144,4 +131,3 @@ services:
volumes:
den-mysql-data:
pnpm-store:

2278
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Builds the Daytona worker-runtime image locally and pushes it as a Daytona
# snapshot so sandboxes start with the openwork/opencode tooling baked in.
# Usage: build-daytona-snapshot.sh [snapshot-name]
# Optional overrides are read from $DAYTONA_ENV_FILE (default: <repo>/.env.daytona).
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
DOCKERFILE="$ROOT_DIR/services/den-worker-runtime/Dockerfile.daytona-snapshot"
DAYTONA_ENV_FILE="${DAYTONA_ENV_FILE:-$ROOT_DIR/.env.daytona}"
# docker builds the image; the daytona CLI pushes the snapshot.
if ! command -v docker >/dev/null 2>&1; then
  echo "docker is required" >&2
  exit 1
fi
if ! command -v daytona >/dev/null 2>&1; then
  echo "daytona CLI is required" >&2
  exit 1
fi
# Export everything defined in the Daytona env file, when present.
if [ -f "$DAYTONA_ENV_FILE" ]; then
  set -a
  # shellcheck disable=SC1090
  source "$DAYTONA_ENV_FILE"
  set +a
fi
# Snapshot identity and sizing. Defaults are intentionally small
# (1 CPU / 2 GB RAM / 8 GB disk) to fit smaller org limits.
SNAPSHOT_NAME="${1:-${DAYTONA_SNAPSHOT_NAME:-openwork-runtime}}"
SNAPSHOT_REGION="${DAYTONA_SNAPSHOT_REGION:-${DAYTONA_TARGET:-}}"
SNAPSHOT_CPU="${DAYTONA_SNAPSHOT_CPU:-1}"
SNAPSHOT_MEMORY="${DAYTONA_SNAPSHOT_MEMORY:-2}"
SNAPSHOT_DISK="${DAYTONA_SNAPSHOT_DISK:-8}"
# Local tag mirrors the snapshot name with unsafe characters replaced by '-'.
LOCAL_IMAGE_TAG="${DAYTONA_LOCAL_IMAGE_TAG:-openwork-daytona-snapshot:${SNAPSHOT_NAME//[^a-zA-Z0-9_.-]/-}}"
# Pin tool versions from the orchestrator package manifest unless overridden.
OPENWORK_ORCHESTRATOR_VERSION="${OPENWORK_ORCHESTRATOR_VERSION:-$(node -e 'const fs=require("fs"); const pkg=JSON.parse(fs.readFileSync(process.argv[1], "utf8")); process.stdout.write(String(pkg.version));' "$ROOT_DIR/packages/orchestrator/package.json")}"
OPENCODE_VERSION="${OPENCODE_VERSION:-$(node -e 'const fs=require("fs"); const pkg=JSON.parse(fs.readFileSync(process.argv[1], "utf8")); process.stdout.write(String(pkg.opencodeVersion));' "$ROOT_DIR/packages/orchestrator/package.json")}"
echo "Building local image $LOCAL_IMAGE_TAG" >&2
echo "- openwork-orchestrator@$OPENWORK_ORCHESTRATOR_VERSION" >&2
echo "- opencode@$OPENCODE_VERSION" >&2
# Force amd64 and --load so the image lands in the local docker daemon for push.
docker buildx build \
  --platform linux/amd64 \
  -t "$LOCAL_IMAGE_TAG" \
  -f "$DOCKERFILE" \
  --build-arg "OPENWORK_ORCHESTRATOR_VERSION=$OPENWORK_ORCHESTRATOR_VERSION" \
  --build-arg "OPENCODE_VERSION=$OPENCODE_VERSION" \
  --load \
  "$ROOT_DIR"
# --region is only passed when a region/target is configured.
args=(snapshot push "$LOCAL_IMAGE_TAG" --name "$SNAPSHOT_NAME" --cpu "$SNAPSHOT_CPU" --memory "$SNAPSHOT_MEMORY" --disk "$SNAPSHOT_DISK")
if [ -n "$SNAPSHOT_REGION" ]; then
  args+=(--region "$SNAPSHOT_REGION")
fi
echo "Pushing Daytona snapshot $SNAPSHOT_NAME" >&2
daytona "${args[@]}"
echo >&2
echo "Snapshot ready: $SNAPSHOT_NAME" >&2
echo "Set DAYTONA_SNAPSHOT=$SNAPSHOT_NAME in .env.daytona before starting Den." >&2

View File

@@ -0,0 +1,73 @@
DATABASE_URL=
DATABASE_HOST=
DATABASE_USERNAME=
DATABASE_PASSWORD=
DB_MODE=
BETTER_AUTH_SECRET=
BETTER_AUTH_URL=http://localhost:8788
DEN_BETTER_AUTH_TRUSTED_ORIGINS=http://localhost:3005,http://localhost:5173
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
PORT=8788
WORKER_PROXY_PORT=8789
CORS_ORIGINS=http://localhost:3005,http://localhost:5173
PROVISIONER_MODE=stub
OPENWORK_DAYTONA_ENV_PATH=
WORKER_URL_TEMPLATE=https://workers.example.com/{workerId}
RENDER_API_BASE=https://api.render.com/v1
RENDER_API_KEY=
RENDER_OWNER_ID=
RENDER_WORKER_REPO=https://github.com/different-ai/openwork
RENDER_WORKER_BRANCH=dev
RENDER_WORKER_ROOT_DIR=services/den-worker-runtime
RENDER_WORKER_PLAN=standard
RENDER_WORKER_REGION=oregon
RENDER_WORKER_OPENWORK_VERSION=0.11.113
RENDER_WORKER_NAME_PREFIX=den-worker
RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX=openwork.studio
RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS=240000
RENDER_PROVISION_TIMEOUT_MS=900000
RENDER_HEALTHCHECK_TIMEOUT_MS=180000
RENDER_POLL_INTERVAL_MS=5000
VERCEL_API_BASE=https://api.vercel.com
VERCEL_TOKEN=
VERCEL_TEAM_ID=
VERCEL_TEAM_SLUG=prologe
VERCEL_DNS_DOMAIN=openwork.studio
POLAR_FEATURE_GATE_ENABLED=false
POLAR_API_BASE=https://api.polar.sh
POLAR_ACCESS_TOKEN=
POLAR_PRODUCT_ID=
POLAR_BENEFIT_ID=
POLAR_SUCCESS_URL=http://localhost:8788
POLAR_RETURN_URL=http://localhost:8788
DAYTONA_API_URL=https://app.daytona.io/api
DAYTONA_API_KEY=
DAYTONA_TARGET=
DAYTONA_SNAPSHOT=
DAYTONA_SANDBOX_IMAGE=node:20-bookworm
DAYTONA_SANDBOX_CPU=2
DAYTONA_SANDBOX_MEMORY=4
DAYTONA_SANDBOX_DISK=8
DAYTONA_SANDBOX_PUBLIC=false
DAYTONA_SANDBOX_AUTO_STOP_INTERVAL=0
DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL=10080
DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL=-1
DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS=86400
DAYTONA_WORKER_PROXY_BASE_URL=https://workers.den.openworklabs
DAYTONA_SANDBOX_NAME_PREFIX=den-daytona-worker
DAYTONA_VOLUME_NAME_PREFIX=den-daytona-worker
DAYTONA_WORKSPACE_MOUNT_PATH=/workspace
DAYTONA_DATA_MOUNT_PATH=/persist/openwork
DAYTONA_RUNTIME_WORKSPACE_PATH=/tmp/openwork-workspace
DAYTONA_RUNTIME_DATA_PATH=/tmp/openwork-data
DAYTONA_SIDECAR_DIR=/tmp/openwork-sidecars
DAYTONA_OPENWORK_PORT=8787
DAYTONA_OPENCODE_PORT=4096
DAYTONA_OPENWORK_VERSION=
DAYTONA_CREATE_TIMEOUT_SECONDS=300
DAYTONA_DELETE_TIMEOUT_SECONDS=120
DAYTONA_HEALTHCHECK_TIMEOUT_MS=300000
DAYTONA_POLL_INTERVAL_MS=5000

218
services/den-v2/README.md Normal file
View File

@@ -0,0 +1,218 @@
# Den v2 Service
Control plane for hosted workers. Provides Better Auth, worker CRUD, and provisioning hooks.
## Quick start
```bash
pnpm install
cp .env.example .env
pnpm dev
```
## Docker dev stack
For a one-command local stack with MySQL + the Den cloud web app, run this from the repo root:
```bash
./packaging/docker/den-dev-up.sh
```
That brings up:
- local MySQL for Den
- the Den control plane on a randomized host port
- the OpenWork Cloud web app on a randomized host port
The script prints the exact URLs and `docker compose ... down` command to use for cleanup.
## Environment
- `DATABASE_URL` MySQL connection URL
- `BETTER_AUTH_SECRET` 32+ char secret
- `BETTER_AUTH_URL` public base URL Better Auth uses for OAuth redirects and callbacks
- `DEN_BETTER_AUTH_TRUSTED_ORIGINS` optional comma-separated trusted origins for Better Auth origin validation (defaults to `CORS_ORIGINS`)
- `GITHUB_CLIENT_ID` optional OAuth app client ID for GitHub sign-in
- `GITHUB_CLIENT_SECRET` optional OAuth app client secret for GitHub sign-in
- `GOOGLE_CLIENT_ID` optional OAuth app client ID for Google sign-in
- `GOOGLE_CLIENT_SECRET` optional OAuth app client secret for Google sign-in
- `PORT` server port
- `CORS_ORIGINS` comma-separated list of trusted browser origins (used for Better Auth origin validation + Express CORS)
- `PROVISIONER_MODE` `stub`, `render`, or `daytona`
- `OPENWORK_DAYTONA_ENV_PATH` optional path to a shared `.env.daytona` file; when unset, Den searches upwards from the repo for `.env.daytona`
- `WORKER_URL_TEMPLATE` template string with `{workerId}`
- `RENDER_API_BASE` Render API base URL (default `https://api.render.com/v1`)
- `RENDER_API_KEY` Render API key (required for `PROVISIONER_MODE=render`)
- `RENDER_OWNER_ID` Render workspace owner id (required for `PROVISIONER_MODE=render`)
- `RENDER_WORKER_REPO` repository URL used to create worker services
- `RENDER_WORKER_BRANCH` branch used for worker services
- `RENDER_WORKER_ROOT_DIR` render `rootDir` for worker services
- `RENDER_WORKER_PLAN` Render plan for worker services
- `RENDER_WORKER_REGION` Render region for worker services
- `RENDER_WORKER_OPENWORK_VERSION` `openwork-orchestrator` npm version installed in workers; the worker build uses its `opencodeVersion` metadata to bundle a matching `opencode` binary into the Render deploy
- `RENDER_WORKER_NAME_PREFIX` service name prefix
- `RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX` optional domain suffix for worker custom URLs (e.g. `openwork.studio` -> `<worker-id>.openwork.studio`)
- `RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS` max time to wait for vanity URL health before falling back to Render URL
- `RENDER_PROVISION_TIMEOUT_MS` max time to wait for deploy to become live
- `RENDER_HEALTHCHECK_TIMEOUT_MS` max time to wait for worker health checks
- `RENDER_POLL_INTERVAL_MS` polling interval for deploy + health checks
- `VERCEL_API_BASE` Vercel API base URL (default `https://api.vercel.com`)
- `VERCEL_TOKEN` Vercel API token used to upsert worker DNS records
- `VERCEL_TEAM_ID` optional Vercel team id for scoped API calls
- `VERCEL_TEAM_SLUG` optional Vercel team slug for scoped API calls (used when `VERCEL_TEAM_ID` is unset)
- `VERCEL_DNS_DOMAIN` Vercel-managed DNS zone used for worker records (default `openwork.studio`)
- `POLAR_FEATURE_GATE_ENABLED` enable cloud-worker paywall (`true` or `false`)
- `POLAR_API_BASE` Polar API base URL (default `https://api.polar.sh`)
- `POLAR_ACCESS_TOKEN` Polar organization access token (required when paywall enabled)
- `POLAR_PRODUCT_ID` Polar product ID used for checkout sessions (required when paywall enabled)
- `POLAR_BENEFIT_ID` Polar benefit ID required to unlock cloud workers (required when paywall enabled)
- `POLAR_SUCCESS_URL` redirect URL after successful checkout (required when paywall enabled)
- `POLAR_RETURN_URL` return URL shown in checkout (required when paywall enabled)
- Daytona:
- `DAYTONA_API_KEY` API key used to create sandboxes and volumes
- `DAYTONA_API_URL` Daytona API base URL (default `https://app.daytona.io/api`)
- `DAYTONA_TARGET` optional Daytona region/target
- `DAYTONA_SNAPSHOT` optional snapshot name; if omitted Den creates workers from `DAYTONA_SANDBOX_IMAGE`
- `DAYTONA_SANDBOX_IMAGE` sandbox base image when no snapshot is provided (default `node:20-bookworm`)
- `DAYTONA_SANDBOX_CPU`, `DAYTONA_SANDBOX_MEMORY`, `DAYTONA_SANDBOX_DISK` resource sizing when image-backed sandboxes are used
- `DAYTONA_SANDBOX_AUTO_STOP_INTERVAL`, `DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL`, `DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL` lifecycle controls
- `DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS` TTL for the signed OpenWork preview URL returned to Den clients (Daytona currently caps this at 24 hours)
- `DAYTONA_SANDBOX_NAME_PREFIX`, `DAYTONA_VOLUME_NAME_PREFIX` resource naming prefixes
- `DAYTONA_WORKSPACE_MOUNT_PATH`, `DAYTONA_DATA_MOUNT_PATH` volume mount paths inside the sandbox
- `DAYTONA_RUNTIME_WORKSPACE_PATH`, `DAYTONA_RUNTIME_DATA_PATH`, `DAYTONA_SIDECAR_DIR` local sandbox paths used for the live OpenWork runtime; the mounted Daytona volumes are linked into the runtime workspace under `volumes/`
- `DAYTONA_OPENWORK_PORT`, `DAYTONA_OPENCODE_PORT` ports used when launching `openwork serve`
- `DAYTONA_OPENWORK_VERSION` optional npm version to install instead of latest `openwork-orchestrator`
- `DAYTONA_CREATE_TIMEOUT_SECONDS`, `DAYTONA_DELETE_TIMEOUT_SECONDS`, `DAYTONA_HEALTHCHECK_TIMEOUT_MS`, `DAYTONA_POLL_INTERVAL_MS` provisioning timeouts
For local Daytona development, place your Daytona API credentials in a `.env.daytona` file at the repository root and Den will pick them up automatically, including from task worktrees.
## Building a Daytona snapshot
If you want Daytona workers to start from a prebuilt runtime instead of a generic base image, create a snapshot and point Den at it.
The snapshot builder for this repo lives at:
- `scripts/create-daytona-openwork-snapshot.sh`
- `services/den-worker-runtime/Dockerfile.daytona-snapshot`
It builds a Linux image with:
- `openwork-orchestrator`
- `opencode`
Prerequisites:
- Docker running locally
- Daytona CLI installed and logged in
- a valid `.env.daytona` with at least `DAYTONA_API_KEY`
From the OpenWork repo root:
```bash
./scripts/create-daytona-openwork-snapshot.sh
```
To publish a custom-named snapshot:
```bash
./scripts/create-daytona-openwork-snapshot.sh openwork-runtime
```
Useful optional overrides:
- `DAYTONA_SNAPSHOT_NAME`
- `DAYTONA_SNAPSHOT_REGION`
- `DAYTONA_SNAPSHOT_CPU`
- `DAYTONA_SNAPSHOT_MEMORY`
- `DAYTONA_SNAPSHOT_DISK`
- `OPENWORK_ORCHESTRATOR_VERSION`
- `OPENCODE_VERSION`
After the snapshot is pushed, set it in `.env.daytona`:
```env
DAYTONA_SNAPSHOT=openwork-runtime
```
Then start Den in Daytona mode:
```bash
DEN_PROVISIONER_MODE=daytona packaging/docker/den-dev-up.sh
```
If you do not set `DAYTONA_SNAPSHOT`, Den falls back to `DAYTONA_SANDBOX_IMAGE` and installs runtime dependencies at sandbox startup.
## Auth setup (Better Auth)
Generate Better Auth schema (Drizzle):
```bash
npx @better-auth/cli@latest generate --config src/auth.ts --output src/db/better-auth.schema.ts --yes
```
Apply migrations:
```bash
pnpm db:generate
pnpm db:migrate
# or use the SQL migration runner used by Docker
pnpm db:migrate:sql
```
## API
- `GET /health`
- `GET /` demo web app (sign-up + auth + worker launch)
- `GET /v1/me`
- `GET /v1/workers` (list recent workers for signed-in user/org)
- `POST /v1/workers`
- Cloud launches return `202` quickly with worker `status=provisioning` and continue provisioning asynchronously.
- Returns `402 payment_required` with Polar checkout URL when paywall is enabled and entitlement is missing.
- Existing Polar customers are matched by `external_customer_id` first, then by email to preserve access for pre-existing paid users.
- `GET /v1/workers/:id`
- Includes latest instance metadata when available.
- `POST /v1/workers/:id/tokens`
- `DELETE /v1/workers/:id`
- Deletes worker records and attempts to tear down the backing cloud runtime when destination is `cloud`.
## CI deployment (dev == prod)
The workflow `.github/workflows/deploy-den.yml` updates Render env vars and deploys the service on every push to `dev` when this service changes.
Required GitHub Actions secrets:
- `RENDER_API_KEY`
- `RENDER_DEN_CONTROL_PLANE_SERVICE_ID`
- `RENDER_OWNER_ID`
- `DEN_DATABASE_URL`
- `DEN_BETTER_AUTH_SECRET`
Optional GitHub Actions secrets (enable GitHub social sign-in):
- `DEN_GITHUB_CLIENT_ID`
- `DEN_GITHUB_CLIENT_SECRET`
- `DEN_GOOGLE_CLIENT_ID`
- `DEN_GOOGLE_CLIENT_SECRET`
Optional GitHub Actions variable:
- `DEN_RENDER_WORKER_PLAN` (defaults to `standard`)
- `DEN_RENDER_WORKER_OPENWORK_VERSION` pins the `openwork-orchestrator` npm version installed in workers; the worker build bundles the matching `opencode` release asset into the Render image
- `DEN_CORS_ORIGINS` (defaults to `https://app.openwork.software,https://api.openwork.software,<render-service-url>`)
- `DEN_BETTER_AUTH_TRUSTED_ORIGINS` (defaults to `DEN_CORS_ORIGINS`)
- `DEN_RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX` (defaults to `openwork.studio`)
- `DEN_RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS` (defaults to `240000`)
- `DEN_BETTER_AUTH_URL` (defaults to `https://app.openwork.software`)
- `DEN_VERCEL_API_BASE` (defaults to `https://api.vercel.com`)
- `DEN_VERCEL_TEAM_ID` (optional)
- `DEN_VERCEL_TEAM_SLUG` (optional, defaults to `prologe`)
- `DEN_VERCEL_DNS_DOMAIN` (defaults to `openwork.studio`)
- `DEN_POLAR_FEATURE_GATE_ENABLED` (`true`/`false`, defaults to `false`)
- `DEN_POLAR_API_BASE` (defaults to `https://api.polar.sh`)
- `DEN_POLAR_SUCCESS_URL` (defaults to `https://app.openwork.software`)
- `DEN_POLAR_RETURN_URL` (defaults to `DEN_POLAR_SUCCESS_URL`)
Required additional secret when using vanity worker domains:
- `VERCEL_TOKEN`

View File

@@ -0,0 +1,36 @@
{
"name": "@openwork/den-v2",
"private": true,
"type": "module",
"scripts": {
"dev": "npm run build:den-db && OPENWORK_DEV_MODE=1 tsx watch src/index.ts",
"build": "npm run build:den-db && tsc -p tsconfig.json",
"build:den-db": "npm --prefix ../../packages/den-db run build",
"start": "node dist/index.js",
"db:migrate:sql": "node scripts/run-sql-migrations.mjs",
"test:smoke:daytona": "pnpm build && node scripts/daytona-provisioner-smoke.mjs",
"test:e2e:daytona": "node scripts/e2e-daytona-worker.mjs",
"test:e2e:worker-limit": "node scripts/e2e-worker-limit.mjs",
"db:generate": "drizzle-kit generate",
"db:migrate": "drizzle-kit migrate",
"auth:generate": "npx @better-auth/cli@latest generate --config src/auth.ts --output src/db/better-auth.schema.ts --yes"
},
"dependencies": {
"@daytonaio/sdk": "^0.150.0",
"better-auth": "^1.4.18",
"cors": "^2.8.5",
"dotenv": "^16.4.5",
"drizzle-orm": "^0.45.1",
"express": "^4.19.2",
"mysql2": "^3.11.3",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/cors": "^2.8.17",
"@types/express": "^4.17.21",
"@types/node": "^20.11.30",
"drizzle-kit": "^0.31.9",
"tsx": "^4.15.7",
"typescript": "^5.5.4"
}
}

View File

@@ -0,0 +1,248 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Den Control Plane</title>
<style>
body {
font-family: Arial, sans-serif;
margin: 24px;
background: #f8fafc;
color: #0f172a;
}
h1 {
margin-top: 0;
}
.grid {
display: grid;
gap: 12px;
grid-template-columns: repeat(auto-fit, minmax(320px, 1fr));
}
.card {
background: #fff;
border: 1px solid #cbd5e1;
border-radius: 8px;
padding: 16px;
}
label {
display: block;
margin-top: 8px;
font-size: 13px;
font-weight: 600;
}
input,
select {
width: 100%;
box-sizing: border-box;
margin-top: 4px;
border: 1px solid #cbd5e1;
border-radius: 6px;
padding: 8px;
}
button {
margin-top: 10px;
border: 0;
border-radius: 6px;
padding: 8px 10px;
background: #1d4ed8;
color: #fff;
cursor: pointer;
}
pre {
white-space: pre-wrap;
word-break: break-word;
background: #0f172a;
color: #f8fafc;
border-radius: 6px;
padding: 10px;
min-height: 90px;
}
.muted {
color: #475569;
font-size: 13px;
}
</style>
</head>
<body>
<h1>Den Control Plane Demo</h1>
<p class="muted">Sign up, verify auth, and launch a cloud worker end-to-end.</p>
<div class="grid">
<section class="card">
<h2>1) Sign up</h2>
<label>
Name
<input id="signup-name" value="Den Demo" />
</label>
<label>
Email
<input id="signup-email" placeholder="demo@example.com" />
</label>
<label>
Password
<input id="signup-password" type="password" value="TestPass123!" />
</label>
<button id="signup-button" type="button">Create account</button>
</section>
<section class="card">
<h2>2) Verify auth/session</h2>
<button id="me-button" type="button">GET /v1/me (cookie session)</button>
<button id="me-bearer-button" type="button">GET /v1/me (Bearer token)</button>
<p class="muted">Bearer token comes from sign-up/sign-in response.</p>
</section>
<section class="card">
<h2>3) Launch worker</h2>
<label>
Name
<input id="worker-name" value="web-flow-worker" />
</label>
<label>
Destination
<select id="worker-destination">
<option value="cloud" selected>cloud</option>
<option value="local">local</option>
</select>
</label>
<label>
Workspace path (required for local)
<input id="worker-workspace-path" value="/tmp/workspace" />
</label>
<button id="worker-button" type="button">POST /v1/workers</button>
</section>
</div>
<section class="card" style="margin-top: 12px">
<h2>Output</h2>
<pre id="output">ready</pre>
</section>
<script>
// Shared output pane plus the bearer token captured from the sign-up response.
const output = document.getElementById("output")
let bearerToken = ""
// Render a value into the output pane, pretty-printing objects as JSON.
const print = (value) => {
output.textContent = typeof value === "string" ? value : JSON.stringify(value, null, 2)
}
// Deep-copy a response payload and mask every secret-bearing field so raw
// tokens never appear in the on-page output. Strings pass through untouched.
const redact = (value) => {
  if (typeof value === "string") {
    return value
  }
  const copy = JSON.parse(JSON.stringify(value))
  // [holder object, key to mask, placeholder] triples; skipped when absent.
  const masks = [
    [copy?.body, "token", "REDACTED_SESSION_TOKEN"],
    [copy?.body?.session, "token", "REDACTED_SESSION_TOKEN"],
    [copy?.body?.session, "id", "REDACTED_SESSION_ID"],
    [copy?.body?.session, "ipAddress", "REDACTED_IP"],
    [copy?.body?.tokens, "host", "REDACTED_HOST_TOKEN"],
    [copy?.body?.tokens, "client", "REDACTED_CLIENT_TOKEN"],
  ]
  for (const [holder, key, placeholder] of masks) {
    if (holder && holder[key]) {
      holder[key] = placeholder
    }
  }
  return copy
}
// Minimal fetch wrapper: always sends cookies and a JSON content type, then
// returns { status, body } with the body JSON-parsed when possible.
const request = async (path, options = {}) => {
const response = await fetch(path, {
credentials: "include",
headers: {
"Content-Type": "application/json",
...(options.headers || {}),
},
...options,
})
let body = null
const text = await response.text()
if (text) {
try {
body = JSON.parse(text)
} catch {
// Non-JSON responses are surfaced as raw text.
body = text
}
}
return { status: response.status, body }
}
// 1) Sign up: auto-fill a unique email when blank, create the account, and
// capture the bearer token for the later Bearer-auth check.
document.getElementById("signup-button").addEventListener("click", async () => {
const emailInput = document.getElementById("signup-email")
if (!emailInput.value) {
emailInput.value = `den-web-${Date.now()}@example.com`
}
const result = await request("/api/auth/sign-up/email", {
method: "POST",
body: JSON.stringify({
name: document.getElementById("signup-name").value,
email: emailInput.value,
password: document.getElementById("signup-password").value,
}),
})
bearerToken = result.body?.token || ""
print(redact({ step: "signup", ...result, bearerTokenPresent: Boolean(bearerToken) }))
})
// 2a) Session check using the browser cookie only.
document.getElementById("me-button").addEventListener("click", async () => {
const result = await request("/v1/me", { method: "GET", headers: {} })
print(redact({ step: "me-cookie", ...result }))
})
// 2b) Session check using the captured bearer token instead of the cookie.
document.getElementById("me-bearer-button").addEventListener("click", async () => {
if (!bearerToken) {
print({ error: "missing_bearer_token", hint: "Sign up first to capture token." })
return
}
const result = await request("/v1/me", {
method: "GET",
headers: {
Authorization: `Bearer ${bearerToken}`,
},
})
print(redact({ step: "me-bearer", ...result }))
})
// 3) Launch a worker; workspacePath is only sent for local destinations.
document.getElementById("worker-button").addEventListener("click", async () => {
const destination = document.getElementById("worker-destination").value
const payload = {
name: document.getElementById("worker-name").value,
destination,
workspacePath: document.getElementById("worker-workspace-path").value || undefined,
imageVersion: "den-worker-v1",
}
if (destination !== "local") {
delete payload.workspacePath
}
const result = await request("/v1/workers", {
method: "POST",
body: JSON.stringify(payload),
})
print(redact({ step: "create-worker", payload, ...result }))
})
</script>
</body>
</html>

View File

@@ -0,0 +1,129 @@
import { randomUUID } from "node:crypto"
import { existsSync } from "node:fs"
import { dirname, join, resolve } from "node:path"
import { fileURLToPath } from "node:url"
import { setTimeout as delay } from "node:timers/promises"
import dotenv from "dotenv"
import { Daytona } from "@daytonaio/sdk"
const __dirname = dirname(fileURLToPath(import.meta.url))
const serviceDir = resolve(__dirname, "..")
const repoRoot = resolve(serviceDir, "..", "..")
// Search for fileName starting at startDir and walking toward the filesystem
// root, giving up after maxDepth parent hops or at the root. Returns the
// first match's full path, or null when nothing is found.
function findUpwards(startDir, fileName, maxDepth = 8) {
  let dir = startDir
  for (let hops = maxDepth; hops >= 0; hops -= 1) {
    const candidate = join(dir, fileName)
    if (existsSync(candidate)) {
      return candidate
    }
    const parent = dirname(dir)
    if (parent === dir) {
      // Reached the filesystem root without a match.
      return null
    }
    dir = parent
  }
  return null
}
// Load shared Daytona credentials: an explicit env path wins, otherwise
// search upward from the repo root for a .env.daytona file.
const daytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() || findUpwards(repoRoot, ".env.daytona")
if (daytonaEnvPath) {
dotenv.config({ path: daytonaEnvPath, override: false })
}
// Placeholder values for config that must be set before the provisioner
// module loads; presumably unused by this smoke path itself — confirm.
process.env.DATABASE_URL ||= "mysql://unused"
process.env.BETTER_AUTH_SECRET ||= "openwork-daytona-local-secret-000000000"
process.env.BETTER_AUTH_URL ||= "http://127.0.0.1"
process.env.CORS_ORIGINS ||= "http://127.0.0.1"
process.env.PROVISIONER_MODE ||= "daytona"
// Log a status message; the structured detail is appended only when provided.
function log(message, detail) {
  if (detail === undefined) {
    console.log(message)
  } else {
    console.log(message, detail)
  }
}
// Log an error (optionally with a detail payload) and abort with exit code 1.
function fail(message, detail) {
if (detail !== undefined) {
console.error(message, detail)
} else {
console.error(message)
}
process.exit(1)
}
// Poll Daytona until no sandbox labeled with this worker id remains.
// Retries every 5s for `attempts` rounds (default ~2 minutes) before failing.
async function waitForCleanup(daytona, workerId, attempts = 24) {
for (let index = 0; index < attempts; index += 1) {
const sandboxes = await daytona.list(
{
"openwork.den.provider": "daytona",
"openwork.den.worker-id": workerId,
},
1,
20,
)
if (sandboxes.items.length === 0) {
return
}
await delay(5000)
}
throw new Error(`cleanup_timeout:${workerId}`)
}
// Smoke test for the Daytona provisioner: provision a worker, call its
// /workspaces endpoint with the client token, deprovision it, then wait for
// the Daytona sandbox to disappear.
async function main() {
if (!process.env.DAYTONA_API_KEY) {
fail("DAYTONA_API_KEY is required. Add it to .env.daytona or export it before running the smoke test.")
}
// Imported lazily so the env defaults above are in place before module load.
const { provisionWorker, deprovisionWorker } = await import("../dist/workers/provisioner.js")
const workerId = randomUUID()
// Long opaque tokens built from two dash-stripped UUIDs each.
const clientToken = randomUUID().replaceAll("-", "") + randomUUID().replaceAll("-", "")
const hostToken = randomUUID().replaceAll("-", "") + randomUUID().replaceAll("-", "")
const instance = await provisionWorker({
workerId,
name: "daytona-smoke",
hostToken,
clientToken,
})
log("Provisioned Daytona worker", instance)
// Verify the provisioned worker serves authenticated API traffic.
const workspacesResponse = await fetch(`${instance.url.replace(/\/$/, "")}/workspaces`, {
headers: {
Accept: "application/json",
Authorization: `Bearer ${clientToken}`,
},
})
const workspacesPayload = await workspacesResponse.text()
if (!workspacesResponse.ok) {
fail("Worker /workspaces check failed", {
status: workspacesResponse.status,
body: workspacesPayload,
})
}
log("Worker /workspaces responded", workspacesPayload)
await deprovisionWorker({
workerId,
instanceUrl: instance.url,
})
// Confirm Daytona actually removed the sandbox after deprovisioning.
const daytona = new Daytona({
apiKey: process.env.DAYTONA_API_KEY,
apiUrl: process.env.DAYTONA_API_URL,
...(process.env.DAYTONA_TARGET ? { target: process.env.DAYTONA_TARGET } : {}),
})
await waitForCleanup(daytona, workerId)
log("Daytona worker cleanup completed", workerId)
}
// Any uncaught failure is reported and turned into a non-zero exit.
main().catch((error) => {
fail(error instanceof Error ? error.message : String(error))
})

View File

@@ -0,0 +1,489 @@
import { randomUUID } from "node:crypto"
import { once } from "node:events"
import { existsSync } from "node:fs"
import net from "node:net"
import { dirname, join, resolve } from "node:path"
import { fileURLToPath } from "node:url"
import { setTimeout as delay } from "node:timers/promises"
import { spawn } from "node:child_process"
import dotenv from "dotenv"
import mysql from "mysql2/promise"
import { Daytona } from "@daytonaio/sdk"
const __dirname = dirname(fileURLToPath(import.meta.url))
const serviceDir = resolve(__dirname, "..")
const repoRoot = resolve(serviceDir, "..", "..")
// Write a single status line to stdout.
function log(message) {
  const line = `${message}\n`
  process.stdout.write(line)
}
// Log an error (optionally with a detail payload) and abort with exit code 1.
function fail(message, detail) {
if (detail !== undefined) {
console.error(message, detail)
} else {
console.error(message)
}
process.exit(1)
}
// Search for fileName starting at startDir and walking toward the filesystem
// root, giving up after maxDepth parent hops or at the root. Returns the
// first match's full path, or null when nothing is found.
function findUpwards(startDir, fileName, maxDepth = 8) {
  let dir = startDir
  for (let hops = maxDepth; hops >= 0; hops -= 1) {
    const candidate = join(dir, fileName)
    if (existsSync(candidate)) {
      return candidate
    }
    const parent = dirname(dir)
    if (parent === dir) {
      // Reached the filesystem root without a match.
      return null
    }
    dir = parent
  }
  return null
}
// Load shared Daytona credentials: an explicit env path wins, otherwise
// search upward from the repo root for a .env.daytona file.
const daytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() || findUpwards(repoRoot, ".env.daytona")
if (daytonaEnvPath) {
dotenv.config({ path: daytonaEnvPath, override: false })
}
// Normalize an arbitrary string into a slug: lowercase, runs of disallowed
// characters collapsed to single hyphens, and no leading/trailing hyphens.
function slug(value) {
  const lowered = value.toLowerCase()
  const hyphenated = lowered.replace(/[^a-z0-9-]+/g, "-").replace(/-{2,}/g, "-")
  return hyphenated.replace(/^-+|-+$/g, "")
}
// Compact 12-character hint derived from the worker id with hyphens removed,
// used to keep generated resource names short.
function workerHint(workerId) {
  const compact = workerId.split("-").join("")
  return compact.slice(0, 12)
}
// Label set stamped on every Den-managed Daytona sandbox so the resources
// belonging to a given worker can be found (and cleaned up) later.
function sandboxLabels(workerId) {
  return Object.fromEntries([
    ["openwork.den.provider", "daytona"],
    ["openwork.den.worker-id", workerId],
  ])
}
// Name of the Daytona volume backing this worker's workspace mount,
// truncated to 63 characters to satisfy resource-name length limits.
function workspaceVolumeName(workerId) {
  const prefix = process.env.DAYTONA_VOLUME_NAME_PREFIX || "den-daytona-worker"
  const base = [prefix, workerHint(workerId), "workspace"].join("-")
  return slug(base).slice(0, 63)
}
// Name of the Daytona volume backing this worker's persistent data mount,
// truncated to 63 characters to satisfy resource-name length limits.
function dataVolumeName(workerId) {
  const prefix = process.env.DAYTONA_VOLUME_NAME_PREFIX || "den-daytona-worker"
  const base = [prefix, workerHint(workerId), "data"].join("-")
  return slug(base).slice(0, 63)
}
// Ask the OS for an ephemeral localhost port by binding to port 0, then
// release it and resolve with the assigned port number.
// NOTE(review): the port is only guaranteed free at probe time; another
// process could grab it before reuse — acceptable for local test setup.
async function getFreePort() {
return await new Promise((resolvePort, reject) => {
const server = net.createServer()
server.listen(0, "127.0.0.1", () => {
const address = server.address()
if (!address || typeof address === "string") {
reject(new Error("failed_to_resolve_free_port"))
return
}
server.close((error) => (error ? reject(error) : resolvePort(address.port)))
})
server.on("error", reject)
})
}
// Spawn a child process rooted at the service directory with piped stdio;
// callers may override cwd/env/stdio via `options` (spread last, so it wins).
function spawnCommand(command, args, options = {}) {
return spawn(command, args, {
cwd: serviceDir,
env: process.env,
stdio: "pipe",
...options,
})
}
// Run a command to completion, buffering stdout/stderr. Throws with the
// captured output embedded in the message when the exit code is non-zero.
async function runCommand(command, args, options = {}) {
const child = spawnCommand(command, args, options)
let stdout = ""
let stderr = ""
child.stdout?.on("data", (chunk) => {
stdout += chunk.toString()
})
child.stderr?.on("data", (chunk) => {
stderr += chunk.toString()
})
const [code] = await once(child, "exit")
if (code !== 0) {
throw new Error(`${command} ${args.join(" ")} failed\nSTDOUT:\n${stdout}\nSTDERR:\n${stderr}`)
}
return { stdout, stderr }
}
// Retry a trivial query once per second until MySQL accepts connections
// (default ~60s), closing each probe connection on success.
async function waitForMysqlConnection(databaseUrl, attempts = 60) {
for (let index = 0; index < attempts; index += 1) {
try {
const connection = await mysql.createConnection(databaseUrl)
await connection.query("SELECT 1")
await connection.end()
return
} catch {
await delay(1000)
}
}
throw new Error("mysql_not_ready")
}
// Poll a URL until it returns a 2xx response, retrying on network errors.
// Returns the successful Response, or throws once all attempts are spent.
async function waitForHttp(url, attempts = 60, intervalMs = 500) {
for (let index = 0; index < attempts; index += 1) {
try {
const response = await fetch(url)
if (response.ok) {
return response
}
} catch {
// ignore until retries are exhausted
}
await delay(intervalMs)
}
throw new Error(`http_not_ready:${url}`)
}
// Poll the Den API every 5s (default ~15 min) until the worker exposes an
// instance URL and reports status "healthy"; returns that worker payload.
async function waitForWorkerReady(baseUrl, workerId, auth, attempts = 180) {
for (let index = 0; index < attempts; index += 1) {
const result = await requestJson(baseUrl, `/v1/workers/${workerId}`, auth)
if (result.response.ok && result.payload?.instance?.url && result.payload?.worker?.status === "healthy") {
return result.payload
}
await delay(5000)
}
throw new Error(`worker_not_ready:${workerId}`)
}
// Poll every 5s (default ~5 min) until both the labeled sandboxes and the
// worker's workspace/data volumes are gone from Daytona.
async function waitForDaytonaCleanup(daytona, workerId, attempts = 60) {
for (let index = 0; index < attempts; index += 1) {
const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20)
const volumes = await daytona.volume.list()
const remainingVolumes = volumes.filter((volume) =>
[workspaceVolumeName(workerId), dataVolumeName(workerId)].includes(volume.name),
)
if (sandboxes.items.length === 0 && remainingVolumes.length === 0) {
return
}
await delay(5000)
}
throw new Error(`daytona_cleanup_incomplete:${workerId}`)
}
// Best-effort teardown: delete every sandbox labeled with this worker id,
// then its workspace and data volumes. Individual failures are swallowed so
// cleanup always runs to completion.
async function forceDeleteDaytonaResources(daytona, workerId) {
const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20)
for (const sandbox of sandboxes.items) {
await sandbox.delete(120).catch(() => {})
}
const volumes = await daytona.volume.list()
for (const volumeName of [workspaceVolumeName(workerId), dataVolumeName(workerId)]) {
const volume = volumes.find((entry) => entry.name === volumeName)
if (volume) {
await daytona.volume.delete(volume).catch(() => {})
}
}
}
// Pull a bearer token out of an auth response payload. The top-level `token`
// wins when it is a non-blank string; otherwise fall back to `session.token`.
// Returns null when neither is present.
function extractAuthToken(payload) {
  if (payload === null || typeof payload !== "object") {
    return null
  }
  const direct = payload.token
  if (typeof direct === "string" && direct.trim() !== "") {
    return direct
  }
  const session = payload.session
  if (session !== null && typeof session === "object" && typeof session.token === "string") {
    return session.token
  }
  return null
}
// JSON helper for the Den API. Sends browser-like Origin/Referer headers
// (Better Auth validates request origins), plus optional bearer-token and
// cookie auth. Returns { response, payload, cookie } where payload is
// JSON-parsed when possible and cookie is the response's Set-Cookie header.
async function requestJson(baseUrl, path, { method = "GET", body, token, cookie } = {}) {
const headers = new Headers()
// Origin can be overridden for setups where the UI and API origins differ.
const origin = process.env.DEN_BROWSER_ORIGIN?.trim() || new URL(baseUrl).origin
headers.set("Accept", "application/json")
headers.set("Origin", origin)
headers.set("Referer", `${origin}/`)
if (body !== undefined) {
headers.set("Content-Type", "application/json")
}
if (token) {
headers.set("Authorization", `Bearer ${token}`)
}
if (cookie) {
headers.set("Cookie", cookie)
}
const response = await fetch(`${baseUrl}${path}`, {
method,
headers,
body: body === undefined ? undefined : JSON.stringify(body),
})
const text = await response.text()
let payload = null
if (text) {
try {
payload = JSON.parse(text)
} catch {
// Non-JSON responses are surfaced as raw text.
payload = text
}
}
return {
response,
payload,
cookie: response.headers.get("set-cookie"),
}
}
async function main() {
if (!process.env.DAYTONA_API_KEY) {
fail("DAYTONA_API_KEY is required. Add it to .env.daytona or export it before running the test.")
}
const existingBaseUrl = process.env.DEN_BASE_URL?.trim() || process.env.DEN_API_URL?.trim() || ""
const mysqlPort = existingBaseUrl ? null : await getFreePort()
const appPort = existingBaseUrl ? null : await getFreePort()
const containerName = existingBaseUrl
? null
: `openwork-den-daytona-${randomUUID().slice(0, 8)}`
const dbName = "openwork_den_daytona_e2e"
const dbPassword = "openwork-root"
const baseUrl = existingBaseUrl || `http://127.0.0.1:${appPort}`
const databaseUrl = mysqlPort
? `mysql://root:${dbPassword}@127.0.0.1:${mysqlPort}/${dbName}`
: null
const runtimeEnv = {
...process.env,
...(databaseUrl ? { DATABASE_URL: databaseUrl } : {}),
BETTER_AUTH_SECRET: "openwork-den-daytona-secret-0000000000",
BETTER_AUTH_URL: baseUrl,
...(appPort ? { PORT: String(appPort) } : {}),
CORS_ORIGINS: baseUrl,
PROVISIONER_MODE: "daytona",
POLAR_FEATURE_GATE_ENABLED: "false",
OPENWORK_DAYTONA_ENV_PATH: daytonaEnvPath || process.env.OPENWORK_DAYTONA_ENV_PATH || "",
}
const daytona = new Daytona({
apiKey: runtimeEnv.DAYTONA_API_KEY,
apiUrl: runtimeEnv.DAYTONA_API_URL,
...(runtimeEnv.DAYTONA_TARGET ? { target: runtimeEnv.DAYTONA_TARGET } : {}),
})
let serviceProcess = null
let workerId = null
const cleanup = async () => {
if (workerId) {
try {
await forceDeleteDaytonaResources(daytona, workerId)
} catch {
// cleanup best effort only
}
}
if (serviceProcess && !serviceProcess.killed) {
serviceProcess.kill("SIGINT")
await once(serviceProcess, "exit").catch(() => {})
}
if (containerName) {
await runCommand("docker", ["rm", "-f", containerName], { cwd: serviceDir }).catch(() => {})
}
}
process.on("SIGINT", async () => {
await cleanup()
process.exit(130)
})
try {
if (containerName && mysqlPort && databaseUrl && appPort) {
log("Starting disposable MySQL container...")
await runCommand("docker", [
"run",
"-d",
"--rm",
"--name",
containerName,
"-e",
`MYSQL_ROOT_PASSWORD=${dbPassword}`,
"-e",
`MYSQL_DATABASE=${dbName}`,
"-p",
`${mysqlPort}:3306`,
"mysql:8.4",
])
log("Waiting for MySQL...")
await waitForMysqlConnection(databaseUrl)
log("Running Den migrations...")
await runCommand("pnpm", ["db:migrate"], { cwd: serviceDir, env: runtimeEnv })
log("Starting Den service with Daytona provisioner...")
serviceProcess = spawn("pnpm", ["exec", "tsx", "src/index.ts"], {
cwd: serviceDir,
env: runtimeEnv,
stdio: "pipe",
})
let serviceOutput = ""
serviceProcess.stdout?.on("data", (chunk) => {
serviceOutput += chunk.toString()
})
serviceProcess.stderr?.on("data", (chunk) => {
serviceOutput += chunk.toString()
})
serviceProcess.on("exit", (code) => {
if (code !== 0) {
console.error(serviceOutput)
}
})
} else {
log(`Using existing Den API at ${baseUrl}`)
}
await waitForHttp(`${baseUrl}/health`)
const email = `den-daytona-${Date.now()}@example.com`
const password = "TestPass123!"
log("Creating account...")
const signup = await requestJson(baseUrl, "/api/auth/sign-up/email", {
method: "POST",
body: {
name: "Den Daytona E2E",
email,
password,
},
})
if (!signup.response.ok) {
fail("Signup failed", signup.payload)
}
const token = extractAuthToken(signup.payload)
const cookie = signup.cookie
if (!token && !cookie) {
fail("Signup did not return a bearer token or session cookie", signup.payload)
}
const auth = { token, cookie }
log("Validating authenticated session...")
const me = await requestJson(baseUrl, "/v1/me", auth)
if (!me.response.ok) {
fail("Session lookup failed", me.payload)
}
log("Creating Daytona-backed cloud worker...")
const createWorker = await requestJson(baseUrl, "/v1/workers", {
method: "POST",
...auth,
body: {
name: "daytona-worker",
destination: "cloud",
},
})
if (createWorker.response.status !== 202) {
fail("Worker creation did not return async launch", {
status: createWorker.response.status,
payload: createWorker.payload,
})
}
workerId = createWorker.payload?.worker?.id || null
if (!workerId) {
fail("Worker response did not include an id", createWorker.payload)
}
log("Waiting for worker provisioning to finish...")
const workerPayload = await waitForWorkerReady(baseUrl, workerId, auth)
if (workerPayload.instance.provider !== "daytona") {
fail("Worker instance did not report the Daytona provider", workerPayload)
}
log("Checking worker health endpoint...")
await waitForHttp(`${workerPayload.instance.url.replace(/\/$/, "")}/health`, 120, 5000)
log("Checking OpenWork connect metadata...")
const tokensResponse = await requestJson(baseUrl, `/v1/workers/${workerId}/tokens`, {
method: "POST",
...auth,
})
if (!tokensResponse.response.ok || !tokensResponse.payload?.connect?.openworkUrl) {
fail("Worker tokens/connect payload missing", tokensResponse.payload)
}
const clientToken = tokensResponse.payload.tokens?.client
if (!clientToken) {
fail("Client token missing from worker token payload", tokensResponse.payload)
}
const connectHeaders = {
Accept: "application/json",
Authorization: `Bearer ${clientToken}`,
}
const statusResponse = await fetch(`${tokensResponse.payload.connect.openworkUrl}/status`, {
headers: connectHeaders,
})
if (!statusResponse.ok) {
fail("Connected worker /status failed", await statusResponse.text())
}
const capabilitiesResponse = await fetch(`${tokensResponse.payload.connect.openworkUrl}/capabilities`, {
headers: connectHeaders,
})
if (!capabilitiesResponse.ok) {
fail("Connected worker /capabilities failed", await capabilitiesResponse.text())
}
log("Verifying Daytona resources exist...")
const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20)
if (sandboxes.items.length === 0) {
fail("Expected a Daytona sandbox for the worker but none were found")
}
const volumes = await daytona.volume.list()
const expectedVolumeNames = [workspaceVolumeName(workerId), dataVolumeName(workerId)]
const missingVolumes = expectedVolumeNames.filter(
(name) => !volumes.some((volume) => volume.name === name),
)
if (missingVolumes.length > 0) {
fail("Expected Daytona volumes were not created", missingVolumes)
}
log("Deleting worker and waiting for Daytona cleanup...")
const deleteResponse = await requestJson(baseUrl, `/v1/workers/${workerId}`, {
method: "DELETE",
...auth,
})
if (deleteResponse.response.status !== 204) {
fail("Worker deletion failed", {
status: deleteResponse.response.status,
payload: deleteResponse.payload,
})
}
await waitForDaytonaCleanup(daytona, workerId)
workerId = null
log("Daytona worker flow passed.")
} finally {
await cleanup()
}
}
// Entry point: run the Daytona e2e flow and surface any uncaught error as a
// clean failure message with a non-zero exit.
main().catch((error) => {
  const message = error instanceof Error ? error.message : String(error)
  fail(message)
})

View File

@@ -0,0 +1,340 @@
import { randomUUID } from "node:crypto";
import { once } from "node:events";
import net from "node:net";
import { dirname, resolve } from "node:path";
import { fileURLToPath } from "node:url";
import { setTimeout as delay } from "node:timers/promises";
import { spawn } from "node:child_process";
import mysql from "mysql2/promise";
// Resolve this script's own directory so paths work regardless of the cwd.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Service package root (one level above this scripts directory).
const serviceDir = resolve(__dirname, "..");
function log(message) {
process.stdout.write(`${message}\n`);
}
/**
 * Report a fatal error and terminate the process with exit code 1.
 * @param {string} message Human-readable failure summary.
 * @param {unknown} [detail] Optional payload logged alongside the message.
 */
function fail(message, detail) {
  const parts = detail === undefined ? [message] : [message, detail];
  console.error(...parts);
  process.exit(1);
}
/**
 * Ask the OS for an unused TCP port on 127.0.0.1 by binding to port 0,
 * reading the assigned port, then releasing the listener.
 * @returns {Promise<number>} A port that was free at probe time.
 */
async function getFreePort() {
  return await new Promise((resolvePort, reject) => {
    const server = net.createServer();
    server.on("error", reject);
    server.listen(0, "127.0.0.1", () => {
      const address = server.address();
      if (!address || typeof address === "string") {
        reject(new Error("failed_to_resolve_free_port"));
        return;
      }
      const assignedPort = address.port;
      server.close((error) => {
        if (error) {
          reject(error);
        } else {
          resolvePort(assignedPort);
        }
      });
    });
  });
}
/**
 * Spawn a child process rooted at the service directory with piped stdio.
 * Any spawn option may be overridden via `options`.
 */
function spawnCommand(command, args, options = {}) {
  const spawnOptions = {
    cwd: serviceDir,
    env: process.env,
    stdio: "pipe",
    ...options,
  };
  return spawn(command, args, spawnOptions);
}
/**
 * Run a command to completion, buffering stdout and stderr.
 * Resolves with the captured output; rejects with a detailed error when the
 * process exits non-zero.
 */
async function runCommand(command, args, options = {}) {
  const child = spawnCommand(command, args, options);
  const output = { stdout: "", stderr: "" };
  child.stdout?.on("data", (chunk) => {
    output.stdout += chunk.toString();
  });
  child.stderr?.on("data", (chunk) => {
    output.stderr += chunk.toString();
  });
  const [code] = await once(child, "exit");
  if (code === 0) {
    return output;
  }
  throw new Error(`${command} ${args.join(" ")} failed\nSTDOUT:\n${output.stdout}\nSTDERR:\n${output.stderr}`);
}
/**
 * Poll MySQL until it accepts connections and answers a trivial query.
 *
 * @param {string} databaseUrl mysql:// connection string.
 * @param {number} [attempts=60] Maximum probe attempts, roughly 1s apart.
 * @throws {Error} "mysql_not_ready" when every attempt fails.
 */
async function waitForMysqlConnection(databaseUrl, attempts = 60) {
  for (let index = 0; index < attempts; index += 1) {
    let connection = null;
    try {
      connection = await mysql.createConnection(databaseUrl);
      await connection.query("SELECT 1");
      return;
    } catch {
      await delay(1000);
    } finally {
      // Always release the connection: the previous version leaked it when
      // createConnection succeeded but the probe query failed.
      if (connection) {
        await connection.end().catch(() => {});
      }
    }
  }
  throw new Error("mysql_not_ready");
}
/**
 * Poll an HTTP endpoint until it returns a 2xx response.
 *
 * @param {string} url Endpoint to probe.
 * @param {number} [attempts=60] Maximum attempts before giving up.
 * @param {number} [delayMs=500] Pause between attempts, in milliseconds.
 *   (Generalized from the hard-coded 500ms so slow targets can be polled
 *   less aggressively; default preserves the old behavior.)
 * @throws {Error} `http_not_ready:<url>` when retries are exhausted.
 */
async function waitForHttp(url, attempts = 60, delayMs = 500) {
  for (let index = 0; index < attempts; index += 1) {
    try {
      const response = await fetch(url);
      if (response.ok) {
        return;
      }
    } catch {
      // Connection errors are expected until the service is listening.
    }
    await delay(delayMs);
  }
  throw new Error(`http_not_ready:${url}`);
}
/**
 * Pull a bearer token out of a Better Auth response payload.
 * Accepts either a non-blank top-level `token` or a nested `session.token`.
 * @returns {string|null} The token, or null when none is present.
 */
function extractAuthToken(payload) {
  if (!payload || typeof payload !== "object") {
    return null;
  }
  const direct = payload.token;
  if (typeof direct === "string" && direct.trim()) {
    return direct;
  }
  const session = payload.session;
  if (session && typeof session === "object" && typeof session.token === "string") {
    return session.token;
  }
  return null;
}
/**
 * Issue a JSON request against the Den API and parse the response.
 *
 * Sends Origin/Referer headers so CSRF checks accept the request, and
 * attaches bearer-token and/or cookie auth when provided.
 *
 * @returns {Promise<{response: Response, payload: any, cookie: string|null}>}
 *   `payload` is parsed JSON when possible, raw text otherwise, or null for
 *   an empty body. `cookie` is the raw `set-cookie` header, if any.
 */
async function requestJson(baseUrl, path, { method = "GET", body, token, cookie } = {}) {
  const origin = new URL(baseUrl).origin;
  const headers = new Headers({
    Accept: "application/json",
    Origin: origin,
    Referer: `${origin}/`,
  });
  if (body !== undefined) {
    headers.set("Content-Type", "application/json");
  }
  if (token) {
    headers.set("Authorization", `Bearer ${token}`);
  }
  if (cookie) {
    headers.set("Cookie", cookie);
  }
  const response = await fetch(`${baseUrl}${path}`, {
    method,
    headers,
    body: body === undefined ? undefined : JSON.stringify(body),
  });
  const text = await response.text();
  let payload = null;
  if (text) {
    try {
      payload = JSON.parse(text);
    } catch {
      payload = text;
    }
  }
  // NOTE(review): set-cookie includes attributes (Path, HttpOnly, ...);
  // replaying it verbatim as a Cookie header works for these tests but is
  // not a general-purpose cookie jar.
  return { response, payload, cookie: response.headers.get("set-cookie") };
}
/**
 * End-to-end check for the dev cloud-worker flow:
 * boots a disposable MySQL container, runs migrations, starts the Den
 * service with the stub provisioner, signs up a user, verifies billing is
 * disabled, and confirms dev mode allows two cloud workers per account.
 */
async function main() {
  const mysqlPort = await getFreePort();
  const appPort = await getFreePort();
  // Random suffix so concurrent runs don't collide on the container name.
  const containerName = `openwork-den-e2e-${randomUUID().slice(0, 8)}`;
  const dbName = "openwork_den_e2e";
  const dbPassword = "openwork-root";
  const baseUrl = `http://127.0.0.1:${appPort}`;
  const databaseUrl = `mysql://root:${dbPassword}@127.0.0.1:${mysqlPort}/${dbName}`;
  // Environment for migrations and the service under test: dev mode on,
  // stub provisioner (no real cloud calls), Polar feature gate off.
  const env = {
    ...process.env,
    DATABASE_URL: databaseUrl,
    BETTER_AUTH_SECRET: "openwork-den-e2e-secret-000000000000",
    BETTER_AUTH_URL: baseUrl,
    PORT: String(appPort),
    OPENWORK_DEV_MODE: "1",
    CORS_ORIGINS: baseUrl,
    PROVISIONER_MODE: "stub",
    WORKER_URL_TEMPLATE: "https://workers.example.com/{workerId}",
    POLAR_FEATURE_GATE_ENABLED: "false",
  };
  let serviceProcess = null;
  // Best-effort teardown: stop the service, then force-remove the container.
  const cleanup = async () => {
    if (serviceProcess && !serviceProcess.killed) {
      serviceProcess.kill("SIGINT");
      await once(serviceProcess, "exit").catch(() => {});
    }
    await runCommand("docker", ["rm", "-f", containerName], { cwd: serviceDir }).catch(() => {});
  };
  // Ctrl-C should still tear everything down (130 = SIGINT convention).
  process.on("SIGINT", async () => {
    await cleanup();
    process.exit(130);
  });
  try {
    log("Starting disposable MySQL container...");
    // --rm makes the container self-delete once stopped.
    await runCommand("docker", [
      "run",
      "-d",
      "--rm",
      "--name",
      containerName,
      "-e",
      `MYSQL_ROOT_PASSWORD=${dbPassword}`,
      "-e",
      `MYSQL_DATABASE=${dbName}`,
      "-p",
      `${mysqlPort}:3306`,
      "mysql:8.4",
    ]);
    log("Waiting for MySQL...");
    await waitForMysqlConnection(databaseUrl);
    log("Running Den migrations...");
    await runCommand("pnpm", ["db:migrate"], { cwd: serviceDir, env });
    log("Starting Den service...");
    serviceProcess = spawn("pnpm", ["exec", "tsx", "src/index.ts"], {
      cwd: serviceDir,
      env,
      stdio: "pipe",
    });
    // Buffer combined service output; only dump it if the service dies.
    let serviceOutput = "";
    serviceProcess.stdout?.on("data", (chunk) => {
      serviceOutput += chunk.toString();
    });
    serviceProcess.stderr?.on("data", (chunk) => {
      serviceOutput += chunk.toString();
    });
    serviceProcess.on("exit", (code) => {
      if (code !== 0) {
        console.error(serviceOutput);
      }
    });
    await waitForHttp(`${baseUrl}/health`);
    // Unique email per run so reruns never hit duplicate-account errors.
    const email = `den-e2e-${Date.now()}@example.com`;
    const password = "TestPass123!";
    log("Creating account...");
    const signup = await requestJson(baseUrl, "/api/auth/sign-up/email", {
      method: "POST",
      body: {
        name: "Den E2E",
        email,
        password,
      },
    });
    if (!signup.response.ok) {
      fail("Signup failed", signup.payload);
    }
    // Either a bearer token or a session cookie is acceptable for auth.
    const token = extractAuthToken(signup.payload);
    const cookie = signup.cookie;
    if (!token && !cookie) {
      fail("Signup did not return a bearer token or session cookie", signup.payload);
    }
    log("Validating authenticated session...");
    const me = await requestJson(baseUrl, "/v1/me", { token, cookie });
    if (!me.response.ok) {
      fail("Session lookup failed", me.payload);
    }
    log("Checking billing summary is disabled...");
    const billing = await requestJson(baseUrl, "/v1/workers/billing", { token, cookie });
    if (!billing.response.ok) {
      fail("Billing summary request failed", billing.payload);
    }
    // With the feature gate off, no checkout should ever be required.
    if (
      !billing.payload?.billing ||
      billing.payload.billing.featureGateEnabled !== false ||
      billing.payload.billing.checkoutRequired !== false ||
      billing.payload.billing.checkoutUrl !== null
    ) {
      fail("Billing summary should be disabled for the experiment", billing.payload);
    }
    log("Creating first cloud worker...");
    // 202 = worker accepted for async provisioning.
    const firstWorker = await requestJson(baseUrl, "/v1/workers", {
      method: "POST",
      token,
      cookie,
      body: {
        name: "first-worker",
        destination: "cloud",
      },
    });
    if (firstWorker.response.status !== 202) {
      fail("First worker did not launch successfully", {
        status: firstWorker.response.status,
        payload: firstWorker.payload,
      });
    }
    log("Attempting second cloud worker...");
    // Dev mode must not enforce the single-worker limit.
    const secondWorker = await requestJson(baseUrl, "/v1/workers", {
      method: "POST",
      token,
      cookie,
      body: {
        name: "second-worker",
        destination: "cloud",
      },
    });
    if (secondWorker.response.status !== 202) {
      fail("Second worker should be allowed in dev mode", {
        status: secondWorker.response.status,
        payload: secondWorker.payload,
      });
    }
    if (!secondWorker.payload?.worker?.id) {
      fail("Second worker did not return a worker payload", secondWorker.payload);
    }
    log("Listing workers...");
    const workers = await requestJson(baseUrl, "/v1/workers?limit=20", { token, cookie });
    if (!workers.response.ok) {
      fail("Worker list request failed", workers.payload);
    }
    const items = Array.isArray(workers.payload?.workers) ? workers.payload.workers : null;
    if (!items || items.length !== 2) {
      fail("Expected two cloud workers in dev mode", workers.payload);
    }
    log("E2E dev worker limit check passed.");
  } finally {
    await cleanup();
  }
}
// Top-level entry: run the e2e flow; convert any uncaught error into a
// clean failure message and non-zero exit.
try {
  await main();
} catch (error) {
  fail(error instanceof Error ? error.message : String(error));
}

View File

@@ -0,0 +1,87 @@
import { readdir, readFile } from "node:fs/promises"
import path from "node:path"
import { fileURLToPath } from "node:url"
import mysql from "mysql2/promise"
// Directory of this script; migration SQL files live in the sibling
// `drizzle` folder of the package.
const __dirname = path.dirname(fileURLToPath(import.meta.url))
const drizzleDir = path.resolve(__dirname, "..", "drizzle")
// Split a Drizzle migration file into individual executable statements.
// Drizzle separates statements with `--> statement-breakpoint` markers;
// blank fragments are dropped.
function splitStatements(sql) {
  const parts = sql.split(/--> statement-breakpoint/g)
  return parts.map((part) => part.trim()).filter(Boolean)
}
async function ensureMigrationsTable(connection) {
await connection.query(`
CREATE TABLE IF NOT EXISTS __den_migrations (
id varchar(255) NOT NULL PRIMARY KEY,
applied_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`)
}
// Return the set of migration file names already recorded as applied.
async function appliedMigrations(connection) {
  const [rows] = await connection.query("SELECT id FROM __den_migrations")
  const ids = rows.map((row) => row.id)
  return new Set(ids)
}
// Build a mysql2 connection config from the environment.
// Precedence: a non-blank DATABASE_URL wins; otherwise DATABASE_HOST and
// DATABASE_USERNAME (with optional DATABASE_PASSWORD) are combined into a
// config object with TLS certificate verification enabled.
function connectionConfigFromEnv() {
  const databaseUrl = process.env.DATABASE_URL?.trim()
  if (databaseUrl) {
    return databaseUrl
  }
  const host = process.env.DATABASE_HOST?.trim()
  const user = process.env.DATABASE_USERNAME?.trim()
  if (!host || !user) {
    throw new Error("DATABASE_URL or DATABASE_HOST/DATABASE_USERNAME/DATABASE_PASSWORD is required")
  }
  const password = process.env.DATABASE_PASSWORD ?? ""
  return {
    host,
    user,
    password,
    ssl: { rejectUnauthorized: true },
  }
}
// Apply all pending SQL migrations from the drizzle directory, in
// lexicographic file order, recording each applied file in
// __den_migrations so reruns are idempotent.
//
// NOTE(review): statements within one file are not wrapped in a
// transaction (MySQL DDL is largely non-transactional anyway), so a
// mid-file failure can leave a migration partially applied and
// unrecorded — a rerun would replay its earlier statements.
async function run() {
  const connection = await mysql.createConnection(connectionConfigFromEnv())
  try {
    await ensureMigrationsTable(connection)
    const completed = await appliedMigrations(connection)
    // Only .sql files count; sorting keeps numbered migrations in order.
    const files = (await readdir(drizzleDir))
      .filter((file) => file.endsWith(".sql"))
      .sort((left, right) => left.localeCompare(right))
    for (const file of files) {
      if (completed.has(file)) {
        continue
      }
      const sql = await readFile(path.join(drizzleDir, file), "utf8")
      const statements = splitStatements(sql)
      for (const statement of statements) {
        await connection.query(statement)
      }
      // Record success only after every statement in the file ran.
      await connection.query("INSERT INTO __den_migrations (id) VALUES (?)", [file])
      process.stdout.write(`[den] Applied migration ${file}\n`)
    }
  } finally {
    await connection.end()
  }
}
// Entry point: run migrations and exit non-zero on any failure, preferring
// the stack trace when one is available.
run().catch((error) => {
  const detail = error instanceof Error ? error.stack ?? error.message : String(error)
  console.error(detail)
  process.exit(1)
})

View File

@@ -0,0 +1,53 @@
import { sql } from "./db/drizzle.js"
import { db } from "./db/index.js"
import { AdminAllowlistTable } from "./db/schema.js"
import { createDenTypeId } from "./db/typeid.js"
// Internal admin accounts that must always exist in the allowlist table.
// Entries are upserted at startup by ensureAdminAllowlistSeeded.
const ADMIN_ALLOWLIST_SEEDS = [
  {
    email: "ben@openworklabs.com",
    note: "Seeded internal admin",
  },
  {
    email: "jan@openworklabs.com",
    note: "Seeded internal admin",
  },
  {
    email: "omar@openworklabs.com",
    note: "Seeded internal admin",
  },
  {
    email: "berk@openworklabs.com",
    note: "Seeded internal admin",
  },
] as const
// Memoized seeding promise; null until the first attempt (and reset to
// null again if seeding fails, so it can be retried).
let ensureAdminAllowlistSeededPromise: Promise<void> | null = null
// Upsert every seed entry into the admin allowlist table, sequentially.
// On a duplicate email the note and updated_at columns are refreshed
// instead of inserting a new row.
async function seedAdminAllowlist() {
  for (const entry of ADMIN_ALLOWLIST_SEEDS) {
    await db
      .insert(AdminAllowlistTable)
      .values({
        id: createDenTypeId("adminAllowlist"),
        ...entry,
      })
      .onDuplicateKeyUpdate({
        set: {
          note: entry.note,
          // Millisecond precision — assumes updated_at is timestamp(3);
          // TODO confirm against the schema definition.
          updated_at: sql`CURRENT_TIMESTAMP(3)`,
        },
      })
  }
}
/**
 * Seed the admin allowlist exactly once per process.
 *
 * Concurrent callers share a single in-flight promise; if seeding fails the
 * memoized promise is cleared so a later call can retry, and the error is
 * rethrown to the caller.
 */
export async function ensureAdminAllowlistSeeded() {
  if (ensureAdminAllowlistSeededPromise === null) {
    const pending = seedAdminAllowlist().catch((error) => {
      ensureAdminAllowlistSeededPromise = null
      throw error
    })
    ensureAdminAllowlistSeededPromise = pending
  }
  await ensureAdminAllowlistSeededPromise
}

View File

@@ -0,0 +1,68 @@
import { betterAuth } from "better-auth"
import { drizzleAdapter } from "better-auth/adapters/drizzle"
import { db } from "./db/index.js"
import * as schema from "./db/schema.js"
import { createDenTypeId, normalizeDenTypeId } from "./db/typeid.js"
import { env } from "./env.js"
import { ensureDefaultOrg } from "./orgs.js"
// OAuth providers for Better Auth, each included only when both halves of
// its credential pair are configured — a half-configured provider is
// omitted entirely rather than registered broken.
const socialProviders = {
  ...(env.github.clientId && env.github.clientSecret
    ? {
        github: {
          clientId: env.github.clientId,
          clientSecret: env.github.clientSecret,
        },
      }
    : {}),
  ...(env.google.clientId && env.google.clientSecret
    ? {
        google: {
          clientId: env.google.clientId,
          clientSecret: env.google.clientSecret,
        },
      }
    : {}),
}
// Better Auth instance backed by the shared Drizzle MySQL connection.
export const auth = betterAuth({
  baseURL: env.betterAuthUrl,
  secret: env.betterAuthSecret,
  // Pass undefined (not an empty array) when unset so Better Auth falls
  // back to its own defaults.
  trustedOrigins: env.betterAuthTrustedOrigins.length > 0 ? env.betterAuthTrustedOrigins : undefined,
  socialProviders: Object.keys(socialProviders).length > 0 ? socialProviders : undefined,
  database: drizzleAdapter(db, {
    provider: "mysql",
    schema,
  }),
  advanced: {
    database: {
      // Generate Den TypeID-style ids for the core auth models so they
      // match the rest of the schema. Returning false for other models
      // appears to defer id generation to the default behavior — TODO
      // confirm against Better Auth's generateId contract.
      generateId: (options) => {
        switch (options.model) {
          case "user":
            return createDenTypeId("user")
          case "session":
            return createDenTypeId("session")
          case "account":
            return createDenTypeId("account")
          case "verification":
            return createDenTypeId("verification")
          default:
            return false
        }
      },
    },
  },
  emailAndPassword: {
    enabled: true,
  },
  databaseHooks: {
    user: {
      create: {
        // Every newly created user gets a default org named after them
        // (falling back to email, then "Personal").
        after: async (user) => {
          const name = user.name ?? user.email ?? "Personal"
          await ensureDefaultOrg(normalizeDenTypeId("user", user.id), name)
        },
      },
    },
  },
})

View File

@@ -0,0 +1,822 @@
import { env } from "../env.js"
// ---------------------------------------------------------------------------
// Raw Polar API response shapes (snake_case). All fields are optional
// because payloads are parsed without schema validation; consumers must
// typeof-check every field they read.
// ---------------------------------------------------------------------------
type PolarCustomerState = {
  granted_benefits?: Array<{
    benefit_id?: string
  }>
}
type PolarCheckoutSession = {
  url?: string
}
type PolarCustomerSession = {
  customer_portal_url?: string
}
type PolarCustomer = {
  id?: string
  email?: string
  external_id?: string | null
}
// Generic paginated-list envelope used by Polar list endpoints.
type PolarListResource<T> = {
  items?: T[]
}
type PolarSubscription = {
  id?: string
  status?: string
  amount?: number
  currency?: string
  recurring_interval?: string | null
  recurring_interval_count?: number | null
  current_period_start?: string | null
  current_period_end?: string | null
  cancel_at_period_end?: boolean
  canceled_at?: string | null
  ended_at?: string | null
}
type PolarOrder = {
  id?: string
  created_at?: string
  status?: string
  total_amount?: number
  net_amount?: number
  currency?: string
  invoice_number?: string
  is_invoice_generated?: boolean
}
type PolarOrderInvoice = {
  url?: string
}
type PolarProductPrice = {
  amount_type?: string
  price_currency?: string
  price_amount?: number
  minimum_amount?: number
  preset_amount?: number | null
  is_archived?: boolean
  seat_tiers?: {
    tiers?: Array<{
      price_per_seat?: number
    }>
  }
}
type PolarProduct = {
  recurring_interval?: string | null
  recurring_interval_count?: number | null
  prices?: PolarProductPrice[]
}
// ---------------------------------------------------------------------------
// API-facing billing shapes exposed by Den (camelCase).
// ---------------------------------------------------------------------------
// Access verdict: either allowed outright, or denied with a checkout URL
// the user can be sent to.
export type CloudWorkerAccess =
  | {
      allowed: true
    }
  | {
      allowed: false
      checkoutUrl: string
    }
export type CloudWorkerBillingPrice = {
  amount: number | null
  currency: string | null
  recurringInterval: string | null
  recurringIntervalCount: number | null
}
export type CloudWorkerBillingSubscription = {
  id: string
  status: string
  amount: number | null
  currency: string | null
  recurringInterval: string | null
  recurringIntervalCount: number | null
  currentPeriodStart: string | null
  currentPeriodEnd: string | null
  cancelAtPeriodEnd: boolean
  canceledAt: string | null
  endedAt: string | null
}
export type CloudWorkerBillingInvoice = {
  id: string
  createdAt: string | null
  status: string
  totalAmount: number | null
  currency: string | null
  invoiceNumber: string | null
  invoiceUrl: string | null
}
export type CloudWorkerBillingStatus = {
  featureGateEnabled: boolean
  hasActivePlan: boolean
  checkoutRequired: boolean
  checkoutUrl: string | null
  portalUrl: string | null
  price: CloudWorkerBillingPrice | null
  subscription: CloudWorkerBillingSubscription | null
  invoices: CloudWorkerBillingInvoice[]
}
// Admin-facing summary of a user's payment state and how it was derived.
export type CloudWorkerAdminBillingStatus = {
  status: "paid" | "unpaid" | "unavailable"
  featureGateEnabled: boolean
  subscriptionId: string | null
  subscriptionStatus: string | null
  currentPeriodEnd: string | null
  source: "benefit" | "subscription" | "unavailable"
  note: string | null
}
// Identity of the user whose access is being evaluated (Den user id is
// used as Polar's external customer id).
type CloudAccessInput = {
  userId: string
  email: string
  name: string
}
// Toggles controlling which optional (and costly) lookups billing-status
// calls perform.
type BillingStatusOptions = {
  includeCheckoutUrl?: boolean
  includePortalUrl?: boolean
  includeInvoices?: boolean
}
// Strip all trailing slashes from the API base so concatenating a path
// never produces a double "/".
function sanitizeApiBase(value: string) {
  let end = value.length
  while (end > 0 && value[end - 1] === "/") {
    end -= 1
  }
  return value.slice(0, end)
}
/**
 * Parse a response body as JSON.
 *
 * @param text Raw response text; may be empty.
 * @returns The parsed value, or null when the body is empty or not valid
 *   JSON. (Polar error responses are not guaranteed to be JSON — e.g. an
 *   HTML error page — and a parse exception here would mask the real HTTP
 *   error callers want to report.)
 */
function parseJson<T>(text: string): T | null {
  if (!text) {
    return null
  }
  try {
    return JSON.parse(text) as T
  } catch {
    return null
  }
}
// Narrow an unknown value to a non-null object (arrays included).
function isRecord(value: unknown): value is Record<string, unknown> {
  return value !== null && typeof value === "object"
}
// Perform an authenticated request against the Polar API, layering the
// bearer token and JSON headers on top of whatever the caller supplied.
async function polarFetch(path: string, init: RequestInit = {}) {
  const headers = new Headers(init.headers)
  headers.set("Authorization", `Bearer ${env.polar.accessToken}`)
  headers.set("Accept", "application/json")
  // Only tag a content type when a body exists and the caller has not
  // already chosen one.
  if (init.body && !headers.has("Content-Type")) {
    headers.set("Content-Type", "application/json")
  }
  const url = `${sanitizeApiBase(env.polar.apiBase)}${path}`
  return fetch(url, { ...init, headers })
}
// polarFetch plus body handling: returns the raw response, the raw body
// text, and the best-effort JSON-parsed payload.
async function polarFetchJson<T>(path: string, init: RequestInit = {}) {
  const response = await polarFetch(path, init)
  const text = await response.text()
  return { response, text, payload: parseJson<T>(text) }
}
// Fail fast when the paywall is enabled but its Polar configuration is
// incomplete; each error names the missing environment variable.
function assertPaywallConfig() {
  const required: Array<[string, unknown]> = [
    ["POLAR_ACCESS_TOKEN", env.polar.accessToken],
    ["POLAR_PRODUCT_ID", env.polar.productId],
    ["POLAR_BENEFIT_ID", env.polar.benefitId],
    ["POLAR_SUCCESS_URL", env.polar.successUrl],
    ["POLAR_RETURN_URL", env.polar.returnUrl],
  ]
  for (const [name, value] of required) {
    if (!value) {
      throw new Error(`${name} is required when POLAR_FEATURE_GATE_ENABLED=true`)
    }
  }
}
// Look up a customer's benefit/subscription state by our external id (the
// Den user id). Returns null when Polar has no such customer.
async function getCustomerStateByExternalId(externalCustomerId: string): Promise<PolarCustomerState | null> {
  const path = `/v1/customers/external/${encodeURIComponent(externalCustomerId)}/state`
  const { response, payload, text } = await polarFetchJson<PolarCustomerState>(path, { method: "GET" })
  if (response.status === 404) {
    return null
  }
  if (!response.ok) {
    throw new Error(`Polar customer state lookup failed (${response.status}): ${text.slice(0, 400)}`)
  }
  return payload
}
// Same lookup as getCustomerStateByExternalId, but keyed by Polar's own
// customer id.
async function getCustomerStateById(customerId: string): Promise<PolarCustomerState | null> {
  const path = `/v1/customers/${encodeURIComponent(customerId)}/state`
  const { response, payload, text } = await polarFetchJson<PolarCustomerState>(path, { method: "GET" })
  if (response.status === 404) {
    return null
  }
  if (!response.ok) {
    throw new Error(`Polar customer state lookup by ID failed (${response.status}): ${text.slice(0, 400)}`)
  }
  return payload
}
// Find a Polar customer by email (case-insensitive, trimmed). Prefers an
// exact address match among the results, falling back to the first one.
async function getCustomerByEmail(email: string): Promise<PolarCustomer | null> {
  const normalizedEmail = email.trim().toLowerCase()
  if (!normalizedEmail) {
    return null
  }
  const path = `/v1/customers/?email=${encodeURIComponent(normalizedEmail)}`
  const { response, payload, text } = await polarFetchJson<PolarListResource<PolarCustomer>>(path, { method: "GET" })
  if (!response.ok) {
    throw new Error(`Polar customer lookup by email failed (${response.status}): ${text.slice(0, 400)}`)
  }
  const customers = payload?.items ?? []
  const exact = customers.find((candidate) => candidate.email?.trim().toLowerCase() === normalizedEmail)
  return exact ?? customers[0] ?? null
}
// Attach our external id to a Polar customer that does not yet have one.
// Best-effort: the PATCH response is intentionally not checked.
async function linkCustomerExternalId(customer: PolarCustomer, externalCustomerId: string): Promise<void> {
  if (!customer.id) {
    return
  }
  const alreadyLinked = typeof customer.external_id === "string" && customer.external_id.length > 0
  if (alreadyLinked) {
    return
  }
  await polarFetch(`/v1/customers/${encodeURIComponent(customer.id)}`, {
    method: "PATCH",
    body: JSON.stringify({ external_id: externalCustomerId }),
  })
}
// True when the customer state contains a grant for the configured benefit.
function hasRequiredBenefit(state: PolarCustomerState | null) {
  const grants = state?.granted_benefits
  if (!grants || !env.polar.benefitId) {
    return false
  }
  return grants.some((grant) => grant.benefit_id === env.polar.benefitId)
}
// Create a Polar checkout session for the configured product, tagged with
// the Den user id so the purchase links back to the account. Returns the
// hosted checkout URL.
async function createCheckoutSession(input: CloudAccessInput): Promise<string> {
  const body = JSON.stringify({
    products: [env.polar.productId],
    success_url: env.polar.successUrl,
    return_url: env.polar.returnUrl,
    external_customer_id: input.userId,
    customer_email: input.email,
    customer_name: input.name,
  })
  const { response, payload: checkout, text } = await polarFetchJson<PolarCheckoutSession>("/v1/checkouts/", {
    method: "POST",
    body,
  })
  if (!response.ok) {
    throw new Error(`Polar checkout creation failed (${response.status}): ${text.slice(0, 400)}`)
  }
  if (!checkout?.url) {
    throw new Error("Polar checkout response missing URL")
  }
  return checkout.url
}
// Internal result of the access evaluation: whether the gate is on,
// whether the user already holds the plan, and (optionally) a checkout
// URL for purchasing it.
type CloudWorkerAccessEvaluation = {
  featureGateEnabled: boolean
  hasActivePlan: boolean
  checkoutUrl: string | null
}
/**
 * Decide whether a user may use cloud workers.
 *
 * Resolution order matters:
 *   1. Gate disabled -> everyone is allowed.
 *   2. Customer found via our external id holds the benefit -> allowed.
 *   3. Fallback: a customer matched by email holds the benefit -> allowed,
 *      and our external id is opportunistically linked to that customer
 *      (failures ignored).
 *   4. Otherwise not allowed; a checkout URL is minted only on request.
 */
async function evaluateCloudWorkerAccess(
  input: CloudAccessInput,
  options: { includeCheckoutUrl?: boolean } = {},
): Promise<CloudWorkerAccessEvaluation> {
  if (!env.polar.featureGateEnabled) {
    return { featureGateEnabled: false, hasActivePlan: true, checkoutUrl: null }
  }
  assertPaywallConfig()
  const externalState = await getCustomerStateByExternalId(input.userId)
  if (hasRequiredBenefit(externalState)) {
    return { featureGateEnabled: true, hasActivePlan: true, checkoutUrl: null }
  }
  const customer = await getCustomerByEmail(input.email)
  if (customer?.id) {
    const emailState = await getCustomerStateById(customer.id)
    if (hasRequiredBenefit(emailState)) {
      await linkCustomerExternalId(customer, input.userId).catch(() => undefined)
      return { featureGateEnabled: true, hasActivePlan: true, checkoutUrl: null }
    }
  }
  const checkoutUrl = options.includeCheckoutUrl ? await createCheckoutSession(input) : null
  return { featureGateEnabled: true, hasActivePlan: false, checkoutUrl }
}
// Treat missing/blank interval strings as "no interval".
function normalizeRecurringInterval(value: string | null | undefined): string | null {
  if (typeof value !== "string") {
    return null
  }
  return value.trim().length > 0 ? value : null
}
// Accept only real, finite numbers for interval counts.
function normalizeRecurringIntervalCount(value: number | null | undefined): number | null {
  if (typeof value !== "number" || !Number.isFinite(value)) {
    return null
  }
  return value
}
// A subscription counts as active while "active" or "trialing"
// (case-insensitive, surrounding whitespace ignored).
function isActiveSubscriptionStatus(status: string | null | undefined) {
  if (typeof status !== "string") {
    return false
  }
  const normalized = status.trim().toLowerCase()
  return normalized === "active" || normalized === "trialing"
}
// Map a raw Polar subscription onto the API-facing billing shape,
// normalizing malformed fields to null/"unknown" instead of leaking
// undefined values to clients.
function toBillingSubscription(subscription: PolarSubscription | null): CloudWorkerBillingSubscription | null {
  if (!subscription?.id) {
    return null
  }
  const str = (value: unknown): string | null => (typeof value === "string" ? value : null)
  return {
    id: subscription.id,
    status: str(subscription.status) ?? "unknown",
    amount: typeof subscription.amount === "number" ? subscription.amount : null,
    currency: str(subscription.currency),
    recurringInterval: normalizeRecurringInterval(subscription.recurring_interval),
    recurringIntervalCount: normalizeRecurringIntervalCount(subscription.recurring_interval_count),
    currentPeriodStart: str(subscription.current_period_start),
    currentPeriodEnd: str(subscription.current_period_end),
    cancelAtPeriodEnd: subscription.cancel_at_period_end === true,
    canceledAt: str(subscription.canceled_at),
    endedAt: str(subscription.ended_at),
  }
}
// Derive the display price from a normalized subscription, if present.
function toBillingPriceFromSubscription(subscription: CloudWorkerBillingSubscription | null): CloudWorkerBillingPrice | null {
  if (!subscription) {
    return null
  }
  const { amount, currency, recurringInterval, recurringIntervalCount } = subscription
  return { amount, currency, recurringInterval, recurringIntervalCount }
}
// Fetch a single subscription by id; null when it no longer exists.
async function getSubscriptionById(subscriptionId: string): Promise<PolarSubscription | null> {
  const path = `/v1/subscriptions/${encodeURIComponent(subscriptionId)}`
  const { response, payload, text } = await polarFetchJson<PolarSubscription>(path, { method: "GET" })
  if (response.status === 404) {
    return null
  }
  if (!response.ok) {
    throw new Error(`Polar subscription lookup failed (${response.status}): ${text.slice(0, 400)}`)
  }
  return payload
}
// List a customer's subscriptions (keyed by our external id), optionally
// only active ones, scoped to the configured product when one is set.
//
// The query asks for newest-first ordering via `sorting=-started_at`; the
// API may reject that parameter (observed here as a 422), in which case
// the lookup is retried once without sorting before treating the response
// as an error.
async function listSubscriptionsByExternalCustomer(
  externalCustomerId: string,
  options: { activeOnly?: boolean; limit?: number } = {},
): Promise<PolarSubscription[]> {
  const params = new URLSearchParams()
  params.set("external_customer_id", externalCustomerId)
  if (env.polar.productId) {
    params.set("product_id", env.polar.productId)
  }
  params.set("limit", String(options.limit ?? 1))
  params.set("sorting", "-started_at")
  if (options.activeOnly === true) {
    params.set("active", "true")
  }
  const lookup = await polarFetchJson<PolarListResource<PolarSubscription>>(`/v1/subscriptions/?${params.toString()}`, {
    method: "GET",
  })
  let response = lookup.response
  let payload = lookup.payload
  let text = lookup.text
  if (response.status === 422 && params.has("sorting")) {
    // Retry without the sorting parameter (see note above).
    params.delete("sorting")
    const fallbackLookup = await polarFetchJson<PolarListResource<PolarSubscription>>(`/v1/subscriptions/?${params.toString()}`, {
      method: "GET",
    })
    response = fallbackLookup.response
    payload = fallbackLookup.payload
    text = fallbackLookup.text
  }
  if (!response.ok) {
    throw new Error(`Polar subscriptions lookup failed (${response.status}): ${text.slice(0, 400)}`)
  }
  return payload?.items ?? []
}
// Prefer the newest active subscription; otherwise fall back to the most
// recent subscription of any status.
async function getPrimarySubscriptionForCustomer(externalCustomerId: string): Promise<PolarSubscription | null> {
  const [active] = await listSubscriptionsByExternalCustomer(externalCustomerId, { activeOnly: true, limit: 1 })
  if (active) {
    return active
  }
  const [recent] = await listSubscriptionsByExternalCustomer(externalCustomerId, { activeOnly: false, limit: 1 })
  return recent ?? null
}
// List the customer's most recent orders (newest first), scoped to the
// configured product when one is set.
async function listRecentOrdersByExternalCustomer(externalCustomerId: string, limit = 6): Promise<PolarOrder[]> {
  const params = new URLSearchParams()
  params.set("external_customer_id", externalCustomerId)
  if (env.polar.productId) {
    params.set("product_id", env.polar.productId)
  }
  params.set("limit", String(limit))
  params.set("sorting", "-created_at")
  const path = `/v1/orders/?${params.toString()}`
  const { response, payload, text } = await polarFetchJson<PolarListResource<PolarOrder>>(path, { method: "GET" })
  if (!response.ok) {
    throw new Error(`Polar orders lookup failed (${response.status}): ${text.slice(0, 400)}`)
  }
  return payload?.items ?? []
}
// Fetch the hosted invoice URL for an order; null when no invoice exists.
async function getOrderInvoiceUrl(orderId: string): Promise<string | null> {
  const path = `/v1/orders/${encodeURIComponent(orderId)}/invoice`
  const { response, payload, text } = await polarFetchJson<PolarOrderInvoice>(path, { method: "GET" })
  if (response.status === 404) {
    return null
  }
  if (!response.ok) {
    throw new Error(`Polar invoice lookup failed (${response.status}): ${text.slice(0, 400)}`)
  }
  return typeof payload?.url === "string" ? payload.url : null
}
// Convert a raw order into the invoice shape exposed by the billing API.
// Prefers total_amount, falling back to net_amount when absent.
function toBillingInvoice(order: PolarOrder, invoiceUrl: string | null): CloudWorkerBillingInvoice | null {
  if (!order.id) {
    return null
  }
  let totalAmount: number | null = null
  if (typeof order.total_amount === "number") {
    totalAmount = order.total_amount
  } else if (typeof order.net_amount === "number") {
    totalAmount = order.net_amount
  }
  return {
    id: order.id,
    createdAt: typeof order.created_at === "string" ? order.created_at : null,
    status: typeof order.status === "string" ? order.status : "unknown",
    totalAmount,
    currency: typeof order.currency === "string" ? order.currency : null,
    invoiceNumber: typeof order.invoice_number === "string" ? order.invoice_number : null,
    invoiceUrl,
  }
}
// Load recent orders and resolve their invoice URLs in parallel; orders
// whose invoice lookup fails still appear, just without a URL.
async function listBillingInvoices(externalCustomerId: string, limit = 6): Promise<CloudWorkerBillingInvoice[]> {
  const orders = await listRecentOrdersByExternalCustomer(externalCustomerId, limit)
  const resolved = await Promise.all(
    orders.map(async (order) => {
      let invoiceUrl: string | null = null
      if (order.id && order.is_invoice_generated === true) {
        invoiceUrl = await getOrderInvoiceUrl(order.id).catch(() => null)
      }
      return toBillingInvoice(order, invoiceUrl)
    }),
  )
  return resolved.filter((invoice): invoice is CloudWorkerBillingInvoice => invoice !== null)
}
// Create a customer-portal session and return its URL. A 404/422 means
// the customer does not (yet) exist in Polar and is treated as "no portal".
async function createCustomerPortalUrl(externalCustomerId: string): Promise<string | null> {
  const { response, payload, text } = await polarFetchJson<PolarCustomerSession>("/v1/customer-sessions/", {
    method: "POST",
    body: JSON.stringify({
      external_customer_id: externalCustomerId,
      return_url: env.polar.returnUrl ?? env.polar.successUrl ?? null,
    }),
  })
  if (response.status === 404 || response.status === 422) {
    return null
  }
  if (!response.ok) {
    throw new Error(`Polar customer portal session failed (${response.status}): ${text.slice(0, 400)}`)
  }
  return typeof payload?.customer_portal_url === "string" ? payload.customer_portal_url : null
}
// Pull a representative amount out of a product price, depending on its
// pricing model: fixed amount, first seat tier, custom preset/minimum, or
// zero for free prices. Returns null when no amount can be derived.
function extractAmountFromProductPrice(price: PolarProductPrice): number | null {
  switch (price.amount_type) {
    case "fixed":
      return typeof price.price_amount === "number" ? price.price_amount : null
    case "seat_based": {
      const tiers = price.seat_tiers?.tiers
      const firstTier = Array.isArray(tiers) ? tiers[0] : null
      return firstTier && typeof firstTier.price_per_seat === "number" ? firstTier.price_per_seat : null
    }
    case "custom":
      if (typeof price.preset_amount === "number") {
        return price.preset_amount
      }
      return typeof price.minimum_amount === "number" ? price.minimum_amount : null
    case "free":
      return 0
    default:
      return null
  }
}
/**
 * Derive the billing price to display from a Polar product, using the first
 * non-archived price that exposes a usable amount. Returns null otherwise.
 */
function extractBillingPriceFromProduct(product: PolarProduct | null): CloudWorkerBillingPrice | null {
  if (!product || !Array.isArray(product.prices)) {
    return null
  }
  for (const candidate of product.prices) {
    // Skip archived entries and anything that is not an object record.
    if (!isRecord(candidate) || candidate.is_archived === true) {
      continue
    }
    const amount = extractAmountFromProductPrice(candidate as PolarProductPrice)
    if (amount === null) {
      continue
    }
    const currency = typeof candidate.price_currency === "string" ? candidate.price_currency : null
    return {
      amount,
      currency,
      recurringInterval: normalizeRecurringInterval(product.recurring_interval),
      recurringIntervalCount: normalizeRecurringIntervalCount(product.recurring_interval_count),
    }
  }
  return null
}
/**
 * Look up a Polar product by id and extract its display price.
 * Returns null when the product does not exist; throws on other failures.
 */
async function getProductBillingPrice(productId: string): Promise<CloudWorkerBillingPrice | null> {
  const result = await polarFetchJson<PolarProduct>(`/v1/products/${encodeURIComponent(productId)}`, {
    method: "GET",
  })
  if (result.response.status === 404) {
    return null
  }
  if (!result.response.ok) {
    throw new Error(`Polar product lookup failed (${result.response.status}): ${result.text.slice(0, 400)}`)
  }
  return extractBillingPriceFromProduct(result.payload)
}
/**
 * Gate cloud-worker usage on billing: allowed when the user has an active
 * plan, otherwise not allowed plus the checkout URL to redirect to.
 * @throws when no checkout URL could be produced for an unpaid user.
 */
export async function requireCloudWorkerAccess(input: CloudAccessInput): Promise<CloudWorkerAccess> {
  const evaluation = await evaluateCloudWorkerAccess(input, { includeCheckoutUrl: true })
  if (evaluation.hasActivePlan) {
    return { allowed: true }
  }
  const checkoutUrl = evaluation.checkoutUrl
  if (!checkoutUrl) {
    throw new Error("Polar checkout URL unavailable")
  }
  return { allowed: false, checkoutUrl }
}
/**
 * Build the full billing status for a user: plan evaluation, current
 * subscription, product price, customer-portal URL, and recent invoices.
 *
 * Fix: the previous version declared four mutable `let` locals only to
 * overwrite them immediately with the Promise.all results; the results are
 * now bound once via const destructuring.
 *
 * @param input User identity used for all Polar lookups.
 * @param options Toggles for the optional portal-URL, invoice, and checkout
 *   lookups (portal URL and invoices default to included).
 */
export async function getCloudWorkerBillingStatus(
  input: CloudAccessInput,
  options: BillingStatusOptions = {},
): Promise<CloudWorkerBillingStatus> {
  const includePortalUrl = options.includePortalUrl !== false
  const includeInvoices = options.includeInvoices !== false
  const evaluation = await evaluateCloudWorkerAccess(input, {
    includeCheckoutUrl: options.includeCheckoutUrl,
  })
  if (!evaluation.featureGateEnabled) {
    // With the feature gate off, everyone is treated as having an active plan.
    return {
      featureGateEnabled: false,
      hasActivePlan: true,
      checkoutRequired: false,
      checkoutUrl: null,
      portalUrl: null,
      price: null,
      subscription: null,
      invoices: [],
    }
  }
  // Independent Polar lookups run in parallel; each degrades to an empty
  // result on failure so one flaky call does not break the whole status.
  const [subscriptionResult, productPrice, portalUrl, invoices] = await Promise.all([
    getPrimarySubscriptionForCustomer(input.userId).catch(() => null),
    env.polar.productId ? getProductBillingPrice(env.polar.productId).catch(() => null) : Promise.resolve<CloudWorkerBillingPrice | null>(null),
    includePortalUrl ? createCustomerPortalUrl(input.userId).catch(() => null) : Promise.resolve<string | null>(null),
    includeInvoices ? listBillingInvoices(input.userId).catch(() => []) : Promise.resolve<CloudWorkerBillingInvoice[]>([]),
  ])
  const subscription = toBillingSubscription(subscriptionResult)
  return {
    featureGateEnabled: evaluation.featureGateEnabled,
    hasActivePlan: evaluation.hasActivePlan,
    checkoutRequired: evaluation.featureGateEnabled && !evaluation.hasActivePlan,
    checkoutUrl: evaluation.checkoutUrl,
    portalUrl,
    // Prefer the product's configured price; fall back to what the
    // subscription itself reports.
    price: productPrice ?? toBillingPriceFromSubscription(subscription),
    subscription,
    invoices,
  }
}
/**
 * Admin-facing billing summary for a user: resolves paid status via the
 * configured Polar benefit and/or subscription, and reports which source
 * established it. Never throws; failures surface as status "unavailable".
 */
export async function getCloudWorkerAdminBillingStatus(
  input: CloudAccessInput,
): Promise<CloudWorkerAdminBillingStatus> {
  // Without an API token no lookup is possible at all.
  if (!env.polar.accessToken) {
    return {
      status: "unavailable",
      featureGateEnabled: env.polar.featureGateEnabled,
      subscriptionId: null,
      subscriptionStatus: null,
      currentPeriodEnd: null,
      source: "unavailable",
      note: "Polar access token is not configured.",
    }
  }
  // At least one of benefit id / product id must be configured to decide paid status.
  if (!env.polar.benefitId && !env.polar.productId) {
    return {
      status: "unavailable",
      featureGateEnabled: env.polar.featureGateEnabled,
      subscriptionId: null,
      subscriptionStatus: null,
      currentPeriodEnd: null,
      source: "unavailable",
      note: "Polar product or benefit configuration is missing.",
    }
  }
  try {
    let note: string | null = null
    let paidByBenefit = false
    if (env.polar.benefitId) {
      // First try the Polar customer keyed by our external user id.
      const externalState = await getCustomerStateByExternalId(input.userId)
      if (hasRequiredBenefit(externalState)) {
        paidByBenefit = true
        note = "Benefit granted via external customer id."
      } else {
        // Fall back to matching by email; when that works, link the external
        // id for next time (best effort — link failures are swallowed).
        const customer = await getCustomerByEmail(input.email)
        if (customer?.id) {
          const emailState = await getCustomerStateById(customer.id)
          if (hasRequiredBenefit(emailState)) {
            paidByBenefit = true
            note = "Benefit granted via matching customer email."
            await linkCustomerExternalId(customer, input.userId).catch(() => undefined)
          }
        }
      }
    }
    // Subscription check only runs when a product id is configured.
    const subscription = env.polar.productId ? await getPrimarySubscriptionForCustomer(input.userId) : null
    const normalizedSubscription = toBillingSubscription(subscription)
    const paidBySubscription = isActiveSubscriptionStatus(normalizedSubscription?.status)
    return {
      status: paidByBenefit || paidBySubscription ? "paid" : "unpaid",
      featureGateEnabled: env.polar.featureGateEnabled,
      subscriptionId: normalizedSubscription?.id ?? null,
      subscriptionStatus: normalizedSubscription?.status ?? null,
      currentPeriodEnd: normalizedSubscription?.currentPeriodEnd ?? null,
      // NOTE(review): when neither check passed, source still reads
      // "subscription" — confirm that default is intended.
      source: paidByBenefit ? "benefit" : "subscription",
      note:
        note ??
        (normalizedSubscription
          ? "Subscription status resolved from Polar."
          : "No active billing record was found for this user."),
    }
  } catch (error) {
    // Any Polar failure degrades to "unavailable" with the error as the note.
    return {
      status: "unavailable",
      featureGateEnabled: env.polar.featureGateEnabled,
      subscriptionId: null,
      subscriptionStatus: null,
      currentPeriodEnd: null,
      source: "unavailable",
      note: error instanceof Error ? error.message : "Billing lookup failed.",
    }
  }
}
/**
 * Toggle cancel-at-period-end on the user's active Polar subscription.
 * Returns the updated subscription, or null when the feature gate is off or
 * no active subscription exists. Throws when the Polar PATCH call fails.
 */
export async function setCloudWorkerSubscriptionCancellation(
  input: CloudAccessInput,
  cancelAtPeriodEnd: boolean,
): Promise<CloudWorkerBillingSubscription | null> {
  if (!env.polar.featureGateEnabled) {
    return null
  }
  assertPaywallConfig()
  const [active] = await listSubscriptionsByExternalCustomer(input.userId, {
    activeOnly: true,
    limit: 1,
  })
  if (!active?.id) {
    return null
  }
  const result = await polarFetchJson<PolarSubscription>(`/v1/subscriptions/${encodeURIComponent(active.id)}`, {
    method: "PATCH",
    body: JSON.stringify({
      cancel_at_period_end: cancelAtPeriodEnd,
    }),
  })
  if (!result.response.ok) {
    throw new Error(`Polar subscription update failed (${result.response.status}): ${result.text.slice(0, 400)}`)
  }
  // Some responses omit the updated body; re-fetch in that case.
  if (result.payload?.id) {
    return toBillingSubscription(result.payload)
  }
  const refreshed = await getSubscriptionById(active.id)
  return toBillingSubscription(refreshed)
}

View File

@@ -0,0 +1 @@
// Re-export the Drizzle query helpers from the shared den-db package so
// service code imports them from one local module path.
export { and, asc, desc, eq, gt, isNotNull, isNull, sql } from "../../../../packages/den-db/dist/drizzle.js"

View File

@@ -0,0 +1,9 @@
import { createDenDb, isTransientDbConnectionError } from "../../../../packages/den-db/dist/index.js"
import { env } from "../env.js"
// Shared database handle for this service, built from the env-selected mode
// (mysql URL locally; PlanetScale serverless credentials when configured).
export const { db } = createDenDb({
  databaseUrl: env.databaseUrl,
  mode: env.dbMode,
  planetscale: env.planetscale,
})
// Re-exported so route code can classify retryable connection failures.
export { isTransientDbConnectionError }

View File

@@ -0,0 +1 @@
// Re-export the shared Den table schema so app code has one local import path.
export * from "../../../../packages/den-db/dist/schema.js"

View File

@@ -0,0 +1 @@
// Re-export the shared TypeID helpers used for Den's typed record ids.
export * from "../../../../packages/utils/dist/typeid.js"

253
services/den-v2/src/env.ts Normal file
View File

@@ -0,0 +1,253 @@
import { z } from "zod";
// Raw environment schema for the den-v2 service. Everything except the auth
// settings is optional; numeric/boolean values arrive as strings and are
// coerced when the exported `env` object is built below.
const schema = z.object({
  // Database: either a mysql connection URL or PlanetScale HTTP credentials.
  DATABASE_URL: z.string().min(1).optional(),
  DATABASE_HOST: z.string().min(1).optional(),
  DATABASE_USERNAME: z.string().min(1).optional(),
  DATABASE_PASSWORD: z.string().optional(),
  DB_MODE: z.enum(["mysql", "planetscale"]).optional(),
  // Better Auth and OAuth provider configuration.
  BETTER_AUTH_SECRET: z.string().min(32),
  BETTER_AUTH_URL: z.string().min(1),
  DEN_BETTER_AUTH_TRUSTED_ORIGINS: z.string().optional(),
  GITHUB_CLIENT_ID: z.string().optional(),
  GITHUB_CLIENT_SECRET: z.string().optional(),
  GOOGLE_CLIENT_ID: z.string().optional(),
  GOOGLE_CLIENT_SECRET: z.string().optional(),
  // HTTP server, dev mode, CORS.
  PORT: z.string().optional(),
  WORKER_PROXY_PORT: z.string().optional(),
  OPENWORK_DEV_MODE: z.string().optional(),
  CORS_ORIGINS: z.string().optional(),
  // Worker provisioning backend selection.
  PROVISIONER_MODE: z.enum(["stub", "render", "daytona"]).optional(),
  WORKER_URL_TEMPLATE: z.string().optional(),
  OPENWORK_DAYTONA_ENV_PATH: z.string().optional(),
  // Render provisioner settings.
  RENDER_API_BASE: z.string().optional(),
  RENDER_API_KEY: z.string().optional(),
  RENDER_OWNER_ID: z.string().optional(),
  RENDER_WORKER_REPO: z.string().optional(),
  RENDER_WORKER_BRANCH: z.string().optional(),
  RENDER_WORKER_ROOT_DIR: z.string().optional(),
  RENDER_WORKER_PLAN: z.string().optional(),
  RENDER_WORKER_REGION: z.string().optional(),
  RENDER_WORKER_OPENWORK_VERSION: z.string().optional(),
  RENDER_WORKER_NAME_PREFIX: z.string().optional(),
  RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX: z.string().optional(),
  RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS: z.string().optional(),
  RENDER_PROVISION_TIMEOUT_MS: z.string().optional(),
  RENDER_HEALTHCHECK_TIMEOUT_MS: z.string().optional(),
  RENDER_POLL_INTERVAL_MS: z.string().optional(),
  // Vercel API settings (DNS / domain management).
  VERCEL_API_BASE: z.string().optional(),
  VERCEL_TOKEN: z.string().optional(),
  VERCEL_TEAM_ID: z.string().optional(),
  VERCEL_TEAM_SLUG: z.string().optional(),
  VERCEL_DNS_DOMAIN: z.string().optional(),
  // Polar billing settings.
  POLAR_FEATURE_GATE_ENABLED: z.string().optional(),
  POLAR_API_BASE: z.string().optional(),
  POLAR_ACCESS_TOKEN: z.string().optional(),
  POLAR_PRODUCT_ID: z.string().optional(),
  POLAR_BENEFIT_ID: z.string().optional(),
  POLAR_SUCCESS_URL: z.string().optional(),
  POLAR_RETURN_URL: z.string().optional(),
  // Daytona sandbox provisioner settings.
  DAYTONA_API_URL: z.string().optional(),
  DAYTONA_API_KEY: z.string().optional(),
  DAYTONA_TARGET: z.string().optional(),
  DAYTONA_SNAPSHOT: z.string().optional(),
  DAYTONA_SANDBOX_IMAGE: z.string().optional(),
  DAYTONA_SANDBOX_CPU: z.string().optional(),
  DAYTONA_SANDBOX_MEMORY: z.string().optional(),
  DAYTONA_SANDBOX_DISK: z.string().optional(),
  DAYTONA_SANDBOX_PUBLIC: z.string().optional(),
  DAYTONA_SANDBOX_AUTO_STOP_INTERVAL: z.string().optional(),
  DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL: z.string().optional(),
  DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL: z.string().optional(),
  DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: z.string().optional(),
  DAYTONA_WORKER_PROXY_BASE_URL: z.string().optional(),
  DAYTONA_SANDBOX_NAME_PREFIX: z.string().optional(),
  DAYTONA_VOLUME_NAME_PREFIX: z.string().optional(),
  DAYTONA_WORKSPACE_MOUNT_PATH: z.string().optional(),
  DAYTONA_DATA_MOUNT_PATH: z.string().optional(),
  DAYTONA_RUNTIME_WORKSPACE_PATH: z.string().optional(),
  DAYTONA_RUNTIME_DATA_PATH: z.string().optional(),
  DAYTONA_SIDECAR_DIR: z.string().optional(),
  DAYTONA_OPENWORK_PORT: z.string().optional(),
  DAYTONA_OPENCODE_PORT: z.string().optional(),
  DAYTONA_OPENWORK_VERSION: z.string().optional(),
  DAYTONA_CREATE_TIMEOUT_SECONDS: z.string().optional(),
  DAYTONA_DELETE_TIMEOUT_SECONDS: z.string().optional(),
  DAYTONA_HEALTHCHECK_TIMEOUT_MS: z.string().optional(),
  DAYTONA_POLL_INTERVAL_MS: z.string().optional(),
}).superRefine((value, ctx) => {
  // Cross-field validation: the effective db mode decides which credentials
  // are mandatory. Mode defaults to mysql when DATABASE_URL is present,
  // otherwise planetscale.
  const inferredMode = value.DB_MODE ?? (value.DATABASE_URL ? "mysql" : "planetscale")
  if (inferredMode === "mysql" && !value.DATABASE_URL) {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "DATABASE_URL is required when using mysql mode",
      path: ["DATABASE_URL"],
    })
  }
  if (inferredMode === "planetscale") {
    for (const key of ["DATABASE_HOST", "DATABASE_USERNAME", "DATABASE_PASSWORD"] as const) {
      if (!value[key]) {
        ctx.addIssue({
          code: z.ZodIssueCode.custom,
          message: `${key} is required when using planetscale mode`,
          path: [key],
        })
      }
    }
  }
});
// Validate at module load; a bad configuration throws and aborts startup.
const parsed = schema.parse(process.env);
/** Trim an optional env value; missing, empty, or whitespace-only becomes undefined. */
function optionalString(value: string | undefined): string | undefined {
  if (value === undefined) {
    return undefined;
  }
  const normalized = value.trim();
  return normalized.length > 0 ? normalized : undefined;
}
/** Trim an origin and drop trailing slashes; the "*" wildcard passes through. */
function normalizeOrigin(origin: string): string {
  const trimmed = origin.trim();
  return trimmed === "*" ? trimmed : trimmed.replace(/\/+$/, "");
}
// Comma-separated CORS origins; stays undefined when the variable is unset.
const corsOrigins = parsed.CORS_ORIGINS?.split(",")
  .map((origin) => normalizeOrigin(origin))
  .filter(Boolean);
// Better Auth trusted origins fall back to the CORS list, then to empty.
// NOTE(review): a *defined but blank* DEN_BETTER_AUTH_TRUSTED_ORIGINS yields
// [] rather than falling back to corsOrigins — confirm that is intended.
const betterAuthTrustedOrigins =
  parsed.DEN_BETTER_AUTH_TRUSTED_ORIGINS?.split(",")
    .map((origin) => normalizeOrigin(origin))
    .filter(Boolean) ??
  corsOrigins ??
  [];
// String "true" (any case) enables the Polar paywall feature gate.
const polarFeatureGateEnabled =
  (parsed.POLAR_FEATURE_GATE_ENABLED ?? "false").toLowerCase() === "true";
// Same convention for exposing Daytona sandboxes publicly.
const daytonaSandboxPublic =
  (parsed.DAYTONA_SANDBOX_PUBLIC ?? "false").toLowerCase() === "true";
// PlanetScale credentials are assembled only when all three values exist
// (the password may legitimately be empty, hence the explicit undefined check).
const planetscaleCredentials =
  parsed.DATABASE_HOST && parsed.DATABASE_USERNAME && parsed.DATABASE_PASSWORD !== undefined
    ? {
        host: parsed.DATABASE_HOST,
        username: parsed.DATABASE_USERNAME,
        password: parsed.DATABASE_PASSWORD,
      }
    : null
// Typed, normalized configuration object consumed by the rest of the service.
export const env = {
  // Database selection: mysql URL locally, PlanetScale credentials otherwise.
  databaseUrl: parsed.DATABASE_URL,
  dbMode: parsed.DB_MODE ?? (parsed.DATABASE_URL ? "mysql" : "planetscale"),
  planetscale: planetscaleCredentials,
  betterAuthSecret: parsed.BETTER_AUTH_SECRET,
  betterAuthUrl: parsed.BETTER_AUTH_URL,
  betterAuthTrustedOrigins,
  devMode: (parsed.OPENWORK_DEV_MODE ?? "0").trim() === "1",
  // OAuth provider credentials; blank strings are treated as unset.
  github: {
    clientId: parsed.GITHUB_CLIENT_ID?.trim() || undefined,
    clientSecret: parsed.GITHUB_CLIENT_SECRET?.trim() || undefined,
  },
  google: {
    clientId: parsed.GOOGLE_CLIENT_ID?.trim() || undefined,
    clientSecret: parsed.GOOGLE_CLIENT_SECRET?.trim() || undefined,
  },
  port: Number(parsed.PORT ?? "8788"),
  workerProxyPort: Number(parsed.WORKER_PROXY_PORT ?? "8789"),
  corsOrigins: corsOrigins ?? [],
  provisionerMode: parsed.PROVISIONER_MODE ?? "daytona",
  workerUrlTemplate: parsed.WORKER_URL_TEMPLATE,
  // Render-backed worker provisioning; *Ms values are milliseconds.
  render: {
    apiBase: parsed.RENDER_API_BASE ?? "https://api.render.com/v1",
    apiKey: parsed.RENDER_API_KEY,
    ownerId: parsed.RENDER_OWNER_ID,
    workerRepo:
      parsed.RENDER_WORKER_REPO ?? "https://github.com/different-ai/openwork",
    workerBranch: parsed.RENDER_WORKER_BRANCH ?? "dev",
    workerRootDir:
      parsed.RENDER_WORKER_ROOT_DIR ?? "services/den-worker-runtime",
    workerPlan: parsed.RENDER_WORKER_PLAN ?? "standard",
    workerRegion: parsed.RENDER_WORKER_REGION ?? "oregon",
    workerOpenworkVersion: parsed.RENDER_WORKER_OPENWORK_VERSION,
    workerNamePrefix: parsed.RENDER_WORKER_NAME_PREFIX ?? "den-worker",
    workerPublicDomainSuffix: parsed.RENDER_WORKER_PUBLIC_DOMAIN_SUFFIX,
    customDomainReadyTimeoutMs: Number(
      parsed.RENDER_CUSTOM_DOMAIN_READY_TIMEOUT_MS ?? "240000",
    ),
    provisionTimeoutMs: Number(parsed.RENDER_PROVISION_TIMEOUT_MS ?? "900000"),
    healthcheckTimeoutMs: Number(
      parsed.RENDER_HEALTHCHECK_TIMEOUT_MS ?? "180000",
    ),
    pollIntervalMs: Number(parsed.RENDER_POLL_INTERVAL_MS ?? "5000"),
  },
  // Vercel API access used for DNS / domain management.
  vercel: {
    apiBase: parsed.VERCEL_API_BASE ?? "https://api.vercel.com",
    token: parsed.VERCEL_TOKEN,
    teamId: parsed.VERCEL_TEAM_ID,
    teamSlug: parsed.VERCEL_TEAM_SLUG,
    dnsDomain: parsed.VERCEL_DNS_DOMAIN,
  },
  // Polar billing / paywall configuration.
  polar: {
    featureGateEnabled: polarFeatureGateEnabled,
    apiBase: parsed.POLAR_API_BASE ?? "https://api.polar.sh",
    accessToken: parsed.POLAR_ACCESS_TOKEN,
    productId: parsed.POLAR_PRODUCT_ID,
    benefitId: parsed.POLAR_BENEFIT_ID,
    successUrl: parsed.POLAR_SUCCESS_URL,
    returnUrl: parsed.POLAR_RETURN_URL,
  },
  // Daytona sandbox provisioning. NOTE(review): the auto stop/archive/delete
  // intervals are numbers with unstated units (presumably minutes) — confirm
  // against the Daytona SDK before changing defaults.
  daytona: {
    envPath: optionalString(parsed.OPENWORK_DAYTONA_ENV_PATH),
    apiUrl: optionalString(parsed.DAYTONA_API_URL) ?? "https://app.daytona.io/api",
    apiKey: optionalString(parsed.DAYTONA_API_KEY),
    target: optionalString(parsed.DAYTONA_TARGET),
    snapshot: optionalString(parsed.DAYTONA_SNAPSHOT),
    image: optionalString(parsed.DAYTONA_SANDBOX_IMAGE) ?? "node:20-bookworm",
    resources: {
      cpu: Number(parsed.DAYTONA_SANDBOX_CPU ?? "2"),
      memory: Number(parsed.DAYTONA_SANDBOX_MEMORY ?? "4"),
      disk: Number(parsed.DAYTONA_SANDBOX_DISK ?? "8"),
    },
    public: daytonaSandboxPublic,
    autoStopInterval: Number(parsed.DAYTONA_SANDBOX_AUTO_STOP_INTERVAL ?? "0"),
    autoArchiveInterval: Number(
      parsed.DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL ?? "10080",
    ),
    autoDeleteInterval: Number(
      parsed.DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL ?? "-1",
    ),
    signedPreviewExpiresSeconds: Number(
      parsed.DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS ?? "86400",
    ),
    workerProxyBaseUrl:
      optionalString(parsed.DAYTONA_WORKER_PROXY_BASE_URL) ?? "https://workers.den.openworklabs",
    sandboxNamePrefix:
      optionalString(parsed.DAYTONA_SANDBOX_NAME_PREFIX) ?? "den-daytona-worker",
    volumeNamePrefix:
      optionalString(parsed.DAYTONA_VOLUME_NAME_PREFIX) ?? "den-daytona-worker",
    workspaceMountPath:
      optionalString(parsed.DAYTONA_WORKSPACE_MOUNT_PATH) ?? "/workspace",
    dataMountPath:
      optionalString(parsed.DAYTONA_DATA_MOUNT_PATH) ?? "/persist/openwork",
    runtimeWorkspacePath:
      optionalString(parsed.DAYTONA_RUNTIME_WORKSPACE_PATH) ??
      "/tmp/openwork-workspace",
    runtimeDataPath:
      optionalString(parsed.DAYTONA_RUNTIME_DATA_PATH) ?? "/tmp/openwork-data",
    sidecarDir:
      optionalString(parsed.DAYTONA_SIDECAR_DIR) ?? "/tmp/openwork-sidecars",
    openworkPort: Number(parsed.DAYTONA_OPENWORK_PORT ?? "8787"),
    opencodePort: Number(parsed.DAYTONA_OPENCODE_PORT ?? "4096"),
    openworkVersion: optionalString(parsed.DAYTONA_OPENWORK_VERSION),
    createTimeoutSeconds: Number(parsed.DAYTONA_CREATE_TIMEOUT_SECONDS ?? "300"),
    deleteTimeoutSeconds: Number(parsed.DAYTONA_DELETE_TIMEOUT_SECONDS ?? "120"),
    healthcheckTimeoutMs: Number(
      parsed.DAYTONA_HEALTHCHECK_TIMEOUT_MS ?? "300000",
    ),
    pollIntervalMs: Number(parsed.DAYTONA_POLL_INTERVAL_MS ?? "5000"),
  },
};

View File

@@ -0,0 +1,335 @@
import express from "express"
import { fromNodeHeaders } from "better-auth/node"
import { asc, desc, eq, isNotNull, sql } from "../db/drizzle.js"
import { ensureAdminAllowlistSeeded } from "../admin-allowlist.js"
import { auth } from "../auth.js"
import { getCloudWorkerAdminBillingStatus } from "../billing/polar.js"
import { db } from "../db/index.js"
import { AdminAllowlistTable, AuthAccountTable, AuthSessionTable, AuthUserTable, WorkerTable } from "../db/schema.js"
import { normalizeDenTypeId } from "../db/typeid.js"
import { asyncRoute } from "./errors.js"
// Typed id of an auth user row, inferred from the shared Drizzle table schema.
type UserId = typeof AuthUserTable.$inferSelect.id
/** Lowercase and trim an email; null/undefined become the empty string. */
function normalizeEmail(value: string | null | undefined) {
  if (value == null) {
    return ""
  }
  return value.trim().toLowerCase()
}
/** Coerce an unknown value to a finite number, defaulting to 0. */
function toNumber(value: unknown) {
  const candidate = typeof value === "number" ? value : Number(value)
  return Number.isFinite(candidate) ? candidate : 0
}
/**
 * True when the given timestamp is no more than `days` days in the past.
 * Missing or unparseable dates are treated as outside the window.
 */
function isWithinDays(value: Date | string | null, days: number) {
  if (!value) {
    return false
  }
  const timestamp = (value instanceof Date ? value : new Date(value)).getTime()
  if (Number.isNaN(timestamp)) {
    return false
  }
  return Date.now() - timestamp <= days * 24 * 60 * 60 * 1000
}
/** Map a Better Auth provider id to a display value; credential logins read as "email". */
function normalizeProvider(providerId: string) {
  const lowered = providerId.trim().toLowerCase()
  if (lowered === "") {
    return "unknown"
  }
  const isCredential = lowered === "credential" || lowered === "email-password"
  return isCredential ? "email" : lowered
}
/** Interpret an Express query value ("1"/"true"/"yes", or an array containing one) as true. */
function parseBooleanQuery(value: unknown): boolean {
  if (typeof value === "string") {
    return ["1", "true", "yes"].includes(value.trim().toLowerCase())
  }
  return Array.isArray(value) && value.some((entry) => parseBooleanQuery(entry))
}
/**
 * Map `items` through an async `mapper` with at most `limit` calls in
 * flight, preserving input order in the returned array.
 */
async function mapWithConcurrency<T, R>(items: T[], limit: number, mapper: (item: T) => Promise<R>) {
  if (items.length === 0) {
    return [] as R[]
  }
  const results = new Array<R>(items.length)
  let cursor = 0
  // Each lane claims the next unprocessed index; single-threaded JS makes
  // the read-then-increment safe without locking.
  const lane = async () => {
    while (cursor < items.length) {
      const index = cursor++
      results[index] = await mapper(items[index])
    }
  }
  const laneCount = Math.max(1, Math.min(limit, items.length))
  await Promise.all(Array.from({ length: laneCount }, () => lane()))
  return results
}
/**
 * Resolve the caller's session and require their email to be on the admin
 * allowlist. Writes a 401/403 response and returns null when unauthorized;
 * otherwise returns the session with a normalized typed user id.
 */
async function requireAdminSession(req: express.Request, res: express.Response) {
  const session = await auth.api.getSession({
    headers: fromNodeHeaders(req.headers),
  })
  if (!session?.user?.id) {
    res.status(401).json({ error: "unauthorized" })
    return null
  }
  const normalizedUserId = normalizeDenTypeId("user", session.user.id)
  const adminEmail = normalizeEmail(session.user.email)
  if (!adminEmail) {
    res.status(403).json({ error: "admin_email_required" })
    return null
  }
  // Make sure the bootstrap admins exist before consulting the table.
  await ensureAdminAllowlistSeeded()
  const allowlistHit = await db
    .select({ id: AdminAllowlistTable.id })
    .from(AdminAllowlistTable)
    .where(eq(AdminAllowlistTable.email, adminEmail))
    .limit(1)
  if (allowlistHit.length === 0) {
    res.status(403).json({ error: "forbidden" })
    return null
  }
  return {
    ...session,
    user: {
      ...session.user,
      id: normalizedUserId,
    },
  }
}
export const adminRouter = express.Router()
// GET /overview — aggregated admin dashboard: allowlist, per-user worker and
// session stats, auth providers, and (optionally) per-user billing status.
adminRouter.get("/overview", asyncRoute(async (req, res) => {
  const session = await requireAdminSession(req, res)
  if (!session) return
  // Billing costs one Polar round-trip per user, so it is opt-in via query flag.
  const includeBilling = parseBooleanQuery(req.query.includeBilling)
  // Independent aggregate queries run in parallel.
  const [admins, users, workerStatsRows, sessionStatsRows, accountRows] = await Promise.all([
    db
      .select({
        email: AdminAllowlistTable.email,
        note: AdminAllowlistTable.note,
        createdAt: AdminAllowlistTable.created_at,
      })
      .from(AdminAllowlistTable)
      .orderBy(asc(AdminAllowlistTable.email)),
    db.select().from(AuthUserTable).orderBy(desc(AuthUserTable.createdAt)),
    // Per-user worker counts, split by local vs cloud destination.
    db
      .select({
        userId: WorkerTable.created_by_user_id,
        workerCount: sql<number>`count(*)`,
        cloudWorkerCount: sql<number>`sum(case when ${WorkerTable.destination} = 'cloud' then 1 else 0 end)`,
        localWorkerCount: sql<number>`sum(case when ${WorkerTable.destination} = 'local' then 1 else 0 end)`,
        latestWorkerCreatedAt: sql<Date | null>`max(${WorkerTable.created_at})`,
      })
      .from(WorkerTable)
      .where(isNotNull(WorkerTable.created_by_user_id))
      .groupBy(WorkerTable.created_by_user_id),
    // Per-user session counts; the latest update doubles as "last seen".
    db
      .select({
        userId: AuthSessionTable.userId,
        sessionCount: sql<number>`count(*)`,
        lastSeenAt: sql<Date | null>`max(${AuthSessionTable.updatedAt})`,
      })
      .from(AuthSessionTable)
      .groupBy(AuthSessionTable.userId),
    db
      .select({
        userId: AuthAccountTable.userId,
        providerId: AuthAccountTable.providerId,
      })
      .from(AuthAccountTable),
  ])
  // Index the grouped rows by user id for O(1) joins when building userRows.
  const workerStatsByUser = new Map<UserId, {
    workerCount: number
    cloudWorkerCount: number
    localWorkerCount: number
    latestWorkerCreatedAt: Date | string | null
  }>()
  for (const row of workerStatsRows) {
    if (!row.userId) {
      continue
    }
    workerStatsByUser.set(row.userId, {
      workerCount: toNumber(row.workerCount),
      cloudWorkerCount: toNumber(row.cloudWorkerCount),
      localWorkerCount: toNumber(row.localWorkerCount),
      latestWorkerCreatedAt: row.latestWorkerCreatedAt,
    })
  }
  const sessionStatsByUser = new Map<UserId, {
    sessionCount: number
    lastSeenAt: Date | string | null
  }>()
  for (const row of sessionStatsRows) {
    sessionStatsByUser.set(row.userId, {
      sessionCount: toNumber(row.sessionCount),
      lastSeenAt: row.lastSeenAt,
    })
  }
  // Collect the set of normalized auth providers per user.
  const providersByUser = new Map<UserId, Set<string>>()
  for (const row of accountRows) {
    const providerId = normalizeProvider(row.providerId)
    const existing = providersByUser.get(row.userId) ?? new Set<string>()
    existing.add(providerId)
    providersByUser.set(row.userId, existing)
  }
  // Placeholder used for users missing from the billing lookup results.
  const defaultBilling = {
    status: "unavailable" as const,
    featureGateEnabled: false,
    subscriptionId: null,
    subscriptionStatus: null,
    currentPeriodEnd: null,
    source: "unavailable" as const,
    note: "Billing lookup unavailable.",
  }
  // Fetch per-user billing with bounded concurrency (4 at a time).
  const billingRows = includeBilling
    ? await mapWithConcurrency(users, 4, async (user) => ({
        userId: user.id,
        billing: await getCloudWorkerAdminBillingStatus({
          userId: user.id,
          email: user.email,
          name: user.name ?? user.email,
        }),
      }))
    : []
  const billingByUser = new Map(billingRows.map((row) => [row.userId, row.billing]))
  // Join all per-user aggregates into the rows the dashboard renders.
  const userRows = users.map((user) => {
    const workerStats = workerStatsByUser.get(user.id) ?? {
      workerCount: 0,
      cloudWorkerCount: 0,
      localWorkerCount: 0,
      latestWorkerCreatedAt: null,
    }
    const sessionStats = sessionStatsByUser.get(user.id) ?? {
      sessionCount: 0,
      lastSeenAt: null,
    }
    const authProviders = Array.from(providersByUser.get(user.id) ?? []).sort()
    return {
      id: user.id,
      name: user.name,
      email: user.email,
      emailVerified: user.emailVerified,
      createdAt: user.createdAt,
      updatedAt: user.updatedAt,
      lastSeenAt: sessionStats.lastSeenAt,
      sessionCount: sessionStats.sessionCount,
      authProviders,
      workerCount: workerStats.workerCount,
      cloudWorkerCount: workerStats.cloudWorkerCount,
      localWorkerCount: workerStats.localWorkerCount,
      latestWorkerCreatedAt: workerStats.latestWorkerCreatedAt,
      billing: includeBilling ? billingByUser.get(user.id) ?? defaultBilling : null,
    }
  })
  // Single pass over userRows to accumulate the dashboard summary counters.
  const summary = userRows.reduce(
    (accumulator, user) => {
      accumulator.totalUsers += 1
      accumulator.totalWorkers += user.workerCount
      accumulator.cloudWorkers += user.cloudWorkerCount
      accumulator.localWorkers += user.localWorkerCount
      if (user.emailVerified) {
        accumulator.verifiedUsers += 1
      }
      if (user.workerCount > 0) {
        accumulator.usersWithWorkers += 1
      }
      if (includeBilling && user.billing) {
        if (user.billing.status === "paid") {
          accumulator.paidUsers += 1
        } else if (user.billing.status === "unpaid") {
          accumulator.unpaidUsers += 1
        } else {
          accumulator.billingUnavailableUsers += 1
        }
      }
      if (isWithinDays(user.createdAt, 7)) {
        accumulator.recentUsers7d += 1
      }
      if (isWithinDays(user.createdAt, 30)) {
        accumulator.recentUsers30d += 1
      }
      return accumulator
    },
    {
      totalUsers: 0,
      verifiedUsers: 0,
      recentUsers7d: 0,
      recentUsers30d: 0,
      totalWorkers: 0,
      cloudWorkers: 0,
      localWorkers: 0,
      usersWithWorkers: 0,
      paidUsers: 0,
      unpaidUsers: 0,
      billingUnavailableUsers: 0,
    },
  )
  res.json({
    viewer: {
      id: session.user.id,
      email: session.user.email,
      name: session.user.name,
    },
    admins,
    summary: {
      ...summary,
      adminCount: admins.length,
      billingLoaded: includeBilling,
      // Billing counters are nulled (not zero) when billing was not loaded.
      paidUsers: includeBilling ? summary.paidUsers : null,
      unpaidUsers: includeBilling ? summary.unpaidUsers : null,
      billingUnavailableUsers: includeBilling ? summary.billingUnavailableUsers : null,
      usersWithoutWorkers: summary.totalUsers - summary.usersWithWorkers,
    },
    users: userRows,
    generatedAt: new Date().toISOString(),
  })
}))

View File

@@ -0,0 +1,61 @@
import type { ErrorRequestHandler, NextFunction, Request, RequestHandler, Response } from "express"
// Driver error codes that indicate a dropped or stale connection rather than
// an application failure; requests hitting these are safe to retry.
const TRANSIENT_DB_ERROR_CODES = new Set([
  "ECONNRESET",
  "EPIPE",
  "ETIMEDOUT",
  "PROTOCOL_CONNECTION_LOST",
  "PROTOCOL_ENQUEUE_AFTER_FATAL_ERROR",
])
/** Narrow an unknown value to a plain object record. */
function isRecord(value: unknown): value is Record<string, unknown> {
  return value !== null && typeof value === "object"
}
/** Walk an error's `cause` chain and return the first string `code` found. */
function getErrorCode(error: unknown): string | null {
  let current: unknown = error
  while (isRecord(current)) {
    if (typeof current.code === "string") {
      return current.code
    }
    current = current.cause
  }
  return null
}
/** True when the error (or any of its causes) carries a transient DB connection code. */
export function isTransientDbConnectionError(error: unknown): boolean {
  const code = getErrorCode(error)
  return Boolean(code) && TRANSIENT_DB_ERROR_CODES.has(code as string)
}
/**
 * Wrap an async Express handler so a rejected promise is forwarded to
 * `next` (Express 4 does not catch async handler errors on its own).
 */
export function asyncRoute(
  handler: (req: Request, res: Response, next: NextFunction) => Promise<unknown>,
): RequestHandler {
  return (req, res, next) => {
    void handler(req, res, next).catch((error) => next(error))
  }
}
/**
 * Terminal Express error handler: transient DB connection drops become
 * retryable 503 responses; everything else is logged and returned as a 500.
 */
export const errorMiddleware: ErrorRequestHandler = (error, _req, res, _next) => {
  if (res.headersSent) {
    // Too late to change the response; let Express close the connection.
    return
  }
  if (isTransientDbConnectionError(error)) {
    const detail = error instanceof Error ? error.message : "transient database connection failure"
    console.warn(`[http] transient db connection error: ${detail}`)
    res.status(503).json({
      error: "service_unavailable",
      message: "Database connection was interrupted. Please retry.",
    })
    return
  }
  const detail = error instanceof Error ? error.stack ?? error.message : String(error)
  console.error(`[http] unhandled error: ${detail}`)
  res.status(500).json({ error: "internal_error" })
}

View File

@@ -0,0 +1,88 @@
import type express from "express"
import { fromNodeHeaders } from "better-auth/node"
import { and, eq, gt } from "../db/drizzle.js"
import { auth } from "../auth.js"
import { db } from "../db/index.js"
import { AuthSessionTable, AuthUserTable } from "../db/schema.js"
import { normalizeDenTypeId } from "../db/typeid.js"
// The shape Better Auth's getSession resolves to (session + user, or null),
// reused by the bearer-token fallback so both paths stay interchangeable.
type AuthSessionLike = Awaited<ReturnType<typeof auth.api.getSession>>
/** Extract the token from an `Authorization: Bearer …` header, or null. */
function readBearerToken(req: express.Request): string | null {
  const rawHeader = req.headers.authorization
  const header = typeof rawHeader === "string" ? rawHeader.trim() : ""
  const match = header ? header.match(/^Bearer\s+(.+)$/i) : null
  const extracted = match?.[1]?.trim() ?? ""
  return extracted === "" ? null : extracted
}
/**
 * Resolve a session directly from the auth tables by bearer token, mirroring
 * the shape Better Auth's getSession returns. Expired or unknown tokens
 * resolve to null.
 */
async function getSessionFromBearerToken(token: string): Promise<AuthSessionLike> {
  const matches = await db
    .select({
      session: {
        id: AuthSessionTable.id,
        token: AuthSessionTable.token,
        userId: AuthSessionTable.userId,
        expiresAt: AuthSessionTable.expiresAt,
        createdAt: AuthSessionTable.createdAt,
        updatedAt: AuthSessionTable.updatedAt,
        ipAddress: AuthSessionTable.ipAddress,
        userAgent: AuthSessionTable.userAgent,
      },
      user: {
        id: AuthUserTable.id,
        name: AuthUserTable.name,
        email: AuthUserTable.email,
        emailVerified: AuthUserTable.emailVerified,
        image: AuthUserTable.image,
        createdAt: AuthUserTable.createdAt,
        updatedAt: AuthUserTable.updatedAt,
      },
    })
    .from(AuthSessionTable)
    .innerJoin(AuthUserTable, eq(AuthSessionTable.userId, AuthUserTable.id))
    .where(and(eq(AuthSessionTable.token, token), gt(AuthSessionTable.expiresAt, new Date())))
    .limit(1)
  const match = matches.at(0)
  if (!match) {
    return null
  }
  // Normalize the raw user id into the shared Den TypeID format.
  const { session, user } = match
  return {
    session,
    user: {
      ...user,
      id: normalizeDenTypeId("user", user.id),
    },
  }
}
/**
 * Resolve the request's session: cookie-based Better Auth session first,
 * then an Authorization bearer-token lookup. User ids are normalized to
 * Den TypeIDs in both paths.
 */
export async function getRequestSession(req: express.Request): Promise<AuthSessionLike> {
  const cookieSession = await auth.api.getSession({
    headers: fromNodeHeaders(req.headers),
  })
  if (cookieSession?.user?.id) {
    const normalizedUser = {
      ...cookieSession.user,
      id: normalizeDenTypeId("user", cookieSession.user.id),
    }
    return { ...cookieSession, user: normalizedUser }
  }
  const bearerToken = readBearerToken(req)
  return bearerToken ? getSessionFromBearerToken(bearerToken) : null
}

View File

@@ -0,0 +1,834 @@
import { randomBytes } from "crypto"
import express from "express"
import { fromNodeHeaders } from "better-auth/node"
import { and, asc, desc, eq, isNull } from "../db/drizzle.js"
import { z } from "zod"
import { auth } from "../auth.js"
// Polar billing is temporarily disabled for the one-worker experiment in hosted mode.
// Keep the old billing integration nearby so it can be restored quickly.
// import { getCloudWorkerBillingStatus, setCloudWorkerSubscriptionCancellation } from "../billing/polar.js"
import { db } from "../db/index.js"
import { AuditEventTable, AuthUserTable, DaytonaSandboxTable, OrgMembershipTable, WorkerBundleTable, WorkerInstanceTable, WorkerTable, WorkerTokenTable } from "../db/schema.js"
import { env } from "../env.js"
import { asyncRoute, isTransientDbConnectionError } from "./errors.js"
import { ensureDefaultOrg } from "../orgs.js"
import { deprovisionWorker, provisionWorker } from "../workers/provisioner.js"
import { customDomainForWorker } from "../workers/vanity-domain.js"
import { createDenTypeId, normalizeDenTypeId } from "../db/typeid.js"
// Payload for creating a worker; destination picks local vs cloud provisioning.
const createSchema = z.object({
  name: z.string().min(1),
  description: z.string().optional(),
  destination: z.enum(["local", "cloud"]),
  workspacePath: z.string().optional(),
  sandboxBackend: z.string().optional(),
  imageVersion: z.string().optional(),
})
// List query: page size clamped to 1–50, defaulting to 20.
const listSchema = z.object({
  limit: z.coerce.number().int().min(1).max(50).default(20),
})
// Billing toggle body; defaults to scheduling cancellation at period end.
const billingSubscriptionSchema = z.object({
  cancelAtPeriodEnd: z.boolean().default(true),
})
/** Generate a 64-character hex token (256 bits of randomness). */
const token = () => {
  return randomBytes(32).toString("hex")
}
// Row and id types inferred from the Drizzle tables so route code stays in
// sync with the shared schema package.
type WorkerRow = typeof WorkerTable.$inferSelect
type WorkerInstanceRow = typeof WorkerInstanceTable.$inferSelect
type WorkerId = WorkerRow["id"]
type OrgId = typeof OrgMembershipTable.$inferSelect.org_id
type UserId = typeof AuthUserTable.$inferSelect.id
// Validate and brand a :workerId route param as a Den "worker" TypeID.
function parseWorkerIdParam(value: string): WorkerId {
  return normalizeDenTypeId("worker", value)
}
// Validate and brand a raw user id as a Den "user" TypeID.
function parseUserId(value: string): UserId {
  return normalizeDenTypeId("user", value)
}
/** Narrow an unknown value to a plain object record. */
function isRecord(value: unknown): value is Record<string, unknown> {
  return value !== null && typeof value === "object"
}
/** Trim whitespace and strip any trailing slashes from a URL string. */
function normalizeUrl(value: string): string {
  let trimmed = value.trim()
  while (trimmed.endsWith("/")) {
    trimmed = trimmed.slice(0, -1)
  }
  return trimmed
}
/**
 * Pick a workspace out of an openwork /workspaces payload and build its
 * connect URL. Prefers the payload's activeId, falling back to the first
 * item with a non-blank string id. Returns null when either the workspace
 * or the base URL is missing.
 */
function parseWorkspaceSelection(payload: unknown): { workspaceId: string; openworkUrl: string } | null {
  if (!isRecord(payload) || !Array.isArray(payload.items)) {
    return null
  }
  let workspaceId = typeof payload.activeId === "string" ? payload.activeId : null
  if (!workspaceId) {
    for (const item of payload.items) {
      if (isRecord(item) && typeof item.id === "string" && item.id.trim() !== "") {
        workspaceId = item.id
        break
      }
    }
  }
  const baseUrl = typeof payload.baseUrl === "string" ? normalizeUrl(payload.baseUrl) : ""
  if (!workspaceId || !baseUrl) {
    return null
  }
  return {
    workspaceId,
    openworkUrl: `${baseUrl}/w/${encodeURIComponent(workspaceId)}`,
  }
}
// Ask a worker's openwork server which workspace to connect to.
// Returns null — never throws — when the URL or token is blank, the HTTP call
// fails or is non-2xx, or the payload does not describe a usable workspace.
async function resolveConnectUrlFromWorker(instanceUrl: string, clientToken: string) {
  const baseUrl = normalizeUrl(instanceUrl)
  if (!baseUrl || !clientToken.trim()) {
    return null
  }
  try {
    const response = await fetch(`${baseUrl}/workspaces`, {
      method: "GET",
      headers: {
        Accept: "application/json",
        Authorization: `Bearer ${clientToken.trim()}`,
      },
    })
    if (!response.ok) {
      return null
    }
    const payload = (await response.json()) as unknown
    // Inject the normalized base URL so the selection is built against the
    // same origin that was just queried.
    const selected = parseWorkspaceSelection({
      ...(isRecord(payload) ? payload : {}),
      baseUrl,
    })
    return selected
  } catch {
    // Network/JSON failures are treated as "not resolvable right now".
    return null
  }
}
// Ordered base URLs to try when contacting a worker: the vanity/custom domain
// first (when a public domain suffix is configured), then the provider
// instance URL. Duplicates are skipped.
function getConnectUrlCandidates(workerId: WorkerId, instanceUrl: string | null) {
  const candidates: string[] = []
  const vanityHostname = customDomainForWorker(workerId, env.render.workerPublicDomainSuffix)
  if (vanityHostname) {
    candidates.push(`https://${vanityHostname}`)
  }
  if (instanceUrl) {
    const normalized = normalizeUrl(instanceUrl)
    if (normalized && !candidates.includes(normalized)) {
      candidates.push(normalized)
    }
  }
  return candidates
}
// Interpret a query-string value (string or repeated-param array) as a
// boolean flag: "1", "true", or "yes" (any case, padded) count as set.
function queryIncludesFlag(value: unknown): boolean {
  if (Array.isArray(value)) {
    return value.some((entry) => queryIncludesFlag(entry))
  }
  if (typeof value !== "string") {
    return false
  }
  return ["1", "true", "yes"].includes(value.trim().toLowerCase())
}
// Walk the candidate base URLs in priority order and return the first
// workspace selection that resolves; null when none respond usefully.
async function resolveConnectUrlFromCandidates(workerId: WorkerId, instanceUrl: string | null, clientToken: string) {
  for (const baseUrl of getConnectUrlCandidates(workerId, instanceUrl)) {
    const selection = await resolveConnectUrlFromWorker(baseUrl, clientToken)
    if (selection) {
      return selection
    }
  }
  return null
}
// Gather what is needed to call a worker's runtime API: its latest instance,
// its unrevoked host-scoped token, and the candidate base URLs. Returns null
// unless both an instance URL and a host token exist (i.e. provisioning done).
async function getWorkerRuntimeAccess(workerId: WorkerId) {
  const instance = await getLatestWorkerInstance(workerId)
  const tokenRows = await db
    .select()
    .from(WorkerTokenTable)
    .where(and(eq(WorkerTokenTable.worker_id, workerId), isNull(WorkerTokenTable.revoked_at)))
    .orderBy(asc(WorkerTokenTable.created_at))
  // Oldest unrevoked host token wins (rows are sorted ascending by creation).
  const hostToken = tokenRows.find((entry) => entry.scope === "host")?.token ?? null
  if (!instance?.url || !hostToken) {
    return null
  }
  return {
    instance,
    hostToken,
    candidates: getConnectUrlCandidates(workerId, instance.url),
  }
}
// Proxy a JSON request to a worker's runtime API, trying each candidate base
// URL until one returns 2xx. Returns { ok, status, payload } — 409 when
// runtime access is not ready, otherwise the last upstream status/payload
// seen (502 default when every candidate errored before responding).
async function fetchWorkerRuntimeJson(input: {
  workerId: WorkerId
  path: string
  method?: "GET" | "POST"
  body?: unknown
}) {
  const access = await getWorkerRuntimeAccess(input.workerId)
  if (!access) {
    return {
      ok: false as const,
      status: 409,
      payload: {
        error: "worker_runtime_unavailable",
        message: "Worker runtime access is not ready yet. Wait for provisioning to finish and try again.",
      },
    }
  }
  let lastPayload: unknown = null
  let lastStatus = 502
  for (const candidate of access.candidates) {
    try {
      const response = await fetch(`${normalizeUrl(candidate)}${input.path}`, {
        method: input.method ?? "GET",
        headers: {
          Accept: "application/json",
          "Content-Type": "application/json",
          "X-OpenWork-Host-Token": access.hostToken,
        },
        body: input.body === undefined ? undefined : JSON.stringify(input.body),
      })
      const text = await response.text()
      lastStatus = response.status
      // Upstream may return non-JSON error bodies; wrap raw text as a message.
      try {
        lastPayload = text ? JSON.parse(text) : null
      } catch {
        lastPayload = text ? { message: text } : null
      }
      if (response.ok) {
        return { ok: true as const, status: response.status, payload: lastPayload }
      }
    } catch (error) {
      // Connection-level failure: remember it, then try the next candidate.
      lastPayload = { message: error instanceof Error ? error.message : "worker_request_failed" }
    }
  }
  return { ok: false as const, status: lastStatus, payload: lastPayload }
}
// Resolve the Better Auth session for a request. Sends a 401 and returns null
// when unauthenticated; otherwise returns the session with the user id
// normalized to the shared Den TypeID format.
async function requireSession(req: express.Request, res: express.Response) {
  const session = await auth.api.getSession({
    headers: fromNodeHeaders(req.headers),
  })
  if (!session?.user?.id) {
    res.status(401).json({ error: "unauthorized" })
    return null
  }
  return {
    ...session,
    user: {
      ...session.user,
      id: parseUserId(session.user.id),
    },
  }
}
// Resolve the org the user belongs to via their first membership row, or
// null when the user has no membership yet.
async function getOrgId(userId: UserId): Promise<OrgId | null> {
  const memberships = await db
    .select()
    .from(OrgMembershipTable)
    .where(eq(OrgMembershipTable.user_id, userId))
    .limit(1)
  return memberships.length > 0 ? memberships[0].org_id : null
}
// Count the user's cloud-destination workers, capped at 2 — callers only need
// to distinguish "none" from "at least one".
async function countUserCloudWorkers(userId: UserId) {
  const cloudWorkersByUser = and(
    eq(WorkerTable.created_by_user_id, userId),
    eq(WorkerTable.destination, "cloud"),
  )
  const matches = await db
    .select({ id: WorkerTable.id })
    .from(WorkerTable)
    .where(cloudWorkersByUser)
    .limit(2)
  return matches.length
}
// Static billing summary used while Polar billing is disabled for the
// one-cloud-worker experiment: no gate, no plan, nothing to check out.
// Product/benefit ids are still surfaced so clients keep their wiring.
function getExperimentBillingSummary() {
  return {
    featureGateEnabled: false,
    hasActivePlan: false,
    checkoutRequired: false,
    checkoutUrl: null,
    portalUrl: null,
    price: null,
    subscription: null,
    invoices: [],
    productId: env.polar.productId,
    benefitId: env.polar.benefitId,
  }
}
// Fetch the most recent instance row for a worker. Transient DB connection
// errors are retried once; a second transient failure degrades to null (the
// caller renders "no instance") rather than failing the whole request.
// Non-transient errors are rethrown.
async function getLatestWorkerInstance(workerId: WorkerId) {
  for (let attempt = 0; attempt < 2; attempt += 1) {
    try {
      const rows = await db
        .select()
        .from(WorkerInstanceTable)
        .where(eq(WorkerInstanceTable.worker_id, workerId))
        .orderBy(desc(WorkerInstanceTable.created_at))
        .limit(1)
      return rows[0] ?? null
    } catch (error) {
      if (!isTransientDbConnectionError(error)) {
        throw error
      }
      if (attempt === 0) {
        console.warn(`[workers] transient db error reading instance for ${workerId}; retrying`)
        continue
      }
      console.warn(`[workers] transient db error reading instance for ${workerId}; returning null instance`)
      return null
    }
  }
  // Unreachable: the loop always returns or throws, but keeps tsc satisfied.
  return null
}
// Map a worker-instance DB row to the camelCase API shape; null passes through.
function toInstanceResponse(instance: WorkerInstanceRow | null) {
  if (!instance) {
    return null
  }
  const { provider, region, url, status, created_at, updated_at } = instance
  return {
    provider,
    region,
    url,
    status,
    createdAt: created_at,
    updatedAt: updated_at,
  }
}
// Map a worker DB row to the camelCase API shape, flagging whether the
// requesting user is the creator.
function toWorkerResponse(row: WorkerRow, userId: string) {
  const { id, name, description, destination, status } = row
  return {
    id,
    orgId: row.org_id,
    createdByUserId: row.created_by_user_id,
    isMine: row.created_by_user_id === userId,
    name,
    description,
    destination,
    status,
    imageVersion: row.image_version,
    workspacePath: row.workspace_path,
    sandboxBackend: row.sandbox_backend,
    createdAt: row.created_at,
    updatedAt: row.updated_at,
  }
}
// Background continuation of cloud worker creation (fired with `void` from
// the POST / handler, after the 202 has been sent). Provisions the runtime,
// then records the resulting worker status and instance row. On any failure
// the worker is marked "failed" and the error logged — there is no request
// left to report it to.
async function continueCloudProvisioning(input: { workerId: WorkerId; name: string; hostToken: string; clientToken: string }) {
  try {
    const provisioned = await provisionWorker({
      workerId: input.workerId,
      name: input.name,
      hostToken: input.hostToken,
      clientToken: input.clientToken,
    })
    await db
      .update(WorkerTable)
      .set({ status: provisioned.status })
      .where(eq(WorkerTable.id, input.workerId))
    await db.insert(WorkerInstanceTable).values({
      id: createDenTypeId("workerInstance"),
      worker_id: input.workerId,
      provider: provisioned.provider,
      region: provisioned.region,
      url: provisioned.url,
      status: provisioned.status,
    })
  } catch (error) {
    await db
      .update(WorkerTable)
      .set({ status: "failed" })
      .where(eq(WorkerTable.id, input.workerId))
    const message = error instanceof Error ? error.message : "provisioning_failed"
    console.error(`[workers] provisioning failed for ${input.workerId}: ${message}`)
  }
}
// Router for /v1/workers — worker CRUD, tokens, runtime proxying, billing.
export const workersRouter = express.Router()
// GET / — list the caller's org workers (newest first), each with its latest
// instance. An org-less user gets an empty list rather than a 404.
workersRouter.get("/", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.json({ workers: [] })
    return
  }
  const parsed = listSchema.safeParse({ limit: req.query.limit })
  if (!parsed.success) {
    res.status(400).json({ error: "invalid_request", details: parsed.error.flatten() })
    return
  }
  const rows = await db
    .select()
    .from(WorkerTable)
    .where(eq(WorkerTable.org_id, orgId))
    .orderBy(desc(WorkerTable.created_at))
    .limit(parsed.data.limit)
  // One instance lookup per worker, issued concurrently.
  const workers = await Promise.all(
    rows.map(async (row) => {
      const instance = await getLatestWorkerInstance(row.id)
      return {
        ...toWorkerResponse(row, session.user.id),
        instance: toInstanceResponse(instance),
      }
    }),
  )
  res.json({ workers })
}))
// POST / — create a worker. Local workers require a workspace path and are
// immediately "healthy"; cloud workers are created as "provisioning" and
// finished asynchronously by continueCloudProvisioning. Outside dev mode a
// user may only hold one cloud worker (experiment limit). Responds 202 for
// cloud (poll later) and 201 for local, including both freshly minted tokens.
workersRouter.post("/", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  const parsed = createSchema.safeParse(req.body)
  if (!parsed.success) {
    res.status(400).json({ error: "invalid_request", details: parsed.error.flatten() })
    return
  }
  if (parsed.data.destination === "local" && !parsed.data.workspacePath) {
    res.status(400).json({ error: "workspace_path_required" })
    return
  }
  // Experiment cap: one cloud worker per user unless running in dev mode.
  if (parsed.data.destination === "cloud" && !env.devMode && (await countUserCloudWorkers(session.user.id)) > 0) {
    // Polar is temporarily disabled for this experiment.
    // Keep the previous paywall block nearby so it can be restored quickly.
    //
    // const access = await requireCloudWorkerAccess({
    //   userId: session.user.id,
    //   email: session.user.email ?? `${session.user.id}@placeholder.local`,
    //   name: session.user.name ?? session.user.email ?? "OpenWork User",
    // })
    // if (!access.allowed) {
    //   res.status(402).json({
    //     error: "payment_required",
    //     message: "Additional cloud workers require an active Den Cloud plan.",
    //     polar: {
    //       checkoutUrl: access.checkoutUrl,
    //       productId: env.polar.productId,
    //       benefitId: env.polar.benefitId,
    //     },
    //   })
    //   return
    // }
    res.status(409).json({
      error: "worker_limit_reached",
      message: "You can only create one cloud worker during this experiment.",
    })
    return
  }
  // First worker for a brand-new user also creates their personal org.
  const orgId =
    (await getOrgId(session.user.id)) ?? (await ensureDefaultOrg(session.user.id, session.user.name ?? session.user.email ?? "Personal"))
  const workerId = createDenTypeId("worker")
  let workerStatus: WorkerRow["status"] = parsed.data.destination === "cloud" ? "provisioning" : "healthy"
  await db.insert(WorkerTable).values({
    id: workerId,
    org_id: orgId,
    created_by_user_id: session.user.id,
    name: parsed.data.name,
    description: parsed.data.description,
    destination: parsed.data.destination,
    status: workerStatus,
    image_version: parsed.data.imageVersion,
    workspace_path: parsed.data.workspacePath,
    sandbox_backend: parsed.data.sandboxBackend,
  })
  // Host token authenticates Den -> worker runtime calls; client token
  // authenticates end-user openwork sessions.
  const hostToken = token()
  const clientToken = token()
  await db.insert(WorkerTokenTable).values([
    {
      id: createDenTypeId("workerToken"),
      worker_id: workerId,
      scope: "host",
      token: hostToken,
    },
    {
      id: createDenTypeId("workerToken"),
      worker_id: workerId,
      scope: "client",
      token: clientToken,
    },
  ])
  // Fire-and-forget: provisioning continues after the response is sent.
  if (parsed.data.destination === "cloud") {
    void continueCloudProvisioning({
      workerId,
      name: parsed.data.name,
      hostToken,
      clientToken,
    })
  }
  // Echo the row shape back without re-reading it from the database.
  res.status(parsed.data.destination === "cloud" ? 202 : 201).json({
    worker: toWorkerResponse(
      {
        id: workerId,
        org_id: orgId,
        created_by_user_id: session.user.id,
        name: parsed.data.name,
        description: parsed.data.description ?? null,
        destination: parsed.data.destination,
        status: workerStatus,
        image_version: parsed.data.imageVersion ?? null,
        workspace_path: parsed.data.workspacePath ?? null,
        sandbox_backend: parsed.data.sandboxBackend ?? null,
        created_at: new Date(),
        updated_at: new Date(),
      },
      session.user.id,
    ),
    tokens: {
      host: hostToken,
      client: clientToken,
    },
    instance: null,
    launch: parsed.data.destination === "cloud" ? { mode: "async", pollAfterMs: 5000 } : { mode: "instant", pollAfterMs: 0 },
  })
}))
// GET /billing — returns the static experiment billing summary while Polar
// is disabled; the original Polar flow is retained below for quick restore.
workersRouter.get("/billing", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  res.json({
    billing: getExperimentBillingSummary(),
  })
  // Polar billing is temporarily disabled for the one-worker experiment.
  // const includeCheckoutUrl = queryIncludesFlag(req.query.includeCheckout)
  // const includePortalUrl = !queryIncludesFlag(req.query.excludePortal)
  // const includeInvoices = !queryIncludesFlag(req.query.excludeInvoices)
  //
  // const billingInput = {
  //   userId: session.user.id,
  //   email: session.user.email ?? `${session.user.id}@placeholder.local`,
  //   name: session.user.name ?? session.user.email ?? "OpenWork User",
  // }
  //
  // const billing = await getCloudWorkerBillingStatus(
  //   billingInput,
  //   {
  //     includeCheckoutUrl,
  //     includePortalUrl,
  //     includeInvoices,
  //   },
  // )
  //
  // res.json({
  //   billing: {
  //     ...billing,
  //     productId: env.polar.productId,
  //     benefitId: env.polar.benefitId,
  //   },
  // })
}))
// POST /billing/subscription — validates the body but is a no-op while Polar
// is disabled; always reports no subscription plus the experiment summary.
workersRouter.post("/billing/subscription", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  const parsed = billingSubscriptionSchema.safeParse(req.body ?? {})
  if (!parsed.success) {
    res.status(400).json({ error: "invalid_request", details: parsed.error.flatten() })
    return
  }
  res.json({
    subscription: null,
    billing: getExperimentBillingSummary(),
  })
  // Polar billing is temporarily disabled for the one-worker experiment.
  // const billingInput = {
  //   userId: session.user.id,
  //   email: session.user.email ?? `${session.user.id}@placeholder.local`,
  //   name: session.user.name ?? session.user.email ?? "OpenWork User",
  // }
  //
  // const subscription = await setCloudWorkerSubscriptionCancellation(billingInput, parsed.data.cancelAtPeriodEnd)
  // const billing = await getCloudWorkerBillingStatus(billingInput, {
  //   includeCheckoutUrl: false,
  //   includePortalUrl: true,
  //   includeInvoices: true,
  // })
  //
  // res.json({
  //   subscription,
  //   billing: {
  //     ...billing,
  //     productId: env.polar.productId,
  //     benefitId: env.polar.benefitId,
  //   },
  // })
}))
// GET /:id — fetch one worker scoped to the caller's org, plus its newest
// instance. Missing org, malformed id, and missing row all map to 404.
workersRouter.get("/:id", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const [worker] = await db
    .select()
    .from(WorkerTable)
    .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
    .limit(1)
  if (!worker) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const latestInstance = await getLatestWorkerInstance(worker.id)
  res.json({
    worker: toWorkerResponse(worker, session.user.id),
    instance: toInstanceResponse(latestInstance),
  })
}))
// POST /:id/tokens — re-issue the worker's existing unrevoked host/client
// tokens and attempt to resolve a live connect URL. 409 when either token is
// missing; falls back to the bare instance URL when no workspace resolves.
workersRouter.post("/:id/tokens", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const rows = await db
    .select()
    .from(WorkerTable)
    .where(eq(WorkerTable.id, workerId))
    .limit(1)
  // Org scoping is enforced after the fetch here; both cases return the same 404.
  if (rows.length === 0 || rows[0].org_id !== orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const tokenRows = await db
    .select()
    .from(WorkerTokenTable)
    .where(and(eq(WorkerTokenTable.worker_id, rows[0].id), isNull(WorkerTokenTable.revoked_at)))
    .orderBy(asc(WorkerTokenTable.created_at))
  const hostToken = tokenRows.find((entry) => entry.scope === "host")?.token ?? null
  const clientToken = tokenRows.find((entry) => entry.scope === "client")?.token ?? null
  if (!hostToken || !clientToken) {
    res.status(409).json({
      error: "worker_tokens_unavailable",
      message: "Worker tokens are missing for this worker. Launch a new worker and try again.",
    })
    return
  }
  const instance = await getLatestWorkerInstance(rows[0].id)
  const connect = await resolveConnectUrlFromCandidates(rows[0].id, instance?.url ?? null, clientToken)
  res.json({
    tokens: {
      host: hostToken,
      client: clientToken,
    },
    // No resolvable workspace: hand back the raw instance URL (if any) so the
    // client can still attempt a direct connection.
    connect: connect ?? (instance?.url ? { openworkUrl: instance.url, workspaceId: null } : null),
  })
}))
// GET /:id/runtime — proxy the worker's /runtime/versions endpoint, passing
// the upstream status and payload straight through.
workersRouter.get("/:id/runtime", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const [worker] = await db
    .select()
    .from(WorkerTable)
    .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
    .limit(1)
  if (!worker) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const runtime = await fetchWorkerRuntimeJson({
    workerId: worker.id,
    path: "/runtime/versions",
  })
  res.status(runtime.status).json(runtime.payload)
}))
// POST /:id/runtime/upgrade — forward an upgrade request (body included) to
// the worker runtime and relay the upstream status/payload.
workersRouter.post("/:id/runtime/upgrade", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const [worker] = await db
    .select()
    .from(WorkerTable)
    .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
    .limit(1)
  if (!worker) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const runtime = await fetchWorkerRuntimeJson({
    workerId: worker.id,
    path: "/runtime/upgrade",
    method: "POST",
    body: req.body ?? {},
  })
  res.status(runtime.status).json(runtime.payload)
}))
// DELETE /:id — tear down a worker. Cloud workers are deprovisioned best-
// effort first (failures are logged, not fatal), then all dependent rows and
// the worker itself are removed in one transaction, children before parent.
workersRouter.delete("/:id", asyncRoute(async (req, res) => {
  const session = await requireSession(req, res)
  if (!session) return
  const orgId = await getOrgId(session.user.id)
  if (!orgId) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  let workerId: WorkerId
  try {
    workerId = parseWorkerIdParam(req.params.id)
  } catch {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const rows = await db
    .select()
    .from(WorkerTable)
    .where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
    .limit(1)
  if (rows.length === 0) {
    res.status(404).json({ error: "worker_not_found" })
    return
  }
  const worker = rows[0]
  const instance = await getLatestWorkerInstance(worker.id)
  if (worker.destination === "cloud") {
    try {
      await deprovisionWorker({
        workerId: worker.id,
        instanceUrl: instance?.url ?? null,
      })
    } catch (error) {
      // Best-effort: an unreachable provider must not block row cleanup.
      const message = error instanceof Error ? error.message : "deprovision_failed"
      console.warn(`[workers] deprovision warning for ${worker.id}: ${message}`)
    }
  }
  await db.transaction(async (tx) => {
    await tx.delete(WorkerTokenTable).where(eq(WorkerTokenTable.worker_id, worker.id))
    await tx.delete(DaytonaSandboxTable).where(eq(DaytonaSandboxTable.worker_id, worker.id))
    await tx.delete(WorkerInstanceTable).where(eq(WorkerInstanceTable.worker_id, worker.id))
    await tx.delete(WorkerBundleTable).where(eq(WorkerBundleTable.worker_id, worker.id))
    await tx.delete(AuditEventTable).where(eq(AuditEventTable.worker_id, worker.id))
    await tx.delete(WorkerTable).where(eq(WorkerTable.id, worker.id))
  })
  res.status(204).end()
}))

View File

@@ -0,0 +1,67 @@
import "./load-env.js"
import cors from "cors"
import express from "express"
import path from "node:path"
import { fileURLToPath } from "node:url"
import { toNodeHandler } from "better-auth/node"
import { auth } from "./auth.js"
import { env } from "./env.js"
import { adminRouter } from "./http/admin.js"
import { asyncRoute, errorMiddleware } from "./http/errors.js"
import { getRequestSession } from "./http/session.js"
import { workersRouter } from "./http/workers.js"
import { normalizeDenTypeId } from "./db/typeid.js"
import { listUserOrgs } from "./orgs.js"
const app = express()
// Static assets live in ../public relative to this compiled module.
const currentFile = fileURLToPath(import.meta.url)
const publicDir = path.resolve(path.dirname(currentFile), "../public")
// Only enable CORS when origins are explicitly configured.
if (env.corsOrigins.length > 0) {
  app.use(
    cors({
      origin: env.corsOrigins,
      credentials: true,
      methods: ["GET", "POST", "PATCH", "DELETE"],
    }),
  )
}
// Mount the Better Auth handler BEFORE express.json(): Better Auth's Node
// handler must consume the raw request body itself, and a global JSON body
// parser in front of it breaks the auth endpoints (per Better Auth's Express
// integration documentation). Previously express.json() was registered first.
app.all("/api/auth/*", toNodeHandler(auth))
app.use(express.json())
app.use(express.static(publicDir))
// Liveness probe.
app.get("/health", (_, res) => {
  res.json({ ok: true })
})
// Return the caller's session as-is; 401 when unauthenticated.
app.get("/v1/me", asyncRoute(async (req, res) => {
  const session = await getRequestSession(req)
  if (!session?.user?.id) {
    res.status(401).json({ error: "unauthorized" })
    return
  }
  res.json(session)
}))
// List the caller's orgs; the first one doubles as the default org id.
app.get("/v1/me/orgs", asyncRoute(async (req, res) => {
  const session = await getRequestSession(req)
  if (!session?.user?.id) {
    res.status(401).json({ error: "unauthorized" })
    return
  }
  const orgs = await listUserOrgs(normalizeDenTypeId("user", session.user.id))
  res.json({
    orgs,
    defaultOrgId: orgs[0]?.id ?? null,
  })
}))
app.use("/v1/admin", adminRouter)
app.use("/v1/workers", workersRouter)
// Error middleware must be registered after all routes.
app.use(errorMiddleware)
app.listen(env.port, () => {
  console.log(`den listening on ${env.port} (provisioner=${env.provisionerMode})`)
})

View File

@@ -0,0 +1,45 @@
import { existsSync } from "node:fs"
import path from "node:path"
import { fileURLToPath } from "node:url"
import dotenv from "dotenv"
// Walk up from startDir looking for fileName, checking at most maxDepth + 1
// directories. Returns the first match's full path, or null when the file is
// not found or the filesystem root is reached.
function findUpwards(startDir: string, fileName: string, maxDepth = 8) {
  let dir = startDir
  let remaining = maxDepth
  while (remaining >= 0) {
    const candidate = path.join(dir, fileName)
    if (existsSync(candidate)) {
      return candidate
    }
    const parent = path.dirname(dir)
    if (parent === dir) {
      // Reached the filesystem root.
      return null
    }
    dir = parent
    remaining -= 1
  }
  return null
}
const srcDir = path.dirname(fileURLToPath(import.meta.url))
const serviceDir = path.resolve(srcDir, "..")
// Load service-local env files first; `override: false` means values already
// present in process.env (or loaded earlier) always win.
for (const filePath of [
  path.join(serviceDir, ".env.local"),
  path.join(serviceDir, ".env"),
]) {
  if (existsSync(filePath)) {
    dotenv.config({ path: filePath, override: false })
  }
}
// .env.daytona may live above the service directory in the repo tree; an
// explicitly configured path takes precedence over upward discovery.
const explicitDaytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim()
const detectedDaytonaEnvPath = findUpwards(path.resolve(serviceDir, "..", ".."), ".env.daytona")
const daytonaEnvPath = explicitDaytonaEnvPath || detectedDaytonaEnvPath
if (daytonaEnvPath && existsSync(daytonaEnvPath)) {
  dotenv.config({ path: daytonaEnvPath, override: false })
}
// Finally, fall back to a .env in the current working directory.
dotenv.config({ override: false })

View File

@@ -0,0 +1,65 @@
import { eq } from "./db/drizzle.js"
import { db } from "./db/index.js"
import { AuthUserTable, OrgMembershipTable, OrgTable } from "./db/schema.js"
import { createDenTypeId } from "./db/typeid.js"
type UserId = typeof AuthUserTable.$inferSelect.id
type OrgId = typeof OrgTable.$inferSelect.id
/**
 * Return the user's existing org, or create a personal org plus an "owner"
 * membership for them.
 *
 * Fix: the org and membership inserts now run inside one transaction — a
 * failure between the two previously left an orphan org row that getOrgId
 * (which only reads memberships) could never see. The transaction pattern
 * matches the one already used for worker deletion.
 *
 * NOTE(review): concurrent first-time calls for the same user can still race
 * the initial select and create two orgs — confirm whether a unique
 * constraint on membership user_id should back this up.
 */
export async function ensureDefaultOrg(userId: UserId, name: string): Promise<OrgId> {
  const existing = await db
    .select()
    .from(OrgMembershipTable)
    .where(eq(OrgMembershipTable.user_id, userId))
    .limit(1)
  if (existing.length > 0) {
    return existing[0].org_id
  }
  const orgId = createDenTypeId("org")
  // Slug derives from the generated id so it is unique per org.
  const slug = `personal-${orgId.slice(0, 8)}`
  await db.transaction(async (tx) => {
    await tx.insert(OrgTable).values({
      id: orgId,
      name,
      slug,
      owner_user_id: userId,
    })
    await tx.insert(OrgMembershipTable).values({
      id: createDenTypeId("orgMembership"),
      org_id: orgId,
      user_id: userId,
      role: "owner",
    })
  })
  return orgId
}
// List every org the user belongs to, joined with org details and flattened
// into an API-friendly shape (org fields hoisted next to membership fields).
export async function listUserOrgs(userId: UserId) {
  const rows = await db
    .select({
      membershipId: OrgMembershipTable.id,
      role: OrgMembershipTable.role,
      org: {
        id: OrgTable.id,
        name: OrgTable.name,
        slug: OrgTable.slug,
        ownerUserId: OrgTable.owner_user_id,
        createdAt: OrgTable.created_at,
        updatedAt: OrgTable.updated_at,
      },
    })
    .from(OrgMembershipTable)
    .innerJoin(OrgTable, eq(OrgMembershipTable.org_id, OrgTable.id))
    .where(eq(OrgMembershipTable.user_id, userId))
  return rows.map(({ org, role, membershipId }) => ({
    id: org.id,
    name: org.name,
    slug: org.slug,
    ownerUserId: org.ownerUserId,
    role,
    membershipId,
    createdAt: org.createdAt,
    updatedAt: org.updatedAt,
  }))
}

View File

@@ -0,0 +1,484 @@
import { Daytona, type Sandbox } from "@daytonaio/sdk"
import { eq } from "../db/drizzle.js"
import { db } from "../db/index.js"
import { DaytonaSandboxTable } from "../db/schema.js"
import { createDenTypeId } from "../db/typeid.js"
import { env } from "../env.js"
// Worker id type re-derived from the sandbox table so this module stays in
// sync with the shared schema.
type WorkerId = typeof DaytonaSandboxTable.$inferSelect.worker_id
// Everything needed to provision one worker's sandbox.
type ProvisionInput = {
  workerId: WorkerId
  name: string
  hostToken: string
  clientToken: string
}
// Normalized description of a provisioned runtime returned to the caller.
type ProvisionedInstance = {
  provider: string
  url: string
  status: "provisioning" | "healthy"
  region?: string
}
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
// Upper bound this service allows for a signed preview URL's expiry (24h).
const maxSignedPreviewExpirySeconds = 60 * 60 * 24
// Refresh signed previews 5 minutes before their recorded expiry.
const signedPreviewRefreshLeadMs = 5 * 60 * 1000
// Lowercase the input, collapse every run of characters outside [a-z0-9-]
// into a single hyphen, squeeze repeated hyphens, and strip a hyphen from
// either end.
const slug = (value: string) => {
  const lowered = value.toLowerCase()
  return lowered
    .replace(/[^a-z0-9-]+/g, "-")
    .replace(/-+/g, "-")
    .replace(/^-|-$/g, "")
}
// Single-quote a value for POSIX sh, splicing embedded single quotes through
// a double-quoted segment ('"'"') so they survive inside the quoted string.
function shellQuote(value: string) {
  const escaped = value.split("'").join(`'"'"'`)
  return `'${escaped}'`
}
// Build a Daytona SDK client from service env; `target` is only passed when
// configured so the SDK default applies otherwise.
function createDaytonaClient() {
  return new Daytona({
    apiKey: env.daytona.apiKey,
    apiUrl: env.daytona.apiUrl,
    ...(env.daytona.target ? { target: env.daytona.target } : {}),
  })
}
// Clamp the configured signed-preview expiry into [1s, 24h].
function normalizedSignedPreviewExpirySeconds() {
  return Math.max(
    1,
    Math.min(env.daytona.signedPreviewExpiresSeconds, maxSignedPreviewExpirySeconds),
  )
}
// Point in time at which a signed preview URL should be refreshed: its expiry
// minus the 5-minute lead, floored at "now".
function signedPreviewRefreshAt(expiresInSeconds: number) {
  return new Date(
    Date.now() + Math.max(0, expiresInSeconds * 1000 - signedPreviewRefreshLeadMs),
  )
}
// Stable per-worker URL on the dedicated worker proxy service.
function workerProxyUrl(workerId: WorkerId) {
  return `${env.daytona.workerProxyBaseUrl.replace(/\/+$/, "")}/${encodeURIComponent(workerId)}`
}
// Fail fast when the Daytona API key is not configured.
function assertDaytonaConfig() {
  if (!env.daytona.apiKey) {
    throw new Error("DAYTONA_API_KEY is required for daytona provisioner")
  }
}
// Short, hyphen-free fingerprint of a worker id (first 12 chars) for
// embedding in Daytona resource names.
function workerHint(workerId: WorkerId) {
  return workerId.split("-").join("").slice(0, 12)
}
// Labels stamped onto every Den-managed sandbox so it can be traced back to
// its worker record and provider.
function sandboxLabels(workerId: WorkerId) {
  const labels = {
    "openwork.den.provider": "daytona",
    "openwork.den.worker-id": workerId,
  }
  return labels
}
// Sandbox name: configured prefix + worker name + worker hint, slugged and
// capped at the 63-character resource-name limit.
function sandboxName(input: ProvisionInput) {
  return slug(
    `${env.daytona.sandboxNamePrefix}-${input.name}-${workerHint(input.workerId)}`,
  ).slice(0, 63)
}
// Name for the persistent volume mounted as the worker's workspace.
function workspaceVolumeName(workerId: WorkerId) {
  return slug(`${env.daytona.volumeNamePrefix}-${workerHint(workerId)}-workspace`).slice(0, 63)
}
// Name for the persistent volume holding the worker's runtime data.
function dataVolumeName(workerId: WorkerId) {
  return slug(`${env.daytona.volumeNamePrefix}-${workerHint(workerId)}-data`).slice(0, 63)
}
// Build the `sh -lc ...` command that boots openwork inside the sandbox:
// prepares mount/symlink layout, installs openwork-orchestrator if missing
// (opencode must already be baked into the snapshot), then runs
// `openwork serve` with up to 3 retries. The whole script is shell-quoted.
function buildOpenWorkStartCommand(input: ProvisionInput) {
  // Pin the orchestrator version when one is configured; otherwise latest.
  const orchestratorPackage = env.daytona.openworkVersion?.trim()
    ? `openwork-orchestrator@${env.daytona.openworkVersion.trim()}`
    : "openwork-orchestrator"
  const installStep = [
    `if ! command -v openwork >/dev/null 2>&1; then npm install -g ${shellQuote(orchestratorPackage)}; fi`,
    "if ! command -v opencode >/dev/null 2>&1; then echo 'opencode binary missing from Daytona runtime; bake it into the snapshot image and expose it on PATH' >&2; exit 1; fi",
  ].join("; ")
  // Assembled as env-var assignments followed by the serve invocation; the
  // pieces are concatenated with no separator, so the leading/trailing spaces
  // inside each fragment are significant.
  const openworkServe = [
    "OPENWORK_DATA_DIR=",
    shellQuote(env.daytona.runtimeDataPath),
    " OPENWORK_SIDECAR_DIR=",
    shellQuote(env.daytona.sidecarDir),
    " OPENWORK_TOKEN=",
    shellQuote(input.clientToken),
    " OPENWORK_HOST_TOKEN=",
    shellQuote(input.hostToken),
    " openwork serve",
    ` --workspace ${shellQuote(env.daytona.runtimeWorkspacePath)}`,
    ` --openwork-host 0.0.0.0`,
    ` --openwork-port ${env.daytona.openworkPort}`,
    ` --opencode-host 127.0.0.1`,
    ` --opencode-port ${env.daytona.opencodePort}`,
    ` --connect-host 127.0.0.1`,
    ` --cors '*'`,
    ` --approval manual`,
    ` --allow-external`,
    ` --opencode-source external`,
    ` --opencode-bin $(command -v opencode)`,
    ` --no-opencode-router`,
    ` --verbose`,
  ].join("")
  const script = `
set -u
mkdir -p ${shellQuote(env.daytona.workspaceMountPath)} ${shellQuote(env.daytona.dataMountPath)} ${shellQuote(env.daytona.runtimeWorkspacePath)} ${shellQuote(env.daytona.runtimeDataPath)} ${shellQuote(env.daytona.sidecarDir)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes`)}
ln -sfn ${shellQuote(env.daytona.workspaceMountPath)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes/workspace`) }
ln -sfn ${shellQuote(env.daytona.dataMountPath)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes/data`) }
${installStep}
attempt=0
while [ "$attempt" -lt 3 ]; do
attempt=$((attempt + 1))
if ${openworkServe}; then
exit 0
fi
status=$?
echo "openwork serve failed (attempt $attempt, exit $status); retrying in 3s"
sleep 3
done
exit 1
`.trim()
  return `sh -lc ${shellQuote(script)}`
}
// Poll a Daytona volume until its state is "ready", re-fetching every
// pollIntervalMs. Throws after timeoutMs; SDK errors from volume.get
// propagate to the caller.
async function waitForVolumeReady(daytona: Daytona, name: string, timeoutMs: number) {
  const startedAt = Date.now()
  while (Date.now() - startedAt < timeoutMs) {
    const volume = await daytona.volume.get(name)
    if (volume.state === "ready") {
      return volume
    }
    await sleep(env.daytona.pollIntervalMs)
  }
  throw new Error(`Timed out waiting for Daytona volume ${name} to become ready`)
}
// Poll the worker's /health endpoint until it answers 2xx or timeoutMs
// elapses. Between polls, the openwork session command is inspected: a
// non-zero exit aborts early with its captured logs instead of waiting out
// the timeout. Errors from the status check itself are swallowed so that a
// flaky SDK call cannot mask a still-starting worker.
async function waitForHealth(url: string, timeoutMs: number, sandbox: Sandbox, sessionId: string, commandId: string) {
  const startedAt = Date.now()
  while (Date.now() - startedAt < timeoutMs) {
    try {
      const response = await fetch(`${url.replace(/\/$/, "")}/health`, { method: "GET" })
      if (response.ok) {
        return
      }
    } catch {
      // ignore transient startup failures
    }
    try {
      const command = await sandbox.process.getSessionCommand(sessionId, commandId)
      if (typeof command.exitCode === "number" && command.exitCode !== 0) {
        // The serve script died; surface the tail of its logs (last 4000
        // chars of each stream) for diagnosis.
        const logs = await sandbox.process.getSessionCommandLogs(sessionId, commandId)
        throw new Error(
          [
            `openwork session exited with ${command.exitCode}`,
            logs.stdout?.trim() ? `stdout:\n${logs.stdout.trim().slice(-4000)}` : "",
            logs.stderr?.trim() ? `stderr:\n${logs.stderr.trim().slice(-4000)}` : "",
          ]
            .filter(Boolean)
            .join("\n\n"),
        )
      }
    } catch (error) {
      // Re-throw only our own "session exited" error (matched by message
      // prefix); SDK lookup failures are ignored and polling continues.
      if (error instanceof Error && error.message.startsWith("openwork session exited")) {
        throw error
      }
    }
    await sleep(env.daytona.pollIntervalMs)
  }
  // Timed out: best-effort log capture for the error message.
  const logs = await sandbox.process.getSessionCommandLogs(sessionId, commandId).catch(
    () => null,
  )
  throw new Error(
    [
      `Timed out waiting for Daytona worker health at ${url.replace(/\/$/, "")}/health`,
      logs?.stdout?.trim() ? `stdout:\n${logs.stdout.trim().slice(-4000)}` : "",
      logs?.stderr?.trim() ? `stderr:\n${logs.stderr.trim().slice(-4000)}` : "",
    ]
      .filter(Boolean)
      .join("\n\n"),
  )
}
// Record (or replace) the Daytona bookkeeping row for a worker: sandbox id,
// volume ids, and the current signed preview URL + expiry.
// NOTE(review): select-then-insert is not atomic; two concurrent provisions
// for the same worker could insert duplicate rows — confirm whether a unique
// index on worker_id backs this up.
async function upsertDaytonaSandbox(input: {
  workerId: WorkerId
  sandboxId: string
  workspaceVolumeId: string
  dataVolumeId: string
  signedPreviewUrl: string
  signedPreviewUrlExpiresAt: Date
  region: string | null
}) {
  const existing = await db
    .select({ id: DaytonaSandboxTable.id })
    .from(DaytonaSandboxTable)
    .where(eq(DaytonaSandboxTable.worker_id, input.workerId))
    .limit(1)
  if (existing.length > 0) {
    await db
      .update(DaytonaSandboxTable)
      .set({
        sandbox_id: input.sandboxId,
        workspace_volume_id: input.workspaceVolumeId,
        data_volume_id: input.dataVolumeId,
        signed_preview_url: input.signedPreviewUrl,
        signed_preview_url_expires_at: input.signedPreviewUrlExpiresAt,
        region: input.region,
      })
      .where(eq(DaytonaSandboxTable.worker_id, input.workerId))
    return
  }
  await db.insert(DaytonaSandboxTable).values({
    id: createDenTypeId("daytonaSandbox"),
    worker_id: input.workerId,
    sandbox_id: input.sandboxId,
    workspace_volume_id: input.workspaceVolumeId,
    data_volume_id: input.dataVolumeId,
    signed_preview_url: input.signedPreviewUrl,
    signed_preview_url_expires_at: input.signedPreviewUrlExpiresAt,
    region: input.region,
  })
}
// Fetch the Daytona bookkeeping row for a worker, or null when none exists.
export async function getDaytonaSandboxRecord(workerId: WorkerId) {
  const [record] = await db
    .select()
    .from(DaytonaSandboxTable)
    .where(eq(DaytonaSandboxTable.worker_id, workerId))
    .limit(1)
  return record ?? null
}
// Mint a fresh signed preview URL for the worker's sandbox and persist it
// (with a refresh deadline 5 minutes before actual expiry) alongside the
// sandbox's current region. Returns the updated record shape, or null when
// no sandbox record exists for the worker.
export async function refreshDaytonaSignedPreview(workerId: WorkerId) {
  assertDaytonaConfig()
  const record = await getDaytonaSandboxRecord(workerId)
  if (!record) {
    return null
  }
  const daytona = createDaytonaClient()
  const sandbox = await daytona.get(record.sandbox_id)
  // Refresh SDK-side state so target/region reflect the live sandbox.
  await sandbox.refreshData()
  const expiresInSeconds = normalizedSignedPreviewExpirySeconds()
  const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds)
  const expiresAt = signedPreviewRefreshAt(expiresInSeconds)
  await db
    .update(DaytonaSandboxTable)
    .set({
      signed_preview_url: preview.url,
      signed_preview_url_expires_at: expiresAt,
      region: sandbox.target,
    })
    .where(eq(DaytonaSandboxTable.worker_id, workerId))
  // Return the record with the refreshed fields overlaid (no re-read).
  return {
    ...record,
    signed_preview_url: preview.url,
    signed_preview_url_expires_at: expiresAt,
    region: sandbox.target,
  }
}
// Signed preview URL for the worker proxy: returns the stored URL while its
// refresh deadline is still in the future, otherwise mints a new one.
// Null when the worker has no sandbox record.
export async function getDaytonaSignedPreviewForProxy(workerId: WorkerId) {
  const record = await getDaytonaSandboxRecord(workerId)
  if (!record) {
    return null
  }
  if (record.signed_preview_url_expires_at.getTime() > Date.now()) {
    return record.signed_preview_url
  }
  const refreshed = await refreshDaytonaSignedPreview(workerId)
  return refreshed?.signed_preview_url ?? null
}
/**
 * Create (or reuse) the Daytona volumes and sandbox for a worker, start the
 * openwork process inside it, wait for health, and persist the sandbox and
 * preview metadata. On any failure the sandbox and volumes are cleaned up
 * best-effort before rethrowing the original error.
 *
 * The snapshot and image create paths previously duplicated ~30 lines of
 * options; they now share one options object and differ only in runtime.
 */
export async function provisionWorkerOnDaytona(
  input: ProvisionInput,
): Promise<ProvisionedInstance> {
  assertDaytonaConfig()
  const daytona = createDaytonaClient()
  const labels = sandboxLabels(input.workerId)
  const workspaceVolumeNameValue = workspaceVolumeName(input.workerId)
  const dataVolumeNameValue = dataVolumeName(input.workerId)
  // volume.get(name, true) creates the volume when it does not already exist.
  await daytona.volume.get(workspaceVolumeNameValue, true)
  await daytona.volume.get(dataVolumeNameValue, true)
  const workspaceVolume = await waitForVolumeReady(
    daytona,
    workspaceVolumeNameValue,
    env.daytona.createTimeoutSeconds * 1000,
  )
  const dataVolume = await waitForVolumeReady(
    daytona,
    dataVolumeNameValue,
    env.daytona.createTimeoutSeconds * 1000,
  )
  // Options shared by both the snapshot-based and image-based create paths.
  const commonCreateOptions = {
    name: sandboxName(input),
    autoStopInterval: env.daytona.autoStopInterval,
    autoArchiveInterval: env.daytona.autoArchiveInterval,
    autoDeleteInterval: env.daytona.autoDeleteInterval,
    public: env.daytona.public,
    labels,
    envVars: {
      DEN_WORKER_ID: input.workerId,
    },
    volumes: [
      {
        volumeId: workspaceVolume.id,
        mountPath: env.daytona.workspaceMountPath,
      },
      {
        volumeId: dataVolume.id,
        mountPath: env.daytona.dataMountPath,
      },
    ],
  }
  let sandbox: Awaited<ReturnType<typeof daytona.create>> | null = null
  try {
    // Prefer the prebuilt snapshot (tools preinstalled); otherwise boot from
    // the base image with explicit resource sizing.
    sandbox = await daytona.create(
      env.daytona.snapshot
        ? { ...commonCreateOptions, snapshot: env.daytona.snapshot }
        : {
            ...commonCreateOptions,
            image: env.daytona.image,
            resources: {
              cpu: env.daytona.resources.cpu,
              memory: env.daytona.resources.memory,
              disk: env.daytona.resources.disk,
            },
          },
      { timeout: env.daytona.createTimeoutSeconds },
    )
    const sessionId = `openwork-${workerHint(input.workerId)}`
    await sandbox.process.createSession(sessionId)
    // Launch openwork asynchronously inside the sandbox session.
    const command = await sandbox.process.executeSessionCommand(
      sessionId,
      {
        command: buildOpenWorkStartCommand(input),
        runAsync: true,
      },
      0,
    )
    const expiresInSeconds = normalizedSignedPreviewExpirySeconds()
    const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds)
    await waitForHealth(preview.url, env.daytona.healthcheckTimeoutMs, sandbox, sessionId, command.cmdId)
    await upsertDaytonaSandbox({
      workerId: input.workerId,
      sandboxId: sandbox.id,
      workspaceVolumeId: workspaceVolume.id,
      dataVolumeId: dataVolume.id,
      signedPreviewUrl: preview.url,
      signedPreviewUrlExpiresAt: signedPreviewRefreshAt(expiresInSeconds),
      region: sandbox.target ?? null,
    })
    return {
      provider: "daytona",
      // Clients connect through the proxy so expiring signed URLs stay internal.
      url: workerProxyUrl(input.workerId),
      status: "healthy",
      region: sandbox.target,
    }
  } catch (error) {
    // Best-effort cleanup; cleanup failures must not mask the original error.
    if (sandbox) {
      await sandbox.delete(env.daytona.deleteTimeoutSeconds).catch(() => {})
    }
    await daytona.volume.delete(workspaceVolume).catch(() => {})
    await daytona.volume.delete(dataVolume).catch(() => {})
    throw error
  }
}
export async function deprovisionWorkerOnDaytona(workerId: WorkerId) {
assertDaytonaConfig()
const daytona = createDaytonaClient()
const record = await getDaytonaSandboxRecord(workerId)
if (record) {
try {
const sandbox = await daytona.get(record.sandbox_id)
await sandbox.delete(env.daytona.deleteTimeoutSeconds)
} catch (error) {
const message = error instanceof Error ? error.message : "unknown_error"
console.warn(`[provisioner] failed to delete Daytona sandbox ${record.sandbox_id}: ${message}`)
}
const volumes = await daytona.volume.list().catch(() => [])
for (const volumeId of [record.workspace_volume_id, record.data_volume_id]) {
const volume = volumes.find((entry) => entry.id === volumeId)
if (!volume) {
continue
}
await daytona.volume.delete(volume).catch((error) => {
const message = error instanceof Error ? error.message : "unknown_error"
console.warn(`[provisioner] failed to delete Daytona volume ${volumeId}: ${message}`)
})
}
return
}
const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20)
for (const sandbox of sandboxes.items) {
await sandbox.delete(env.daytona.deleteTimeoutSeconds).catch((error) => {
const message = error instanceof Error ? error.message : "unknown_error"
console.warn(`[provisioner] failed to delete Daytona sandbox ${sandbox.id}: ${message}`)
})
}
const volumes = await daytona.volume.list()
for (const name of [workspaceVolumeName(workerId), dataVolumeName(workerId)]) {
const volume = volumes.find((entry) => entry.name === name)
if (!volume) {
continue
}
await daytona.volume.delete(volume).catch((error) => {
const message = error instanceof Error ? error.message : "unknown_error"
console.warn(`[provisioner] failed to delete Daytona volume ${name}: ${message}`)
})
}
}

View File

@@ -0,0 +1,405 @@
import { env } from "../env.js";
import { WorkerTable } from "../db/schema.js";
import {
deprovisionWorkerOnDaytona,
provisionWorkerOnDaytona,
} from "./daytona.js";
import {
customDomainForWorker,
ensureVercelDnsRecord,
} from "./vanity-domain.js";
// Worker primary-key type derived from the Drizzle WorkerTable row shape.
type WorkerId = typeof WorkerTable.$inferSelect.id;
// Inputs required to provision a cloud worker instance.
export type ProvisionInput = {
  workerId: WorkerId;
  name: string;
  hostToken: string; // NOTE(review): presumably the worker-host auth token — confirm
  clientToken: string; // NOTE(review): presumably the client-facing auth token — confirm
};
// What a provisioner reports back once an instance exists.
export type ProvisionedInstance = {
  provider: string;
  url: string;
  status: "provisioning" | "healthy";
  region?: string;
};
// Partial shape of a Render service object (only the fields read here).
type RenderService = {
  id: string;
  name?: string;
  slug?: string;
  serviceDetails?: {
    url?: string;
    region?: string;
  };
};
// One row of Render's cursor-paginated "list services" response.
type RenderServiceListRow = {
  cursor?: string;
  service?: RenderService;
};
// Partial shape of a Render deploy object.
type RenderDeploy = {
  id: string;
  status: string;
};
// Deploy states after which Render will not progress further.
const terminalDeployStates = new Set([
  "live",
  "update_failed",
  "build_failed",
  "canceled",
]);
// Resolve after `ms` milliseconds.
function sleep(ms: number): Promise<unknown> {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
// Reduce a string to a lowercase, dash-delimited identifier.
function slug(value: string): string {
  const joined = value.toLowerCase().split(/[^a-z0-9-]+/).join("-");
  return joined.replace(/-+/g, "-").replace(/^-|-$/g, "");
}
// Lowercased host (host[:port]) of a URL string; "" when absent or invalid.
function hostFromUrl(value: string | null | undefined): string {
  if (value) {
    try {
      return new URL(value).host.toLowerCase();
    } catch {
      // not a parseable URL; treat as "no host"
    }
  }
  return "";
}
/**
 * Call the Render REST API with bearer auth and JSON defaults.
 * Throws with a truncated response body on non-2xx; returns null for
 * empty response bodies.
 */
async function renderRequest<T>(
  path: string,
  init: RequestInit = {},
): Promise<T> {
  const headers = new Headers(init.headers);
  headers.set("Authorization", `Bearer ${env.render.apiKey}`);
  headers.set("Accept", "application/json");
  if (init.body && !headers.has("Content-Type")) {
    headers.set("Content-Type", "application/json");
  }
  const response = await fetch(`${env.render.apiBase}${path}`, { ...init, headers });
  const raw = await response.text();
  if (!response.ok) {
    throw new Error(
      `Render API ${path} failed (${response.status}): ${raw.slice(0, 400)}`,
    );
  }
  return raw ? (JSON.parse(raw) as T) : (null as T);
}
/**
 * Poll Render until the newest deploy of a service reaches a terminal state.
 * Resolves with the deploy when it goes "live"; throws on failure states or
 * when the overall provisioning timeout elapses.
 */
async function waitForDeployLive(serviceId: string) {
  const deadline = Date.now() + env.render.provisionTimeoutMs;
  while (Date.now() < deadline) {
    const rows = await renderRequest<Array<{ deploy: RenderDeploy }>>(
      `/services/${serviceId}/deploys?limit=1`,
    );
    const deploy = rows[0]?.deploy ?? null;
    if (deploy && terminalDeployStates.has(deploy.status)) {
      if (deploy.status === "live") {
        return deploy;
      }
      throw new Error(
        `Render deploy ${deploy.id} ended with ${deploy.status}`,
      );
    }
    await sleep(env.render.pollIntervalMs);
  }
  throw new Error(
    `Timed out waiting for Render deploy for service ${serviceId}`,
  );
}
/**
 * Poll `<url>/health` until it returns a 2xx or timeoutMs elapses.
 * Network errors during boot are treated as "not ready yet".
 */
async function waitForHealth(
  url: string,
  timeoutMs = env.render.healthcheckTimeoutMs,
) {
  const healthUrl = `${url.replace(/\/$/, "")}/health`;
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    const healthy = await fetch(healthUrl, { method: "GET" })
      .then((response) => response.ok)
      .catch(() => false);
    if (healthy) {
      return;
    }
    await sleep(env.render.pollIntervalMs);
  }
  throw new Error(`Timed out waiting for worker health endpoint ${healthUrl}`);
}
/**
 * Page through Render's service list (100 per request) until `limit`
 * services are collected or pagination is exhausted.
 */
async function listRenderServices(limit = 200) {
  const collected: RenderService[] = [];
  let cursor: string | undefined;
  while (collected.length < limit) {
    const query = new URLSearchParams({ limit: "100" });
    if (cursor) {
      query.set("cursor", cursor);
    }
    const page = await renderRequest<RenderServiceListRow[]>(
      `/services?${query.toString()}`,
    );
    if (page.length === 0) {
      break;
    }
    for (const row of page) {
      if (row.service?.id) {
        collected.push(row.service);
      }
    }
    const nextCursor = page[page.length - 1]?.cursor;
    if (!nextCursor || nextCursor === cursor) {
      break;
    }
    cursor = nextCursor;
  }
  return collected.slice(0, limit);
}
/**
 * Attach a vanity custom domain to a Render service and upsert the matching
 * CNAME in Vercel DNS. Returns the https vanity URL on success, or null when
 * no suffix is configured or any step fails (caller then keeps the Render URL).
 */
async function attachRenderCustomDomain(
  serviceId: string,
  workerId: string,
  renderUrl: string,
) {
  const hostname = customDomainForWorker(
    workerId,
    env.render.workerPublicDomainSuffix,
  );
  if (!hostname) {
    return null;
  }
  try {
    await renderRequest(`/services/${serviceId}/custom-domains`, {
      method: "POST",
      body: JSON.stringify({ name: hostname }),
    });
    const dnsReady = await ensureVercelDnsRecord({
      hostname,
      targetUrl: renderUrl,
      domain: env.vercel.dnsDomain ?? env.render.workerPublicDomainSuffix,
      apiBase: env.vercel.apiBase,
      token: env.vercel.token,
      teamId: env.vercel.teamId,
      teamSlug: env.vercel.teamSlug,
    });
    if (dnsReady) {
      return `https://${hostname}`;
    }
    console.warn(
      `[provisioner] vanity dns upsert skipped or failed for ${hostname}; using Render URL fallback`,
    );
    return null;
  } catch (error) {
    const message = error instanceof Error ? error.message : "unknown_error";
    console.warn(
      `[provisioner] custom domain attach failed for ${serviceId}: ${message}`,
    );
    return null;
  }
}
/** Fail fast when the Render provisioner is missing required credentials. */
function assertRenderConfig() {
  const required = [
    [env.render.apiKey, "RENDER_API_KEY"],
    [env.render.ownerId, "RENDER_OWNER_ID"],
  ] as const;
  for (const [value, name] of required) {
    if (!value) {
      throw new Error(`${name} is required for render provisioner`);
    }
  }
}
/**
 * Create a Render web service for a cloud worker, wait for the first deploy
 * to go live and the /health endpoint to respond, then optionally attach a
 * vanity custom domain. Returns the reachable instance details.
 */
async function provisionWorkerOnRender(
  input: ProvisionInput,
): Promise<ProvisionedInstance> {
  assertRenderConfig();
  // Slug + truncate keeps the name within Render's service-name limits.
  const serviceName = slug(
    `${env.render.workerNamePrefix}-${input.name}-${input.workerId.slice(0, 8)}`,
  ).slice(0, 62);
  // Optionally pin the orchestrator npm package to a configured version.
  const orchestratorPackage = env.render.workerOpenworkVersion?.trim()
    ? `openwork-orchestrator@${env.render.workerOpenworkVersion.trim()}`
    : "openwork-orchestrator";
  const buildCommand = [
    `npm install -g ${orchestratorPackage}`,
    "node ./scripts/install-opencode.mjs",
  ].join(" && ");
  // Start command retries `openwork serve` up to 3 times (3s apart) before
  // exiting non-zero so Render marks the deploy failed.
  const startCommand = [
    "mkdir -p /tmp/workspace",
    "attempt=0; while [ $attempt -lt 3 ]; do attempt=$((attempt + 1)); openwork serve --workspace /tmp/workspace --openwork-host 0.0.0.0 --openwork-port ${PORT:-10000} --opencode-host 127.0.0.1 --opencode-port 4096 --connect-host 127.0.0.1 --cors '*' --approval manual --allow-external --opencode-source external --opencode-bin ./bin/opencode --no-opencode-router --verbose && exit 0; echo \"openwork serve failed (attempt $attempt); retrying in 3s\"; sleep 3; done; exit 1",
  ].join(" && ");
  const payload = {
    type: "web_service",
    name: serviceName,
    ownerId: env.render.ownerId,
    repo: env.render.workerRepo,
    branch: env.render.workerBranch,
    // Deploys are triggered explicitly, not on repo pushes.
    autoDeploy: "no",
    rootDir: env.render.workerRootDir,
    envVars: [
      { key: "OPENWORK_TOKEN", value: input.clientToken },
      { key: "OPENWORK_HOST_TOKEN", value: input.hostToken },
      { key: "DEN_WORKER_ID", value: input.workerId },
    ],
    serviceDetails: {
      runtime: "node",
      plan: env.render.workerPlan,
      region: env.render.workerRegion,
      healthCheckPath: "/health",
      envSpecificDetails: {
        buildCommand,
        startCommand,
      },
    },
  };
  const created = await renderRequest<{ service: RenderService }>("/services", {
    method: "POST",
    body: JSON.stringify(payload),
  });
  const serviceId = created.service.id;
  await waitForDeployLive(serviceId);
  // Read the service back to obtain its public URL.
  const service = await renderRequest<RenderService>(`/services/${serviceId}`);
  const renderUrl = service.serviceDetails?.url;
  if (!renderUrl) {
    throw new Error(`Render service ${serviceId} has no public URL`);
  }
  await waitForHealth(renderUrl);
  const customUrl = await attachRenderCustomDomain(
    serviceId,
    input.workerId,
    renderUrl,
  );
  let url = renderUrl;
  if (customUrl) {
    // Prefer the vanity domain, but fall back to the Render URL when the
    // domain is not serving traffic within the configured window.
    try {
      await waitForHealth(customUrl, env.render.customDomainReadyTimeoutMs);
      url = customUrl;
    } catch {
      console.warn(
        `[provisioner] vanity domain not ready yet for ${input.workerId}; returning Render URL fallback`,
      );
    }
  }
  return {
    provider: "render",
    url,
    status: "healthy",
    region: service.serviceDetails?.region ?? env.render.workerRegion,
  };
}
/**
 * Dispatch worker provisioning to the configured backend: Render, Daytona,
 * or a stub that only templates a URL (used for local development).
 */
export async function provisionWorker(
  input: ProvisionInput,
): Promise<ProvisionedInstance> {
  switch (env.provisionerMode) {
    case "render":
      return provisionWorkerOnRender(input);
    case "daytona":
      return provisionWorkerOnDaytona(input);
    default: {
      const template = env.workerUrlTemplate ?? "https://workers.local/{workerId}";
      return {
        provider: "stub",
        url: template.replace("{workerId}", input.workerId),
        status: "provisioning",
      };
    }
  }
}
export async function deprovisionWorker(input: {
workerId: WorkerId;
instanceUrl: string | null;
}) {
if (env.provisionerMode === "daytona") {
await deprovisionWorkerOnDaytona(input.workerId);
return;
}
if (env.provisionerMode !== "render") {
return;
}
assertRenderConfig();
const targetHost = hostFromUrl(input.instanceUrl);
const workerHint = input.workerId.slice(0, 8).toLowerCase();
const services = await listRenderServices();
const target =
services.find((service) => {
if (service.name?.toLowerCase().includes(workerHint)) {
return true;
}
if (
targetHost &&
hostFromUrl(service.serviceDetails?.url) === targetHost
) {
return true;
}
return false;
}) ?? null;
if (!target) {
return;
}
try {
await renderRequest(`/services/${target.id}/suspend`, {
method: "POST",
body: JSON.stringify({}),
});
} catch (error) {
const message = error instanceof Error ? error.message : "unknown_error";
console.warn(
`[provisioner] failed to suspend Render service ${target.id}: ${message}`,
);
}
}

View File

@@ -0,0 +1,183 @@
/** Trim surrounding whitespace and drop any trailing slashes. */
function normalizeUrl(value: string): string {
  const trimmed = value.trim()
  return trimmed.replace(/\/+$/, "")
}
/** Reduce a string to a lowercase, dash-delimited DNS-safe label. */
function slug(value: string) {
  const joined = value.toLowerCase().split(/[^a-z0-9-]+/).join("-")
  return joined.replace(/-+/g, "-").replace(/^-|-$/g, "")
}
/**
 * Return the subdomain portion of `hostname` relative to `domain`:
 * "" when they match exactly, null when hostname is not under domain
 * (or either input is blank).
 */
function splitHostname(hostname: string, domain: string): string | null {
  const host = hostname.trim().toLowerCase()
  const root = domain.trim().toLowerCase()
  if (!host || !root) {
    return null
  }
  if (host === root) {
    return ""
  }
  const suffix = `.${root}`
  if (!host.endsWith(suffix)) {
    return null
  }
  return host.slice(0, host.length - suffix.length)
}
/** Lowercased host (including port) of a URL string, or null when unparsable. */
function hostFromUrl(value: string): string | null {
  try {
    // Trailing slashes and surrounding whitespace never affect the host.
    const parsed = new URL(value.trim().replace(/\/+$/, ""))
    return parsed.host.toLowerCase()
  } catch {
    return null
  }
}
/** Attach Vercel team scoping query params; a non-blank teamId wins over teamSlug. */
function withVercelScope(url: URL, teamId?: string, teamSlug?: string) {
  const id = teamId?.trim()
  const slugValue = teamSlug?.trim()
  if (id) {
    url.searchParams.set("teamId", id)
  } else if (slugValue) {
    url.searchParams.set("slug", slugValue)
  }
  return url
}
// Subset of a Vercel DNS record as returned by the records API.
type VercelDnsRecord = {
  id: string
  type?: string
  name?: string
  value?: string
}
/**
 * Call the Vercel REST API with bearer auth and optional team scoping.
 * Throws with a truncated body on non-2xx; returns null for empty bodies.
 */
async function vercelRequest<T>(input: {
  apiBase: string
  token: string
  path: string
  teamId?: string
  teamSlug?: string
  method?: "GET" | "POST" | "PATCH"
  body?: unknown
}): Promise<T> {
  const base = normalizeUrl(input.apiBase || "https://api.vercel.com")
  const url = withVercelScope(new URL(`${base}${input.path}`), input.teamId, input.teamSlug)
  const headers = new Headers({
    Authorization: `Bearer ${input.token}`,
    Accept: "application/json",
  })
  const hasBody = typeof input.body !== "undefined"
  if (hasBody) {
    headers.set("Content-Type", "application/json")
  }
  const response = await fetch(url, {
    method: input.method ?? "GET",
    headers,
    ...(hasBody ? { body: JSON.stringify(input.body) } : {}),
  })
  const raw = await response.text()
  if (!response.ok) {
    throw new Error(`Vercel API ${input.path} failed (${response.status}): ${raw.slice(0, 300)}`)
  }
  if (!raw) {
    return null as T
  }
  return JSON.parse(raw) as T
}
/**
 * Build the vanity hostname for a worker under the configured suffix.
 * Returns null when the suffix is blank or the worker id slugs to nothing.
 */
export function customDomainForWorker(workerId: string, suffix: string | null | undefined): string | null {
  const normalizedSuffix = suffix?.trim().toLowerCase()
  if (!normalizedSuffix) {
    return null
  }
  // Slugify the worker id and cap at 32 chars to keep a valid DNS label.
  const label = workerId
    .toLowerCase()
    .replace(/[^a-z0-9-]+/g, "-")
    .replace(/-+/g, "-")
    .replace(/^-|-$/g, "")
    .slice(0, 32)
  if (!label) {
    return null
  }
  return `${label}.${normalizedSuffix}`
}
/**
 * Ensure a CNAME record `hostname -> host(targetUrl)` exists in the Vercel
 * DNS zone for `domain`, creating or updating it as needed.
 * Returns true when the record exists or was written, false when the
 * configuration is incomplete or hostname is not under the managed domain.
 */
export async function ensureVercelDnsRecord(input: {
  hostname: string
  targetUrl: string
  domain: string | null | undefined
  apiBase?: string
  token?: string
  teamId?: string
  teamSlug?: string
}): Promise<boolean> {
  const domain = input.domain?.trim().toLowerCase()
  const token = input.token?.trim()
  if (!domain || !token) {
    return false
  }
  const name = splitHostname(input.hostname, domain)
  const targetHost = hostFromUrl(input.targetUrl)
  if (name === null || !targetHost) {
    return false
  }
  // Shared request options for every Vercel call below.
  const scope = {
    apiBase: input.apiBase ?? "https://api.vercel.com",
    token,
    teamId: input.teamId,
    teamSlug: input.teamSlug,
  }
  const list = await vercelRequest<{ records?: VercelDnsRecord[] }>({
    ...scope,
    path: `/v4/domains/${encodeURIComponent(domain)}/records`,
  })
  // vercelRequest returns null for empty response bodies, so guard before
  // reading .records (previously `list.records` could throw a TypeError).
  const maybeRecords = list?.records
  const records = Array.isArray(maybeRecords) ? maybeRecords : []
  const current = records.find((record) => {
    if (!record?.id) {
      return false
    }
    if ((record.type ?? "").toUpperCase() !== "CNAME") {
      return false
    }
    return (record.name ?? "") === name
  })
  if (current && (current.value ?? "").toLowerCase() === targetHost.toLowerCase()) {
    // Record already points at the right host; nothing to do.
    return true
  }
  const payload = {
    name,
    type: "CNAME",
    value: targetHost,
  }
  if (current?.id) {
    await vercelRequest({
      ...scope,
      method: "PATCH",
      path: `/v4/domains/${encodeURIComponent(domain)}/records/${encodeURIComponent(current.id)}`,
      body: payload,
    })
    return true
  }
  await vercelRequest({
    ...scope,
    method: "POST",
    path: `/v4/domains/${encodeURIComponent(domain)}/records`,
    body: payload,
  })
  return true
}

View File

@@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "Bundler",
"rootDir": "src",
"outDir": "dist",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"resolveJsonModule": true
},
"include": ["src"]
}

View File

@@ -0,0 +1,12 @@
DATABASE_URL=
DATABASE_HOST=
DATABASE_USERNAME=
DATABASE_PASSWORD=
DB_MODE=
PORT=8789
OPENWORK_DAYTONA_ENV_PATH=
DAYTONA_API_URL=https://app.daytona.io/api
DAYTONA_API_KEY=
DAYTONA_TARGET=
DAYTONA_OPENWORK_PORT=8787
DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS=86400

View File

@@ -0,0 +1,23 @@
{
"name": "@openwork/den-worker-proxy",
"private": true,
"type": "module",
"scripts": {
"dev": "npm run build:den-db && OPENWORK_DEV_MODE=1 tsx watch src/server.ts",
"build": "npm run build:den-db && tsc -p tsconfig.json",
"build:den-db": "npm --prefix ../../packages/den-db run build",
"start": "node dist/server.js"
},
"dependencies": {
"@daytonaio/sdk": "^0.150.0",
"@hono/node-server": "^1.13.8",
"dotenv": "^16.4.5",
"hono": "^4.7.2",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^20.11.30",
"tsx": "^4.15.7",
"typescript": "^5.5.4"
}
}

View File

@@ -0,0 +1,178 @@
import "./load-env.js"
import { Daytona } from "@daytonaio/sdk"
import { Hono } from "hono"
import { eq } from "../../../packages/den-db/dist/drizzle.js"
import { createDenDb, DaytonaSandboxTable } from "../../../packages/den-db/dist/index.js"
import { normalizeDenTypeId } from "../../../packages/utils/dist/typeid.js"
import { env } from "./env.js"
// Shared Drizzle connection; mode selects mysql2 (local) vs PlanetScale HTTP.
const { db } = createDenDb({
  databaseUrl: env.databaseUrl,
  mode: env.dbMode,
  planetscale: env.planetscale,
})
const app = new Hono()
// Daytona signed preview URLs are capped at 24 hours here.
const maxSignedPreviewExpirySeconds = 60 * 60 * 24
// Treat stored preview URLs as stale 5 minutes before their real expiry.
const signedPreviewRefreshLeadMs = 5 * 60 * 1000
// Worker id type as stored on the sandbox row.
type WorkerId = typeof DaytonaSandboxTable.$inferSelect.worker_id
/** Fail fast when the proxy is missing its Daytona API credentials. */
function assertDaytonaConfig() {
  if (env.daytona.apiKey) {
    return
  }
  throw new Error("DAYTONA_API_KEY is required for worker proxy")
}
/** Build an authenticated Daytona SDK client, with an optional target region. */
function createDaytonaClient() {
  assertDaytonaConfig()
  const target = env.daytona.target
  return new Daytona({
    apiKey: env.daytona.apiKey,
    apiUrl: env.daytona.apiUrl,
    ...(target ? { target } : {}),
  })
}
/** Clamp the configured signed-preview expiry into [1, 24h] seconds. */
function normalizedSignedPreviewExpirySeconds() {
  const configured = env.daytona.signedPreviewExpiresSeconds
  const capped = Math.min(configured, maxSignedPreviewExpirySeconds)
  return Math.max(1, capped)
}
/**
 * When to treat a signed preview URL as stale: its expiry minus the refresh
 * lead, but never earlier than "now".
 */
function signedPreviewRefreshAt(expiresInSeconds: number) {
  const usableLifetimeMs = expiresInSeconds * 1000 - signedPreviewRefreshLeadMs
  return new Date(Date.now() + Math.max(0, usableLifetimeMs))
}
/** Mark a response as never cacheable by any HTTP cache layer. */
function noCacheHeaders(headers: Headers) {
  const directives: Array<[string, string]> = [
    ["Cache-Control", "no-store, no-cache, must-revalidate, proxy-revalidate"],
    ["Pragma", "no-cache"],
    ["Expires", "0"],
    ["Surrogate-Control", "no-store"],
  ]
  for (const [headerName, headerValue] of directives) {
    headers.set(headerName, headerValue)
  }
}
/** Copy request headers, dropping hop-by-hop fields that must not be forwarded. */
function stripProxyHeaders(input: Headers) {
  const forwarded = new Headers(input)
  for (const name of ["host", "content-length", "connection"]) {
    forwarded.delete(name)
  }
  return forwarded
}
/**
 * Build the upstream URL by stripping the "/<workerId>" prefix from the
 * incoming request path and appending the remainder (plus query string)
 * to the sandbox base URL.
 */
function targetUrl(baseUrl: string, requestUrl: string, workerId: WorkerId) {
  const incoming = new URL(requestUrl)
  const prefixLength = `/${encodeURIComponent(workerId)}`.length
  const remainderPath = incoming.pathname.slice(prefixLength) || "/"
  const trimmedBase = baseUrl.replace(/\/+$/, "")
  return `${trimmedBase}${remainderPath}${incoming.search}`
}
/**
 * Look up the worker's signed preview URL, minting and persisting a fresh one
 * via the Daytona API when the stored URL has expired.
 * Returns null when no sandbox row exists for the worker.
 */
async function getSignedPreviewUrl(workerId: WorkerId) {
  const [record] = await db
    .select()
    .from(DaytonaSandboxTable)
    .where(eq(DaytonaSandboxTable.worker_id, workerId))
    .limit(1)
  if (!record) {
    return null
  }
  const expired = record.signed_preview_url_expires_at.getTime() <= Date.now()
  if (!expired) {
    return record.signed_preview_url
  }
  const daytona = createDaytonaClient()
  const sandbox = await daytona.get(record.sandbox_id)
  await sandbox.refreshData()
  const expirySeconds = normalizedSignedPreviewExpirySeconds()
  const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expirySeconds)
  await db
    .update(DaytonaSandboxTable)
    .set({
      signed_preview_url: preview.url,
      signed_preview_url_expires_at: signedPreviewRefreshAt(expirySeconds),
      region: sandbox.target,
    })
    .where(eq(DaytonaSandboxTable.worker_id, workerId))
  return preview.url
}
/**
 * Forward an incoming request to the worker's Daytona signed preview URL.
 * Emits uncacheable JSON errors when the preview URL cannot be resolved
 * (404/502) or the upstream fetch fails (502); otherwise streams the
 * upstream response back with caching disabled.
 */
async function proxyRequest(workerId: WorkerId, request: Request) {
  // All proxy error payloads share the same uncacheable JSON shape.
  const jsonError = (status: number, body: Record<string, string>) => {
    const headers = new Headers({ "Content-Type": "application/json" })
    noCacheHeaders(headers)
    return new Response(JSON.stringify(body), { status, headers })
  }
  let baseUrl: string | null = null
  try {
    baseUrl = await getSignedPreviewUrl(workerId)
  } catch (error) {
    return jsonError(502, {
      error: "worker_proxy_refresh_failed",
      message: error instanceof Error ? error.message : "unknown_error",
    })
  }
  if (!baseUrl) {
    return jsonError(404, { error: "worker_proxy_unavailable" })
  }
  let upstream: Response
  try {
    upstream = await fetch(targetUrl(baseUrl, request.url, workerId), {
      method: request.method,
      headers: stripProxyHeaders(request.headers),
      // GET/HEAD requests must not carry a request body.
      body: request.method === "GET" || request.method === "HEAD" ? undefined : await request.arrayBuffer(),
      redirect: "manual",
    })
  } catch (error) {
    return jsonError(502, {
      error: "worker_proxy_upstream_failed",
      message: error instanceof Error ? error.message : "unknown_error",
    })
  }
  const headers = new Headers(upstream.headers)
  // Length may change as the body is re-streamed; let the runtime set it.
  headers.delete("content-length")
  noCacheHeaders(headers)
  return new Response(upstream.body, {
    status: upstream.status,
    headers,
  })
}
// Catch-all proxy route: "/<workerId>/<rest…>" forwards to the worker sandbox.
app.all("*", async (c) => {
  const requestUrl = new URL(c.req.url)
  if (requestUrl.pathname === "/") {
    return Response.redirect("https://openworklabs.com", 302)
  }
  const segments = requestUrl.pathname.split("/").filter(Boolean)
  const workerId = segments[0]?.trim()
  if (!workerId) {
    const headers = new Headers({ "Content-Type": "application/json" })
    noCacheHeaders(headers)
    return new Response(JSON.stringify({ error: "worker_id_required" }), {
      status: 400,
      headers,
    })
  }
  try {
    // Await here so rejections from the proxy path are caught below; without
    // the await, only a synchronous throw from normalizeDenTypeId (e.g. an
    // invalid worker id) would ever reach this catch.
    return await proxyRequest(normalizeDenTypeId("worker", workerId), c.req.raw)
  } catch {
    const headers = new Headers({ "Content-Type": "application/json" })
    noCacheHeaders(headers)
    return new Response(JSON.stringify({ error: "worker_not_found" }), {
      status: 404,
      headers,
    })
  }
})
export default app

View File

@@ -0,0 +1,67 @@
import { z } from "zod"
// Raw process.env contract for the worker proxy. The DB fields are
// cross-validated below according to the effective DB mode.
const EnvSchema = z.object({
  DATABASE_URL: z.string().min(1).optional(),
  DATABASE_HOST: z.string().min(1).optional(),
  DATABASE_USERNAME: z.string().min(1).optional(),
  DATABASE_PASSWORD: z.string().optional(),
  DB_MODE: z.enum(["mysql", "planetscale"]).optional(),
  PORT: z.string().optional(),
  DAYTONA_API_URL: z.string().optional(),
  DAYTONA_API_KEY: z.string().optional(),
  DAYTONA_TARGET: z.string().optional(),
  DAYTONA_OPENWORK_PORT: z.string().optional(),
  DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: z.string().optional(),
}).superRefine((value, ctx) => {
  // When DB_MODE is unset, infer mysql if DATABASE_URL is present, otherwise
  // planetscale — the same rule used for the exported `env.dbMode` below.
  const inferredMode = value.DB_MODE ?? (value.DATABASE_URL ? "mysql" : "planetscale")
  if (inferredMode === "mysql" && !value.DATABASE_URL) {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "DATABASE_URL is required when using mysql mode",
      path: ["DATABASE_URL"],
    })
  }
  if (inferredMode === "planetscale") {
    // The PlanetScale HTTP driver needs the full host/user/password triple.
    for (const key of ["DATABASE_HOST", "DATABASE_USERNAME", "DATABASE_PASSWORD"] as const) {
      if (!value[key]) {
        ctx.addIssue({
          code: z.ZodIssueCode.custom,
          message: `${key} is required when using planetscale mode`,
          path: [key],
        })
      }
    }
  }
})
// Validate at module load so misconfiguration fails the process immediately.
const parsed = EnvSchema.parse(process.env)
// PlanetScale credentials when the full triple is present. The explicit
// `!== undefined` check deliberately accepts an empty-string password.
const planetscaleCredentials =
  parsed.DATABASE_HOST && parsed.DATABASE_USERNAME && parsed.DATABASE_PASSWORD !== undefined
    ? {
        host: parsed.DATABASE_HOST,
        username: parsed.DATABASE_USERNAME,
        password: parsed.DATABASE_PASSWORD,
      }
    : null
/** Trim a raw env value; return undefined when it is missing or blank. */
function optionalString(value: string | undefined) {
  if (value === undefined) {
    return undefined
  }
  const trimmed = value.trim()
  return trimmed.length > 0 ? trimmed : undefined
}
// Typed runtime configuration consumed by the rest of the proxy service.
export const env = {
  databaseUrl: parsed.DATABASE_URL,
  // Mirrors the schema's inference: explicit DB_MODE wins, then mysql when a
  // DATABASE_URL exists, otherwise planetscale.
  dbMode: parsed.DB_MODE ?? (parsed.DATABASE_URL ? "mysql" : "planetscale"),
  planetscale: planetscaleCredentials,
  port: Number(parsed.PORT ?? "8789"),
  daytona: {
    apiUrl: optionalString(parsed.DAYTONA_API_URL) ?? "https://app.daytona.io/api",
    apiKey: optionalString(parsed.DAYTONA_API_KEY),
    target: optionalString(parsed.DAYTONA_TARGET),
    openworkPort: Number(parsed.DAYTONA_OPENWORK_PORT ?? "8787"),
    // NOTE(review): Number("") or a non-numeric value yields NaN for these
    // numeric fields — confirm env files always set numeric values.
    signedPreviewExpiresSeconds: Number(parsed.DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS ?? "86400"),
  },
}

View File

@@ -0,0 +1,3 @@
// Vercel entrypoint: re-export the Hono app so the platform runtime can
// serve it directly (Docker/local runs use src/server.ts instead).
import app from "./app.js"
export default app

View File

@@ -0,0 +1,42 @@
import { existsSync } from "node:fs"
import path from "node:path"
import { fileURLToPath } from "node:url"
import dotenv from "dotenv"
/**
 * Walk from startDir toward the filesystem root looking for fileName.
 * Returns the first existing path, or null after maxDepth+1 directories
 * have been checked or the root is reached.
 */
function findUpwards(startDir: string, fileName: string, maxDepth = 8) {
  let dir = startDir
  let remaining = maxDepth
  while (remaining >= 0) {
    const candidate = path.join(dir, fileName)
    if (existsSync(candidate)) {
      return candidate
    }
    const parent = path.dirname(dir)
    if (parent === dir) {
      return null
    }
    dir = parent
    remaining -= 1
  }
  return null
}
const srcDir = path.dirname(fileURLToPath(import.meta.url))
const serviceDir = path.resolve(srcDir, "..")
// Load service-local env files first; `override: false` keeps any values
// already set in process.env.
for (const filePath of [path.join(serviceDir, ".env.local"), path.join(serviceDir, ".env")]) {
  if (existsSync(filePath)) {
    dotenv.config({ path: filePath, override: false })
  }
}
// A shared .env.daytona can be pointed to explicitly via
// OPENWORK_DAYTONA_ENV_PATH, or discovered by searching upward starting two
// directories above the service.
const explicitDaytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim()
const detectedDaytonaEnvPath = findUpwards(path.resolve(serviceDir, "..", ".."), ".env.daytona")
const daytonaEnvPath = explicitDaytonaEnvPath || detectedDaytonaEnvPath
if (daytonaEnvPath && existsSync(daytonaEnvPath)) {
  dotenv.config({ path: daytonaEnvPath, override: false })
}
// Finally pick up a .env from the current working directory, if any.
dotenv.config({ override: false })

View File

@@ -0,0 +1,7 @@
import { serve } from "@hono/node-server"
import app from "./app.js"
import { env } from "./env.js"
// Node entrypoint for Docker/local runs (Vercel uses src/index.ts instead).
serve({ fetch: app.fetch, port: env.port }, (info) => {
  console.log(`worker proxy listening on ${info.port}`)
})

View File

@@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "Bundler",
"rootDir": "src",
"outDir": "dist",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"resolveJsonModule": true
},
"include": ["src"]
}

View File

@@ -0,0 +1,34 @@
FROM node:22-bookworm-slim
# Pinned tool versions; override at build time with --build-arg.
ARG OPENWORK_ORCHESTRATOR_VERSION=0.11.151
ARG OPENCODE_VERSION=1.2.6
# Optional full URL override for the opencode release artifact.
ARG OPENCODE_DOWNLOAD_URL=
# Minimal tooling needed to fetch and unpack the opencode release.
RUN apt-get update \
    && apt-get install -y --no-install-recommends ca-certificates curl tar unzip \
    && rm -rf /var/lib/apt/lists/*
RUN npm install -g "openwork-orchestrator@${OPENWORK_ORCHESTRATOR_VERSION}"
# Download the architecture-appropriate opencode binary and install it.
RUN set -eux; \
    arch="$(dpkg --print-architecture)"; \
    case "$arch" in \
        amd64) asset="opencode-linux-x64-baseline.tar.gz" ;; \
        arm64) asset="opencode-linux-arm64.tar.gz" ;; \
        *) echo "unsupported architecture: $arch" >&2; exit 1 ;; \
    esac; \
    url="$OPENCODE_DOWNLOAD_URL"; \
    if [ -z "$url" ]; then \
        url="https://github.com/anomalyco/opencode/releases/download/v${OPENCODE_VERSION}/${asset}"; \
    fi; \
    tmpdir="$(mktemp -d)"; \
    curl -fsSL "$url" -o "$tmpdir/$asset"; \
    tar -xzf "$tmpdir/$asset" -C "$tmpdir"; \
    binary="$(find "$tmpdir" -type f -name opencode | head -n 1)"; \
    test -n "$binary"; \
    install -m 0755 "$binary" /usr/local/bin/opencode; \
    rm -rf "$tmpdir"
# Sanity-check both tools are on PATH, then idle; commands are executed into
# the running sandbox by the provisioner session.
RUN openwork --version && opencode --version
CMD ["sleep", "infinity"]

View File

@@ -1,5 +1,9 @@
DATABASE_URL=mysql://root:password@127.0.0.1:3306/openwork_den
BETTER_AUTH_SECRET=local-dev-secret-not-for-production-use!!
DATABASE_URL=
DATABASE_HOST=
DATABASE_USERNAME=
DATABASE_PASSWORD=
DB_MODE=
BETTER_AUTH_SECRET=
BETTER_AUTH_URL=http://localhost:8788
DEN_BETTER_AUTH_TRUSTED_ORIGINS=http://localhost:3005,http://localhost:5173
GITHUB_CLIENT_ID=
@@ -7,8 +11,10 @@ GITHUB_CLIENT_SECRET=
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
PORT=8788
WORKER_PROXY_PORT=8789
CORS_ORIGINS=http://localhost:3005,http://localhost:5173
PROVISIONER_MODE=stub
OPENWORK_DAYTONA_ENV_PATH=
WORKER_URL_TEMPLATE=https://workers.example.com/{workerId}
RENDER_API_BASE=https://api.render.com/v1
RENDER_API_KEY=
@@ -37,3 +43,31 @@ POLAR_PRODUCT_ID=
POLAR_BENEFIT_ID=
POLAR_SUCCESS_URL=http://localhost:8788
POLAR_RETURN_URL=http://localhost:8788
DAYTONA_API_URL=https://app.daytona.io/api
DAYTONA_API_KEY=
DAYTONA_TARGET=
DAYTONA_SNAPSHOT=
DAYTONA_SANDBOX_IMAGE=node:20-bookworm
DAYTONA_SANDBOX_CPU=2
DAYTONA_SANDBOX_MEMORY=4
DAYTONA_SANDBOX_DISK=8
DAYTONA_SANDBOX_PUBLIC=false
DAYTONA_SANDBOX_AUTO_STOP_INTERVAL=0
DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL=10080
DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL=-1
DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS=86400
DAYTONA_WORKER_PROXY_BASE_URL=https://workers.den.openworklabs.com
DAYTONA_SANDBOX_NAME_PREFIX=den-daytona-worker
DAYTONA_VOLUME_NAME_PREFIX=den-daytona-worker
DAYTONA_WORKSPACE_MOUNT_PATH=/workspace
DAYTONA_DATA_MOUNT_PATH=/persist/openwork
DAYTONA_RUNTIME_WORKSPACE_PATH=/tmp/openwork-workspace
DAYTONA_RUNTIME_DATA_PATH=/tmp/openwork-data
DAYTONA_SIDECAR_DIR=/tmp/openwork-sidecars
DAYTONA_OPENWORK_PORT=8787
DAYTONA_OPENCODE_PORT=4096
DAYTONA_OPENWORK_VERSION=
DAYTONA_CREATE_TIMEOUT_SECONDS=300
DAYTONA_DELETE_TIMEOUT_SECONDS=120
DAYTONA_HEALTHCHECK_TIMEOUT_MS=300000
DAYTONA_POLL_INTERVAL_MS=5000

View File

@@ -5,7 +5,7 @@ This guide explains how agents should operate, test, and troubleshoot the Den se
## What this service does
- Handles auth (`/api/auth/*`) and session lookup (`/v1/me`).
- Creates workers (`/v1/workers`) and provisions cloud workers on Render.
- Creates workers (`/v1/workers`) and provisions cloud workers on Render or Daytona.
- Optionally enforces a Polar paywall for cloud worker creation.
## Core flows to test
@@ -23,10 +23,10 @@ Expected: all succeed with `200`.
Set `POLAR_FEATURE_GATE_ENABLED=false`.
1. `POST /v1/workers` with `destination="cloud"`
2. Confirm `instance.provider="render"`
2. Confirm `instance.provider` matches the configured cloud provisioner (`render` or `daytona`)
3. Poll `instance.url + "/health"`
Expected: worker creation `201`, worker health `200`.
Expected: worker creation `202`, worker health `200` after async provisioning finishes.
### 3) Cloud worker flow (paywall enabled)
@@ -45,13 +45,14 @@ For an entitled user (has the required Polar benefit):
1. `POST /v1/workers` with `destination="cloud"`
Expected: worker creation `201` with Render-backed instance.
Expected: worker creation `202` with a healthy cloud-backed instance once provisioning completes.
## Required env vars (summary)
- Base: `DATABASE_URL`, `BETTER_AUTH_SECRET`, `BETTER_AUTH_URL`
- Optional social auth: `GITHUB_CLIENT_ID`, `GITHUB_CLIENT_SECRET`, `GOOGLE_CLIENT_ID`, `GOOGLE_CLIENT_SECRET`
- Render: `PROVISIONER_MODE=render`, `RENDER_API_KEY`, `RENDER_OWNER_ID`, and `RENDER_WORKER_*`
- Daytona: `PROVISIONER_MODE=daytona`, `DAYTONA_API_KEY`, and optional `DAYTONA_*` sizing/mount settings
- Polar gate:
- `POLAR_FEATURE_GATE_ENABLED`
- `POLAR_ACCESS_TOKEN`
@@ -66,10 +67,10 @@ Expected: worker creation `201` with Render-backed instance.
- `.github/workflows/deploy-den.yml`
It updates Render env vars and triggers a deploy for the configured service ID.
It updates Render env vars and triggers a deploy for the configured service ID. Daytona is intended for local/dev worker testing unless you build a separate hosted Den deployment path for it.
## Common failure modes
- `provisioning_failed`: Render deploy failed or health check timed out.
- `provisioning_failed`: Render deploy failed, Daytona sandbox boot failed, or worker health check timed out.
- `payment_required`: Polar gate is enabled and user does not have the required benefit.
- startup error: paywall enabled but missing Polar env vars.

View File

@@ -36,8 +36,10 @@ The script prints the exact URLs and `docker compose ... down` command to use fo
- `GOOGLE_CLIENT_ID` optional OAuth app client ID for Google sign-in
- `GOOGLE_CLIENT_SECRET` optional OAuth app client secret for Google sign-in
- `PORT` server port
- `CORS_ORIGINS` comma-separated list of trusted browser origins for Express CORS
- `PROVISIONER_MODE` `stub` or `render`
- `CORS_ORIGINS` comma-separated list of trusted browser origins (used for Better Auth origin validation + Express CORS)
- `PROVISIONER_MODE` `stub`, `render`, or `daytona`
- `OPENWORK_DAYTONA_ENV_PATH` optional path to a shared `.env.daytona` file; when unset, Den searches upwards from the repo for `.env.daytona`
- `WORKER_URL_TEMPLATE` template string with `{workerId}`
- `RENDER_API_BASE` Render API base URL (default `https://api.render.com/v1`)
- `RENDER_API_KEY` Render API key (required for `PROVISIONER_MODE=render`)
@@ -66,6 +68,79 @@ The script prints the exact URLs and `docker compose ... down` command to use fo
- `POLAR_BENEFIT_ID` Polar benefit ID required to unlock cloud workers (required when paywall enabled)
- `POLAR_SUCCESS_URL` redirect URL after successful checkout (required when paywall enabled)
- `POLAR_RETURN_URL` return URL shown in checkout (required when paywall enabled)
- Daytona:
- `DAYTONA_API_KEY` API key used to create sandboxes and volumes
- `DAYTONA_API_URL` Daytona API base URL (default `https://app.daytona.io/api`)
- `DAYTONA_TARGET` optional Daytona region/target
- `DAYTONA_SNAPSHOT` optional snapshot name; if omitted Den creates workers from `DAYTONA_SANDBOX_IMAGE`
- `DAYTONA_SANDBOX_IMAGE` sandbox base image when no snapshot is provided (default `node:20-bookworm`)
- `DAYTONA_SANDBOX_CPU`, `DAYTONA_SANDBOX_MEMORY`, `DAYTONA_SANDBOX_DISK` resource sizing when image-backed sandboxes are used
- `DAYTONA_SANDBOX_AUTO_STOP_INTERVAL`, `DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL`, `DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL` lifecycle controls
- `DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS` TTL for the signed OpenWork preview URL returned to Den clients (Daytona currently caps this at 24 hours)
- `DAYTONA_SANDBOX_NAME_PREFIX`, `DAYTONA_VOLUME_NAME_PREFIX` resource naming prefixes
- `DAYTONA_WORKSPACE_MOUNT_PATH`, `DAYTONA_DATA_MOUNT_PATH` volume mount paths inside the sandbox
- `DAYTONA_RUNTIME_WORKSPACE_PATH`, `DAYTONA_RUNTIME_DATA_PATH`, `DAYTONA_SIDECAR_DIR` local sandbox paths used for the live OpenWork runtime; the mounted Daytona volumes are linked into the runtime workspace under `volumes/`
- `DAYTONA_OPENWORK_PORT`, `DAYTONA_OPENCODE_PORT` ports used when launching `openwork serve`
- `DAYTONA_OPENWORK_VERSION` optional npm version to install instead of latest `openwork-orchestrator`
- `DAYTONA_CREATE_TIMEOUT_SECONDS`, `DAYTONA_DELETE_TIMEOUT_SECONDS`, `DAYTONA_HEALTHCHECK_TIMEOUT_MS`, `DAYTONA_POLL_INTERVAL_MS` provisioning timeouts
For local Daytona development, place your Daytona API credentials in `/_repos/openwork/.env.daytona` and Den will pick them up automatically, including from task worktrees.
## Building a Daytona snapshot
If you want Daytona workers to start from a prebuilt runtime instead of a generic base image, create a snapshot and point Den at it.
The snapshot builder for this repo lives at:
- `scripts/create-daytona-openwork-snapshot.sh`
- `services/den-worker-runtime/Dockerfile.daytona-snapshot`
It builds a Linux image with:
- `openwork-orchestrator`
- `opencode`
Prerequisites:
- Docker running locally
- Daytona CLI installed and logged in
- a valid `.env.daytona` with at least `DAYTONA_API_KEY`
From the OpenWork repo root:
```bash
./scripts/create-daytona-openwork-snapshot.sh
```
To publish a custom-named snapshot:
```bash
./scripts/create-daytona-openwork-snapshot.sh openwork-runtime
```
Useful optional overrides:
- `DAYTONA_SNAPSHOT_NAME`
- `DAYTONA_SNAPSHOT_REGION`
- `DAYTONA_SNAPSHOT_CPU`
- `DAYTONA_SNAPSHOT_MEMORY`
- `DAYTONA_SNAPSHOT_DISK`
- `OPENWORK_ORCHESTRATOR_VERSION`
- `OPENCODE_VERSION`
After the snapshot is pushed, set it in `.env.daytona`:
```env
DAYTONA_SNAPSHOT=openwork-runtime
```
Then start Den in Daytona mode:
```bash
DEN_PROVISIONER_MODE=daytona packaging/docker/den-dev-up.sh
```
If you do not set `DAYTONA_SNAPSHOT`, Den falls back to `DAYTONA_SANDBOX_IMAGE` and installs runtime dependencies at sandbox startup.
## Auth setup (Better Auth)
@@ -80,6 +155,9 @@ Apply migrations:
```bash
pnpm db:generate
pnpm db:migrate
# or use the SQL migration runner used by Docker
pnpm db:migrate:sql
```
## API
@@ -96,7 +174,7 @@ pnpm db:migrate
- Includes latest instance metadata when available.
- `POST /v1/workers/:id/tokens`
- `DELETE /v1/workers/:id`
- Deletes worker records and attempts to suspend the backing cloud service when destination is `cloud`.
- Deletes worker records and attempts to tear down the backing cloud runtime when destination is `cloud`.
## CI deployment (dev == prod)

View File

@@ -1,4 +1,4 @@
import "dotenv/config"
import "./src/load-env.ts"
import { defineConfig } from "drizzle-kit"
export default defineConfig({

View File

@@ -1,3 +1,13 @@
CREATE TABLE `admin_allowlist` (
`id` varchar(64) NOT NULL,
`email` varchar(255) NOT NULL,
`note` varchar(255),
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `admin_allowlist_id` PRIMARY KEY(`id`),
CONSTRAINT `admin_allowlist_email` UNIQUE(`email`)
);
--> statement-breakpoint
CREATE TABLE `audit_event` (
`id` varchar(64) NOT NULL,
`org_id` varchar(64) NOT NULL,
@@ -11,30 +21,30 @@ CREATE TABLE `audit_event` (
--> statement-breakpoint
CREATE TABLE `account` (
`id` varchar(64) NOT NULL,
`userId` varchar(64) NOT NULL,
`accountId` varchar(255) NOT NULL,
`providerId` varchar(255) NOT NULL,
`accessToken` text,
`refreshToken` text,
`accessTokenExpiresAt` timestamp(3),
`refreshTokenExpiresAt` timestamp(3),
`scope` varchar(1024),
`idToken` text,
`password` varchar(512),
`createdAt` timestamp(3) NOT NULL DEFAULT (now()),
`updatedAt` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`user_id` varchar(64) NOT NULL,
`account_id` text NOT NULL,
`provider_id` text NOT NULL,
`access_token` text,
`refresh_token` text,
`access_token_expires_at` timestamp(3),
`refresh_token_expires_at` timestamp(3),
`scope` text,
`id_token` text,
`password` text,
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `account_id` PRIMARY KEY(`id`)
);
--> statement-breakpoint
CREATE TABLE `session` (
`id` varchar(64) NOT NULL,
`userId` varchar(64) NOT NULL,
`user_id` varchar(64) NOT NULL,
`token` varchar(255) NOT NULL,
`expiresAt` timestamp(3) NOT NULL,
`ipAddress` varchar(255),
`userAgent` varchar(1024),
`createdAt` timestamp(3) NOT NULL DEFAULT (now()),
`updatedAt` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`expires_at` timestamp(3) NOT NULL,
`ip_address` text,
`user_agent` text,
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `session_id` PRIMARY KEY(`id`),
CONSTRAINT `session_token` UNIQUE(`token`)
);
@@ -43,10 +53,10 @@ CREATE TABLE `user` (
`id` varchar(64) NOT NULL,
`name` varchar(255) NOT NULL,
`email` varchar(255) NOT NULL,
`emailVerified` boolean NOT NULL DEFAULT false,
`image` varchar(2048),
`createdAt` timestamp(3) NOT NULL DEFAULT (now()),
`updatedAt` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`email_verified` boolean NOT NULL DEFAULT false,
`image` text,
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `user_id` PRIMARY KEY(`id`),
CONSTRAINT `user_email` UNIQUE(`email`)
);
@@ -54,13 +64,29 @@ CREATE TABLE `user` (
CREATE TABLE `verification` (
`id` varchar(64) NOT NULL,
`identifier` varchar(255) NOT NULL,
`value` varchar(1024) NOT NULL,
`expiresAt` timestamp(3) NOT NULL,
`createdAt` timestamp(3) NOT NULL DEFAULT (now()),
`updatedAt` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`value` text NOT NULL,
`expires_at` timestamp(3) NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `verification_id` PRIMARY KEY(`id`)
);
--> statement-breakpoint
CREATE TABLE `daytona_sandbox` (
`id` varchar(64) NOT NULL,
`worker_id` varchar(64) NOT NULL,
`sandbox_id` varchar(128) NOT NULL,
`workspace_volume_id` varchar(128) NOT NULL,
`data_volume_id` varchar(128) NOT NULL,
`signed_preview_url` varchar(2048) NOT NULL,
`signed_preview_url_expires_at` timestamp(3) NOT NULL,
`region` varchar(64),
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `daytona_sandbox_id` PRIMARY KEY(`id`),
CONSTRAINT `daytona_sandbox_worker_id` UNIQUE(`worker_id`),
CONSTRAINT `daytona_sandbox_sandbox_id` UNIQUE(`sandbox_id`)
);
--> statement-breakpoint
CREATE TABLE `org_membership` (
`id` varchar(64) NOT NULL,
`org_id` varchar(64) NOT NULL,
@@ -105,6 +131,7 @@ CREATE TABLE `worker_instance` (
CREATE TABLE `worker` (
`id` varchar(64) NOT NULL,
`org_id` varchar(64) NOT NULL,
`created_by_user_id` varchar(64),
`name` varchar(255) NOT NULL,
`description` varchar(1024),
`destination` enum('local','cloud') NOT NULL,
@@ -130,10 +157,8 @@ CREATE TABLE `worker_token` (
--> statement-breakpoint
CREATE INDEX `audit_event_org_id` ON `audit_event` (`org_id`);--> statement-breakpoint
CREATE INDEX `audit_event_worker_id` ON `audit_event` (`worker_id`);--> statement-breakpoint
CREATE INDEX `account_user_id` ON `account` (`userId`);--> statement-breakpoint
CREATE INDEX `account_provider_id` ON `account` (`providerId`);--> statement-breakpoint
CREATE INDEX `account_account_id` ON `account` (`accountId`);--> statement-breakpoint
CREATE INDEX `session_user_id` ON `session` (`userId`);--> statement-breakpoint
CREATE INDEX `account_user_id` ON `account` (`user_id`);--> statement-breakpoint
CREATE INDEX `session_user_id` ON `session` (`user_id`);--> statement-breakpoint
CREATE INDEX `verification_identifier` ON `verification` (`identifier`);--> statement-breakpoint
CREATE INDEX `org_membership_org_id` ON `org_membership` (`org_id`);--> statement-breakpoint
CREATE INDEX `org_membership_user_id` ON `org_membership` (`user_id`);--> statement-breakpoint
@@ -141,5 +166,6 @@ CREATE INDEX `org_owner_user_id` ON `org` (`owner_user_id`);--> statement-breakp
CREATE INDEX `worker_bundle_worker_id` ON `worker_bundle` (`worker_id`);--> statement-breakpoint
CREATE INDEX `worker_instance_worker_id` ON `worker_instance` (`worker_id`);--> statement-breakpoint
CREATE INDEX `worker_org_id` ON `worker` (`org_id`);--> statement-breakpoint
CREATE INDEX `worker_created_by_user_id` ON `worker` (`created_by_user_id`);--> statement-breakpoint
CREATE INDEX `worker_status` ON `worker` (`status`);--> statement-breakpoint
CREATE INDEX `worker_token_worker_id` ON `worker_token` (`worker_id`);
CREATE INDEX `worker_token_worker_id` ON `worker_token` (`worker_id`);

View File

@@ -1,65 +0,0 @@
DROP TABLE IF EXISTS `account`;
--> statement-breakpoint
DROP TABLE IF EXISTS `session`;
--> statement-breakpoint
DROP TABLE IF EXISTS `verification`;
--> statement-breakpoint
DROP TABLE IF EXISTS `user`;
--> statement-breakpoint
CREATE TABLE `user` (
`id` varchar(36) NOT NULL,
`name` varchar(255) NOT NULL,
`email` varchar(255) NOT NULL,
`email_verified` boolean NOT NULL DEFAULT false,
`image` text,
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `user_id` PRIMARY KEY(`id`),
CONSTRAINT `user_email` UNIQUE(`email`)
);
--> statement-breakpoint
CREATE TABLE `session` (
`id` varchar(36) NOT NULL,
`expires_at` timestamp(3) NOT NULL,
`token` varchar(255) NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`ip_address` text,
`user_agent` text,
`user_id` varchar(36) NOT NULL,
CONSTRAINT `session_id` PRIMARY KEY(`id`),
CONSTRAINT `session_token` UNIQUE(`token`)
);
--> statement-breakpoint
CREATE INDEX `session_user_id` ON `session` (`user_id`);
--> statement-breakpoint
CREATE TABLE `account` (
`id` varchar(36) NOT NULL,
`account_id` text NOT NULL,
`provider_id` text NOT NULL,
`user_id` varchar(36) NOT NULL,
`access_token` text,
`refresh_token` text,
`id_token` text,
`access_token_expires_at` timestamp(3),
`refresh_token_expires_at` timestamp(3),
`scope` text,
`password` text,
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `account_id` PRIMARY KEY(`id`)
);
--> statement-breakpoint
CREATE INDEX `account_user_id` ON `account` (`user_id`);
--> statement-breakpoint
CREATE TABLE `verification` (
`id` varchar(36) NOT NULL,
`identifier` varchar(255) NOT NULL,
`value` text NOT NULL,
`expires_at` timestamp(3) NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT (now()),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `verification_id` PRIMARY KEY(`id`)
);
--> statement-breakpoint
CREATE INDEX `verification_identifier` ON `verification` (`identifier`);

View File

@@ -1,3 +0,0 @@
ALTER TABLE `worker` ADD `created_by_user_id` varchar(64);
--> statement-breakpoint
CREATE INDEX `worker_created_by_user_id` ON `worker` (`created_by_user_id`);

View File

@@ -1,9 +0,0 @@
CREATE TABLE `admin_allowlist` (
`id` varchar(64) NOT NULL,
`email` varchar(255) NOT NULL,
`note` varchar(255),
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `admin_allowlist_id` PRIMARY KEY(`id`),
CONSTRAINT `admin_allowlist_email` UNIQUE(`email`)
);

File diff suppressed because it is too large Load Diff

View File

@@ -5,29 +5,8 @@
{
"idx": 0,
"version": "5",
"when": 1771638056482,
"tag": "0000_tense_lilandra",
"breakpoints": true
},
{
"idx": 1,
"version": "5",
"when": 1771639607782,
"tag": "0001_auth_columns_fix",
"breakpoints": true
},
{
"idx": 2,
"version": "5",
"when": 1771741800000,
"tag": "0002_worker_created_by",
"breakpoints": true
},
{
"idx": 3,
"version": "5",
"when": 1773353100000,
"tag": "0003_admin_allowlist",
"when": 1773705583301,
"tag": "0000_baseline",
"breakpoints": true
},
{
@@ -38,4 +17,4 @@
"breakpoints": true
}
]
}
}

View File

@@ -3,15 +3,20 @@
"private": true,
"type": "module",
"scripts": {
"dev": "OPENWORK_DEV_MODE=1 tsx watch src/index.ts",
"build": "tsc -p tsconfig.json",
"dev": "npm run build:den-db && OPENWORK_DEV_MODE=1 tsx watch src/index.ts",
"build": "npm run build:den-db && tsc -p tsconfig.json",
"build:den-db": "npm --prefix ../../packages/den-db run build",
"start": "node dist/index.js",
"db:migrate:sql": "node scripts/run-sql-migrations.mjs",
"test:smoke:daytona": "pnpm build && node scripts/daytona-provisioner-smoke.mjs",
"test:e2e:daytona": "node scripts/e2e-daytona-worker.mjs",
"test:e2e:worker-limit": "node scripts/e2e-worker-limit.mjs",
"db:generate": "drizzle-kit generate",
"db:migrate": "drizzle-kit migrate",
"auth:generate": "npx @better-auth/cli@latest generate --config src/auth.ts --output src/db/better-auth.schema.ts --yes"
},
"dependencies": {
"@daytonaio/sdk": "^0.150.0",
"better-auth": "^1.4.18",
"cors": "^2.8.5",
"dotenv": "^16.4.5",

View File

@@ -0,0 +1,129 @@
import { randomUUID } from "node:crypto"
import { existsSync } from "node:fs"
import { dirname, join, resolve } from "node:path"
import { fileURLToPath } from "node:url"
import { setTimeout as delay } from "node:timers/promises"
import dotenv from "dotenv"
import { Daytona } from "@daytonaio/sdk"
// Resolve key directories relative to this script: the file lives in the
// service's scripts/ directory, so one level up is the Den service root, and
// two further levels up is presumed to be the repository root (used only to
// locate `.env.daytona` — TODO confirm layout matches services/den/scripts).
const __dirname = dirname(fileURLToPath(import.meta.url))
const serviceDir = resolve(__dirname, "..")
const repoRoot = resolve(serviceDir, "..", "..")
// Walk up the directory tree from `startDir` looking for `fileName`.
// Returns the path of the first match, or null when nothing is found within
// `maxDepth` parent hops or the filesystem root is reached first.
function findUpwards(startDir, fileName, maxDepth = 8) {
  let dir = startDir
  let hops = 0
  while (hops <= maxDepth) {
    const candidate = join(dir, fileName)
    if (existsSync(candidate)) {
      return candidate
    }
    const parent = dirname(dir)
    if (parent === dir) {
      // Hit the filesystem root; nothing left to scan.
      return null
    }
    dir = parent
    hops += 1
  }
  return null
}
// Load shared Daytona credentials: prefer an explicit OPENWORK_DAYTONA_ENV_PATH,
// otherwise search upwards from the repo root for a `.env.daytona` file.
const daytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() || findUpwards(repoRoot, ".env.daytona")
if (daytonaEnvPath) {
// `override: false` keeps any variables already exported in the shell.
dotenv.config({ path: daytonaEnvPath, override: false })
}
// Placeholder defaults so the compiled provisioner module can be imported
// without a full Den configuration; only the DAYTONA_* settings matter here.
process.env.DATABASE_URL ||= "mysql://unused"
process.env.BETTER_AUTH_SECRET ||= "openwork-daytona-local-secret-000000000"
process.env.BETTER_AUTH_URL ||= "http://127.0.0.1"
process.env.CORS_ORIGINS ||= "http://127.0.0.1"
process.env.PROVISIONER_MODE ||= "daytona"
function log(message, detail) {
if (detail === undefined) {
console.log(message)
return
}
console.log(message, detail)
}
function fail(message, detail) {
if (detail !== undefined) {
console.error(message, detail)
} else {
console.error(message)
}
process.exit(1)
}
async function waitForCleanup(daytona, workerId, attempts = 24) {
for (let index = 0; index < attempts; index += 1) {
const sandboxes = await daytona.list(
{
"openwork.den.provider": "daytona",
"openwork.den.worker-id": workerId,
},
1,
20,
)
if (sandboxes.items.length === 0) {
return
}
await delay(5000)
}
throw new Error(`cleanup_timeout:${workerId}`)
}
// Smoke test: provision a Daytona-backed worker through the compiled
// provisioner, hit its authenticated /workspaces endpoint, then deprovision
// and confirm the sandbox disappears from Daytona.
async function main() {
if (!process.env.DAYTONA_API_KEY) {
fail("DAYTONA_API_KEY is required. Add it to .env.daytona or export it before running the smoke test.")
}
// Imported lazily from the build output so the env defaults above are applied
// first. NOTE(review): requires `pnpm build` to have produced dist/ — the
// `test:smoke:daytona` package script appears to run the build beforehand.
const { provisionWorker, deprovisionWorker } = await import("../dist/workers/provisioner.js")
const workerId = randomUUID()
// Tokens are 64 hex characters: two UUIDs with the dashes stripped.
const clientToken = randomUUID().replaceAll("-", "") + randomUUID().replaceAll("-", "")
const hostToken = randomUUID().replaceAll("-", "") + randomUUID().replaceAll("-", "")
const instance = await provisionWorker({
workerId,
name: "daytona-smoke",
hostToken,
clientToken,
})
log("Provisioned Daytona worker", instance)
// Verify the provisioned worker actually serves authenticated requests.
const workspacesResponse = await fetch(`${instance.url.replace(/\/$/, "")}/workspaces`, {
headers: {
Accept: "application/json",
Authorization: `Bearer ${clientToken}`,
},
})
const workspacesPayload = await workspacesResponse.text()
if (!workspacesResponse.ok) {
fail("Worker /workspaces check failed", {
status: workspacesResponse.status,
body: workspacesPayload,
})
}
log("Worker /workspaces responded", workspacesPayload)
await deprovisionWorker({
workerId,
instanceUrl: instance.url,
})
// Query Daytona directly to confirm the sandbox is really gone.
const daytona = new Daytona({
apiKey: process.env.DAYTONA_API_KEY,
apiUrl: process.env.DAYTONA_API_URL,
...(process.env.DAYTONA_TARGET ? { target: process.env.DAYTONA_TARGET } : {}),
})
await waitForCleanup(daytona, workerId)
log("Daytona worker cleanup completed", workerId)
}
main().catch((error) => {
// Surface any unexpected failure with a non-zero exit code.
fail(error instanceof Error ? error.message : String(error))
})

View File

@@ -0,0 +1,489 @@
import { randomUUID } from "node:crypto"
import { once } from "node:events"
import { existsSync } from "node:fs"
import net from "node:net"
import { dirname, join, resolve } from "node:path"
import { fileURLToPath } from "node:url"
import { setTimeout as delay } from "node:timers/promises"
import { spawn } from "node:child_process"
import dotenv from "dotenv"
import mysql from "mysql2/promise"
import { Daytona } from "@daytonaio/sdk"
// Directory anchors: this script lives in the service's scripts/ directory, so
// serviceDir is the Den service root and repoRoot is two levels above it
// (presumed repository root — used only to locate `.env.daytona`).
const __dirname = dirname(fileURLToPath(import.meta.url))
const serviceDir = resolve(__dirname, "..")
const repoRoot = resolve(serviceDir, "..", "..")
function log(message) {
process.stdout.write(`${message}\n`)
}
function fail(message, detail) {
if (detail !== undefined) {
console.error(message, detail)
} else {
console.error(message)
}
process.exit(1)
}
// Search for `fileName` starting at `startDir` and walking toward the
// filesystem root. Gives up after `maxDepth` parent hops or at the root,
// returning the first matching path or null.
function findUpwards(startDir, fileName, maxDepth = 8) {
  let dir = startDir
  for (let hop = 0; ; hop += 1) {
    if (hop > maxDepth) {
      return null
    }
    const candidate = join(dir, fileName)
    if (existsSync(candidate)) {
      return candidate
    }
    const parent = dirname(dir)
    if (parent === dir) {
      // At the root; no more parents to inspect.
      return null
    }
    dir = parent
  }
}
// Load Daytona credentials from an explicit OPENWORK_DAYTONA_ENV_PATH, or the
// nearest `.env.daytona` found walking up from the repo root. Variables
// already exported in the shell win because `override` is false.
const daytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim() || findUpwards(repoRoot, ".env.daytona")
if (daytonaEnvPath) {
dotenv.config({ path: daytonaEnvPath, override: false })
}
// Normalize a value into a DNS-ish slug: lowercase, runs of characters outside
// [a-z0-9-] become single dashes, and leading/trailing dashes are stripped.
function slug(value) {
  const lowered = value.toLowerCase()
  const dashed = lowered.replace(/[^a-z0-9-]+/g, "-")
  const collapsed = dashed.replace(/-+/g, "-")
  return collapsed.replace(/^-|-$/g, "")
}
// Short resource-name suffix: the first 12 characters of the worker id once
// dashes are removed.
function workerHint(workerId) {
  return workerId.split("-").join("").slice(0, 12)
}
// Labels used to tag Daytona sandboxes owned by Den and to look them up later.
function sandboxLabels(workerId) {
  return Object.fromEntries([
    ["openwork.den.provider", "daytona"],
    ["openwork.den.worker-id", workerId],
  ])
}
// Deterministic name for the worker's Daytona workspace volume, truncated to
// 63 characters (presumably Daytona's resource-name limit — TODO confirm).
function workspaceVolumeName(workerId) {
  const prefix = process.env.DAYTONA_VOLUME_NAME_PREFIX || "den-daytona-worker"
  const name = slug([prefix, workerHint(workerId), "workspace"].join("-"))
  return name.slice(0, 63)
}
// Deterministic name for the worker's Daytona data volume, truncated to
// 63 characters (presumably Daytona's resource-name limit — TODO confirm).
function dataVolumeName(workerId) {
  const prefix = process.env.DAYTONA_VOLUME_NAME_PREFIX || "den-daytona-worker"
  const name = slug([prefix, workerHint(workerId), "data"].join("-"))
  return name.slice(0, 63)
}
async function getFreePort() {
return await new Promise((resolvePort, reject) => {
const server = net.createServer()
server.listen(0, "127.0.0.1", () => {
const address = server.address()
if (!address || typeof address === "string") {
reject(new Error("failed_to_resolve_free_port"))
return
}
server.close((error) => (error ? reject(error) : resolvePort(address.port)))
})
server.on("error", reject)
})
}
// Spawn a child process defaulting to the Den service directory as cwd, the
// current environment, and piped stdio; callers may override any option.
function spawnCommand(command, args, options = {}) {
  const defaults = { cwd: serviceDir, env: process.env, stdio: "pipe" }
  return spawn(command, args, { ...defaults, ...options })
}
// Run a command to completion, buffering its stdout/stderr. Resolves with the
// captured output on exit code 0, otherwise throws an error that embeds both
// streams for debugging.
async function runCommand(command, args, options = {}) {
  const child = spawnCommand(command, args, options)
  let out = ""
  let err = ""
  child.stdout?.on("data", (chunk) => {
    out += chunk.toString()
  })
  child.stderr?.on("data", (chunk) => {
    err += chunk.toString()
  })
  const [code] = await once(child, "exit")
  if (code === 0) {
    return { stdout: out, stderr: err }
  }
  throw new Error(`${command} ${args.join(" ")} failed\nSTDOUT:\n${out}\nSTDERR:\n${err}`)
}
// Probe the database once per second until a connection succeeds and a
// trivial query runs; throws `mysql_not_ready` after `attempts` failures.
async function waitForMysqlConnection(databaseUrl, attempts = 60) {
  let remaining = attempts
  while (remaining > 0) {
    remaining -= 1
    try {
      const connection = await mysql.createConnection(databaseUrl)
      await connection.query("SELECT 1")
      await connection.end()
      return
    } catch {
      await delay(1000)
    }
  }
  throw new Error("mysql_not_ready")
}
async function waitForHttp(url, attempts = 60, intervalMs = 500) {
for (let index = 0; index < attempts; index += 1) {
try {
const response = await fetch(url)
if (response.ok) {
return response
}
} catch {
// ignore until retries are exhausted
}
await delay(intervalMs)
}
throw new Error(`http_not_ready:${url}`)
}
// Poll Den's worker endpoint until the worker reports status "healthy" and an
// instance URL is available (polls every 5s). Throws
// `worker_not_ready:<workerId>` when the attempt budget runs out.
async function waitForWorkerReady(baseUrl, workerId, auth, attempts = 180) {
  for (let attempt = 0; attempt < attempts; attempt += 1) {
    const { response, payload } = await requestJson(baseUrl, `/v1/workers/${workerId}`, auth)
    const healthy = response.ok && payload?.instance?.url && payload?.worker?.status === "healthy"
    if (healthy) {
      return payload
    }
    await delay(5000)
  }
  throw new Error(`worker_not_ready:${workerId}`)
}
// Poll Daytona until both the worker's sandbox and its two named volumes are
// gone; throws `daytona_cleanup_incomplete:<workerId>` when traces remain
// after all attempts (polled every 5s).
async function waitForDaytonaCleanup(daytona, workerId, attempts = 60) {
  for (let attempt = 0; attempt < attempts; attempt += 1) {
    const expectedNames = [workspaceVolumeName(workerId), dataVolumeName(workerId)]
    const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20)
    const volumes = await daytona.volume.list()
    const leftoverVolumes = volumes.filter((volume) => expectedNames.includes(volume.name))
    if (sandboxes.items.length === 0 && leftoverVolumes.length === 0) {
      return
    }
    await delay(5000)
  }
  throw new Error(`daytona_cleanup_incomplete:${workerId}`)
}
// Best-effort teardown used by test cleanup: delete any sandboxes tagged for
// the worker, then delete its workspace/data volumes if they still exist.
// Individual delete failures are swallowed so cleanup never masks the real
// test result.
async function forceDeleteDaytonaResources(daytona, workerId) {
  const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20)
  for (const sandbox of sandboxes.items) {
    await sandbox.delete(120).catch(() => {})
  }
  const volumes = await daytona.volume.list()
  const targets = [workspaceVolumeName(workerId), dataVolumeName(workerId)]
  for (const name of targets) {
    const match = volumes.find((entry) => entry.name === name)
    if (match) {
      await daytona.volume.delete(match).catch(() => {})
    }
  }
}
// Pull a bearer token out of a Better Auth response payload: a non-blank
// top-level `token` wins, otherwise fall back to `session.token`. Returns
// null when neither is present or the payload is not an object.
function extractAuthToken(payload) {
  if (payload == null || typeof payload !== "object") {
    return null
  }
  const direct = payload.token
  if (typeof direct === "string" && direct.trim() !== "") {
    return direct
  }
  const session = payload.session
  if (session !== null && typeof session === "object" && typeof session.token === "string") {
    return session.token
  }
  return null
}
async function requestJson(baseUrl, path, { method = "GET", body, token, cookie } = {}) {
const headers = new Headers()
const origin = process.env.DEN_BROWSER_ORIGIN?.trim() || new URL(baseUrl).origin
headers.set("Accept", "application/json")
headers.set("Origin", origin)
headers.set("Referer", `${origin}/`)
if (body !== undefined) {
headers.set("Content-Type", "application/json")
}
if (token) {
headers.set("Authorization", `Bearer ${token}`)
}
if (cookie) {
headers.set("Cookie", cookie)
}
const response = await fetch(`${baseUrl}${path}`, {
method,
headers,
body: body === undefined ? undefined : JSON.stringify(body),
})
const text = await response.text()
let payload = null
if (text) {
try {
payload = JSON.parse(text)
} catch {
payload = text
}
}
return {
response,
payload,
cookie: response.headers.get("set-cookie"),
}
}
async function main() {
if (!process.env.DAYTONA_API_KEY) {
fail("DAYTONA_API_KEY is required. Add it to .env.daytona or export it before running the test.")
}
const existingBaseUrl = process.env.DEN_BASE_URL?.trim() || process.env.DEN_API_URL?.trim() || ""
const mysqlPort = existingBaseUrl ? null : await getFreePort()
const appPort = existingBaseUrl ? null : await getFreePort()
const containerName = existingBaseUrl
? null
: `openwork-den-daytona-${randomUUID().slice(0, 8)}`
const dbName = "openwork_den_daytona_e2e"
const dbPassword = "openwork-root"
const baseUrl = existingBaseUrl || `http://127.0.0.1:${appPort}`
const databaseUrl = mysqlPort
? `mysql://root:${dbPassword}@127.0.0.1:${mysqlPort}/${dbName}`
: null
const runtimeEnv = {
...process.env,
...(databaseUrl ? { DATABASE_URL: databaseUrl } : {}),
BETTER_AUTH_SECRET: "openwork-den-daytona-secret-0000000000",
BETTER_AUTH_URL: baseUrl,
...(appPort ? { PORT: String(appPort) } : {}),
CORS_ORIGINS: baseUrl,
PROVISIONER_MODE: "daytona",
POLAR_FEATURE_GATE_ENABLED: "false",
OPENWORK_DAYTONA_ENV_PATH: daytonaEnvPath || process.env.OPENWORK_DAYTONA_ENV_PATH || "",
}
const daytona = new Daytona({
apiKey: runtimeEnv.DAYTONA_API_KEY,
apiUrl: runtimeEnv.DAYTONA_API_URL,
...(runtimeEnv.DAYTONA_TARGET ? { target: runtimeEnv.DAYTONA_TARGET } : {}),
})
let serviceProcess = null
let workerId = null
const cleanup = async () => {
if (workerId) {
try {
await forceDeleteDaytonaResources(daytona, workerId)
} catch {
// cleanup best effort only
}
}
if (serviceProcess && !serviceProcess.killed) {
serviceProcess.kill("SIGINT")
await once(serviceProcess, "exit").catch(() => {})
}
if (containerName) {
await runCommand("docker", ["rm", "-f", containerName], { cwd: serviceDir }).catch(() => {})
}
}
process.on("SIGINT", async () => {
await cleanup()
process.exit(130)
})
try {
if (containerName && mysqlPort && databaseUrl && appPort) {
log("Starting disposable MySQL container...")
await runCommand("docker", [
"run",
"-d",
"--rm",
"--name",
containerName,
"-e",
`MYSQL_ROOT_PASSWORD=${dbPassword}`,
"-e",
`MYSQL_DATABASE=${dbName}`,
"-p",
`${mysqlPort}:3306`,
"mysql:8.4",
])
log("Waiting for MySQL...")
await waitForMysqlConnection(databaseUrl)
log("Running Den migrations...")
await runCommand("pnpm", ["db:migrate"], { cwd: serviceDir, env: runtimeEnv })
log("Starting Den service with Daytona provisioner...")
serviceProcess = spawn("pnpm", ["exec", "tsx", "src/index.ts"], {
cwd: serviceDir,
env: runtimeEnv,
stdio: "pipe",
})
let serviceOutput = ""
serviceProcess.stdout?.on("data", (chunk) => {
serviceOutput += chunk.toString()
})
serviceProcess.stderr?.on("data", (chunk) => {
serviceOutput += chunk.toString()
})
serviceProcess.on("exit", (code) => {
if (code !== 0) {
console.error(serviceOutput)
}
})
} else {
log(`Using existing Den API at ${baseUrl}`)
}
await waitForHttp(`${baseUrl}/health`)
const email = `den-daytona-${Date.now()}@example.com`
const password = "TestPass123!"
log("Creating account...")
const signup = await requestJson(baseUrl, "/api/auth/sign-up/email", {
method: "POST",
body: {
name: "Den Daytona E2E",
email,
password,
},
})
if (!signup.response.ok) {
fail("Signup failed", signup.payload)
}
const token = extractAuthToken(signup.payload)
const cookie = signup.cookie
if (!token && !cookie) {
fail("Signup did not return a bearer token or session cookie", signup.payload)
}
const auth = { token, cookie }
log("Validating authenticated session...")
const me = await requestJson(baseUrl, "/v1/me", auth)
if (!me.response.ok) {
fail("Session lookup failed", me.payload)
}
log("Creating Daytona-backed cloud worker...")
const createWorker = await requestJson(baseUrl, "/v1/workers", {
method: "POST",
...auth,
body: {
name: "daytona-worker",
destination: "cloud",
},
})
if (createWorker.response.status !== 202) {
fail("Worker creation did not return async launch", {
status: createWorker.response.status,
payload: createWorker.payload,
})
}
workerId = createWorker.payload?.worker?.id || null
if (!workerId) {
fail("Worker response did not include an id", createWorker.payload)
}
log("Waiting for worker provisioning to finish...")
const workerPayload = await waitForWorkerReady(baseUrl, workerId, auth)
if (workerPayload.instance.provider !== "daytona") {
fail("Worker instance did not report the Daytona provider", workerPayload)
}
log("Checking worker health endpoint...")
await waitForHttp(`${workerPayload.instance.url.replace(/\/$/, "")}/health`, 120, 5000)
log("Checking OpenWork connect metadata...")
const tokensResponse = await requestJson(baseUrl, `/v1/workers/${workerId}/tokens`, {
method: "POST",
...auth,
})
if (!tokensResponse.response.ok || !tokensResponse.payload?.connect?.openworkUrl) {
fail("Worker tokens/connect payload missing", tokensResponse.payload)
}
const clientToken = tokensResponse.payload.tokens?.client
if (!clientToken) {
fail("Client token missing from worker token payload", tokensResponse.payload)
}
const connectHeaders = {
Accept: "application/json",
Authorization: `Bearer ${clientToken}`,
}
const statusResponse = await fetch(`${tokensResponse.payload.connect.openworkUrl}/status`, {
headers: connectHeaders,
})
if (!statusResponse.ok) {
fail("Connected worker /status failed", await statusResponse.text())
}
const capabilitiesResponse = await fetch(`${tokensResponse.payload.connect.openworkUrl}/capabilities`, {
headers: connectHeaders,
})
if (!capabilitiesResponse.ok) {
fail("Connected worker /capabilities failed", await capabilitiesResponse.text())
}
log("Verifying Daytona resources exist...")
const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20)
if (sandboxes.items.length === 0) {
fail("Expected a Daytona sandbox for the worker but none were found")
}
const volumes = await daytona.volume.list()
const expectedVolumeNames = [workspaceVolumeName(workerId), dataVolumeName(workerId)]
const missingVolumes = expectedVolumeNames.filter(
(name) => !volumes.some((volume) => volume.name === name),
)
if (missingVolumes.length > 0) {
fail("Expected Daytona volumes were not created", missingVolumes)
}
log("Deleting worker and waiting for Daytona cleanup...")
const deleteResponse = await requestJson(baseUrl, `/v1/workers/${workerId}`, {
method: "DELETE",
...auth,
})
if (deleteResponse.response.status !== 204) {
fail("Worker deletion failed", {
status: deleteResponse.response.status,
payload: deleteResponse.payload,
})
}
await waitForDaytonaCleanup(daytona, workerId)
workerId = null
log("Daytona worker flow passed.")
} finally {
await cleanup()
}
}
// Top-level entry point for the e2e script: any uncaught error is routed
// through fail(), which logs the message and exits non-zero so CI detects it.
main().catch((error) => {
  fail(error instanceof Error ? error.message : String(error))
})

View File

@@ -165,6 +165,7 @@ async function main() {
BETTER_AUTH_SECRET: "openwork-den-e2e-secret-000000000000",
BETTER_AUTH_URL: baseUrl,
PORT: String(appPort),
OPENWORK_DEV_MODE: "1",
CORS_ORIGINS: baseUrl,
PROVISIONER_MODE: "stub",
WORKER_URL_TEMPLATE: "https://workers.example.com/{workerId}",
@@ -306,15 +307,15 @@ async function main() {
},
});
if (secondWorker.response.status !== 409) {
fail("Second worker was not blocked by the one-worker limit", {
if (secondWorker.response.status !== 202) {
fail("Second worker should be allowed in dev mode", {
status: secondWorker.response.status,
payload: secondWorker.payload,
});
}
if (!secondWorker.payload || secondWorker.payload.error !== "worker_limit_reached") {
fail("Second worker returned the wrong error payload", secondWorker.payload);
if (!secondWorker.payload?.worker?.id) {
fail("Second worker did not return a worker payload", secondWorker.payload);
}
log("Listing workers...");
@@ -324,11 +325,11 @@ async function main() {
}
const items = Array.isArray(workers.payload?.workers) ? workers.payload.workers : null;
if (!items || items.length !== 1) {
fail("Expected exactly one worker after limit enforcement", workers.payload);
if (!items || items.length !== 2) {
fail("Expected two cloud workers in dev mode", workers.payload);
}
log("E2E worker limit check passed.");
log("E2E dev worker limit check passed.");
} finally {
await cleanup();
}

View File

@@ -0,0 +1,87 @@
import { readdir, readFile } from "node:fs/promises"
import path from "node:path"
import { fileURLToPath } from "node:url"
import mysql from "mysql2/promise"
// ESM has no __dirname; reconstruct it from import.meta.url so the script
// can locate the sibling "drizzle" directory holding generated .sql migrations.
const __dirname = path.dirname(fileURLToPath(import.meta.url))
const drizzleDir = path.resolve(__dirname, "..", "drizzle")
// Break a Drizzle migration file into its individual SQL statements.
// Drizzle separates statements with "--> statement-breakpoint" markers;
// empty fragments (stray whitespace around markers) are discarded.
function splitStatements(sql) {
  const fragments = sql.split(/--> statement-breakpoint/g)
  const trimmed = fragments.map((fragment) => fragment.trim())
  return trimmed.filter((fragment) => fragment.length > 0)
}
// Make sure the bookkeeping table that tracks applied migrations exists.
// Uses CREATE TABLE IF NOT EXISTS, so calling this repeatedly is harmless.
async function ensureMigrationsTable(connection) {
  const createTableSql = `
CREATE TABLE IF NOT EXISTS __den_migrations (
id varchar(255) NOT NULL PRIMARY KEY,
applied_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`
  await connection.query(createTableSql)
}
// Return the set of migration file names already recorded in __den_migrations.
async function appliedMigrations(connection) {
  const [rows] = await connection.query("SELECT id FROM __den_migrations")
  const ids = new Set()
  for (const row of rows) {
    ids.add(row.id)
  }
  return ids
}
// Build the mysql2 connection config from environment variables.
// Precedence: a full DATABASE_URL wins; otherwise fall back to discrete
// DATABASE_HOST / DATABASE_USERNAME / DATABASE_PASSWORD credentials
// (the password may legitimately be empty) with TLS certificate
// verification enabled, as PlanetScale-style hosts require.
function connectionConfigFromEnv() {
  const url = process.env.DATABASE_URL?.trim()
  if (url) {
    return url
  }
  const host = process.env.DATABASE_HOST?.trim()
  const user = process.env.DATABASE_USERNAME?.trim()
  if (!host || !user) {
    throw new Error("DATABASE_URL or DATABASE_HOST/DATABASE_USERNAME/DATABASE_PASSWORD is required")
  }
  const password = process.env.DATABASE_PASSWORD ?? ""
  return {
    host,
    user,
    password,
    ssl: { rejectUnauthorized: true },
  }
}
// Apply every pending .sql migration from the drizzle directory, in
// lexicographic (timestamp-prefix) order. Each applied file is recorded in
// __den_migrations so it is never re-run; already-recorded files are skipped.
async function run() {
  const connection = await mysql.createConnection(connectionConfigFromEnv())
  try {
    await ensureMigrationsTable(connection)
    const alreadyApplied = await appliedMigrations(connection)
    const allFiles = await readdir(drizzleDir)
    const pending = allFiles
      .filter((file) => file.endsWith(".sql"))
      .sort((a, b) => a.localeCompare(b))
      .filter((file) => !alreadyApplied.has(file))
    for (const file of pending) {
      const sql = await readFile(path.join(drizzleDir, file), "utf8")
      // Drizzle files bundle several statements; execute them one at a time.
      for (const statement of splitStatements(sql)) {
        await connection.query(statement)
      }
      await connection.query("INSERT INTO __den_migrations (id) VALUES (?)", [file])
      process.stdout.write(`[den] Applied migration ${file}\n`)
    }
  } finally {
    await connection.end()
  }
}
// Script entry point: log the full stack (when available) and exit non-zero
// on failure so deploy pipelines treat a broken migration as fatal.
run().catch((error) => {
  console.error(error instanceof Error ? error.stack ?? error.message : String(error))
  process.exit(1)
})

View File

@@ -1,34 +1,37 @@
import { inArray, sql } from "drizzle-orm"
import { sql } from "./db/drizzle.js"
import { db } from "./db/index.js"
import { AdminAllowlistTable } from "./db/schema.js"
import { createDenTypeId } from "./db/typeid.js"
const ADMIN_ALLOWLIST_SEEDS = [
{
id: "admin-ben-openworklabs-com",
email: "ben@openworklabs.com",
note: "Seeded internal admin",
},
{
id: "admin-berk-openworklabs-com",
email: "jan@openworklabs.com",
note: "Seeded internal admin",
},
{
email: "omar@openworklabs.com",
note: "Seeded internal admin",
},
{
email: "berk@openworklabs.com",
note: "Seeded internal admin",
},
] as const
const MANAGED_ADMIN_ALLOWLIST_IDS = [
"admin-ben-openworklabs-com",
"admin-jan-openworklabs-com",
"admin-omar-openworklabs-com",
"admin-berk-openworklabs-com",
] as const
let ensureAdminAllowlistSeededPromise: Promise<void> | null = null
async function seedAdminAllowlist() {
for (const entry of ADMIN_ALLOWLIST_SEEDS) {
await db
.insert(AdminAllowlistTable)
.values(entry)
.values({
id: createDenTypeId("adminAllowlist"),
...entry,
})
.onDuplicateKeyUpdate({
set: {
note: entry.note,
@@ -36,13 +39,6 @@ async function seedAdminAllowlist() {
},
})
}
const activeSeedIds = new Set<string>(ADMIN_ALLOWLIST_SEEDS.map((entry) => entry.id))
const staleSeedIds = MANAGED_ADMIN_ALLOWLIST_IDS.filter((id) => !activeSeedIds.has(id))
if (staleSeedIds.length > 0) {
await db.delete(AdminAllowlistTable).where(inArray(AdminAllowlistTable.id, staleSeedIds))
}
}
export async function ensureAdminAllowlistSeeded() {

View File

@@ -2,6 +2,7 @@ import { betterAuth } from "better-auth"
import { drizzleAdapter } from "better-auth/adapters/drizzle"
import { db } from "./db/index.js"
import * as schema from "./db/schema.js"
import { createDenTypeId, normalizeDenTypeId } from "./db/typeid.js"
import { env } from "./env.js"
import { ensureDefaultOrg } from "./orgs.js"
@@ -33,6 +34,24 @@ export const auth = betterAuth({
provider: "mysql",
schema,
}),
advanced: {
database: {
generateId: (options) => {
switch (options.model) {
case "user":
return createDenTypeId("user")
case "session":
return createDenTypeId("session")
case "account":
return createDenTypeId("account")
case "verification":
return createDenTypeId("verification")
default:
return false
}
},
},
},
emailAndPassword: {
enabled: true,
},
@@ -41,7 +60,7 @@ export const auth = betterAuth({
create: {
after: async (user) => {
const name = user.name ?? user.email ?? "Personal"
await ensureDefaultOrg(user.id, name)
await ensureDefaultOrg(normalizeDenTypeId("user", user.id), name)
},
},
},

View File

@@ -0,0 +1 @@
// Re-export the Drizzle query helpers from the shared den-db package so app
// code imports them from one local module instead of drizzle-orm directly.
// NOTE(review): this reaches into the package's dist/ output via a deep
// relative path — presumably a workspace-resolution limitation; confirm it
// resolves in all build targets (Docker, Vercel) or switch to a package alias.
export { and, asc, desc, eq, gt, isNotNull, isNull, sql } from "../../../../packages/den-db/dist/drizzle.js"

View File

@@ -1,129 +1,9 @@
import { drizzle } from "drizzle-orm/mysql2"
import type { FieldPacket, QueryOptions, QueryResult } from "mysql2"
import mysql from "mysql2/promise"
import { createDenDb, isTransientDbConnectionError } from "../../../../packages/den-db/dist/index.js"
import { env } from "../env.js"
import * as schema from "./schema.js"
const TRANSIENT_DB_ERROR_CODES = new Set([
"ECONNRESET",
"EPIPE",
"ETIMEDOUT",
"PROTOCOL_CONNECTION_LOST",
"PROTOCOL_ENQUEUE_AFTER_FATAL_ERROR",
])
const RETRYABLE_QUERY_PREFIXES = ["select", "show", "describe", "explain"]
function isRecord(value: unknown): value is Record<string, unknown> {
return typeof value === "object" && value !== null
}
function getErrorCode(error: unknown): string | null {
if (!isRecord(error)) {
return null
}
if (typeof error.code === "string") {
return error.code
}
return getErrorCode(error.cause)
}
function isTransientDbConnectionError(error: unknown): boolean {
const code = getErrorCode(error)
if (!code) {
return false
}
return TRANSIENT_DB_ERROR_CODES.has(code)
}
function extractSql(value: unknown): string | null {
if (typeof value === "string") {
return value
}
if (!isRecord(value)) {
return null
}
if (typeof value.sql === "string") {
return value.sql
}
return null
}
function isRetryableReadQuery(sql: string | null): boolean {
if (!sql) {
return false
}
const normalized = sql.trimStart().toLowerCase()
return RETRYABLE_QUERY_PREFIXES.some((prefix) => normalized.startsWith(prefix))
}
async function retryReadQuery<T>(label: "query" | "execute", sql: string | null, run: () => Promise<T>): Promise<T> {
try {
return await run()
} catch (error) {
if (!isRetryableReadQuery(sql) || !isTransientDbConnectionError(error)) {
throw error
}
const queryType = sql?.trimStart().split(/\s+/, 1)[0]?.toUpperCase() ?? "QUERY"
console.warn(`[db] transient mysql error on ${label} (${queryType}); retrying once`)
return run()
}
}
const client = mysql.createPool({
uri: env.databaseUrl,
waitForConnections: true,
connectionLimit: 10,
maxIdle: 10,
idleTimeout: 60_000,
queueLimit: 0,
enableKeepAlive: true,
keepAliveInitialDelay: 0,
export const { db } = createDenDb({
databaseUrl: env.databaseUrl,
mode: env.dbMode,
planetscale: env.planetscale,
})
const query = client.query.bind(client)
async function retryingQuery<T extends QueryResult>(sql: string): Promise<[T, FieldPacket[]]>
async function retryingQuery<T extends QueryResult>(sql: string, values: unknown): Promise<[T, FieldPacket[]]>
async function retryingQuery<T extends QueryResult>(options: QueryOptions): Promise<[T, FieldPacket[]]>
async function retryingQuery<T extends QueryResult>(
options: QueryOptions,
values: unknown,
): Promise<[T, FieldPacket[]]>
async function retryingQuery<T extends QueryResult>(
sqlOrOptions: string | QueryOptions,
values?: unknown,
): Promise<[T, FieldPacket[]]> {
const sql = extractSql(sqlOrOptions)
return retryReadQuery("query", sql, () => query<T>(sqlOrOptions as never, values as never))
}
client.query = retryingQuery
const execute = client.execute.bind(client)
async function retryingExecute<T extends QueryResult>(sql: string): Promise<[T, FieldPacket[]]>
async function retryingExecute<T extends QueryResult>(sql: string, values: unknown): Promise<[T, FieldPacket[]]>
async function retryingExecute<T extends QueryResult>(options: QueryOptions): Promise<[T, FieldPacket[]]>
async function retryingExecute<T extends QueryResult>(
options: QueryOptions,
values: unknown,
): Promise<[T, FieldPacket[]]>
async function retryingExecute<T extends QueryResult>(
sqlOrOptions: string | QueryOptions,
values?: unknown,
): Promise<[T, FieldPacket[]]> {
const sql = extractSql(sqlOrOptions)
return retryReadQuery("execute", sql, () => execute<T>(sqlOrOptions as never, values as never))
}
client.execute = retryingExecute
export const db = drizzle(client, { schema, mode: "default" })
export { isTransientDbConnectionError }

View File

@@ -1,230 +1 @@
import { sql } from "drizzle-orm"
import {
boolean,
index,
json,
mysqlEnum,
mysqlTable,
text,
timestamp,
uniqueIndex,
varchar,
} from "drizzle-orm/mysql-core"
const id = () => varchar("id", { length: 64 }).notNull()
const timestamps = {
created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updated_at: timestamp("updated_at", { fsp: 3 })
.notNull()
.default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
}
export const OrgRole = ["owner", "member"] as const
export const WorkerDestination = ["local", "cloud"] as const
export const WorkerStatus = ["provisioning", "healthy", "failed", "stopped"] as const
export const TokenScope = ["client", "host"] as const
export const AuthUserTable = mysqlTable(
"user",
{
id: varchar("id", { length: 36 }).notNull().primaryKey(),
name: varchar("name", { length: 255 }).notNull(),
email: varchar("email", { length: 255 }).notNull(),
emailVerified: boolean("email_verified").notNull().default(false),
image: text("image"),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 })
.notNull()
.default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
},
(table) => [uniqueIndex("user_email").on(table.email)],
)
export const AuthSessionTable = mysqlTable(
"session",
{
id: varchar("id", { length: 36 }).notNull().primaryKey(),
userId: varchar("user_id", { length: 36 }).notNull(),
token: varchar("token", { length: 255 }).notNull(),
expiresAt: timestamp("expires_at", { fsp: 3 }).notNull(),
ipAddress: text("ip_address"),
userAgent: text("user_agent"),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 })
.notNull()
.default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
},
(table) => [
uniqueIndex("session_token").on(table.token),
index("session_user_id").on(table.userId),
],
)
export const AuthAccountTable = mysqlTable(
"account",
{
id: varchar("id", { length: 36 }).notNull().primaryKey(),
userId: varchar("user_id", { length: 36 }).notNull(),
accountId: text("account_id").notNull(),
providerId: text("provider_id").notNull(),
accessToken: text("access_token"),
refreshToken: text("refresh_token"),
accessTokenExpiresAt: timestamp("access_token_expires_at", { fsp: 3 }),
refreshTokenExpiresAt: timestamp("refresh_token_expires_at", { fsp: 3 }),
scope: text("scope"),
idToken: text("id_token"),
password: text("password"),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 })
.notNull()
.default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
},
(table) => [index("account_user_id").on(table.userId)],
)
export const AuthVerificationTable = mysqlTable(
"verification",
{
id: varchar("id", { length: 36 }).notNull().primaryKey(),
identifier: varchar("identifier", { length: 255 }).notNull(),
value: text("value").notNull(),
expiresAt: timestamp("expires_at", { fsp: 3 }).notNull(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 })
.notNull()
.default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
},
(table) => [index("verification_identifier").on(table.identifier)],
)
export const user = AuthUserTable
export const session = AuthSessionTable
export const account = AuthAccountTable
export const verification = AuthVerificationTable
export const DesktopHandoffGrantTable = mysqlTable(
"desktop_handoff_grant",
{
id: id().primaryKey(),
user_id: varchar("user_id", { length: 64 }).notNull(),
session_token: text("session_token").notNull(),
expires_at: timestamp("expires_at", { fsp: 3 }).notNull(),
consumed_at: timestamp("consumed_at", { fsp: 3 }),
created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
},
(table) => [index("desktop_handoff_grant_user_id").on(table.user_id), index("desktop_handoff_grant_expires_at").on(table.expires_at)],
)
export const OrgTable = mysqlTable(
"org",
{
id: id().primaryKey(),
name: varchar("name", { length: 255 }).notNull(),
slug: varchar("slug", { length: 255 }).notNull(),
owner_user_id: varchar("owner_user_id", { length: 64 }).notNull(),
...timestamps,
},
(table) => [uniqueIndex("org_slug").on(table.slug), index("org_owner_user_id").on(table.owner_user_id)],
)
export const OrgMembershipTable = mysqlTable(
"org_membership",
{
id: id().primaryKey(),
org_id: varchar("org_id", { length: 64 }).notNull(),
user_id: varchar("user_id", { length: 64 }).notNull(),
role: mysqlEnum("role", OrgRole).notNull(),
created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
},
(table) => [index("org_membership_org_id").on(table.org_id), index("org_membership_user_id").on(table.user_id)],
)
export const AdminAllowlistTable = mysqlTable(
"admin_allowlist",
{
id: id().primaryKey(),
email: varchar("email", { length: 255 }).notNull(),
note: varchar("note", { length: 255 }),
...timestamps,
},
(table) => [uniqueIndex("admin_allowlist_email").on(table.email)],
)
export const WorkerTable = mysqlTable(
"worker",
{
id: id().primaryKey(),
org_id: varchar("org_id", { length: 64 }).notNull(),
created_by_user_id: varchar("created_by_user_id", { length: 64 }),
name: varchar("name", { length: 255 }).notNull(),
description: varchar("description", { length: 1024 }),
destination: mysqlEnum("destination", WorkerDestination).notNull(),
status: mysqlEnum("status", WorkerStatus).notNull(),
image_version: varchar("image_version", { length: 128 }),
workspace_path: varchar("workspace_path", { length: 1024 }),
sandbox_backend: varchar("sandbox_backend", { length: 64 }),
...timestamps,
},
(table) => [
index("worker_org_id").on(table.org_id),
index("worker_created_by_user_id").on(table.created_by_user_id),
index("worker_status").on(table.status),
],
)
export const WorkerInstanceTable = mysqlTable(
"worker_instance",
{
id: id().primaryKey(),
worker_id: varchar("worker_id", { length: 64 }).notNull(),
provider: varchar("provider", { length: 64 }).notNull(),
region: varchar("region", { length: 64 }),
url: varchar("url", { length: 2048 }).notNull(),
status: mysqlEnum("status", WorkerStatus).notNull(),
...timestamps,
},
(table) => [index("worker_instance_worker_id").on(table.worker_id)],
)
export const WorkerTokenTable = mysqlTable(
"worker_token",
{
id: id().primaryKey(),
worker_id: varchar("worker_id", { length: 64 }).notNull(),
scope: mysqlEnum("scope", TokenScope).notNull(),
token: varchar("token", { length: 128 }).notNull(),
created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
revoked_at: timestamp("revoked_at", { fsp: 3 }),
},
(table) => [
index("worker_token_worker_id").on(table.worker_id),
uniqueIndex("worker_token_token").on(table.token),
],
)
export const WorkerBundleTable = mysqlTable(
"worker_bundle",
{
id: id().primaryKey(),
worker_id: varchar("worker_id", { length: 64 }).notNull(),
storage_url: varchar("storage_url", { length: 2048 }).notNull(),
status: varchar("status", { length: 64 }).notNull(),
created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
},
(table) => [index("worker_bundle_worker_id").on(table.worker_id)],
)
export const AuditEventTable = mysqlTable(
"audit_event",
{
id: id().primaryKey(),
org_id: varchar("org_id", { length: 64 }).notNull(),
worker_id: varchar("worker_id", { length: 64 }),
actor_user_id: varchar("actor_user_id", { length: 64 }).notNull(),
action: varchar("action", { length: 128 }).notNull(),
payload: json("payload"),
created_at: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
},
(table) => [index("audit_event_org_id").on(table.org_id), index("audit_event_worker_id").on(table.worker_id)],
)
export * from "../../../../packages/den-db/dist/schema.js"

View File

@@ -0,0 +1 @@
// Re-export the shared TypeID helpers (createDenTypeId, normalizeDenTypeId)
// from the utils package so db code has a single local import point.
// NOTE(review): deep relative path into dist/ — confirm it resolves in every
// build target rather than relying on a pre-built package; a workspace alias
// would be sturdier.
export * from "../../../../packages/utils/dist/typeid.js"

View File

@@ -1,7 +1,11 @@
import { z } from "zod";
const schema = z.object({
DATABASE_URL: z.string().min(1),
DATABASE_URL: z.string().min(1).optional(),
DATABASE_HOST: z.string().min(1).optional(),
DATABASE_USERNAME: z.string().min(1).optional(),
DATABASE_PASSWORD: z.string().optional(),
DB_MODE: z.enum(["mysql", "planetscale"]).optional(),
BETTER_AUTH_SECRET: z.string().min(32),
BETTER_AUTH_URL: z.string().min(1),
DEN_BETTER_AUTH_TRUSTED_ORIGINS: z.string().optional(),
@@ -10,9 +14,12 @@ const schema = z.object({
GOOGLE_CLIENT_ID: z.string().optional(),
GOOGLE_CLIENT_SECRET: z.string().optional(),
PORT: z.string().optional(),
WORKER_PROXY_PORT: z.string().optional(),
OPENWORK_DEV_MODE: z.string().optional(),
CORS_ORIGINS: z.string().optional(),
PROVISIONER_MODE: z.enum(["stub", "render"]).optional(),
PROVISIONER_MODE: z.enum(["stub", "render", "daytona"]).optional(),
WORKER_URL_TEMPLATE: z.string().optional(),
OPENWORK_DAYTONA_ENV_PATH: z.string().optional(),
RENDER_API_BASE: z.string().optional(),
RENDER_API_KEY: z.string().optional(),
RENDER_OWNER_ID: z.string().optional(),
@@ -40,10 +47,65 @@ const schema = z.object({
POLAR_BENEFIT_ID: z.string().optional(),
POLAR_SUCCESS_URL: z.string().optional(),
POLAR_RETURN_URL: z.string().optional(),
DAYTONA_API_URL: z.string().optional(),
DAYTONA_API_KEY: z.string().optional(),
DAYTONA_TARGET: z.string().optional(),
DAYTONA_SNAPSHOT: z.string().optional(),
DAYTONA_SANDBOX_IMAGE: z.string().optional(),
DAYTONA_SANDBOX_CPU: z.string().optional(),
DAYTONA_SANDBOX_MEMORY: z.string().optional(),
DAYTONA_SANDBOX_DISK: z.string().optional(),
DAYTONA_SANDBOX_PUBLIC: z.string().optional(),
DAYTONA_SANDBOX_AUTO_STOP_INTERVAL: z.string().optional(),
DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL: z.string().optional(),
DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL: z.string().optional(),
DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS: z.string().optional(),
DAYTONA_WORKER_PROXY_BASE_URL: z.string().optional(),
DAYTONA_SANDBOX_NAME_PREFIX: z.string().optional(),
DAYTONA_VOLUME_NAME_PREFIX: z.string().optional(),
DAYTONA_WORKSPACE_MOUNT_PATH: z.string().optional(),
DAYTONA_DATA_MOUNT_PATH: z.string().optional(),
DAYTONA_RUNTIME_WORKSPACE_PATH: z.string().optional(),
DAYTONA_RUNTIME_DATA_PATH: z.string().optional(),
DAYTONA_SIDECAR_DIR: z.string().optional(),
DAYTONA_OPENWORK_PORT: z.string().optional(),
DAYTONA_OPENCODE_PORT: z.string().optional(),
DAYTONA_OPENWORK_VERSION: z.string().optional(),
DAYTONA_CREATE_TIMEOUT_SECONDS: z.string().optional(),
DAYTONA_DELETE_TIMEOUT_SECONDS: z.string().optional(),
DAYTONA_HEALTHCHECK_TIMEOUT_MS: z.string().optional(),
DAYTONA_POLL_INTERVAL_MS: z.string().optional(),
}).superRefine((value, ctx) => {
const inferredMode = value.DB_MODE ?? (value.DATABASE_URL ? "mysql" : "planetscale")
if (inferredMode === "mysql" && !value.DATABASE_URL) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
message: "DATABASE_URL is required when using mysql mode",
path: ["DATABASE_URL"],
})
}
if (inferredMode === "planetscale") {
for (const key of ["DATABASE_HOST", "DATABASE_USERNAME", "DATABASE_PASSWORD"] as const) {
if (!value[key]) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
message: `${key} is required when using planetscale mode`,
path: [key],
})
}
}
}
});
const parsed = schema.parse(process.env);
function optionalString(value: string | undefined): string | undefined {
const trimmed = value?.trim();
return trimmed ? trimmed : undefined;
}
function normalizeOrigin(origin: string): string {
const value = origin.trim();
if (value === "*") {
@@ -66,11 +128,26 @@ const betterAuthTrustedOrigins =
const polarFeatureGateEnabled =
(parsed.POLAR_FEATURE_GATE_ENABLED ?? "false").toLowerCase() === "true";
const daytonaSandboxPublic =
(parsed.DAYTONA_SANDBOX_PUBLIC ?? "false").toLowerCase() === "true";
const planetscaleCredentials =
parsed.DATABASE_HOST && parsed.DATABASE_USERNAME && parsed.DATABASE_PASSWORD !== undefined
? {
host: parsed.DATABASE_HOST,
username: parsed.DATABASE_USERNAME,
password: parsed.DATABASE_PASSWORD,
}
: null
export const env = {
databaseUrl: parsed.DATABASE_URL,
dbMode: parsed.DB_MODE ?? (parsed.DATABASE_URL ? "mysql" : "planetscale"),
planetscale: planetscaleCredentials,
betterAuthSecret: parsed.BETTER_AUTH_SECRET,
betterAuthUrl: parsed.BETTER_AUTH_URL,
betterAuthTrustedOrigins,
devMode: (parsed.OPENWORK_DEV_MODE ?? "0").trim() === "1",
github: {
clientId: parsed.GITHUB_CLIENT_ID?.trim() || undefined,
clientSecret: parsed.GITHUB_CLIENT_SECRET?.trim() || undefined,
@@ -80,6 +157,7 @@ export const env = {
clientSecret: parsed.GOOGLE_CLIENT_SECRET?.trim() || undefined,
},
port: Number(parsed.PORT ?? "8788"),
workerProxyPort: Number(parsed.WORKER_PROXY_PORT ?? "8789"),
corsOrigins: corsOrigins ?? [],
provisionerMode: parsed.PROVISIONER_MODE ?? "stub",
workerUrlTemplate: parsed.WORKER_URL_TEMPLATE,
@@ -122,4 +200,54 @@ export const env = {
successUrl: parsed.POLAR_SUCCESS_URL,
returnUrl: parsed.POLAR_RETURN_URL,
},
daytona: {
envPath: optionalString(parsed.OPENWORK_DAYTONA_ENV_PATH),
apiUrl: optionalString(parsed.DAYTONA_API_URL) ?? "https://app.daytona.io/api",
apiKey: optionalString(parsed.DAYTONA_API_KEY),
target: optionalString(parsed.DAYTONA_TARGET),
snapshot: optionalString(parsed.DAYTONA_SNAPSHOT),
image: optionalString(parsed.DAYTONA_SANDBOX_IMAGE) ?? "node:20-bookworm",
resources: {
cpu: Number(parsed.DAYTONA_SANDBOX_CPU ?? "2"),
memory: Number(parsed.DAYTONA_SANDBOX_MEMORY ?? "4"),
disk: Number(parsed.DAYTONA_SANDBOX_DISK ?? "8"),
},
public: daytonaSandboxPublic,
autoStopInterval: Number(parsed.DAYTONA_SANDBOX_AUTO_STOP_INTERVAL ?? "0"),
autoArchiveInterval: Number(
parsed.DAYTONA_SANDBOX_AUTO_ARCHIVE_INTERVAL ?? "10080",
),
autoDeleteInterval: Number(
parsed.DAYTONA_SANDBOX_AUTO_DELETE_INTERVAL ?? "-1",
),
signedPreviewExpiresSeconds: Number(
parsed.DAYTONA_SIGNED_PREVIEW_EXPIRES_SECONDS ?? "86400",
),
workerProxyBaseUrl:
optionalString(parsed.DAYTONA_WORKER_PROXY_BASE_URL) ?? "https://workers.den.openworklabs",
sandboxNamePrefix:
optionalString(parsed.DAYTONA_SANDBOX_NAME_PREFIX) ?? "den-daytona-worker",
volumeNamePrefix:
optionalString(parsed.DAYTONA_VOLUME_NAME_PREFIX) ?? "den-daytona-worker",
workspaceMountPath:
optionalString(parsed.DAYTONA_WORKSPACE_MOUNT_PATH) ?? "/workspace",
dataMountPath:
optionalString(parsed.DAYTONA_DATA_MOUNT_PATH) ?? "/persist/openwork",
runtimeWorkspacePath:
optionalString(parsed.DAYTONA_RUNTIME_WORKSPACE_PATH) ??
"/tmp/openwork-workspace",
runtimeDataPath:
optionalString(parsed.DAYTONA_RUNTIME_DATA_PATH) ?? "/tmp/openwork-data",
sidecarDir:
optionalString(parsed.DAYTONA_SIDECAR_DIR) ?? "/tmp/openwork-sidecars",
openworkPort: Number(parsed.DAYTONA_OPENWORK_PORT ?? "8787"),
opencodePort: Number(parsed.DAYTONA_OPENCODE_PORT ?? "4096"),
openworkVersion: optionalString(parsed.DAYTONA_OPENWORK_VERSION),
createTimeoutSeconds: Number(parsed.DAYTONA_CREATE_TIMEOUT_SECONDS ?? "300"),
deleteTimeoutSeconds: Number(parsed.DAYTONA_DELETE_TIMEOUT_SECONDS ?? "120"),
healthcheckTimeoutMs: Number(
parsed.DAYTONA_HEALTHCHECK_TIMEOUT_MS ?? "300000",
),
pollIntervalMs: Number(parsed.DAYTONA_POLL_INTERVAL_MS ?? "5000"),
},
};

View File

@@ -1,11 +1,15 @@
import express from "express"
import { asc, desc, eq, isNotNull, sql } from "drizzle-orm"
import { fromNodeHeaders } from "better-auth/node"
import { asc, desc, eq, isNotNull, sql } from "../db/drizzle.js"
import { ensureAdminAllowlistSeeded } from "../admin-allowlist.js"
import { auth } from "../auth.js"
import { getCloudWorkerAdminBillingStatus } from "../billing/polar.js"
import { db } from "../db/index.js"
import { AdminAllowlistTable, AuthAccountTable, AuthSessionTable, AuthUserTable, WorkerTable } from "../db/schema.js"
import { normalizeDenTypeId } from "../db/typeid.js"
import { asyncRoute } from "./errors.js"
import { getRequestSession } from "./session.js"
type UserId = typeof AuthUserTable.$inferSelect.id
function normalizeEmail(value: string | null | undefined) {
return value?.trim().toLowerCase() ?? ""
@@ -82,13 +86,17 @@ async function mapWithConcurrency<T, R>(items: T[], limit: number, mapper: (item
}
async function requireAdminSession(req: express.Request, res: express.Response) {
const session = await getRequestSession(req)
const session = await auth.api.getSession({
headers: fromNodeHeaders(req.headers),
})
if (!session?.user?.id) {
res.status(401).json({ error: "unauthorized" })
return null
}
const userId = normalizeDenTypeId("user", session.user.id)
const email = normalizeEmail(session.user.email)
if (!email) {
res.status(403).json({ error: "admin_email_required" })
@@ -108,7 +116,13 @@ async function requireAdminSession(req: express.Request, res: express.Response)
return null
}
return session
return {
...session,
user: {
...session.user,
id: userId,
},
}
}
export const adminRouter = express.Router()
@@ -155,7 +169,7 @@ adminRouter.get("/overview", asyncRoute(async (req, res) => {
.from(AuthAccountTable),
])
const workerStatsByUser = new Map<string, {
const workerStatsByUser = new Map<UserId, {
workerCount: number
cloudWorkerCount: number
localWorkerCount: number
@@ -175,7 +189,7 @@ adminRouter.get("/overview", asyncRoute(async (req, res) => {
})
}
const sessionStatsByUser = new Map<string, {
const sessionStatsByUser = new Map<UserId, {
sessionCount: number
lastSeenAt: Date | string | null
}>()
@@ -187,7 +201,7 @@ adminRouter.get("/overview", asyncRoute(async (req, res) => {
})
}
const providersByUser = new Map<string, Set<string>>()
const providersByUser = new Map<UserId, Set<string>>()
for (const row of accountRows) {
const providerId = normalizeProvider(row.providerId)
const existing = providersByUser.get(row.userId) ?? new Set<string>()

View File

@@ -1,9 +1,10 @@
import type express from "express"
import { fromNodeHeaders } from "better-auth/node"
import { and, eq, gt } from "drizzle-orm"
import { and, eq, gt } from "../db/drizzle.js"
import { auth } from "../auth.js"
import { db } from "../db/index.js"
import { AuthSessionTable, AuthUserTable } from "../db/schema.js"
import { normalizeDenTypeId } from "../db/typeid.js"
type AuthSessionLike = Awaited<ReturnType<typeof auth.api.getSession>>
@@ -57,7 +58,10 @@ async function getSessionFromBearerToken(token: string): Promise<AuthSessionLike
return {
session: row.session,
user: row.user,
user: {
...row.user,
id: normalizeDenTypeId("user", row.user.id),
},
}
}
@@ -66,7 +70,13 @@ export async function getRequestSession(req: express.Request): Promise<AuthSessi
headers: fromNodeHeaders(req.headers),
})
if (cookieSession?.user?.id) {
return cookieSession
return {
...cookieSession,
user: {
...cookieSession.user,
id: normalizeDenTypeId("user", cookieSession.user.id),
},
}
}
const bearerToken = readBearerToken(req)

View File

@@ -1,16 +1,20 @@
import { randomBytes, randomUUID } from "crypto"
import { randomBytes } from "crypto"
import express from "express"
import { and, asc, desc, eq, isNull } from "drizzle-orm"
import { fromNodeHeaders } from "better-auth/node"
import { and, asc, desc, eq, isNull } from "../db/drizzle.js"
import { z } from "zod"
import { getCloudWorkerBillingStatus, requireCloudWorkerAccess, setCloudWorkerSubscriptionCancellation } from "../billing/polar.js"
import { auth } from "../auth.js"
// Polar billing is temporarily disabled for the one-worker experiment in hosted mode.
// Keep the old billing integration nearby so it can be restored quickly.
// import { getCloudWorkerBillingStatus, setCloudWorkerSubscriptionCancellation } from "../billing/polar.js"
import { db } from "../db/index.js"
import { AuditEventTable, WorkerBundleTable, WorkerInstanceTable, WorkerTable, WorkerTokenTable } from "../db/schema.js"
import { AuditEventTable, AuthUserTable, DaytonaSandboxTable, OrgMembershipTable, WorkerBundleTable, WorkerInstanceTable, WorkerTable, WorkerTokenTable } from "../db/schema.js"
import { env } from "../env.js"
import { asyncRoute, isTransientDbConnectionError } from "./errors.js"
import { getRequestSession } from "./session.js"
import { ensureDefaultOrg, listUserOrgs, resolveUserOrg } from "../orgs.js"
import { ensureDefaultOrg } from "../orgs.js"
import { deprovisionWorker, provisionWorker } from "../workers/provisioner.js"
import { customDomainForWorker } from "../workers/vanity-domain.js"
import { createDenTypeId, normalizeDenTypeId } from "../db/typeid.js"
const createSchema = z.object({
name: z.string().min(1),
@@ -33,6 +37,17 @@ const token = () => randomBytes(32).toString("hex")
type WorkerRow = typeof WorkerTable.$inferSelect
type WorkerInstanceRow = typeof WorkerInstanceTable.$inferSelect
type WorkerId = WorkerRow["id"]
type OrgId = typeof OrgMembershipTable.$inferSelect.org_id
type UserId = typeof AuthUserTable.$inferSelect.id
function parseWorkerIdParam(value: string): WorkerId {
return normalizeDenTypeId("worker", value)
}
function parseUserId(value: string): UserId {
return normalizeDenTypeId("user", value)
}
function isRecord(value: unknown): value is Record<string, unknown> {
return typeof value === "object" && value !== null
@@ -70,14 +85,6 @@ function parseWorkspaceSelection(payload: unknown): { workspaceId: string; openw
}
}
/**
 * Extract a non-empty token string from a worker-runtime response payload.
 * Returns null when the payload is not an object, has no string `token`
 * field, or the token is blank after trimming.
 */
function parseIssuedToken(payload: unknown): string | null {
  if (!isRecord(payload)) return null
  const raw = payload.token
  if (typeof raw !== "string") return null
  const trimmed = raw.trim()
  return trimmed.length > 0 ? trimmed : null
}
async function resolveConnectUrlFromWorker(instanceUrl: string, clientToken: string) {
const baseUrl = normalizeUrl(instanceUrl)
if (!baseUrl || !clientToken.trim()) {
@@ -108,7 +115,7 @@ async function resolveConnectUrlFromWorker(instanceUrl: string, clientToken: str
}
}
function getConnectUrlCandidates(workerId: string, instanceUrl: string | null) {
function getConnectUrlCandidates(workerId: WorkerId, instanceUrl: string | null) {
const candidates: string[] = []
const vanityHostname = customDomainForWorker(workerId, env.render.workerPublicDomainSuffix)
if (vanityHostname) {
@@ -138,7 +145,7 @@ function queryIncludesFlag(value: unknown): boolean {
return false
}
async function resolveConnectUrlFromCandidates(workerId: string, instanceUrl: string | null, clientToken: string) {
async function resolveConnectUrlFromCandidates(workerId: WorkerId, instanceUrl: string | null, clientToken: string) {
const candidates = getConnectUrlCandidates(workerId, instanceUrl)
for (const candidate of candidates) {
const resolved = await resolveConnectUrlFromWorker(candidate, clientToken)
@@ -149,7 +156,7 @@ async function resolveConnectUrlFromCandidates(workerId: string, instanceUrl: st
return null
}
async function getWorkerRuntimeAccess(workerId: string) {
async function getWorkerRuntimeAccess(workerId: WorkerId) {
const instance = await getLatestWorkerInstance(workerId)
const tokenRows = await db
.select()
@@ -170,7 +177,7 @@ async function getWorkerRuntimeAccess(workerId: string) {
}
async function fetchWorkerRuntimeJson(input: {
workerId: string
workerId: WorkerId
path: string
method?: "GET" | "POST"
body?: unknown
@@ -221,73 +228,36 @@ async function fetchWorkerRuntimeJson(input: {
return { ok: false as const, status: lastStatus, payload: lastPayload }
}
/**
 * Ask the worker runtime to mint an owner-scoped token.
 * Throws an Error carrying the runtime's own `message` when available,
 * otherwise a generic failure message including the HTTP status.
 */
async function issueWorkerOwnerToken(workerId: string): Promise<string> {
  const result = await fetchWorkerRuntimeJson({
    workerId,
    path: "/tokens",
    method: "POST",
    body: { scope: "owner", label: "Den owner token" },
  })
  if (result.ok) {
    const issued = parseIssuedToken(result.payload)
    if (issued) {
      return issued
    }
  }
  // Prefer the runtime-provided error message; fall back to the status code.
  let message = `Owner token request failed with ${result.status}.`
  if (isRecord(result.payload) && typeof result.payload.message === "string") {
    message = result.payload.message
  }
  throw new Error(message)
}
async function requireSession(req: express.Request, res: express.Response) {
const session = await getRequestSession(req)
const session = await auth.api.getSession({
headers: fromNodeHeaders(req.headers),
})
if (!session?.user?.id) {
res.status(401).json({ error: "unauthorized" })
return null
}
return session
return {
...session,
user: {
...session.user,
id: parseUserId(session.user.id),
},
}
}
function readRequestedOrgId(req: express.Request): string | null {
const queryValue = typeof req.query.orgId === "string" ? req.query.orgId : ""
if (queryValue.trim()) {
return queryValue.trim()
async function getOrgId(userId: UserId): Promise<OrgId | null> {
const membership = await db
.select()
.from(OrgMembershipTable)
.where(eq(OrgMembershipTable.user_id, userId))
.limit(1)
if (membership.length === 0) {
return null
}
if (isRecord(req.body) && typeof req.body.orgId === "string" && req.body.orgId.trim()) {
return req.body.orgId.trim()
}
return null
return membership[0].org_id
}
// Resolve the org a request should operate on.
// Tri-state result:
//   - OrgSummary: the resolved org.
//   - null: the user belongs to no orgs (caller decides the response).
//   - undefined: a 403 has already been written to `res` — caller must
//     return without responding again.
async function requireOrgContext(req: express.Request, res: express.Response, userId: string) {
  const requestedOrgId = readRequestedOrgId(req)
  const org = await resolveUserOrg(userId, requestedOrgId)
  if (!org) {
    const memberships = await listUserOrgs(userId)
    if (memberships.length === 0) {
      // No memberships at all.
      return null
    }
    if (requestedOrgId) {
      // User asked for a specific org they are not a member of.
      res.status(403).json({
        error: "org_forbidden",
        message: "You do not have access to that org.",
      })
      return undefined
    }
    // Defensive fallback: resolveUserOrg already returns the first org when
    // none was requested, so this branch is not expected to be reachable.
    return memberships[0]
  }
  return org
}
async function countUserCloudWorkers(userId: string) {
async function countUserCloudWorkers(userId: UserId) {
const rows = await db
.select({ id: WorkerTable.id })
.from(WorkerTable)
@@ -297,7 +267,22 @@ async function countUserCloudWorkers(userId: string) {
return rows.length
}
async function getLatestWorkerInstance(workerId: string) {
// Static billing payload returned while Polar billing is paused for the
// one-worker experiment: no feature gate, no active plan, nothing to pay.
// Mirrors the shape of the real billing status so clients need no changes.
function getExperimentBillingSummary() {
  const summary = {
    featureGateEnabled: false,
    hasActivePlan: false,
    checkoutRequired: false,
    checkoutUrl: null,
    portalUrl: null,
    price: null,
    subscription: null,
    invoices: [],
    // Keep the configured Polar ids in the payload so clients can still
    // render product/benefit references consistently.
    productId: env.polar.productId,
    benefitId: env.polar.benefitId,
  }
  return summary
}
async function getLatestWorkerInstance(workerId: WorkerId) {
for (let attempt = 0; attempt < 2; attempt += 1) {
try {
const rows = await db
@@ -359,7 +344,7 @@ function toWorkerResponse(row: WorkerRow, userId: string) {
}
}
async function continueCloudProvisioning(input: { workerId: string; name: string; hostToken: string; clientToken: string }) {
async function continueCloudProvisioning(input: { workerId: WorkerId; name: string; hostToken: string; clientToken: string }) {
try {
const provisioned = await provisionWorker({
workerId: input.workerId,
@@ -374,7 +359,7 @@ async function continueCloudProvisioning(input: { workerId: string; name: string
.where(eq(WorkerTable.id, input.workerId))
await db.insert(WorkerInstanceTable).values({
id: randomUUID(),
id: createDenTypeId("workerInstance"),
worker_id: input.workerId,
provider: provisioned.provider,
region: provisioned.region,
@@ -398,11 +383,8 @@ workersRouter.get("/", asyncRoute(async (req, res) => {
const session = await requireSession(req, res)
if (!session) return
const org = await requireOrgContext(req, res, session.user.id)
if (org === undefined) {
return
}
if (!org) {
const orgId = await getOrgId(session.user.id)
if (!orgId) {
res.json({ workers: [] })
return
}
@@ -416,7 +398,7 @@ workersRouter.get("/", asyncRoute(async (req, res) => {
const rows = await db
.select()
.from(WorkerTable)
.where(eq(WorkerTable.org_id, org.id))
.where(eq(WorkerTable.org_id, orgId))
.orderBy(desc(WorkerTable.created_at))
.limit(parsed.data.limit)
@@ -448,44 +430,38 @@ workersRouter.post("/", asyncRoute(async (req, res) => {
return
}
if (parsed.data.destination === "cloud" && (await countUserCloudWorkers(session.user.id)) > 0) {
const access = await requireCloudWorkerAccess({
userId: session.user.id,
email: session.user.email ?? `${session.user.id}@placeholder.local`,
name: session.user.name ?? session.user.email ?? "OpenWork User"
if (parsed.data.destination === "cloud" && !env.devMode && (await countUserCloudWorkers(session.user.id)) > 0) {
// Polar is temporarily disabled for this experiment.
// Keep the previous paywall block nearby so it can be restored quickly.
//
// const access = await requireCloudWorkerAccess({
// userId: session.user.id,
// email: session.user.email ?? `${session.user.id}@placeholder.local`,
// name: session.user.name ?? session.user.email ?? "OpenWork User",
// })
// if (!access.allowed) {
// res.status(402).json({
// error: "payment_required",
// message: "Additional cloud workers require an active Den Cloud plan.",
// polar: {
// checkoutUrl: access.checkoutUrl,
// productId: env.polar.productId,
// benefitId: env.polar.benefitId,
// },
// })
// return
// }
res.status(409).json({
error: "worker_limit_reached",
message: "You can only create one cloud worker during this experiment.",
})
if (!access.allowed) {
res.status(402).json({
error: "payment_required",
message: "Additional cloud workers require an active Den Cloud plan.",
polar: {
checkoutUrl: access.checkoutUrl,
productId: env.polar.productId,
benefitId: env.polar.benefitId
}
})
return
}
return
}
const requestedOrgId = readRequestedOrgId(req)
let orgId = requestedOrgId
if (requestedOrgId) {
const org = await requireOrgContext(req, res, session.user.id)
if (org === undefined) {
return
}
if (!org) {
res.status(404).json({ error: "org_not_found" })
return
}
orgId = org.id
}
if (!orgId) {
orgId = (await ensureDefaultOrg(session.user.id, session.user.name ?? session.user.email ?? "Personal"))
}
const workerId = randomUUID()
const orgId =
(await getOrgId(session.user.id)) ?? (await ensureDefaultOrg(session.user.id, session.user.name ?? session.user.email ?? "Personal"))
const workerId = createDenTypeId("worker")
let workerStatus: WorkerRow["status"] = parsed.data.destination === "cloud" ? "provisioning" : "healthy"
await db.insert(WorkerTable).values({
@@ -505,13 +481,13 @@ workersRouter.post("/", asyncRoute(async (req, res) => {
const clientToken = token()
await db.insert(WorkerTokenTable).values([
{
id: randomUUID(),
id: createDenTypeId("workerToken"),
worker_id: workerId,
scope: "host",
token: hostToken,
},
{
id: randomUUID(),
id: createDenTypeId("workerToken"),
worker_id: workerId,
scope: "client",
token: clientToken,
@@ -558,32 +534,37 @@ workersRouter.get("/billing", asyncRoute(async (req, res) => {
const session = await requireSession(req, res)
if (!session) return
const includeCheckoutUrl = queryIncludesFlag(req.query.includeCheckout)
const includePortalUrl = !queryIncludesFlag(req.query.excludePortal)
const includeInvoices = !queryIncludesFlag(req.query.excludeInvoices)
const billingInput = {
userId: session.user.id,
email: session.user.email ?? `${session.user.id}@placeholder.local`,
name: session.user.name ?? session.user.email ?? "OpenWork User"
}
const billing = await getCloudWorkerBillingStatus(
billingInput,
{
includeCheckoutUrl,
includePortalUrl,
includeInvoices
}
)
res.json({
billing: {
...billing,
productId: env.polar.productId,
benefitId: env.polar.benefitId
}
billing: getExperimentBillingSummary(),
})
// Polar billing is temporarily disabled for the one-worker experiment.
// const includeCheckoutUrl = queryIncludesFlag(req.query.includeCheckout)
// const includePortalUrl = !queryIncludesFlag(req.query.excludePortal)
// const includeInvoices = !queryIncludesFlag(req.query.excludeInvoices)
//
// const billingInput = {
// userId: session.user.id,
// email: session.user.email ?? `${session.user.id}@placeholder.local`,
// name: session.user.name ?? session.user.email ?? "OpenWork User",
// }
//
// const billing = await getCloudWorkerBillingStatus(
// billingInput,
// {
// includeCheckoutUrl,
// includePortalUrl,
// includeInvoices,
// },
// )
//
// res.json({
// billing: {
// ...billing,
// productId: env.polar.productId,
// benefitId: env.polar.benefitId,
// },
// })
}))
workersRouter.post("/billing/subscription", asyncRoute(async (req, res) => {
@@ -596,38 +577,49 @@ workersRouter.post("/billing/subscription", asyncRoute(async (req, res) => {
return
}
const billingInput = {
userId: session.user.id,
email: session.user.email ?? `${session.user.id}@placeholder.local`,
name: session.user.name ?? session.user.email ?? "OpenWork User"
}
const subscription = await setCloudWorkerSubscriptionCancellation(billingInput, parsed.data.cancelAtPeriodEnd)
const billing = await getCloudWorkerBillingStatus(billingInput, {
includeCheckoutUrl: false,
includePortalUrl: true,
includeInvoices: true
})
res.json({
subscription,
billing: {
...billing,
productId: env.polar.productId,
benefitId: env.polar.benefitId
}
subscription: null,
billing: getExperimentBillingSummary(),
})
// Polar billing is temporarily disabled for the one-worker experiment.
// const billingInput = {
// userId: session.user.id,
// email: session.user.email ?? `${session.user.id}@placeholder.local`,
// name: session.user.name ?? session.user.email ?? "OpenWork User",
// }
//
// const subscription = await setCloudWorkerSubscriptionCancellation(billingInput, parsed.data.cancelAtPeriodEnd)
// const billing = await getCloudWorkerBillingStatus(billingInput, {
// includeCheckoutUrl: false,
// includePortalUrl: true,
// includeInvoices: true,
// })
//
// res.json({
// subscription,
// billing: {
// ...billing,
// productId: env.polar.productId,
// benefitId: env.polar.benefitId,
// },
// })
}))
workersRouter.get("/:id", asyncRoute(async (req, res) => {
const session = await requireSession(req, res)
if (!session) return
const org = await requireOrgContext(req, res, session.user.id)
if (org === undefined) {
const orgId = await getOrgId(session.user.id)
if (!orgId) {
res.status(404).json({ error: "worker_not_found" })
return
}
if (!org) {
let workerId: WorkerId
try {
workerId = parseWorkerIdParam(req.params.id)
} catch {
res.status(404).json({ error: "worker_not_found" })
return
}
@@ -635,7 +627,7 @@ workersRouter.get("/:id", asyncRoute(async (req, res) => {
const rows = await db
.select()
.from(WorkerTable)
.where(and(eq(WorkerTable.id, req.params.id), eq(WorkerTable.org_id, org.id)))
.where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
.limit(1)
if (rows.length === 0) {
@@ -655,11 +647,16 @@ workersRouter.post("/:id/tokens", asyncRoute(async (req, res) => {
const session = await requireSession(req, res)
if (!session) return
const org = await requireOrgContext(req, res, session.user.id)
if (org === undefined) {
const orgId = await getOrgId(session.user.id)
if (!orgId) {
res.status(404).json({ error: "worker_not_found" })
return
}
if (!org) {
let workerId: WorkerId
try {
workerId = parseWorkerIdParam(req.params.id)
} catch {
res.status(404).json({ error: "worker_not_found" })
return
}
@@ -667,10 +664,10 @@ workersRouter.post("/:id/tokens", asyncRoute(async (req, res) => {
const rows = await db
.select()
.from(WorkerTable)
.where(eq(WorkerTable.id, req.params.id))
.where(eq(WorkerTable.id, workerId))
.limit(1)
if (rows.length === 0 || rows[0].org_id !== org.id) {
if (rows.length === 0 || rows[0].org_id !== orgId) {
res.status(404).json({ error: "worker_not_found" })
return
}
@@ -694,24 +691,11 @@ workersRouter.post("/:id/tokens", asyncRoute(async (req, res) => {
const instance = await getLatestWorkerInstance(rows[0].id)
const connect = await resolveConnectUrlFromCandidates(rows[0].id, instance?.url ?? null, clientToken)
let ownerToken: string
try {
ownerToken = await issueWorkerOwnerToken(rows[0].id)
} catch (error) {
res.status(502).json({
error: "worker_owner_token_unavailable",
message: error instanceof Error ? error.message : "Could not mint an owner token for this worker.",
})
return
}
res.json({
tokens: {
host: hostToken,
client: clientToken,
collaborator: clientToken,
owner: ownerToken,
},
connect: connect ?? (instance?.url ? { openworkUrl: instance.url, workspaceId: null } : null),
})
@@ -721,11 +705,16 @@ workersRouter.get("/:id/runtime", asyncRoute(async (req, res) => {
const session = await requireSession(req, res)
if (!session) return
const org = await requireOrgContext(req, res, session.user.id)
if (org === undefined) {
const orgId = await getOrgId(session.user.id)
if (!orgId) {
res.status(404).json({ error: "worker_not_found" })
return
}
if (!org) {
let workerId: WorkerId
try {
workerId = parseWorkerIdParam(req.params.id)
} catch {
res.status(404).json({ error: "worker_not_found" })
return
}
@@ -733,7 +722,7 @@ workersRouter.get("/:id/runtime", asyncRoute(async (req, res) => {
const rows = await db
.select()
.from(WorkerTable)
.where(and(eq(WorkerTable.id, req.params.id), eq(WorkerTable.org_id, org.id)))
.where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
.limit(1)
if (rows.length === 0) {
@@ -753,11 +742,16 @@ workersRouter.post("/:id/runtime/upgrade", asyncRoute(async (req, res) => {
const session = await requireSession(req, res)
if (!session) return
const org = await requireOrgContext(req, res, session.user.id)
if (org === undefined) {
const orgId = await getOrgId(session.user.id)
if (!orgId) {
res.status(404).json({ error: "worker_not_found" })
return
}
if (!org) {
let workerId: WorkerId
try {
workerId = parseWorkerIdParam(req.params.id)
} catch {
res.status(404).json({ error: "worker_not_found" })
return
}
@@ -765,7 +759,7 @@ workersRouter.post("/:id/runtime/upgrade", asyncRoute(async (req, res) => {
const rows = await db
.select()
.from(WorkerTable)
.where(and(eq(WorkerTable.id, req.params.id), eq(WorkerTable.org_id, org.id)))
.where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
.limit(1)
if (rows.length === 0) {
@@ -787,11 +781,16 @@ workersRouter.delete("/:id", asyncRoute(async (req, res) => {
const session = await requireSession(req, res)
if (!session) return
const org = await requireOrgContext(req, res, session.user.id)
if (org === undefined) {
const orgId = await getOrgId(session.user.id)
if (!orgId) {
res.status(404).json({ error: "worker_not_found" })
return
}
if (!org) {
let workerId: WorkerId
try {
workerId = parseWorkerIdParam(req.params.id)
} catch {
res.status(404).json({ error: "worker_not_found" })
return
}
@@ -799,7 +798,7 @@ workersRouter.delete("/:id", asyncRoute(async (req, res) => {
const rows = await db
.select()
.from(WorkerTable)
.where(and(eq(WorkerTable.id, req.params.id), eq(WorkerTable.org_id, org.id)))
.where(and(eq(WorkerTable.id, workerId), eq(WorkerTable.org_id, orgId)))
.limit(1)
if (rows.length === 0) {
@@ -824,6 +823,7 @@ workersRouter.delete("/:id", asyncRoute(async (req, res) => {
await db.transaction(async (tx) => {
await tx.delete(WorkerTokenTable).where(eq(WorkerTokenTable.worker_id, worker.id))
await tx.delete(DaytonaSandboxTable).where(eq(DaytonaSandboxTable.worker_id, worker.id))
await tx.delete(WorkerInstanceTable).where(eq(WorkerInstanceTable.worker_id, worker.id))
await tx.delete(WorkerBundleTable).where(eq(WorkerBundleTable.worker_id, worker.id))
await tx.delete(AuditEventTable).where(eq(AuditEventTable.worker_id, worker.id))

View File

@@ -1,4 +1,4 @@
import "dotenv/config"
import "./load-env.js"
import cors from "cors"
import express from "express"
import path from "node:path"
@@ -11,6 +11,7 @@ import { desktopAuthRouter } from "./http/desktop-auth.js"
import { asyncRoute, errorMiddleware } from "./http/errors.js"
import { getRequestSession } from "./http/session.js"
import { workersRouter } from "./http/workers.js"
import { normalizeDenTypeId } from "./db/typeid.js"
import { listUserOrgs } from "./orgs.js"
const app = express()
@@ -51,7 +52,7 @@ app.get("/v1/me/orgs", asyncRoute(async (req, res) => {
return
}
const orgs = await listUserOrgs(session.user.id)
const orgs = await listUserOrgs(normalizeDenTypeId("user", session.user.id))
res.json({
orgs,
defaultOrgId: orgs[0]?.id ?? null,

View File

@@ -0,0 +1,45 @@
import { existsSync } from "node:fs"
import path from "node:path"
import { fileURLToPath } from "node:url"
import dotenv from "dotenv"
/**
 * Walk up the directory tree from `startDir` looking for `fileName`.
 * Visits at most `maxDepth + 1` directories and stops at the filesystem root.
 * Returns the first matching path, or null when nothing is found.
 */
function findUpwards(startDir: string, fileName: string, maxDepth = 8) {
  let dir = startDir
  let remaining = maxDepth + 1
  while (remaining > 0) {
    remaining -= 1
    const candidate = path.join(dir, fileName)
    if (existsSync(candidate)) {
      return candidate
    }
    const parent = path.dirname(dir)
    if (parent === dir) {
      // Reached the filesystem root; nothing above to search.
      break
    }
    dir = parent
  }
  return null
}
// Resolve the service root relative to this module's location.
const srcDir = path.dirname(fileURLToPath(import.meta.url))
const serviceDir = path.resolve(srcDir, "..")
// Load service-local env files first. With override:false, values already in
// process.env (and earlier files) always win over later ones, so precedence
// is: process env > .env.local > .env > .env.daytona > default lookup.
for (const filePath of [
  path.join(serviceDir, ".env.local"),
  path.join(serviceDir, ".env"),
]) {
  if (existsSync(filePath)) {
    dotenv.config({ path: filePath, override: false })
  }
}
// Daytona credentials: an explicit OPENWORK_DAYTONA_ENV_PATH wins; otherwise
// search upward from two levels above the service for a shared .env.daytona.
const explicitDaytonaEnvPath = process.env.OPENWORK_DAYTONA_ENV_PATH?.trim()
const detectedDaytonaEnvPath = findUpwards(path.resolve(serviceDir, "..", ".."), ".env.daytona")
const daytonaEnvPath = explicitDaytonaEnvPath || detectedDaytonaEnvPath
if (daytonaEnvPath && existsSync(daytonaEnvPath)) {
  dotenv.config({ path: daytonaEnvPath, override: false })
}
// Finally let dotenv do its default lookup (a .env resolved from the process
// working directory), still without overriding anything set above.
dotenv.config({ override: false })

View File

@@ -1,16 +1,12 @@
import { randomUUID } from "crypto"
import { asc, eq } from "drizzle-orm"
import { eq } from "./db/drizzle.js"
import { db } from "./db/index.js"
import { OrgMembershipTable, OrgTable } from "./db/schema.js"
import { AuthUserTable, OrgMembershipTable, OrgTable } from "./db/schema.js"
import { createDenTypeId } from "./db/typeid.js"
export type OrgSummary = {
id: string
name: string
slug: string
role: "owner" | "member"
}
type UserId = typeof AuthUserTable.$inferSelect.id
type OrgId = typeof OrgTable.$inferSelect.id
export async function ensureDefaultOrg(userId: string, name: string) {
export async function ensureDefaultOrg(userId: UserId, name: string): Promise<OrgId> {
const existing = await db
.select()
.from(OrgMembershipTable)
@@ -21,7 +17,7 @@ export async function ensureDefaultOrg(userId: string, name: string) {
return existing[0].org_id
}
const orgId = randomUUID()
const orgId = createDenTypeId("org")
const slug = `personal-${orgId.slice(0, 8)}`
await db.insert(OrgTable).values({
id: orgId,
@@ -30,7 +26,7 @@ export async function ensureDefaultOrg(userId: string, name: string) {
owner_user_id: userId,
})
await db.insert(OrgMembershipTable).values({
id: randomUUID(),
id: createDenTypeId("orgMembership"),
org_id: orgId,
user_id: userId,
role: "owner",
@@ -38,38 +34,32 @@ export async function ensureDefaultOrg(userId: string, name: string) {
return orgId
}
export async function listUserOrgs(userId: string): Promise<OrgSummary[]> {
const rows = await db
export async function listUserOrgs(userId: UserId) {
const memberships = await db
.select({
id: OrgTable.id,
name: OrgTable.name,
slug: OrgTable.slug,
membershipId: OrgMembershipTable.id,
role: OrgMembershipTable.role,
createdAt: OrgTable.created_at,
org: {
id: OrgTable.id,
name: OrgTable.name,
slug: OrgTable.slug,
ownerUserId: OrgTable.owner_user_id,
createdAt: OrgTable.created_at,
updatedAt: OrgTable.updated_at,
},
})
.from(OrgMembershipTable)
.innerJoin(OrgTable, eq(OrgMembershipTable.org_id, OrgTable.id))
.where(eq(OrgMembershipTable.user_id, userId))
.orderBy(asc(OrgTable.created_at))
return rows.map((row) => ({
id: row.id,
name: row.name,
slug: row.slug,
return memberships.map((row) => ({
id: row.org.id,
name: row.org.name,
slug: row.org.slug,
ownerUserId: row.org.ownerUserId,
role: row.role,
membershipId: row.membershipId,
createdAt: row.org.createdAt,
updatedAt: row.org.updatedAt,
}))
}
/**
 * Pick the org a user request should act on.
 * With no requested id (or a blank one), falls back to the user's first org;
 * with a requested id, returns it only if the user is a member.
 * Returns null when the user has no orgs or the requested org doesn't match.
 */
export async function resolveUserOrg(userId: string, requestedOrgId?: string | null): Promise<OrgSummary | null> {
  const memberships = await listUserOrgs(userId)
  if (!memberships.length) return null
  const wanted = (requestedOrgId ?? "").trim()
  if (wanted === "") return memberships[0]
  const match = memberships.find((membership) => membership.id === wanted)
  return match ?? null
}

View File

@@ -0,0 +1,484 @@
import { Daytona, type Sandbox } from "@daytonaio/sdk"
import { eq } from "../db/drizzle.js"
import { db } from "../db/index.js"
import { DaytonaSandboxTable } from "../db/schema.js"
import { createDenTypeId } from "../db/typeid.js"
import { env } from "../env.js"
type WorkerId = typeof DaytonaSandboxTable.$inferSelect.worker_id
type ProvisionInput = {
workerId: WorkerId
name: string
hostToken: string
clientToken: string
}
type ProvisionedInstance = {
provider: string
url: string
status: "provisioning" | "healthy"
region?: string
}
// Resolve-after-delay helper used by the polling loops in this module.
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
// Upper bound applied to the signed preview URL lifetime: 24 hours, in seconds.
const maxSignedPreviewExpirySeconds = 60 * 60 * 24
// Renew signed preview URLs 5 minutes (in ms) before they would expire.
const signedPreviewRefreshLeadMs = 5 * 60 * 1000
// Normalize an arbitrary string into a lowercase dash-separated identifier:
// runs of characters outside [a-z0-9-] become "-", repeated dashes collapse
// to one, and a single leading/trailing dash is trimmed.
const slug = (value: string) => {
  const lowered = value.toLowerCase()
  const dashed = lowered.replace(/[^a-z0-9-]+/g, "-")
  const collapsed = dashed.replace(/-+/g, "-")
  return collapsed.replace(/^-|-$/g, "")
}
// Wrap a string in single quotes for POSIX shells; embedded single quotes
// are spliced through with the standard '"'"' trick.
function shellQuote(value: string) {
  const escaped = value.split("'").join(`'"'"'`)
  return "'" + escaped + "'"
}
// Build a Daytona SDK client from the service env; `target` is only passed
// through when one is configured.
function createDaytonaClient() {
  const base = {
    apiKey: env.daytona.apiKey,
    apiUrl: env.daytona.apiUrl,
  }
  if (env.daytona.target) {
    return new Daytona({ ...base, target: env.daytona.target })
  }
  return new Daytona(base)
}
// Clamp the configured signed-preview TTL into [1, maxSignedPreviewExpirySeconds].
function normalizedSignedPreviewExpirySeconds() {
  const configured = env.daytona.signedPreviewExpiresSeconds
  const capped = Math.min(configured, maxSignedPreviewExpirySeconds)
  return Math.max(1, capped)
}
// Compute when a signed preview URL should be renewed: its expiry minus the
// refresh lead, but never earlier than "now".
function signedPreviewRefreshAt(expiresInSeconds: number) {
  const deltaMs = Math.max(0, expiresInSeconds * 1000 - signedPreviewRefreshLeadMs)
  return new Date(Date.now() + deltaMs)
}
// Public proxy endpoint for a worker: configured base URL with trailing
// slashes stripped, plus the URL-encoded worker id as the path segment.
function workerProxyUrl(workerId: WorkerId) {
  const base = env.daytona.workerProxyBaseUrl.replace(/\/+$/, "")
  return base + "/" + encodeURIComponent(workerId)
}
// Fail fast when the Daytona API key is not configured.
function assertDaytonaConfig() {
  if (env.daytona.apiKey) {
    return
  }
  throw new Error("DAYTONA_API_KEY is required for daytona provisioner")
}
// Short (at most 12 chars) dash-free fragment of the worker id, used to keep
// derived resource names unique but readable.
function workerHint(workerId: WorkerId) {
  const compact = workerId.split("-").join("")
  return compact.slice(0, 12)
}
// Labels stamped on every sandbox so Den-provisioned sandboxes can be
// identified and mapped back to their worker.
function sandboxLabels(workerId: WorkerId) {
  const labels = {
    "openwork.den.provider": "daytona",
    "openwork.den.worker-id": workerId,
  }
  return labels
}
// Deterministic sandbox name: configured prefix + worker name + id hint,
// slugged and truncated to 63 characters.
function sandboxName(input: ProvisionInput) {
  const raw = [env.daytona.sandboxNamePrefix, input.name, workerHint(input.workerId)].join("-")
  return slug(raw).slice(0, 63)
}
// Name for the worker's persistent workspace volume (slugged, ≤63 chars).
function workspaceVolumeName(workerId: WorkerId) {
  const raw = `${env.daytona.volumeNamePrefix}-${workerHint(workerId)}-workspace`
  return slug(raw).slice(0, 63)
}
// Name for the worker's persistent data volume (slugged, ≤63 chars).
function dataVolumeName(workerId: WorkerId) {
  const raw = `${env.daytona.volumeNamePrefix}-${workerHint(workerId)}-data`
  return slug(raw).slice(0, 63)
}
// Assemble the single shell command that boots openwork inside a Daytona
// sandbox: prepare mount/symlink layout, install openwork if missing, then
// run `openwork serve` with up to 3 retries. The pieces below are runtime
// shell text — string contents must not be edited casually.
function buildOpenWorkStartCommand(input: ProvisionInput) {
  // Pin the orchestrator package version when one is configured.
  const orchestratorPackage = env.daytona.openworkVersion?.trim()
    ? `openwork-orchestrator@${env.daytona.openworkVersion.trim()}`
    : "openwork-orchestrator"
  // Install openwork on first boot if absent; opencode is expected to be
  // baked into the snapshot image — the script aborts if it is not on PATH.
  const installStep = [
    `if ! command -v openwork >/dev/null 2>&1; then npm install -g ${shellQuote(orchestratorPackage)}; fi`,
    "if ! command -v opencode >/dev/null 2>&1; then echo 'opencode binary missing from Daytona runtime; bake it into the snapshot image and expose it on PATH' >&2; exit 1; fi",
  ].join("; ")
  // The `openwork serve` invocation with its env vars inlined before the
  // command, all joined into one shell line.
  const openworkServe = [
    "OPENWORK_DATA_DIR=",
    shellQuote(env.daytona.runtimeDataPath),
    " OPENWORK_SIDECAR_DIR=",
    shellQuote(env.daytona.sidecarDir),
    " OPENWORK_TOKEN=",
    shellQuote(input.clientToken),
    " OPENWORK_HOST_TOKEN=",
    shellQuote(input.hostToken),
    " openwork serve",
    ` --workspace ${shellQuote(env.daytona.runtimeWorkspacePath)}`,
    ` --openwork-host 0.0.0.0`,
    ` --openwork-port ${env.daytona.openworkPort}`,
    ` --opencode-host 127.0.0.1`,
    ` --opencode-port ${env.daytona.opencodePort}`,
    ` --connect-host 127.0.0.1`,
    ` --cors '*'`,
    ` --approval manual`,
    ` --allow-external`,
    ` --opencode-source external`,
    ` --opencode-bin $(command -v opencode)`,
    ` --no-opencode-router`,
    ` --verbose`,
  ].join("")
  // Full startup script: create the mount and runtime directories, symlink
  // the volume mounts into the runtime workspace, run the install step, then
  // retry `openwork serve` up to 3 times with a 3-second backoff.
  const script = `
set -u
mkdir -p ${shellQuote(env.daytona.workspaceMountPath)} ${shellQuote(env.daytona.dataMountPath)} ${shellQuote(env.daytona.runtimeWorkspacePath)} ${shellQuote(env.daytona.runtimeDataPath)} ${shellQuote(env.daytona.sidecarDir)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes`)}
ln -sfn ${shellQuote(env.daytona.workspaceMountPath)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes/workspace`) }
ln -sfn ${shellQuote(env.daytona.dataMountPath)} ${shellQuote(`${env.daytona.runtimeWorkspacePath}/volumes/data`) }
${installStep}
attempt=0
while [ "$attempt" -lt 3 ]; do
attempt=$((attempt + 1))
if ${openworkServe}; then
exit 0
fi
status=$?
echo "openwork serve failed (attempt $attempt, exit $status); retrying in 3s"
sleep 3
done
exit 1
`.trim()
  // Run via a login shell so PATH/profile setup applies inside the sandbox.
  return `sh -lc ${shellQuote(script)}`
}
/**
 * Poll Daytona until the named volume reports state "ready".
 *
 * Fix: the original checked the timeout before the first poll, so a
 * non-positive timeoutMs threw without ever querying the volume. This
 * version always checks at least once, then re-polls every
 * env.daytona.pollIntervalMs until timeoutMs has elapsed.
 *
 * @throws Error when the volume does not become ready in time.
 */
async function waitForVolumeReady(daytona: Daytona, name: string, timeoutMs: number) {
  const startedAt = Date.now()
  do {
    const volume = await daytona.volume.get(name)
    if (volume.state === "ready") {
      return volume
    }
    await sleep(env.daytona.pollIntervalMs)
  } while (Date.now() - startedAt < timeoutMs)
  throw new Error(`Timed out waiting for Daytona volume ${name} to become ready`)
}
// Wait until the worker's /health endpoint answers OK through the preview URL.
// While polling, also inspect the openwork session command: if it has exited
// with a non-zero code, fail immediately with the tail of its stdout/stderr
// (last 4000 chars each) instead of waiting out the full timeout.
// On timeout, throws with whatever logs can still be fetched (best effort).
async function waitForHealth(url: string, timeoutMs: number, sandbox: Sandbox, sessionId: string, commandId: string) {
  const startedAt = Date.now()
  while (Date.now() - startedAt < timeoutMs) {
    try {
      const response = await fetch(`${url.replace(/\/$/, "")}/health`, { method: "GET" })
      if (response.ok) {
        return
      }
    } catch {
      // ignore transient startup failures
    }
    try {
      const command = await sandbox.process.getSessionCommand(sessionId, commandId)
      if (typeof command.exitCode === "number" && command.exitCode !== 0) {
        const logs = await sandbox.process.getSessionCommandLogs(sessionId, commandId)
        throw new Error(
          [
            `openwork session exited with ${command.exitCode}`,
            logs.stdout?.trim() ? `stdout:\n${logs.stdout.trim().slice(-4000)}` : "",
            logs.stderr?.trim() ? `stderr:\n${logs.stderr.trim().slice(-4000)}` : "",
          ]
            .filter(Boolean)
            .join("\n\n"),
        )
      }
    } catch (error) {
      // Re-throw only our own "session exited" error; swallow SDK lookup
      // failures so a flaky status call cannot abort the health wait.
      if (error instanceof Error && error.message.startsWith("openwork session exited")) {
        throw error
      }
    }
    await sleep(env.daytona.pollIntervalMs)
  }
  // Timed out: attach session logs to the error if they are still fetchable.
  const logs = await sandbox.process.getSessionCommandLogs(sessionId, commandId).catch(
    () => null,
  )
  throw new Error(
    [
      `Timed out waiting for Daytona worker health at ${url.replace(/\/$/, "")}/health`,
      logs?.stdout?.trim() ? `stdout:\n${logs.stdout.trim().slice(-4000)}` : "",
      logs?.stderr?.trim() ? `stderr:\n${logs.stderr.trim().slice(-4000)}` : "",
    ]
      .filter(Boolean)
      .join("\n\n"),
  )
}
// Insert-or-update the Daytona sandbox record for a worker.
// NOTE(review): this is a read-then-write upsert, not atomic — two concurrent
// provisions of the same worker could both miss the select and double-insert;
// confirm a unique index on worker_id or serialize callers.
async function upsertDaytonaSandbox(input: {
  workerId: WorkerId
  sandboxId: string
  workspaceVolumeId: string
  dataVolumeId: string
  signedPreviewUrl: string
  signedPreviewUrlExpiresAt: Date
  region: string | null
}) {
  const existing = await db
    .select({ id: DaytonaSandboxTable.id })
    .from(DaytonaSandboxTable)
    .where(eq(DaytonaSandboxTable.worker_id, input.workerId))
    .limit(1)
  if (existing.length > 0) {
    // Row exists: refresh every mutable column in place.
    await db
      .update(DaytonaSandboxTable)
      .set({
        sandbox_id: input.sandboxId,
        workspace_volume_id: input.workspaceVolumeId,
        data_volume_id: input.dataVolumeId,
        signed_preview_url: input.signedPreviewUrl,
        signed_preview_url_expires_at: input.signedPreviewUrlExpiresAt,
        region: input.region,
      })
      .where(eq(DaytonaSandboxTable.worker_id, input.workerId))
    return
  }
  // First sighting of this worker: create the row with a typed internal id.
  await db.insert(DaytonaSandboxTable).values({
    id: createDenTypeId("daytonaSandbox"),
    worker_id: input.workerId,
    sandbox_id: input.sandboxId,
    workspace_volume_id: input.workspaceVolumeId,
    data_volume_id: input.dataVolumeId,
    signed_preview_url: input.signedPreviewUrl,
    signed_preview_url_expires_at: input.signedPreviewUrlExpiresAt,
    region: input.region,
  })
}
/** Load the Daytona sandbox row for a worker, or null when none exists. */
export async function getDaytonaSandboxRecord(workerId: WorkerId) {
  const [record] = await db
    .select()
    .from(DaytonaSandboxTable)
    .where(eq(DaytonaSandboxTable.worker_id, workerId))
    .limit(1)
  return record ?? null
}
// Mint a fresh signed preview URL for a worker's sandbox and persist it.
// Returns the updated record shape without re-querying the database, or
// null when the worker has no sandbox record.
export async function refreshDaytonaSignedPreview(workerId: WorkerId) {
  assertDaytonaConfig()
  const record = await getDaytonaSandboxRecord(workerId)
  if (!record) {
    return null
  }
  const daytona = createDaytonaClient()
  const sandbox = await daytona.get(record.sandbox_id)
  await sandbox.refreshData()
  const expiresInSeconds = normalizedSignedPreviewExpirySeconds()
  const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds)
  // The stored "expires at" is actually the refresh deadline: real expiry
  // minus the lead time (see signedPreviewRefreshAt), so readers renew early.
  const expiresAt = signedPreviewRefreshAt(expiresInSeconds)
  await db
    .update(DaytonaSandboxTable)
    .set({
      signed_preview_url: preview.url,
      signed_preview_url_expires_at: expiresAt,
      region: sandbox.target,
    })
    .where(eq(DaytonaSandboxTable.worker_id, workerId))
  // Merge the fresh values over the previously loaded record.
  return {
    ...record,
    signed_preview_url: preview.url,
    signed_preview_url_expires_at: expiresAt,
    region: sandbox.target,
  }
}
/**
 * Signed preview URL for the worker proxy: reuse the cached URL while it is
 * still before its refresh deadline, otherwise mint a new one. Returns null
 * when the worker has no sandbox or refreshing fails to produce a URL.
 */
export async function getDaytonaSignedPreviewForProxy(workerId: WorkerId) {
  const record = await getDaytonaSandboxRecord(workerId)
  if (!record) {
    return null
  }
  const stillFresh = record.signed_preview_url_expires_at.getTime() > Date.now()
  if (stillFresh) {
    return record.signed_preview_url
  }
  const refreshed = await refreshDaytonaSignedPreview(workerId)
  return refreshed?.signed_preview_url ?? null
}
/**
 * Provision a Daytona sandbox for a worker: ensure per-worker volumes exist,
 * create the sandbox (from a prebuilt snapshot when configured, otherwise from
 * an image with explicit resources), start openwork inside a session, wait for
 * the health check, and persist the sandbox record. On any failure the sandbox
 * and both volumes are deleted best-effort before rethrowing.
 */
export async function provisionWorkerOnDaytona(
  input: ProvisionInput,
): Promise<ProvisionedInstance> {
  assertDaytonaConfig()
  const daytona = createDaytonaClient()
  const labels = sandboxLabels(input.workerId)
  const workspaceVolumeNameValue = workspaceVolumeName(input.workerId)
  const dataVolumeNameValue = dataVolumeName(input.workerId)
  // NOTE(review): the second argument presumably means "create if missing" —
  // confirm against the Daytona SDK; waitForVolumeReady below relies on it.
  await daytona.volume.get(workspaceVolumeNameValue, true)
  await daytona.volume.get(dataVolumeNameValue, true)
  const workspaceVolume = await waitForVolumeReady(
    daytona,
    workspaceVolumeNameValue,
    env.daytona.createTimeoutSeconds * 1000,
  )
  const dataVolume = await waitForVolumeReady(
    daytona,
    dataVolumeNameValue,
    env.daytona.createTimeoutSeconds * 1000,
  )
  let sandbox: Awaited<ReturnType<typeof daytona.create>> | null = null
  try {
    // Options shared by both create paths; the branches below add only what
    // actually differs (snapshot vs image+resources).
    const baseParams = {
      name: sandboxName(input),
      autoStopInterval: env.daytona.autoStopInterval,
      autoArchiveInterval: env.daytona.autoArchiveInterval,
      autoDeleteInterval: env.daytona.autoDeleteInterval,
      public: env.daytona.public,
      labels,
      envVars: {
        DEN_WORKER_ID: input.workerId,
      },
      volumes: [
        {
          volumeId: workspaceVolume.id,
          mountPath: env.daytona.workspaceMountPath,
        },
        {
          volumeId: dataVolume.id,
          mountPath: env.daytona.dataMountPath,
        },
      ],
    }
    const createOptions = { timeout: env.daytona.createTimeoutSeconds }
    sandbox = env.daytona.snapshot
      ? await daytona.create(
          {
            ...baseParams,
            snapshot: env.daytona.snapshot,
          },
          createOptions,
        )
      : await daytona.create(
          {
            ...baseParams,
            image: env.daytona.image,
            resources: {
              cpu: env.daytona.resources.cpu,
              memory: env.daytona.resources.memory,
              disk: env.daytona.resources.disk,
            },
          },
          createOptions,
        )
    const sessionId = `openwork-${workerHint(input.workerId)}`
    await sandbox.process.createSession(sessionId)
    // runAsync: the command keeps running; we poll health below instead.
    const command = await sandbox.process.executeSessionCommand(
      sessionId,
      {
        command: buildOpenWorkStartCommand(input),
        runAsync: true,
      },
      0,
    )
    const expiresInSeconds = normalizedSignedPreviewExpirySeconds()
    const preview = await sandbox.getSignedPreviewUrl(env.daytona.openworkPort, expiresInSeconds)
    await waitForHealth(preview.url, env.daytona.healthcheckTimeoutMs, sandbox, sessionId, command.cmdId)
    await upsertDaytonaSandbox({
      workerId: input.workerId,
      sandboxId: sandbox.id,
      workspaceVolumeId: workspaceVolume.id,
      dataVolumeId: dataVolume.id,
      signedPreviewUrl: preview.url,
      signedPreviewUrlExpiresAt: signedPreviewRefreshAt(expiresInSeconds),
      region: sandbox.target ?? null,
    })
    // Callers reach the sandbox through the worker proxy, not the signed URL.
    return {
      provider: "daytona",
      url: workerProxyUrl(input.workerId),
      status: "healthy",
      region: sandbox.target,
    }
  } catch (error) {
    // Best-effort cleanup so a failed provision leaves no orphaned resources.
    if (sandbox) {
      await sandbox.delete(env.daytona.deleteTimeoutSeconds).catch(() => {})
    }
    await daytona.volume.delete(workspaceVolume).catch(() => {})
    await daytona.volume.delete(dataVolume).catch(() => {})
    throw error
  }
}
/**
 * Tear down the Daytona sandbox and per-worker volumes for a worker.
 * Prefers the DB record (exact sandbox/volume ids); falls back to looking up
 * sandboxes by label and volumes by name when no record exists. All deletions
 * are best-effort: failures are logged, never thrown.
 */
export async function deprovisionWorkerOnDaytona(workerId: WorkerId) {
  assertDaytonaConfig()
  const daytona = createDaytonaClient()
  const record = await getDaytonaSandboxRecord(workerId)
  if (record) {
    try {
      const sandbox = await daytona.get(record.sandbox_id)
      await sandbox.delete(env.daytona.deleteTimeoutSeconds)
    } catch (error) {
      const message = error instanceof Error ? error.message : "unknown_error"
      console.warn(`[provisioner] failed to delete Daytona sandbox ${record.sandbox_id}: ${message}`)
    }
    const volumes = await daytona.volume.list().catch(() => [])
    for (const volumeId of [record.workspace_volume_id, record.data_volume_id]) {
      const volume = volumes.find((entry) => entry.id === volumeId)
      if (!volume) {
        continue
      }
      await daytona.volume.delete(volume).catch((error) => {
        const message = error instanceof Error ? error.message : "unknown_error"
        console.warn(`[provisioner] failed to delete Daytona volume ${volumeId}: ${message}`)
      })
    }
    return
  }
  // No DB record: fall back to label/name lookups.
  const sandboxes = await daytona.list(sandboxLabels(workerId), 1, 20)
  for (const sandbox of sandboxes.items) {
    await sandbox.delete(env.daytona.deleteTimeoutSeconds).catch((error) => {
      const message = error instanceof Error ? error.message : "unknown_error"
      console.warn(`[provisioner] failed to delete Daytona sandbox ${sandbox.id}: ${message}`)
    })
  }
  // Fix: mirror the record path's best-effort list(). Previously a listing
  // failure here rejected the whole deprovision AFTER sandboxes were deleted.
  const volumes = await daytona.volume.list().catch(() => [])
  for (const name of [workspaceVolumeName(workerId), dataVolumeName(workerId)]) {
    const volume = volumes.find((entry) => entry.name === name)
    if (!volume) {
      continue
    }
    await daytona.volume.delete(volume).catch((error) => {
      const message = error instanceof Error ? error.message : "unknown_error"
      console.warn(`[provisioner] failed to delete Daytona volume ${name}: ${message}`)
    })
  }
}

View File

@@ -1,11 +1,18 @@
 import { env } from "../env.js";
 import { WorkerTable } from "../db/schema.js";
+import {
+  deprovisionWorkerOnDaytona,
+  provisionWorkerOnDaytona,
+} from "./daytona.js";
+import {
+  customDomainForWorker,
+  ensureVercelDnsRecord,
+} from "./vanity-domain.js";
+type WorkerId = typeof WorkerTable.$inferSelect.id;
 export type ProvisionInput = {
-  workerId: string;
+  workerId: WorkerId;
   name: string;
   hostToken: string;
   clientToken: string;
@@ -331,6 +338,10 @@ export async function provisionWorker(
     return provisionWorkerOnRender(input);
   }
+  if (env.provisionerMode === "daytona") {
+    return provisionWorkerOnDaytona(input);
+  }
   const template = env.workerUrlTemplate ?? "https://workers.local/{workerId}";
   const url = template.replace("{workerId}", input.workerId);
   return {
@@ -341,9 +352,14 @@ export async function provisionWorker(
 }
 export async function deprovisionWorker(input: {
-  workerId: string;
+  workerId: WorkerId;
   instanceUrl: string | null;
 }) {
+  if (env.provisionerMode === "daytona") {
+    await deprovisionWorkerOnDaytona(input.workerId);
+    return;
+  }
   if (env.provisionerMode !== "render") {
     return;
   }