mirror of
https://github.com/different-ai/openwork
synced 2026-04-25 17:15:34 +02:00
* feat(den): add daytona-backed docker dev flow * fix(den): allow multiple cloud workers in dev * fix(den): use Daytona snapshots for sandbox runtime Use a prebuilt Daytona snapshot for the dev worker runtime so sandboxes start with openwork and opencode already installed. Pass the snapshot through the local Docker flow and add a helper to build the snapshot image for repeatable setup. * chore(den): lower Daytona snapshot defaults Reduce the default snapshot footprint to 1 CPU and 2GB RAM so local Daytona worker testing fits smaller org limits more easily. * Omar is comfortable Make Daytona-backed cloud workers stable enough to reconnect through a dedicated proxy instead of persisting expiring signed preview URLs. Split the proxy into its own deployable service, share Den schema access through a common package, and fix the web badge so healthy workers show ready. * chore(den-db): add Drizzle package scripts Move the shared schema package toward owning its own migration workflow by adding generate and migrate commands plus a local Drizzle config. * chore: update lockfile Refresh the workspace lockfile so the new den-db Drizzle tooling is captured in pnpm-lock.yaml. * feat(den-worker-proxy): make Vercel deployment-ready Align the proxy service with Vercel's Hono runtime entry pattern and keep a separate Node server entry for Docker/local runs. Also scaffold the Vercel project/env setup and wire Render deploy sync to pass Daytona variables needed for daytona mode. * feat(den-db): add db mode switch for PlanetScale Support DB_MODE=planetscale with Drizzle's PlanetScale serverless driver while keeping mysql2 as the local default. This lets Vercel-hosted services use HTTP database access without changing local development workflows. * refactor(den-db): adopt shared TypeID ids Move the Den TypeID system into a shared utils package and use it across auth, org, worker, and sandbox records so fresh databases get one consistent internal ID format. 
Wire Better Auth into the same generator and update Den request boundaries to normalize typed ids cleanly. * fix(den): restore docker dev stack after refactor Include the shared utils package in the Den Docker images, expose MySQL to the host for local inspection, and fix the remaining Den build/runtime issues surfaced by the Docker path after the shared package and TypeID changes. * docs(den): document Daytona snapshot setup Add README guidance for building and publishing the prebuilt Daytona runtime snapshot, including the helper script, required env, and how to point Den at the snapshot for local Daytona mode. * refactor(den-db): reset migrations and load env files Replace the old Den SQL migration history with a fresh baseline for the current schema, and let Drizzle commands load database credentials from env files. Default to mysql when DATABASE_URL is present and otherwise use PlanetScale credentials so local Docker and hosted environments can share the same DB package cleanly. * fix(den): prepare manual PlanetScale deploys Update the Render workflow and Docker build path for the shared workspace packages, support PlanetScale credentials in the manual SQL migration runner, and stop auto-running DB migrations on Den startup so schema changes stay manual. * feat(den-v2): add Daytona-first control plane Create a new den-v2 service from the current Daytona-enabled control plane, default it to Daytona provisioning, and add a dedicated Render deployment workflow targeting the new v2 Render service. * feat(den-worker-proxy): redirect root to landing Send root proxy traffic to openworklabs.com so direct visits to the worker proxy domain do not hit worker-resolution errors. --------- Co-authored-by: OmarMcAdam <gh@mcadam.io>
341 lines
8.9 KiB
JavaScript
341 lines
8.9 KiB
JavaScript
import { randomUUID } from "node:crypto";
|
|
import { once } from "node:events";
|
|
import net from "node:net";
|
|
import { dirname, resolve } from "node:path";
|
|
import { fileURLToPath } from "node:url";
|
|
import { setTimeout as delay } from "node:timers/promises";
|
|
import { spawn } from "node:child_process";
|
|
import mysql from "mysql2/promise";
|
|
|
|
// Absolute directory of this script (ESM has no builtin __dirname).
const __dirname = dirname(fileURLToPath(import.meta.url));
// Service root: one level above this script's directory — presumably the
// script lives in a scripts/ or test/ subfolder of the Den service. All
// spawned commands run from here so pnpm/docker resolve correctly.
const serviceDir = resolve(__dirname, "..");
|
|
|
|
/**
 * Write a single progress line to stdout.
 * Uses process.stdout.write directly so output stays a plain, unformatted line.
 * @param {string} message - text to emit (newline is appended here)
 */
function log(message) {
  const line = `${message}\n`;
  process.stdout.write(line);
}
|
|
|
|
/**
 * Report a fatal test failure and terminate the process with exit code 1.
 * @param {string} message - human-readable failure description
 * @param {*} [detail] - optional payload (e.g. response body) logged alongside
 */
function fail(message, detail) {
  // Only pass `detail` through when the caller supplied one, so console.error
  // does not print a trailing "undefined".
  const errorArgs = detail === undefined ? [message] : [message, detail];
  console.error(...errorArgs);
  process.exit(1);
}
|
|
|
|
/**
 * Reserve a free TCP port on the loopback interface.
 * Binds an ephemeral server to port 0 (the OS picks a free port), reads the
 * assigned port, then releases it before resolving.
 * @returns {Promise<number>} an available port number
 */
async function getFreePort() {
  return await new Promise((resolvePort, reject) => {
    const probe = net.createServer();
    probe.on("error", reject);
    probe.listen(0, "127.0.0.1", () => {
      const address = probe.address();
      // address() returns a string for pipe/IPC servers; a TCP listener
      // should always yield an AddressInfo object.
      if (!address || typeof address === "string") {
        reject(new Error("failed_to_resolve_free_port"));
        return;
      }
      const freePort = address.port;
      // Close first so the port is actually available to the caller.
      probe.close((closeError) => {
        if (closeError) {
          reject(closeError);
        } else {
          resolvePort(freePort);
        }
      });
    });
  });
}
|
|
|
|
/**
 * Spawn a child process rooted at the service directory.
 * Defaults to piped stdio and the current environment; any key in `options`
 * (cwd, env, stdio, ...) overrides the default of the same name.
 * @param {string} command - executable to run
 * @param {string[]} args - command arguments
 * @param {object} [options] - extra child_process.spawn options
 * @returns {import("node:child_process").ChildProcess}
 */
function spawnCommand(command, args, options = {}) {
  const spawnOptions = {
    cwd: serviceDir,
    env: process.env,
    stdio: "pipe",
    ...options,
  };
  return spawn(command, args, spawnOptions);
}
|
|
|
|
/**
 * Run a command to completion, capturing stdout and stderr.
 * @param {string} command - executable to run
 * @param {string[]} args - command arguments
 * @param {object} [options] - extra spawn options forwarded to spawnCommand
 * @returns {Promise<{stdout: string, stderr: string}>} captured output
 * @throws {Error} when the process exits non-zero (message includes output)
 */
async function runCommand(command, args, options = {}) {
  const child = spawnCommand(command, args, options);
  let stdout = "";
  let stderr = "";
  child.stdout?.on("data", (chunk) => {
    stdout += chunk.toString();
  });
  child.stderr?.on("data", (chunk) => {
    stderr += chunk.toString();
  });
  // Wait for "close" rather than "exit": "exit" can fire while stdio data is
  // still buffered, so the error message could miss trailing output. "close"
  // only fires once both stdio streams have ended.
  const [code] = await once(child, "close");
  if (code !== 0) {
    throw new Error(`${command} ${args.join(" ")} failed\nSTDOUT:\n${stdout}\nSTDERR:\n${stderr}`);
  }
  return { stdout, stderr };
}
|
|
|
|
/**
 * Poll MySQL until it accepts connections and answers a trivial query.
 * @param {string} databaseUrl - mysql:// connection URL
 * @param {number} [attempts=60] - max probe attempts, ~1s apart
 * @throws {Error} "mysql_not_ready" when all attempts fail
 */
async function waitForMysqlConnection(databaseUrl, attempts = 60) {
  for (let index = 0; index < attempts; index += 1) {
    let connection = null;
    try {
      connection = await mysql.createConnection(databaseUrl);
      await connection.query("SELECT 1");
      await connection.end();
      return;
    } catch {
      // Close any half-open connection so failed probes don't leak sockets
      // (the original leaked when query() threw after a successful connect).
      if (connection) {
        await connection.end().catch(() => {});
      }
      // Don't sleep after the final attempt — fail immediately instead.
      if (index < attempts - 1) {
        await delay(1000);
      }
    }
  }

  throw new Error("mysql_not_ready");
}
|
|
|
|
/**
 * Poll an HTTP endpoint until it responds with a 2xx status.
 * Network errors and non-2xx responses both count as "not ready yet".
 * @param {string} url - URL to probe
 * @param {number} [attempts=60] - max probe attempts, ~500ms apart
 * @throws {Error} `http_not_ready:<url>` when all attempts fail
 */
async function waitForHttp(url, attempts = 60) {
  for (let index = 0; index < attempts; index += 1) {
    try {
      const response = await fetch(url);
      if (response.ok) {
        return;
      }
    } catch {
      // ignore until retries are exhausted
    }
    // Don't sleep after the final attempt — surface the failure immediately.
    if (index < attempts - 1) {
      await delay(500);
    }
  }

  throw new Error(`http_not_ready:${url}`);
}
|
|
|
|
/**
 * Extract a bearer token from an auth response payload.
 * Accepts either a top-level `token` or a nested `session.token`.
 * Blank / whitespace-only tokens are treated as absent on BOTH paths —
 * the original only applied that check to the top-level token and could
 * return an empty session token.
 * @param {*} payload - parsed response body (any shape)
 * @returns {string|null} the token, or null when none is present
 */
function extractAuthToken(payload) {
  if (!payload || typeof payload !== "object") {
    return null;
  }

  const direct = payload.token;
  if (typeof direct === "string" && direct.trim()) {
    return direct;
  }

  const session = payload.session;
  if (
    session &&
    typeof session === "object" &&
    typeof session.token === "string" &&
    session.token.trim()
  ) {
    return session.token;
  }

  return null;
}
|
|
|
|
/**
 * Issue a JSON request against the service under test.
 * Sets Origin/Referer to satisfy CORS checks, serializes `body` as JSON when
 * present, and attaches bearer-token and/or cookie auth when supplied.
 * The response body is best-effort parsed: JSON when possible, otherwise the
 * raw text, otherwise null for an empty body.
 * @param {string} baseUrl - service origin, e.g. http://127.0.0.1:3000
 * @param {string} path - request path appended to baseUrl
 * @param {{method?: string, body?: *, token?: string, cookie?: string}} [opts]
 * @returns {Promise<{response: Response, payload: *, cookie: string|null}>}
 */
async function requestJson(baseUrl, path, { method = "GET", body, token, cookie } = {}) {
  const origin = new URL(baseUrl).origin;
  const headers = new Headers({
    Accept: "application/json",
    Origin: origin,
    Referer: `${origin}/`,
  });
  if (body !== undefined) {
    headers.set("Content-Type", "application/json");
  }
  if (token) {
    headers.set("Authorization", `Bearer ${token}`);
  }
  if (cookie) {
    headers.set("Cookie", cookie);
  }

  const requestInit = { method, headers };
  if (body !== undefined) {
    requestInit.body = JSON.stringify(body);
  }

  const response = await fetch(`${baseUrl}${path}`, requestInit);

  const text = await response.text();
  let payload = null;
  if (text) {
    try {
      payload = JSON.parse(text);
    } catch {
      // Non-JSON body (e.g. a plain-text error page) is kept verbatim.
      payload = text;
    }
  }

  return {
    response,
    payload,
    cookie: response.headers.get("set-cookie"),
  };
}
|
|
|
|
/**
 * End-to-end flow for the Den dev-mode worker limit:
 * boot a disposable MySQL container and the Den service, then exercise
 * signup, session lookup, the disabled billing gate, and creation of TWO
 * cloud workers (dev mode must allow more than one).
 * Exits non-zero via fail() on any assertion failure; always cleans up.
 */
async function main() {
  // Dynamic ports so parallel runs (or leftover services) don't collide.
  const mysqlPort = await getFreePort();
  const appPort = await getFreePort();
  // Random suffix keeps container names unique across concurrent runs.
  const containerName = `openwork-den-e2e-${randomUUID().slice(0, 8)}`;
  const dbName = "openwork_den_e2e";
  const dbPassword = "openwork-root";
  const baseUrl = `http://127.0.0.1:${appPort}`;
  const databaseUrl = `mysql://root:${dbPassword}@127.0.0.1:${mysqlPort}/${dbName}`;
  // Environment for the service under test: stub provisioner and disabled
  // Polar feature gate keep the run hermetic (no external services).
  const env = {
    ...process.env,
    DATABASE_URL: databaseUrl,
    BETTER_AUTH_SECRET: "openwork-den-e2e-secret-000000000000",
    BETTER_AUTH_URL: baseUrl,
    PORT: String(appPort),
    OPENWORK_DEV_MODE: "1",
    CORS_ORIGINS: baseUrl,
    PROVISIONER_MODE: "stub",
    WORKER_URL_TEMPLATE: "https://workers.example.com/{workerId}",
    POLAR_FEATURE_GATE_ENABLED: "false",
  };

  let serviceProcess = null;

  // Best-effort teardown of the Den process and the MySQL container;
  // errors during cleanup are deliberately swallowed.
  const cleanup = async () => {
    if (serviceProcess && !serviceProcess.killed) {
      serviceProcess.kill("SIGINT");
      await once(serviceProcess, "exit").catch(() => {});
    }

    await runCommand("docker", ["rm", "-f", containerName], { cwd: serviceDir }).catch(() => {});
  };

  // Ctrl-C mid-run still removes the container (130 = 128 + SIGINT).
  process.on("SIGINT", async () => {
    await cleanup();
    process.exit(130);
  });

  try {
    log("Starting disposable MySQL container...");
    // --rm makes Docker delete the container once it stops.
    await runCommand("docker", [
      "run",
      "-d",
      "--rm",
      "--name",
      containerName,
      "-e",
      `MYSQL_ROOT_PASSWORD=${dbPassword}`,
      "-e",
      `MYSQL_DATABASE=${dbName}`,
      "-p",
      `${mysqlPort}:3306`,
      "mysql:8.4",
    ]);

    log("Waiting for MySQL...");
    await waitForMysqlConnection(databaseUrl);

    log("Running Den migrations...");
    await runCommand("pnpm", ["db:migrate"], { cwd: serviceDir, env });

    log("Starting Den service...");
    // Run the TypeScript entry point directly via tsx; kept on piped stdio
    // so output can be buffered and replayed only on failure.
    serviceProcess = spawn("pnpm", ["exec", "tsx", "src/index.ts"], {
      cwd: serviceDir,
      env,
      stdio: "pipe",
    });

    // Combined stdout/stderr buffer, dumped if the service dies abnormally.
    let serviceOutput = "";
    serviceProcess.stdout?.on("data", (chunk) => {
      serviceOutput += chunk.toString();
    });
    serviceProcess.stderr?.on("data", (chunk) => {
      serviceOutput += chunk.toString();
    });

    serviceProcess.on("exit", (code) => {
      if (code !== 0) {
        console.error(serviceOutput);
      }
    });

    await waitForHttp(`${baseUrl}/health`);

    // Timestamped email keeps reruns from hitting duplicate-account errors.
    const email = `den-e2e-${Date.now()}@example.com`;
    const password = "TestPass123!";

    log("Creating account...");
    const signup = await requestJson(baseUrl, "/api/auth/sign-up/email", {
      method: "POST",
      body: {
        name: "Den E2E",
        email,
        password,
      },
    });

    if (!signup.response.ok) {
      fail("Signup failed", signup.payload);
    }

    // Auth may come back as a bearer token, a session cookie, or both;
    // subsequent requests forward whichever is available.
    const token = extractAuthToken(signup.payload);
    const cookie = signup.cookie;
    if (!token && !cookie) {
      fail("Signup did not return a bearer token or session cookie", signup.payload);
    }

    log("Validating authenticated session...");
    const me = await requestJson(baseUrl, "/v1/me", { token, cookie });
    if (!me.response.ok) {
      fail("Session lookup failed", me.payload);
    }

    log("Checking billing summary is disabled...");
    const billing = await requestJson(baseUrl, "/v1/workers/billing", { token, cookie });
    if (!billing.response.ok) {
      fail("Billing summary request failed", billing.payload);
    }

    // With POLAR_FEATURE_GATE_ENABLED=false the gate must be fully off:
    // no feature gate, no checkout requirement, no checkout URL.
    if (
      !billing.payload?.billing ||
      billing.payload.billing.featureGateEnabled !== false ||
      billing.payload.billing.checkoutRequired !== false ||
      billing.payload.billing.checkoutUrl !== null
    ) {
      fail("Billing summary should be disabled for the experiment", billing.payload);
    }

    log("Creating first cloud worker...");
    const firstWorker = await requestJson(baseUrl, "/v1/workers", {
      method: "POST",
      token,
      cookie,
      body: {
        name: "first-worker",
        destination: "cloud",
      },
    });

    // 202 Accepted: provisioning is kicked off asynchronously.
    if (firstWorker.response.status !== 202) {
      fail("First worker did not launch successfully", {
        status: firstWorker.response.status,
        payload: firstWorker.payload,
      });
    }

    log("Attempting second cloud worker...");
    const secondWorker = await requestJson(baseUrl, "/v1/workers", {
      method: "POST",
      token,
      cookie,
      body: {
        name: "second-worker",
        destination: "cloud",
      },
    });

    // The actual behavior under test: dev mode must lift the usual
    // single-cloud-worker limit, so the second create must also be accepted.
    if (secondWorker.response.status !== 202) {
      fail("Second worker should be allowed in dev mode", {
        status: secondWorker.response.status,
        payload: secondWorker.payload,
      });
    }

    if (!secondWorker.payload?.worker?.id) {
      fail("Second worker did not return a worker payload", secondWorker.payload);
    }

    log("Listing workers...");
    const workers = await requestJson(baseUrl, "/v1/workers?limit=20", { token, cookie });
    if (!workers.response.ok) {
      fail("Worker list request failed", workers.payload);
    }

    // Both workers must show up in the list endpoint.
    const items = Array.isArray(workers.payload?.workers) ? workers.payload.workers : null;
    if (!items || items.length !== 2) {
      fail("Expected two cloud workers in dev mode", workers.payload);
    }

    log("E2E dev worker limit check passed.");
  } finally {
    await cleanup();
  }
}
|
|
|
|
// Top-level entry point: route any unhandled failure through fail() so the
// process exits non-zero with a readable message instead of a raw rejection.
try {
  await main();
} catch (error) {
  fail(error instanceof Error ? error.message : String(error));
}
|