Mirror of https://github.com/paperclipai/paperclip (synced 2026-05-06 07:02:11 +02:00)

Compare commits: pap-2115-m ... codex/pap- (247 commits)
@@ -177,8 +177,12 @@ real name or email). To find GitHub usernames:

**Never expose contributor email addresses.** Use `@username` only.

Exclude bot accounts (e.g. `lockfile-bot`, `dependabot`) from the list. List contributors
in alphabetical order by GitHub username (case-insensitive).
Exclude bot accounts (e.g. `lockfile-bot`, `dependabot`) from the list.
Exclude Paperclip founders from the list (e.g. `cryppadotta`, `forgottendev`, `devinfoley`, `sockmonster`, `scotttong`)

List contributors in alphabetical order by GitHub username (case-insensitive).

If there are no contributors left after exclusions, then just skip this section and don't mention it.

## Step 6 — Review Before Release
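One practical way to gather the username list is the GitHub contributors API; the sketch below is illustrative only (it assumes an authenticated `gh` CLI), and the bot and founder exclusions above still have to be applied before publishing.

```sh
# Illustrative sketch: list contributor logins for the release notes.
# Apply the bot/founder exclusions from the checklist above afterwards.
gh api repos/paperclipai/paperclip/contributors --paginate --jq '.[].login' \
  | grep -viE 'bot' \
  | sort -fu
```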
.github/workflows/pr.yml (42 changes, vendored)
@@ -41,44 +41,7 @@ jobs:
node-version: 24

- name: Validate Dockerfile deps stage
run: |
missing=0

# Extract only the deps stage from the Dockerfile
deps_stage="$(awk '/^FROM .* AS deps$/{found=1; next} found && /^FROM /{exit} found{print}' Dockerfile)"

if [ -z "$deps_stage" ]; then
echo "::error::Could not extract deps stage from Dockerfile (expected 'FROM ... AS deps')"
exit 1
fi

# Derive workspace search roots from pnpm-workspace.yaml (exclude dev-only packages)
search_roots="$(grep '^ *- ' pnpm-workspace.yaml | sed 's/^ *- //' | sed 's/\*$//' | grep -v 'examples' | grep -v 'create-paperclip-plugin' | tr '\n' ' ')"

if [ -z "$search_roots" ]; then
echo "::error::Could not derive workspace roots from pnpm-workspace.yaml"
exit 1
fi

# Check all workspace package.json files are copied in the deps stage
for pkg in $(find $search_roots -maxdepth 2 -name package.json -not -path '*/examples/*' -not -path '*/create-paperclip-plugin/*' -not -path '*/node_modules/*' 2>/dev/null | sort -u); do
dir="$(dirname "$pkg")"
if ! echo "$deps_stage" | grep -q "^COPY ${dir}/package.json"; then
echo "::error::Dockerfile deps stage missing: COPY ${pkg} ${dir}/"
missing=1
fi
done

# Check patches directory is copied if it exists
if [ -d patches ] && ! echo "$deps_stage" | grep -q '^COPY patches/'; then
echo "::error::Dockerfile deps stage missing: COPY patches/ patches/"
missing=1
fi

if [ "$missing" -eq 1 ]; then
echo "Dockerfile deps stage is out of sync. Update it to include the missing files."
exit 1
fi
run: node ./scripts/check-docker-deps-stage.mjs

- name: Validate dependency resolution when manifests change
run: |
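The inline validation above now lives in a standalone script, so it can presumably also be run outside CI when this check fails; a minimal sketch, assuming Node is available and the script is committed at the path referenced in the step:

```sh
# Sketch: run the Dockerfile deps-stage check locally before pushing
node ./scripts/check-docker-deps-stage.mjs
```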
@@ -117,6 +80,9 @@ jobs:
- name: Run tests
run: pnpm test:run

- name: Verify release registry test coverage
run: pnpm run test:release-registry

- name: Build
run: pnpm build
.gitignore (1 change, vendored)
@@ -3,6 +3,7 @@ node_modules/
**/node_modules
**/node_modules/
dist/
ui/storybook-static/
.env
*.tsbuildinfo
drizzle/meta/
@@ -123,7 +123,9 @@ pnpm test:release-smoke

Run the browser suites only when your change touches them or when you are explicitly verifying CI/release flows.

Run this full check before claiming done:
For normal issue work, run the smallest relevant verification first. Do not default to repo-wide typecheck/build/test on every heartbeat when a narrower check is enough to prove the change.

Run this full check before claiming repo work done in a PR-ready hand-off, or when the change scope is broad enough that targeted checks are not sufficient:

```sh
pnpm -r typecheck
```
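Only the first line of the full check is visible in this hunk; as a hedged sketch, the kind of repo-wide sequence it refers to likely combines the typecheck above with the build and test commands used in the CI workflow earlier in this diff:

```sh
# Sketch only; the authoritative list is the full block in the source document
pnpm -r typecheck
pnpm build
pnpm test:run
```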
@@ -1,3 +1,4 @@
# syntax=docker/dockerfile:1.20
FROM node:lts-trixie-slim AS base
ARG USER_UID=1000
ARG USER_GID=1000

@@ -29,6 +30,8 @@ COPY packages/adapters/openclaw-gateway/package.json packages/adapters/openclaw-
COPY packages/adapters/opencode-local/package.json packages/adapters/opencode-local/
COPY packages/adapters/pi-local/package.json packages/adapters/pi-local/
COPY packages/plugins/sdk/package.json packages/plugins/sdk/
COPY --parents packages/plugins/sandbox-providers/./*/package.json packages/plugins/sandbox-providers/
COPY packages/plugins/paperclip-plugin-fake-sandbox/package.json packages/plugins/paperclip-plugin-fake-sandbox/
COPY patches/ patches/

RUN pnpm install --frozen-lockfile
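When the CI deps-stage check flags a missing COPY, the stage can also be exercised locally; a sketch, assuming Docker BuildKit (which the `# syntax=docker/dockerfile:1.20` directive above already requires):

```sh
# Build only the deps stage to confirm every workspace package.json is copied
docker build --target deps -t paperclip-deps .
```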
@@ -6,7 +6,8 @@
<a href="#quickstart"><strong>Quickstart</strong></a> ·
<a href="https://paperclip.ing/docs"><strong>Docs</strong></a> ·
<a href="https://github.com/paperclipai/paperclip"><strong>GitHub</strong></a> ·
<a href="https://discord.gg/m4HZY7xNG3"><strong>Discord</strong></a>
<a href="https://discord.gg/m4HZY7xNG3"><strong>Discord</strong></a> ·
<a href="https://x.com/papercliping"><strong>Twitter</strong></a>
</p>

<p align="center">

@@ -409,6 +410,7 @@ We welcome contributions. See the [contributing guide](CONTRIBUTING.md) for deta
## Community

- [Discord](https://discord.gg/m4HZY7xNG3) — Join the community
- [Twitter / X](https://x.com/papercliping) — Follow updates and announcements
- [GitHub Issues](https://github.com/paperclipai/paperclip/issues) — bugs and feature requests
- [GitHub Discussions](https://github.com/paperclipai/paperclip/discussions) — ideas and RFC

@@ -6,7 +6,8 @@
<a href="#quickstart"><strong>Quickstart</strong></a> ·
<a href="https://paperclip.ing/docs"><strong>Docs</strong></a> ·
<a href="https://github.com/paperclipai/paperclip"><strong>GitHub</strong></a> ·
<a href="https://discord.gg/m4HZY7xNG3"><strong>Discord</strong></a>
<a href="https://discord.gg/m4HZY7xNG3"><strong>Discord</strong></a> ·
<a href="https://x.com/papercliping"><strong>Twitter</strong></a>
</p>

<p align="center">

@@ -278,6 +279,7 @@ We welcome contributions. See the [contributing guide](https://github.com/paperc
## Community

- [Discord](https://discord.gg/m4HZY7xNG3) — Join the community
- [Twitter / X](https://x.com/papercliping) — Follow updates and announcements
- [GitHub Issues](https://github.com/paperclipai/paperclip/issues) — bugs and feature requests
- [GitHub Discussions](https://github.com/paperclipai/paperclip/discussions) — ideas and RFC
@@ -14,6 +14,7 @@ function makeCompany(overrides: Partial<Company>): Company {
issueCounter: 1,
budgetMonthlyCents: 0,
spentMonthlyCents: 0,
attachmentMaxBytes: 10 * 1024 * 1024,
requireBoardApprovalForNewAgents: false,
feedbackDataSharingEnabled: false,
feedbackDataSharingConsentAt: null,
@@ -1,5 +1,5 @@
import { execFile, spawn } from "node:child_process";
import { mkdirSync, mkdtempSync, readFileSync, readdirSync, rmSync, writeFileSync } from "node:fs";
import { existsSync, mkdirSync, mkdtempSync, readFileSync, readdirSync, rmSync, writeFileSync } from "node:fs";
import net from "node:net";
import os from "node:os";
import path from "node:path";

@@ -104,20 +104,50 @@ function writeTestConfig(configPath: string, tempRoot: string, port: number, con
writeFileSync(configPath, `${JSON.stringify(config, null, 2)}\n`, "utf8");
}

function createServerEnv(configPath: string, port: number, connectionString: string) {
interface TestPaperclipEnv {
configPath: string;
paperclipHome: string;
instanceId: string;
shellHome?: string;
}

function createBasePaperclipEnv(options: TestPaperclipEnv) {
const env = { ...process.env };
for (const key of Object.keys(env)) {
if (key.startsWith("PAPERCLIP_")) {
delete env[key];
}
}

env.PAPERCLIP_CONFIG = options.configPath;
env.PAPERCLIP_HOME = options.paperclipHome;
env.PAPERCLIP_INSTANCE_ID = options.instanceId;
env.PAPERCLIP_CONTEXT = path.join(options.paperclipHome, "context.json");
env.PAPERCLIP_AUTH_STORE = path.join(options.paperclipHome, "auth.json");
if (options.shellHome) {
env.HOME = options.shellHome;
}

return env;
}

function createServerEnv(
configPath: string,
port: number,
connectionString: string,
options: Omit<TestPaperclipEnv, "configPath">,
) {
const env = createBasePaperclipEnv({
configPath,
...options,
});

delete env.DATABASE_URL;
delete env.PORT;
delete env.HOST;
delete env.SERVE_UI;
delete env.HEARTBEAT_SCHEDULER_ENABLED;

env.PAPERCLIP_CONFIG = configPath;
env.DATABASE_URL = connectionString;
env.HOST = "127.0.0.1";
env.PORT = String(port);

@@ -130,13 +160,8 @@ function createServerEnv(configPath: string, port: number, connectionString: str
return env;
}

function createCliEnv() {
const env = { ...process.env };
for (const key of Object.keys(env)) {
if (key.startsWith("PAPERCLIP_")) {
delete env[key];
}
}
function createCliEnv(options: TestPaperclipEnv) {
const env = createBasePaperclipEnv(options);
delete env.DATABASE_URL;
delete env.PORT;
delete env.HOST;
@@ -183,14 +208,25 @@ async function api<T>(baseUrl: string, pathname: string, init?: RequestInit): Pr
return text ? JSON.parse(text) as T : (null as T);
}

async function runCliJson<T>(args: string[], opts: { apiBase: string; configPath: string }) {
async function runCliJson<T>(
args: string[],
opts: TestPaperclipEnv & { apiBase?: string; includeConfigArg?: boolean },
) {
const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "../../..");
const cliArgs = ["--silent", "paperclipai", ...args];
if (opts.apiBase) {
cliArgs.push("--api-base", opts.apiBase);
}
if (opts.includeConfigArg !== false) {
cliArgs.push("--config", opts.configPath);
}
cliArgs.push("--json");
const result = await execFileAsync(
"pnpm",
["--silent", "paperclipai", ...args, "--api-base", opts.apiBase, "--config", opts.configPath, "--json"],
cliArgs,
{
cwd: repoRoot,
env: createCliEnv(),
env: createCliEnv(opts),
maxBuffer: 10 * 1024 * 1024,
},
);
@@ -235,6 +271,9 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
let configPath = "";
let exportDir = "";
let apiBase = "";
let paperclipHome = "";
let cliShellHome = "";
let paperclipInstanceId = "";
let serverProcess: ServerProcess | null = null;
let tempDb: Awaited<ReturnType<typeof startEmbeddedPostgresTestDatabase>> | null = null;

@@ -242,6 +281,11 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
tempRoot = mkdtempSync(path.join(os.tmpdir(), "paperclip-company-cli-e2e-"));
configPath = path.join(tempRoot, "config", "config.json");
exportDir = path.join(tempRoot, "exported-company");
paperclipHome = path.join(tempRoot, "paperclip-home");
cliShellHome = path.join(tempRoot, "shell-home");
paperclipInstanceId = "company-cli-e2e";
mkdirSync(paperclipHome, { recursive: true });
mkdirSync(cliShellHome, { recursive: true });

tempDb = await startEmbeddedPostgresTestDatabase("paperclip-company-cli-db-");

@@ -256,7 +300,11 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
["paperclipai", "run", "--config", configPath],
{
cwd: repoRoot,
env: createServerEnv(configPath, port, tempDb.connectionString),
env: createServerEnv(configPath, port, tempDb.connectionString, {
paperclipHome,
instanceId: paperclipInstanceId,
shellHome: cliShellHome,
}),
stdio: ["ignore", "pipe", "pipe"],
},
);
@@ -282,6 +330,31 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
it("exports a company package and imports it into new and existing companies", async () => {
expect(serverProcess).not.toBeNull();

const cliContext = await runCliJson<{
contextPath: string;
profileName: string;
profile: { apiBase?: string };
}>(
["context", "set", "--profile", "isolation-check", "--api-base", "https://example.test"],
{
configPath,
paperclipHome,
instanceId: paperclipInstanceId,
shellHome: cliShellHome,
includeConfigArg: false,
},
);

const expectedContextPath = path.join(paperclipHome, "context.json");
const leakedContextPath = path.join(cliShellHome, ".paperclip", "context.json");
expect(cliContext.contextPath).toBe(expectedContextPath);
expect(cliContext.profileName).toBe("isolation-check");
expect(cliContext.profile.apiBase).toBe("https://example.test");
expect(existsSync(expectedContextPath)).toBe(true);
expect(existsSync(leakedContextPath)).toBe(false);
rmSync(expectedContextPath, { force: true });
expect(existsSync(expectedContextPath)).toBe(false);

const sourceCompany = await api<{ id: string; name: string; issuePrefix: string }>(apiBase, "/api/companies", {
method: "POST",
headers: { "content-type": "application/json" },

@@ -303,8 +376,11 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
name: "Export Engineer",
role: "engineer",
adapterType: "claude_local",
adapterConfig: {
promptTemplate: "You verify company portability.",
adapterConfig: {},
instructionsBundle: {
files: {
"AGENTS.md": "You verify company portability.",
},
},
}),
},

@@ -355,7 +431,13 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
"--include",
"company,agents,projects,issues",
],
{ apiBase, configPath },
{
apiBase,
configPath,
paperclipHome,
instanceId: paperclipInstanceId,
shellHome: cliShellHome,
},
);

expect(exportResult.ok).toBe(true);

@@ -379,7 +461,13 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
"company,agents,projects,issues",
"--yes",
],
{ apiBase, configPath },
{
apiBase,
configPath,
paperclipHome,
instanceId: paperclipInstanceId,
shellHome: cliShellHome,
},
);

expect(importedNew.company.action).toBe("created");

@@ -398,10 +486,11 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
apiBase,
`/api/companies/${importedNew.company.id}/issues`,
);
const importedMatchingIssues = importedIssues.filter((issue) => issue.title === sourceIssue.title);

expect(importedAgents.map((agent) => agent.name)).toContain(sourceAgent.name);
expect(importedProjects.map((project) => project.name)).toContain(sourceProject.name);
expect(importedIssues.map((issue) => issue.title)).toContain(sourceIssue.title);
expect(importedMatchingIssues).toHaveLength(1);

const previewExisting = await runCliJson<{
errors: string[];

@@ -426,7 +515,13 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
"rename",
"--dry-run",
],
{ apiBase, configPath },
{
apiBase,
configPath,
paperclipHome,
instanceId: paperclipInstanceId,
shellHome: cliShellHome,
},
);

expect(previewExisting.errors).toEqual([]);

@@ -453,7 +548,13 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
"rename",
"--yes",
],
{ apiBase, configPath },
{
apiBase,
configPath,
paperclipHome,
instanceId: paperclipInstanceId,
shellHome: cliShellHome,
},
);

expect(importedExisting.company.action).toBe("unchanged");

@@ -471,11 +572,13 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
apiBase,
`/api/companies/${importedNew.company.id}/issues`,
);
const twiceImportedMatchingIssues = twiceImportedIssues.filter((issue) => issue.title === sourceIssue.title);

expect(twiceImportedAgents).toHaveLength(2);
expect(new Set(twiceImportedAgents.map((agent) => agent.name)).size).toBe(2);
expect(twiceImportedProjects).toHaveLength(2);
expect(twiceImportedIssues).toHaveLength(2);
expect(twiceImportedMatchingIssues).toHaveLength(2);
expect(new Set(twiceImportedMatchingIssues.map((issue) => issue.identifier)).size).toBe(2);

const zipPath = path.join(tempRoot, "exported-company.zip");
const portableFiles: Record<string, string> = {};

@@ -498,7 +601,13 @@ describeEmbeddedPostgres("paperclipai company import/export e2e", () => {
"company,agents,projects,issues",
"--yes",
],
{ apiBase, configPath },
{
apiBase,
configPath,
paperclipHome,
instanceId: paperclipInstanceId,
shellHome: cliShellHome,
},
);

expect(importedFromZip.company.action).toBe("created");
@@ -160,6 +160,7 @@ describe("renderCompanyImportPreview", () => {
path: "COMPANY.md",
name: "Source Co",
description: null,
attachmentMaxBytes: null,
brandColor: null,
logoPath: null,
requireBoardApprovalForNewAgents: false,

@@ -375,6 +376,7 @@ describe("import selection catalog", () => {
path: "COMPANY.md",
name: "Source Co",
description: null,
attachmentMaxBytes: null,
brandColor: null,
logoPath: "images/company-logo.png",
requireBoardApprovalForNewAgents: false,
@@ -190,8 +190,9 @@ describe("worktree helpers", () => {
).toEqual(["worktree", "add", "-b", "my-worktree", "/tmp/my-worktree", "origin/main"]);
});

it("rewrites loopback auth URLs to the new port only", () => {
it("rewrites auth URLs only when they already include a port", () => {
expect(rewriteLocalUrlPort("http://127.0.0.1:3100", 3110)).toBe("http://127.0.0.1:3110/");
expect(rewriteLocalUrlPort("http://my-host.ts.net:3100", 3110)).toBe("http://my-host.ts.net:3110/");
expect(rewriteLocalUrlPort("https://paperclip.example", 3110)).toBe("https://paperclip.example");
});
@@ -61,6 +61,7 @@ interface IssueUpdateOptions extends BaseClientOptions {
interface IssueCommentOptions extends BaseClientOptions {
body: string;
reopen?: boolean;
resume?: boolean;
}

interface IssueCheckoutOptions extends BaseClientOptions {

@@ -241,12 +242,14 @@ export function registerIssueCommands(program: Command): void {
.argument("<issueId>", "Issue ID")
.requiredOption("--body <text>", "Comment body")
.option("--reopen", "Reopen if issue is done/cancelled")
.option("--resume", "Request explicit follow-up and wake the assignee when resumable")
.action(async (issueId: string, opts: IssueCommentOptions) => {
try {
const ctx = resolveCommandContext(opts);
const payload = addIssueCommentSchema.parse({
body: opts.body,
reopen: opts.reopen,
resume: opts.resume,
});
const comment = await ctx.api.post<IssueComment>(`/api/issues/${issueId}/comments`, payload);
printOutput(comment, { json: ctx.json });
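A hedged usage sketch for the new flag follows; the `issue comment` subcommand path and the example issue ID are assumptions, while `--body`, `--resume`, and `--json` come from the registration above:

```sh
# Sketch: subcommand path and issue ID are illustrative; flags are from the diff above
pnpm --silent paperclipai issue comment PAP-123 \
  --body "Please pick this back up with the new constraints" \
  --resume --json
```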
@@ -75,11 +75,6 @@ function nonEmpty(value: string | null | undefined): string | null {
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
}

function isLoopbackHost(hostname: string): boolean {
const value = hostname.trim().toLowerCase();
return value === "127.0.0.1" || value === "localhost" || value === "::1";
}

export function sanitizeWorktreeInstanceId(rawValue: string): string {
const trimmed = rawValue.trim().toLowerCase();
const normalized = trimmed

@@ -168,7 +163,8 @@ export function rewriteLocalUrlPort(rawUrl: string | undefined, port: number): s
if (!rawUrl) return undefined;
try {
const parsed = new URL(rawUrl);
if (!isLoopbackHost(parsed.hostname)) return rawUrl;
// The URL API normalizes default ports like :80/:443 to "", so treat them as stable URLs.
if (!parsed.port) return rawUrl;
parsed.port = String(port);
return parsed.toString();
} catch {
@@ -59,11 +59,11 @@ cp .env.example .env
# DATABASE_URL=postgres://paperclip:paperclip@localhost:5432/paperclip
```

Run migrations (once the migration generation issue is fixed) or use `drizzle-kit push`:
Run migrations:

```sh
DATABASE_URL=postgres://paperclip:paperclip@localhost:5432/paperclip \
npx drizzle-kit push
pnpm db:migrate
```

Start the server:

@@ -100,37 +100,27 @@ postgres://postgres.[PROJECT-REF]:[PASSWORD]@aws-0-[REGION].pooler.supabase.com:

### Configure

Set `DATABASE_URL` in your `.env`:
For the application runtime, use a direct PostgreSQL connection unless the database client has explicit prepared-statement configuration for your pooling mode:

```sh
DATABASE_URL=postgres://postgres.[PROJECT-REF]:[PASSWORD]@aws-0-[REGION].pooler.supabase.com:6543/postgres
DATABASE_URL=postgres://postgres.[PROJECT-REF]:[PASSWORD]@aws-0-[REGION].pooler.supabase.com:5432/postgres
```

For hosted deployments that use a pooled runtime URL, set
`DATABASE_MIGRATION_URL` to the direct connection URL. Paperclip uses it for
startup schema checks/migrations and plugin namespace migrations, while the app
continues to use `DATABASE_URL` for runtime queries:
If you later run the app with a pooled runtime URL, set `DATABASE_MIGRATION_URL` to the direct connection URL. Paperclip uses it for startup schema checks/migrations and plugin namespace migrations, while the app continues to use `DATABASE_URL` for runtime queries:

```sh
DATABASE_URL=postgres://postgres.[PROJECT-REF]:[PASSWORD]@aws-0-[REGION].pooler.supabase.com:6543/postgres
DATABASE_MIGRATION_URL=postgres://postgres.[PROJECT-REF]:[PASSWORD]@aws-0-[REGION].pooler.supabase.com:5432/postgres
```

If using connection pooling (port 6543), the `postgres` client must disable prepared statements. Update `packages/db/src/client.ts`:

```ts
export function createDb(url: string) {
const sql = postgres(url, { prepare: false });
return drizzlePg(sql, { schema });
}
```
If your hosted database requires transaction-pooling-only connections, use a direct or session-pooled connection for Paperclip until runtime pooling support is documented in this guide. Do not edit database client source files as part of deployment setup.

### Push the schema

```sh
# Use the direct connection (port 5432) for schema changes
DATABASE_URL=postgres://postgres.[PROJECT-REF]:[PASSWORD]@...5432/postgres \
npx drizzle-kit push
pnpm db:migrate
```

### Free tier limits

@@ -153,6 +143,22 @@ The database mode is controlled by `DATABASE_URL`:

Your Drizzle schema (`packages/db/src/schema/`) stays the same regardless of mode.

## Plugin database namespaces

The plugin runtime tracks plugin-owned database namespaces and migrations in `plugin_database_namespaces` and `plugin_migrations`. Hosted deployments that separate runtime and migration connections should set `DATABASE_MIGRATION_URL`; plugin namespace migration work uses the migration connection when present.

## Backups

Paperclip supports automatic and manual logical database backups. These dumps include
non-system database schemas such as `public`, the Drizzle migration journal, and
plugin-owned database schemas. See `doc/DEVELOPING.md` for the current
`paperclipai db:backup` / `pnpm db:backup` commands and backup retention
configuration.

Database backups do not include non-database instance files such as local-disk
uploads, workspace files, or the local encrypted secrets master key. Back those paths
up separately when you need full instance disaster recovery.
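For the manual path mentioned above, a minimal sketch using the command names cited in the paragraph (output location and retention follow the instance configuration described in `doc/DEVELOPING.md`):

```sh
# Sketch: trigger a manual logical backup with the documented command
pnpm db:backup
```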
## Secret storage

Paperclip stores secret metadata and versions in:
@@ -43,6 +43,8 @@ This starts:

`pnpm dev` and `pnpm dev:once` are now idempotent for the current repo and instance: if the matching Paperclip dev runner is already alive, Paperclip reports the existing process instead of starting a duplicate.

Issue execution may also use project execution workspace policies and workspace runtime services for per-project worktrees, preview servers, and managed dev commands. Configure those through the project workspace/runtime surfaces rather than starting long-running unmanaged processes when a task needs a reusable service.

## Storybook

The board UI Storybook keeps stories and Storybook config under `ui/storybook/` so component review files stay out of the app source routes.

@@ -113,6 +115,8 @@ pnpm test:release-smoke

These browser suites are intended for targeted local verification and CI, not the default agent/human test command.

For normal issue work, start with the smallest targeted check that proves the change. Reserve repo-wide typecheck/build/test runs for PR-ready handoff or changes broad enough that narrow checks do not cover the risk.

## One-Command Local Run

For a first-time local install, you can bootstrap and run in one command:

@@ -194,6 +198,8 @@ For `codex_local`, Paperclip also manages a per-company Codex home under the ins

If the `codex` CLI is not installed or not on `PATH`, `codex_local` agent runs fail at execution time with a clear adapter error. Quota polling uses a short-lived `codex app-server` subprocess: when `codex` cannot be spawned, that provider reports `ok: false` in aggregated quota results and the API server keeps running (it must not exit on a missing binary).

Local adapters require their corresponding CLI/session setup on the machine running Paperclip. External adapters are installed through the adapter/plugin flow and should not require hardcoded imports in `server/` or `ui/`.

## Worktree-local Instances

When developing from multiple git worktrees, do not point two Paperclip servers at the same embedded PostgreSQL data directory.
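A sketch of one way to keep worktrees isolated, assuming each worktree is pointed at its own database through `DATABASE_URL` (the variable covered in the next hunk); the connection string here is illustrative:

```sh
# Sketch: give this worktree its own database instead of sharing the
# embedded PostgreSQL data directory (connection string is illustrative)
export DATABASE_URL=postgres://paperclip:paperclip@localhost:5432/paperclip_worktree_a
pnpm dev
```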
@@ -415,7 +421,9 @@ If you set `DATABASE_URL`, the server will use that instead of embedded PostgreS

## Automatic DB Backups

Paperclip can run automatic DB backups on a timer. Defaults:
Paperclip can run automatic logical database backups on a timer. These backups cover
non-system database schemas, including migration history and plugin-owned database
schemas. Defaults:

- enabled
- every 60 minutes

@@ -443,6 +451,10 @@ Environment overrides:

- `PAPERCLIP_DB_BACKUP_RETENTION_DAYS=<days>`
- `PAPERCLIP_DB_BACKUP_DIR=/absolute/or/~/path`

DB backups are not full instance filesystem backups. For full local disaster
recovery, also back up local storage files and the local encrypted secrets key if
those providers are enabled.
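A sketch combining the two overrides shown in this hunk; the values are illustrative:

```sh
# Illustrative values only; see the overrides list above
export PAPERCLIP_DB_BACKUP_RETENTION_DAYS=14
export PAPERCLIP_DB_BACKUP_DIR=~/paperclip-backups
```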
## Secrets in Dev

Agent env vars now support secret references. By default, secret values are stored with local encryption and only secret refs are persisted in agent config.
doc/GOAL.md (15 changes)
@@ -23,7 +23,7 @@ Paperclip is the command, communication, and control plane for a company of AI a
- **Track work in real time** — see at any moment what every agent is working on
- **Control costs** — token salary budgets per agent, spend tracking, burn rate
- **Align to goals** — agents see how their work serves the bigger mission
- **Store company knowledge** — a shared brain for the organization
- **Preserve work context** — comments, documents, work products, attachments, and company state stay attached to the work

## Architecture

@@ -36,17 +36,20 @@ The central nervous system. Manages:
- Agent registry and org chart
- Task assignment and status
- Budget and token spend tracking
- Company knowledge base
- Issue comments, documents, work products, attachments, and company state
- Goal hierarchy (company → team → agent → task)
- Heartbeat monitoring — know when agents are alive, idle, or stuck

It also enforces execution-control semantics such as single-assignee issues, atomic checkout and execution locks, blockers, recovery issues, and workspace/runtime controls.

### 2. Execution Services (adapters)

Agents run externally and report into the control plane. An agent is just Python code that gets kicked off and does work. Adapters connect different execution environments:
Agents run externally and report into the control plane. Adapters connect different execution environments and define how a heartbeat is invoked, observed, and cancelled:

- **OpenClaw** — initial adapter target
- **Heartbeat loop** — simple custom Python that loops, checks in, does work
- **Others** — any runtime that can call an API
- **Local CLI/session adapters** — built-in adapters for tools such as Claude Code, Codex, Gemini, OpenCode, Pi, and Cursor
- **HTTP/process-style adapters** — command or webhook/API integrations for custom runtimes
- **OpenClaw gateway** — integration for OpenClaw-style remote agents
- **External adapter plugins** — dynamically loaded adapters installed outside the core app

The control plane doesn't run agents. It orchestrates them. Agents run wherever they run and phone home.
@@ -32,12 +32,14 @@ Then you define who reports to the CEO: a CTO managing programmers, a CMO managi

### Agent Execution

There are two fundamental modes for running an agent's heartbeat:
Paperclip supports several ways to run an agent's heartbeat:

1. **Run a command** — Paperclip kicks off a process (shell command, Python script, etc.) and tracks it. The heartbeat is "execute this and monitor it."
2. **Fire and forget a request** — Paperclip sends a webhook/API call to an externally running agent. The heartbeat is "notify this agent to wake up." (OpenClaw hooks work this way.)
1. **Local CLI/session adapters** — Paperclip starts or resumes local coding-tool sessions such as Claude Code, Codex, Gemini, OpenCode, Pi, and Cursor, then tracks the run.
2. **Run a command** — Paperclip kicks off a process (shell command, Python script, etc.) and tracks it. The heartbeat is "execute this and monitor it."
3. **Fire and forget a request** — Paperclip sends a webhook/API call to an externally running agent. The heartbeat is "notify this agent to wake up." OpenClaw-style hooks work this way.
4. **External adapter plugins** — Paperclip loads adapter packages through the plugin/adapter flow so self-hosted installs can add runtimes without hardcoding them in core.

We provide sensible defaults — a default agent that shells out to Claude Code or Codex with your configuration, remembers session IDs, runs basic scripts. But you can plug in anything.
Agent runs can use project and execution workspaces, managed runtime services such as preview/dev servers, adapter-specific session state, and HTTP/webhook-style execution. We provide sensible defaults, but the adapter is still the boundary: if a runtime can be invoked, observed, and authorized, Paperclip can coordinate it.

### Task Management

@@ -54,7 +56,7 @@ I am researching the Facebook ads Granola uses (current task)

Tasks have parentage. Every task exists in service of a parent task, all the way up to the company goal. This is what keeps autonomous agents aligned — they can always answer "why am I doing this?"

More detailed task structure TBD.
The current issue model includes stable issue identifiers, parent/sub-issues, blockers, a single assignee, comments, issue documents, attachments and work products, and review/approval handoffs. That structure keeps work inspectable by both the board and agents while still allowing agents to decompose work into smaller tasks.

## Principles

@@ -115,7 +117,7 @@ Paperclip’s core identity is a **control plane for autonomous AI companies**,

- Do not make the core product a general chat app. The current product definition is explicitly task/comment-centric and “not a chatbot,” and that boundary is valuable.
- Do not build a complete Jira/GitHub replacement. The repo/docs already position Paperclip as organization orchestration, not focused on pull-request review.
- Do not build enterprise-grade RBAC first. The current V1 spec still treats multi-board governance and fine-grained human permissions as out of scope, so the first multi-user version should be coarse and company-scoped.
- Do not build enterprise-grade RBAC first. Paperclip now has authenticated mode, company memberships, instance roles, and permission grants, but fine-grained enterprise governance should remain secondary to the core company control plane.
- Do not lead with raw bash logs and transcripts. Default view should be human-readable intent/progress, with raw detail beneath.
- Do not force users to understand provider/API-key plumbing unless absolutely necessary. There are active onboarding/auth issues already; friction here is clearly real.

@@ -136,11 +138,14 @@ Paperclip’s core identity is a **control plane for autonomous AI companies**,
5. **Output-first**
Work is not done until the user can see the result: file, document, preview link, screenshot, plan, or PR.

6. **Local-first, cloud-ready**
6. **Execution visibility without log worship**
Active runs, recovery issues, productivity review states, blockers, and work products should be first-class surfaces. Raw transcripts are available when needed, but they are not the primary product surface.

7. **Local-first, cloud-ready**
The mental model should not change between local solo use and shared/private or public/cloud deployment.

7. **Safe autonomy**
8. **Safe autonomy**
Auto mode is allowed; hidden token burn is not.

8. **Thin core, rich edges**
9. **Thin core, rich edges**
Put optional chat, knowledge, and special surfaces into plugins/extensions rather than bloating the control plane.
||||
|
||||
@@ -143,6 +143,13 @@ This keeps the default install path unchanged while allowing explicit installs w
npx paperclipai@canary onboard
```

The release script now verifies two things after a canary publish:

- the `canary` dist-tag resolves to the version that was just published
- every published internal `@paperclipai/*` dependency referenced by that manifest exists on npm

It also treats `latest -> canary` as a failure by default, because npm metadata can otherwise leave the default install path pointing at an unreleased canary dependency graph. Only pass `./scripts/release.sh canary --allow-canary-latest` when that `latest` behavior is explicitly intended.
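Both post-publish checks can be reproduced by hand with standard npm commands; a sketch, assuming the public package name `paperclipai` used in the install example above (the internal package name below is illustrative):

```sh
# Confirm where the canary dist-tag points after publishing
npm dist-tag ls paperclipai

# Spot-check that a published internal dependency exists on npm
# (package name is illustrative)
npm view @paperclipai/sdk versions
```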
### Stable

Stable publishes use the npm dist-tag `latest`.

@@ -63,6 +63,8 @@ It:
- verifies the pushed commit
- computes the canary version for the current UTC date
- publishes under npm dist-tag `canary`
- verifies that `canary` resolves to the just-published version and that published internal dependencies exist on npm
- fails by default if npm leaves `latest` pointing at a canary; use `--allow-canary-latest` only when that state is intentional
- creates a git tag `canary/vYYYY.MDD.P-canary.N`

Users install canaries with:
@@ -1,7 +1,7 @@
# Paperclip V1 Implementation Spec

Status: Implementation contract for first release (V1)
Date: 2026-02-17
Date: 2026-04-28
Audience: Product, engineering, and agent-integration authors
Source inputs: `GOAL.md`, `PRODUCT.md`, `SPEC.md`, `DATABASE.md`, current monorepo code

@@ -37,8 +37,9 @@ These decisions close open questions from `SPEC.md` for V1.
| Visibility | Full visibility to board and all agents in same company |
| Communication | Tasks + comments only (no separate chat system) |
| Task ownership | Single assignee; atomic checkout required for `in_progress` transition |
| Recovery | No automatic reassignment; work recovery stays manual/explicit |
| Agent adapters | Built-in `process` and `http` adapters |
| Recovery | Liveness/watchdog recovery preserves explicit ownership: retry lost execution continuity where safe, otherwise create visible recovery issues or require human escalation (see `doc/execution-semantics.md`) |
| Agent adapters | Built-in `process`, `http`, local CLI/session adapters, and OpenClaw gateway support; external adapters can also be loaded through the adapter plugin flow |
| Plugin framework | Local/self-hosted early plugin runtime is in scope; cloud marketplace and packaged public distribution remain out of scope |
| Auth | Mode-dependent human auth (`local_trusted` implicit board in current code; authenticated mode uses sessions), API keys for agents |
| Budget period | Monthly UTC calendar window |
| Budget enforcement | Soft alerts + hard limit auto-pause |

@@ -73,7 +74,7 @@ V1 implementation extends this baseline into a company-centric, governance-aware

## 5.2 Out of Scope (V1)

- Plugin framework and third-party extension SDK
- Cloud-grade plugin marketplace/distribution beyond the local/self-hosted plugin runtime
- Revenue/expense accounting beyond model/token costs
- Knowledge base subsystem
- Public marketplace (ClipHub)

@@ -123,6 +124,16 @@ Human auth tables (`users`, `sessions`, and provider-specific auth artifacts) ar
- `name` text not null
- `description` text null
- `status` enum: `active | paused | archived`
- `pause_reason` text null
- `paused_at` timestamptz null
- `issue_prefix` text not null
- `issue_counter` int not null
- `budget_monthly_cents` int not null default 0
- `spent_monthly_cents` int not null default 0
- `attachment_max_bytes` int not null
- `require_board_approval_for_new_agents` boolean not null default false
- feedback sharing consent fields
- branding fields such as `brand_color`

Invariant: every business record belongs to exactly one company.
@@ -133,15 +144,21 @@ Invariant: every business record belongs to exactly one company.
- `name` text not null
- `role` text not null
- `title` text null
- `status` enum: `active | paused | idle | running | error | terminated`
- `icon` text null
- `status` enum: `active | paused | idle | running | error | pending_approval | terminated`
- `reports_to` uuid fk `agents.id` null
- `capabilities` text null
- `adapter_type` enum: `process | http`
- `adapter_type` text; built-ins include `process`, `http`, `claude_local`, `codex_local`, `gemini_local`, `opencode_local`, `pi_local`, `cursor`, and `openclaw_gateway`
- `adapter_config` jsonb not null
- `runtime_config` jsonb not null default `{}`
- `default_environment_id` uuid fk `environments.id` null
- `context_mode` enum: `thin | fat` default `thin`
- `budget_monthly_cents` int not null default 0
- `spent_monthly_cents` int not null default 0
- pause fields: `pause_reason`, `paused_at`
- `permissions` jsonb not null default `{}`
- `last_heartbeat_at` timestamptz null
- `metadata` jsonb null

Invariants:

@@ -195,6 +212,7 @@ Invariant:
- `id` uuid pk
- `company_id` uuid fk not null
- `project_id` uuid fk `projects.id` null
- `project_workspace_id` uuid fk `project_workspaces.id` null
- `goal_id` uuid fk `goals.id` null
- `parent_id` uuid fk `issues.id` null
- `title` text not null

@@ -202,13 +220,22 @@ Invariant:
- `status` enum: `backlog | todo | in_progress | in_review | done | blocked | cancelled`
- `priority` enum: `critical | high | medium | low`
- `assignee_agent_id` uuid fk `agents.id` null
- `assignee_user_id` text null
- checkout/execution locks: `checkout_run_id`, `execution_run_id`, `execution_agent_name_key`, `execution_locked_at`
- `created_by_agent_id` uuid fk `agents.id` null
- `created_by_user_id` uuid fk `users.id` null
- identifier fields: `issue_number`, `identifier`
- origin fields: `origin_kind`, `origin_id`, `origin_run_id`, `origin_fingerprint`
- `request_depth` int not null default 0
- `billing_code` text null
- `assignee_adapter_overrides` jsonb null
- `execution_policy` jsonb null
- `execution_state` jsonb null
- execution workspace fields: `execution_workspace_id`, `execution_workspace_preference`, `execution_workspace_settings`
- `started_at` timestamptz null
- `completed_at` timestamptz null
- `cancelled_at` timestamptz null
- `hidden_at` timestamptz null

Invariants:
@@ -261,10 +288,10 @@ Invariant: each event must attach to agent and company; rollups are aggregation,
- `id` uuid pk
- `company_id` uuid fk not null
- `type` enum: `hire_agent | approve_ceo_strategy`
- `type` enum: `hire_agent | approve_ceo_strategy | budget_override_required | request_board_approval`
- `requested_by_agent_id` uuid fk `agents.id` null
- `requested_by_user_id` uuid fk `users.id` null
- `status` enum: `pending | approved | rejected | cancelled`
- `status` enum: `pending | revision_requested | approved | rejected | cancelled`
- `payload` jsonb not null
- `decision_note` text null
- `decided_by_user_id` uuid fk `users.id` null

@@ -363,6 +390,15 @@ Operational policy:
- `document_id` uuid fk not null
- `key` text not null (`plan`, `design`, `notes`, etc.)

## 7.16 Current Implementation Addenda

The current implementation includes additional V1-control-plane tables beyond the original February snapshot:

- Issue structure and review: `issue_relations` for blockers, `labels`/`issue_labels`, `issue_thread_interactions`, `issue_approvals`, `issue_execution_decisions`, `issue_work_products`, `issue_inbox_archives`, `issue_read_states`, and issue reference mention indexes.
- Execution and workspace control: `execution_workspaces`, `project_workspaces`, `workspace_runtime_services`, `workspace_operations`, `environments`, `environment_leases`, `agent_task_sessions`, `agent_runtime_state`, `agent_wakeup_requests`, heartbeat events, and watchdog decision tables.
- Plugins and routines: `plugins`, plugin config/state/entities/jobs/logs/webhooks, plugin database namespaces/migrations, plugin company settings, and `routines`.
- Access and operations: company memberships, instance roles, principal permission grants, invites, join requests, board API keys, CLI auth challenges, budget policies/incidents, feedback exports/votes, company skills, sidebar preferences, and company logos.

## 8. State Machines

## 8.1 Agent Status

@@ -395,7 +431,14 @@ Side effects:
- entering `done` sets `completed_at`
- entering `cancelled` sets `cancelled_at`

Detailed ownership, execution, blocker, and crash-recovery semantics are documented in `doc/execution-semantics.md`.
V1 non-terminal liveness rule:

- agent-owned `todo`, `in_progress`, `in_review`, and `blocked` issues must have a live execution path, an explicit waiting path, or an explicit recovery path
- `in_review` is healthy only when a typed execution participant, pending issue-thread interaction or approval, user owner, active run, queued wake, or explicit recovery issue owns the next action
- a blocked chain is covered only when each unresolved leaf issue is live or explicitly waiting
- when Paperclip cannot safely infer the next action, it surfaces the problem through visible blocked/recovery work instead of silently completing or reassigning work

Detailed ownership, execution, blocker, active-run watchdog, crash-recovery, and non-terminal liveness semantics are documented in `doc/execution-semantics.md`.

## 8.3 Approval Status

@@ -556,6 +599,17 @@ Dashboard payload must include:
- `422` semantic rule violation
- `500` server error

## 10.10 Current Implementation API Addenda

The current app also exposes V1-supporting surfaces for:

- issue thread interactions (`suggest_tasks`, `ask_user_questions`, `request_confirmation`)
- issue approvals, issue references/search, labels, read state, inbox/archive state, and work products
- execution workspaces, project workspaces, workspace runtime services, and workspace operations
- routines and scheduled/API/webhook triggers
- plugin installation, configuration, state, jobs, logs, webhooks, and plugin database namespace migration
- company import/export preview/apply, feedback export/vote routes, instance backup/config routes, invites, join requests, memberships, and permission grants

## 11. Heartbeat and Adapter Contract

## 11.1 Adapter Interface

@@ -731,13 +785,14 @@ Required UX behaviors:
- Node 20+
- `DATABASE_URL` optional
- if unset, auto-use PGlite and push schema
- if unset, auto-use embedded PostgreSQL under `~/.paperclip/instances/default/db`

## 15.2 Migrations

- Drizzle migrations are source of truth
- local/dev startup applies pending migrations automatically where supported
- `pnpm db:migrate` applies pending migrations manually
- no destructive migration in-place for V1 upgrade path
- provide migration script from existing minimal tables to company-scoped schema

## 15.3 Logging and Audit

@@ -792,6 +847,8 @@ A release candidate is blocked unless these pass:

## 18. Delivery Plan

Current implementation note: the milestones below describe the original V1 sequencing. Several systems originally framed as future work have since shipped or advanced materially, including issue documents/interactions, blockers, routines, execution workspaces, import/export portability, authenticated deployment modes, multi-user basics, and the local/self-hosted plugin runtime.

## Milestone 1: Company Core and Auth

- add `companies` and company scoping to existing entities

@@ -844,7 +901,7 @@ V1 is complete only when all criteria are true:

## 20. Post-V1 Backlog (Explicitly Deferred)

- plugin architecture
- cloud-grade plugin marketplace/distribution
- richer workflow-state customization per team
- milestones/labels/dependency graph depth beyond V1 minimum
- realtime transport optimization (SSE/WebSockets)
New binary files (not shown):

- doc/assets/pap-2189/desktop-1440x900-dark.png (174 KiB)
- doc/assets/pap-2189/desktop-1440x900-light.png (174 KiB)
- doc/assets/pap-2189/mobile-390x844-dark.png (177 KiB)
- doc/assets/pap-2189/mobile-390x844-light.png (177 KiB)
@@ -1,7 +1,7 @@
|
||||
# Execution Semantics
|
||||
|
||||
Status: Current implementation guide
|
||||
Date: 2026-04-13
|
||||
Date: 2026-04-26
|
||||
Audience: Product and engineering
|
||||
|
||||
This document explains how Paperclip interprets issue assignment, issue status, execution runs, wakeups, parent/sub-issue structure, and blocker relationships.
|
||||
@@ -150,11 +150,23 @@ Blocked issues should stay idle while blockers remain unresolved. Paperclip shou
|
||||
|
||||
If a parent is truly waiting on a child, model that with blockers. Do not rely on the parent/child relationship alone.
|
||||
|
||||
## 7. Consistent Execution Path Rules
|
||||
## 7. Non-Terminal Issue Liveness Contract
|
||||
|
||||
For agent-assigned, non-terminal, actionable issues, Paperclip should not leave work in a state where nobody is working it and nothing will wake it.
|
||||
For agent-owned, non-terminal issues, Paperclip should never leave work in a state where nobody is responsible for the next move and nothing will wake or surface it.
|
||||
|
||||
The relevant execution path depends on status.
|
||||
This is a visibility contract, not an auto-completion contract. If Paperclip cannot safely infer the next action, it should surface the ambiguity with a blocked state, a visible comment, or an explicit recovery issue. It must not silently mark work done from prose comments or guess that a dependency is complete.
|
||||
|
||||
An issue is healthy when the product can answer "what moves this forward next?" without requiring a human to reconstruct intent from the whole thread. An issue is stalled when it is non-terminal but has no live execution path, no explicit waiting path, and no recovery path.
|
||||
|
||||
The valid action-path primitives, illustrated by the sketch after this list, are:
|
||||
|
||||
- an active run linked to the issue
|
||||
- a queued wake or continuation that can be delivered to the responsible agent
|
||||
- a typed execution-policy participant, such as `executionState.currentParticipant`
|
||||
- a pending issue-thread interaction or linked approval that is waiting for a specific responder
|
||||
- a human owner via `assigneeUserId`
|
||||
- a first-class blocker chain whose unresolved leaf issues are themselves healthy
|
||||
- an open explicit recovery issue that names the owner and action needed to restore liveness
|
||||
|
||||
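As a rough illustration, the contract can be read as a predicate over these primitives. The following is a minimal sketch with hypothetical field names (`activeRun`, `queuedWake`, and so on), not the actual Paperclip data model:

```ts
// Hypothetical shape of the signals described above; field names are illustrative only.
interface IssueLivenessSignals {
  activeRun: boolean;            // an active run linked to the issue
  queuedWake: boolean;           // a queued wake/continuation deliverable to the responsible agent
  typedParticipant: boolean;     // e.g. executionState.currentParticipant
  pendingInteraction: boolean;   // issue-thread interaction or linked approval awaiting a responder
  humanOwner: boolean;           // assigneeUserId is set
  healthyBlockerChain: boolean;  // unresolved blocker leaves are themselves healthy
  openRecoveryIssue: boolean;    // explicit recovery issue naming owner and action
  terminal: boolean;             // done/cancelled issues are out of scope
}

// An issue is healthy when at least one action-path primitive is present.
function hasLiveActionPath(s: IssueLivenessSignals): boolean {
  if (s.terminal) return true; // terminal issues are not subject to the contract
  return (
    s.activeRun ||
    s.queuedWake ||
    s.typedParticipant ||
    s.pendingInteraction ||
    s.humanOwner ||
    s.healthyBlockerChain ||
    s.openRecoveryIssue
  );
}
```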
### Agent-assigned `todo`
|
||||
|
||||
@@ -162,9 +174,11 @@ This is dispatch state: ready to start, not yet actively claimed.
|
||||
|
||||
A healthy dispatch state means at least one of these is true:
|
||||
|
||||
- the issue already has a queued/running wake path
|
||||
- the issue is intentionally resting in `todo` after a successful agent heartbeat, not after an interrupted dispatch
|
||||
- the issue has been explicitly surfaced as stranded
|
||||
- the issue already has a queued wake path
|
||||
- the issue is intentionally resting in `todo` after a completed agent heartbeat, with no interrupted dispatch evidence
|
||||
- the issue has been explicitly surfaced as stranded through a visible blocked/recovery path
|
||||
|
||||
An assigned `todo` issue is stalled when dispatch was interrupted, no wake remains queued or running, and no recovery path has been opened.
|
||||
|
||||
### Agent-assigned `in_progress`
|
||||
|
||||
@@ -174,7 +188,39 @@ A healthy active-work state means at least one of these is true:
|
||||
|
||||
- there is an active run for the issue
|
||||
- there is already a queued continuation wake
|
||||
- the issue has been explicitly surfaced as stranded
|
||||
- there is an open explicit recovery issue for the lost execution path
|
||||
|
||||
An agent-owned `in_progress` issue is stalled when it has no active run, no queued continuation, and no explicit recovery surface. A still-running but silent process is not automatically stalled; it is handled by the active-run watchdog contract.
|
||||
|
||||
### `in_review`
|
||||
|
||||
This is review/approval state: execution is paused because the next move belongs to a reviewer, approver, board user, or recovery owner.
|
||||
|
||||
A healthy `in_review` issue has at least one valid action path:
|
||||
|
||||
- a typed execution-policy participant who can approve or request changes
|
||||
- a pending issue-thread interaction or linked approval waiting for a named responder
|
||||
- a human owner via `assigneeUserId`
|
||||
- an active run or queued wake that is expected to process the review state
|
||||
- an open explicit recovery issue for an ambiguous review handoff
|
||||
|
||||
Agent-assigned `in_review` with no typed participant is only healthy when one of the other paths exists. Assignment to the same agent that produced the handoff is not, by itself, a review path.
|
||||
|
||||
An `in_review` issue is stalled when it has no typed participant, no pending interaction or approval, no user owner, no active run, no queued wake, and no explicit recovery issue. Paperclip should surface that state as recovery work rather than silently completing the issue or leaving blocker chains parked indefinitely.
|
||||
|
||||
### `blocked`
|
||||
|
||||
This is explicit waiting state.
|
||||
|
||||
A healthy `blocked` issue has an explicit waiting path:
|
||||
|
||||
- first-class blockers exist, and each unresolved leaf has a valid action path under this contract
|
||||
- the issue is blocked on an explicit recovery issue that itself has a live or waiting path
|
||||
- the issue is waiting on a pending interaction, linked approval, human owner, or clearly named external owner/action
|
||||
|
||||
A blocker chain is covered only when its unresolved leaf is live or explicitly waiting. An intermediate `blocked` issue does not make the chain healthy by itself.
|
||||
|
||||
A `blocked` issue is stalled when the unresolved blocker leaf has no active run, queued wake, typed participant, pending interaction or approval, user owner, external owner/action, or recovery issue. In that case the parent should show the first stalled leaf instead of presenting the dependency as calmly covered.
|
||||
|
||||
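A sketch of that leaf-finding walk, assuming a simplified issue graph with hypothetical `blockers` and `hasLiveOrWaitingPath` accessors rather than the real schema:

```ts
// Simplified issue node; the real graph is richer than this.
interface BlockedIssue {
  id: string;
  resolved: boolean;
  blockers: BlockedIssue[];       // first-class blocker issues
  hasLiveOrWaitingPath: boolean;  // per the liveness contract above
}

// Returns the first unresolved blocker leaf with no live or waiting path,
// or null when every unresolved leaf is covered.
function firstStalledLeaf(issue: BlockedIssue): BlockedIssue | null {
  const unresolved = issue.blockers.filter((b) => !b.resolved);
  if (unresolved.length === 0) {
    // This issue is itself a leaf: stalled unless it has a valid path.
    return issue.hasLiveOrWaitingPath ? null : issue;
  }
  for (const blocker of unresolved) {
    const stalled = firstStalledLeaf(blocker);
    if (stalled) return stalled; // surface the first stalled leaf to the parent
  }
  return null; // every unresolved leaf is live or explicitly waiting
}
```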
## 8. Crash and Restart Recovery
|
||||
|
||||
@@ -218,15 +264,83 @@ This is an active-work continuity recovery.
|
||||
|
||||
Startup recovery and periodic recovery are different from normal wakeup delivery.
|
||||
|
||||
On startup and on the periodic recovery loop, Paperclip now does three things in sequence:
|
||||
On startup and on the periodic recovery loop, Paperclip now does four things in sequence:
|
||||
|
||||
1. reap orphaned `running` runs
|
||||
2. resume persisted `queued` runs
|
||||
3. reconcile stranded assigned work
|
||||
4. scan silent active runs and create or update explicit watchdog review issues
|
||||
|
||||
That last step is what closes the gap where issue state survives a crash but the wake/run path does not.
|
||||
The stranded-work pass closes the gap where issue state survives a crash but the wake/run path does not. The silent-run scan covers the separate case where a live process exists but has stopped producing observable output.
|
||||
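Expressed as code, the startup/periodic pass might look roughly like the sketch below; the service and method names are placeholders for illustration, not the actual module layout:

```ts
// Hypothetical interfaces for the four recovery steps; names are illustrative only.
interface RecoveryServices {
  reapOrphanedRunningRuns(): Promise<number>;        // step 1
  resumePersistedQueuedRuns(): Promise<number>;      // step 2
  reconcileStrandedAssignedWork(): Promise<number>;  // step 3
  scanSilentActiveRuns(): Promise<number>;           // step 4: create/update watchdog review issues
}

// Runs the four steps in order; a failure in one step is logged and the rest still run.
async function runRecoveryPass(services: RecoveryServices): Promise<void> {
  const steps: Array<[string, () => Promise<number>]> = [
    ["reap orphaned running runs", () => services.reapOrphanedRunningRuns()],
    ["resume persisted queued runs", () => services.resumePersistedQueuedRuns()],
    ["reconcile stranded assigned work", () => services.reconcileStrandedAssignedWork()],
    ["scan silent active runs", () => services.scanSilentActiveRuns()],
  ];
  for (const [label, step] of steps) {
    try {
      const touched = await step();
      console.log(`recovery: ${label}: ${touched} item(s)`);
    } catch (error) {
      console.error(`recovery: ${label} failed`, error);
    }
  }
}
```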
|
||||
## 10. What This Does Not Mean
|
||||
## 10. Silent Active-Run Watchdog
|
||||
|
||||
An active run can still be unhealthy even when its process is `running`. Paperclip treats prolonged output silence as a watchdog signal, not as proof that the run is failed.
|
||||
|
||||
The recovery service owns this contract:
|
||||
|
||||
- classify active-run output silence as `ok`, `suspicious`, `critical`, `snoozed`, or `not_applicable`
|
||||
- collect bounded evidence from run logs, recent run events, child issues, and blockers
|
||||
- preserve redaction and truncation before evidence is written to issue descriptions
|
||||
- create at most one open `stale_active_run_evaluation` issue per run
|
||||
- honor active snooze decisions before creating more review work
|
||||
- build the `outputSilence` summary shown by live-run and active-run API responses
|
||||
|
||||
Suspicious silence creates a medium-priority review issue for the selected recovery owner. Critical silence raises that review issue to high priority and blocks the source issue on the explicit evaluation task without cancelling the active process.
|
||||
|
||||
Watchdog decisions are explicit operator/recovery-owner decisions:
|
||||
|
||||
- `snooze` records an operator-chosen future quiet-until time and suppresses scan-created review work during that window
|
||||
- `continue` records that the current evidence is acceptable, does not cancel or mutate the active run, and sets a 30-minute default re-arm window before the watchdog evaluates the still-silent run again
|
||||
- `dismissed_false_positive` records why the review was not actionable
|
||||
|
||||
Operators should prefer `snooze` for known time-bounded quiet periods. `continue` is only a short acknowledgement of the current evidence; if the run remains silent after the re-arm window, the periodic watchdog scan can create or update review work again.
|
||||
|
||||
The board can record watchdog decisions. The assigned owner of the watchdog evaluation issue can also record them. Other agents cannot.
|
||||
|
||||
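A compact sketch of how the silence classification and the `continue` re-arm window could fit together. The threshold values, field names, and the `not_applicable` condition are assumptions for illustration, not the shipped configuration:

```ts
type SilenceLevel = "ok" | "suspicious" | "critical" | "snoozed" | "not_applicable";

interface ActiveRunSilence {
  silenceTrackingApplies: boolean;  // assumption: false when this run type has no observable output
  minutesSinceLastOutput: number;
  snoozedUntil?: Date;              // operator-chosen quiet-until time
  lastContinueAt?: Date;            // last "continue" acknowledgement
}

const SUSPICIOUS_AFTER_MIN = 20;    // illustrative thresholds, not the real defaults
const CRITICAL_AFTER_MIN = 60;
const CONTINUE_REARM_MIN = 30;      // default re-arm window after a "continue" decision

function classifySilence(run: ActiveRunSilence, now = new Date()): SilenceLevel {
  if (!run.silenceTrackingApplies) return "not_applicable";
  if (run.snoozedUntil && run.snoozedUntil > now) return "snoozed";
  if (run.lastContinueAt) {
    const minutesSinceContinue = (now.getTime() - run.lastContinueAt.getTime()) / 60_000;
    if (minutesSinceContinue < CONTINUE_REARM_MIN) return "ok"; // acknowledged, not yet re-armed
  }
  if (run.minutesSinceLastOutput >= CRITICAL_AFTER_MIN) return "critical";
  if (run.minutesSinceLastOutput >= SUSPICIOUS_AFTER_MIN) return "suspicious";
  return "ok";
}
```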
## 11. Auto-Recover vs Explicit Recovery vs Human Escalation
|
||||
|
||||
Paperclip uses three different recovery outcomes, depending on how much it can safely infer.
|
||||
|
||||
### Auto-Recover
|
||||
|
||||
Auto-recovery is allowed when ownership is clear and the control plane only lost execution continuity.
|
||||
|
||||
Examples:
|
||||
|
||||
- requeue one dispatch wake for an assigned `todo` issue whose latest run failed, timed out, or was cancelled
|
||||
- requeue one continuation wake for an assigned `in_progress` issue whose live execution path disappeared
|
||||
- assign an orphan blocker back to its creator when that blocker is already preventing other work
|
||||
|
||||
Auto-recovery preserves the existing owner. It does not choose a replacement agent.
|
||||
|
||||
### Explicit Recovery Issue
|
||||
|
||||
Paperclip creates an explicit recovery issue when the system can identify a problem but cannot safely complete the work itself.
|
||||
|
||||
Examples:
|
||||
|
||||
- automatic stranded-work retry was already exhausted
|
||||
- a dependency graph has an invalid/uninvokable owner, unassigned blocker, or invalid review participant
|
||||
- an active run is silent past the watchdog threshold
|
||||
|
||||
The source issue remains visible and blocked on the recovery issue when blocking is necessary for correctness. The recovery owner must restore a live path, resolve the source issue manually, or record the reason it is a false positive.
|
||||
|
||||
Instance-level issue-graph liveness auto-recovery is disabled by default. When enabled, its lookback window means "dependency paths updated within the last N hours"; older findings remain advisory and are counted as outside the configured lookback instead of creating recovery issues automatically. This is an operator noise control, not the older staleness delay for determining whether a chain is old enough to surface.
|
||||
|
||||
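For example, with a hypothetical `lookbackHours` setting (the real configuration key may differ), the noise control described above is just a timestamp comparison:

```ts
interface LivenessFinding {
  issueId: string;
  dependencyPathUpdatedAt: Date; // last update anywhere on the dependency path
}

// Findings older than the lookback stay advisory; only recent ones create recovery issues.
function splitByLookback(findings: LivenessFinding[], lookbackHours: number, now = new Date()) {
  const cutoff = new Date(now.getTime() - lookbackHours * 3_600_000);
  return {
    actionable: findings.filter((f) => f.dependencyPathUpdatedAt >= cutoff),
    advisory: findings.filter((f) => f.dependencyPathUpdatedAt < cutoff),
  };
}
```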
### Human Escalation
|
||||
|
||||
Human escalation is required when the next safe action depends on board judgment, budget/approval policy, or information unavailable to the control plane.
|
||||
|
||||
Examples:
|
||||
|
||||
- all candidate recovery owners are paused, terminated, pending approval, or budget-blocked
|
||||
- the issue is human-owned rather than agent-owned
|
||||
- the run is intentionally quiet but needs an operator decision before cancellation or continuation
|
||||
|
||||
In these cases Paperclip should leave a visible issue/comment trail instead of silently retrying.
|
||||
|
||||
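Put together, the three outcomes amount to a small decision function. The inputs below are simplified and hypothetical; the real checks also consult budgets, approvals, and instance settings:

```ts
type RecoveryOutcome = "auto_recover" | "explicit_recovery_issue" | "human_escalation";

interface RecoveryContext {
  ownerIsAgent: boolean;           // human-owned issues always go to a person
  needsBoardJudgment: boolean;     // budget, approval policy, or strategic ambiguity
  recoveryOwnerAvailable: boolean; // some candidate owner is not paused/terminated/budget-blocked
  onlyContinuityLost: boolean;     // issue state intact, only the run/wake path disappeared
  autoRetryAlreadyUsed: boolean;   // the single automatic retry was already spent
}

function chooseRecoveryOutcome(ctx: RecoveryContext): RecoveryOutcome {
  if (!ctx.ownerIsAgent || ctx.needsBoardJudgment || !ctx.recoveryOwnerAvailable) {
    return "human_escalation";      // leave a visible issue/comment trail instead of retrying
  }
  if (ctx.onlyContinuityLost && !ctx.autoRetryAlreadyUsed) {
    return "auto_recover";          // requeue one wake, preserving the existing owner
  }
  return "explicit_recovery_issue"; // bounded owner/action; source stays visible, blocked if needed
}
```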
## 12. What This Does Not Mean
|
||||
|
||||
These semantics do not change V1 into an auto-reassignment system.
|
||||
|
||||
@@ -240,9 +354,10 @@ The recovery model is intentionally conservative:
|
||||
|
||||
- preserve ownership
|
||||
- retry once when the control plane lost execution continuity
|
||||
- create explicit recovery work when the system can identify a bounded recovery owner/action
|
||||
- escalate visibly when the system cannot safely keep going
|
||||
|
||||
## 11. Practical Interpretation
|
||||
## 13. Practical Interpretation
|
||||
|
||||
For a board operator, the intended meaning is:
|
||||
|
||||
|
||||
30
docker/.env.aws.example
Normal file
@@ -0,0 +1,30 @@
|
||||
# AWS ECS Fargate deployment environment
|
||||
# Copy to .env.aws and fill in values before deploying
|
||||
#
|
||||
# Secrets (DATABASE_URL, BETTER_AUTH_SECRET, ANTHROPIC_API_KEY, OPENAI_API_KEY,
|
||||
# GITHUB_TOKEN) are injected via AWS Secrets Manager — do NOT set them here.
|
||||
|
||||
# Deployment mode
|
||||
PAPERCLIP_DEPLOYMENT_MODE=authenticated
|
||||
PAPERCLIP_DEPLOYMENT_EXPOSURE=public
|
||||
PAPERCLIP_PUBLIC_URL=https://paperclip.example.com
|
||||
|
||||
# Server
|
||||
HOST=0.0.0.0
|
||||
PORT=3100
|
||||
NODE_ENV=production
|
||||
SERVE_UI=true
|
||||
|
||||
# Paperclip paths
|
||||
PAPERCLIP_HOME=/paperclip
|
||||
PAPERCLIP_INSTANCE_ID=default
|
||||
PAPERCLIP_CONFIG=/paperclip/instances/default/config.json
|
||||
|
||||
# Auto-apply migrations on startup
|
||||
PAPERCLIP_MIGRATION_AUTO_APPLY=true
|
||||
|
||||
# Enable heartbeat scheduler for remote agents
|
||||
HEARTBEAT_SCHEDULER_ENABLED=true
|
||||
|
||||
# Post-deploy hardening (uncomment after first user signs up)
|
||||
# PAPERCLIP_AUTH_DISABLE_SIGN_UP=true
|
||||
90
docker/ecs-task-definition.json
Normal file
@@ -0,0 +1,90 @@
|
||||
{
|
||||
"family": "paperclip-server",
|
||||
"networkMode": "awsvpc",
|
||||
"requiresCompatibilities": ["FARGATE"],
|
||||
"cpu": "2048",
|
||||
"memory": "4096",
|
||||
"executionRoleArn": "arn:aws:iam::<ACCOUNT_ID>:role/paperclip-ecs-execution",
|
||||
"taskRoleArn": "arn:aws:iam::<ACCOUNT_ID>:role/paperclip-ecs-task",
|
||||
"containerDefinitions": [
|
||||
{
|
||||
"name": "paperclip-server",
|
||||
"image": "<ACCOUNT_ID>.dkr.ecr.<REGION>.amazonaws.com/paperclip-server:latest",
|
||||
"essential": true,
|
||||
"portMappings": [
|
||||
{
|
||||
"containerPort": 3100,
|
||||
"protocol": "tcp"
|
||||
}
|
||||
],
|
||||
"environment": [
|
||||
{ "name": "NODE_ENV", "value": "production" },
|
||||
{ "name": "HOST", "value": "0.0.0.0" },
|
||||
{ "name": "PORT", "value": "3100" },
|
||||
{ "name": "SERVE_UI", "value": "true" },
|
||||
{ "name": "PAPERCLIP_HOME", "value": "/paperclip" },
|
||||
{ "name": "PAPERCLIP_INSTANCE_ID", "value": "default" },
|
||||
{ "name": "PAPERCLIP_CONFIG", "value": "/paperclip/instances/default/config.json" },
|
||||
{ "name": "PAPERCLIP_DEPLOYMENT_MODE", "value": "authenticated" },
|
||||
{ "name": "PAPERCLIP_DEPLOYMENT_EXPOSURE", "value": "public" },
|
||||
{ "name": "PAPERCLIP_PUBLIC_URL", "value": "https://<DOMAIN>" },
|
||||
{ "name": "PAPERCLIP_MIGRATION_AUTO_APPLY", "value": "true" },
|
||||
{ "name": "HEARTBEAT_SCHEDULER_ENABLED", "value": "true" }
|
||||
],
|
||||
"secrets": [
|
||||
{
|
||||
"name": "DATABASE_URL",
|
||||
"valueFrom": "arn:aws:secretsmanager:<REGION>:<ACCOUNT_ID>:secret:paperclip/database-url"
|
||||
},
|
||||
{
|
||||
"name": "BETTER_AUTH_SECRET",
|
||||
"valueFrom": "arn:aws:secretsmanager:<REGION>:<ACCOUNT_ID>:secret:paperclip/better-auth-secret"
|
||||
},
|
||||
{
|
||||
"name": "ANTHROPIC_API_KEY",
|
||||
"valueFrom": "arn:aws:secretsmanager:<REGION>:<ACCOUNT_ID>:secret:paperclip/anthropic-api-key"
|
||||
},
|
||||
{
|
||||
"name": "OPENAI_API_KEY",
|
||||
"valueFrom": "arn:aws:secretsmanager:<REGION>:<ACCOUNT_ID>:secret:paperclip/openai-api-key"
|
||||
},
|
||||
{
|
||||
"name": "GITHUB_TOKEN",
|
||||
"valueFrom": "arn:aws:secretsmanager:<REGION>:<ACCOUNT_ID>:secret:paperclip/github-token"
|
||||
}
|
||||
],
|
||||
"mountPoints": [
|
||||
{
|
||||
"sourceVolume": "paperclip-data",
|
||||
"containerPath": "/paperclip",
|
||||
"readOnly": false
|
||||
}
|
||||
],
|
||||
"healthCheck": {
|
||||
"command": ["CMD-SHELL", "curl -f http://localhost:3100/api/health || exit 1"],
|
||||
"interval": 30,
|
||||
"timeout": 5,
|
||||
"retries": 3,
|
||||
"startPeriod": 60
|
||||
},
|
||||
"logConfiguration": {
|
||||
"logDriver": "awslogs",
|
||||
"options": {
|
||||
"awslogs-group": "/ecs/paperclip",
|
||||
"awslogs-region": "<REGION>",
|
||||
"awslogs-stream-prefix": "server"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"volumes": [
|
||||
{
|
||||
"name": "paperclip-data",
|
||||
"efsVolumeConfiguration": {
|
||||
"fileSystemId": "<EFS_ID>",
|
||||
"rootDirectory": "/",
|
||||
"transitEncryption": "ENABLED"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
580
docs/deploy/aws-ecs.md
Normal file
@@ -0,0 +1,580 @@
|
||||
---
|
||||
title: AWS ECS Fargate
|
||||
summary: Deploy Paperclip to AWS using ECS Fargate, RDS Postgres, and EFS
|
||||
---
|
||||
|
||||
Deploy Paperclip to AWS with ECS Fargate (compute), RDS Postgres 17 (database), and EFS (persistent storage). This guide uses the AWS CLI and produces a single-task ECS service behind an ALB with HTTPS.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- AWS CLI v2 configured with a profile that has admin-level permissions
|
||||
- Docker installed locally (for building and pushing the image)
|
||||
- A registered domain with DNS you control (for the TLS certificate)
|
||||
- The Paperclip repo cloned locally
|
||||
|
||||
Set these shell variables for the rest of the guide:
|
||||
|
||||
```bash
|
||||
export AWS_REGION=us-east-1
|
||||
export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
|
||||
export PAPERCLIP_DOMAIN=paperclip.example.com # your domain
|
||||
export DB_PASSWORD=$(openssl rand -base64 24 | tr -d '/+=' | head -c 32)
|
||||
export AUTH_SECRET=$(openssl rand -base64 32)
|
||||
```
|
||||
|
||||
## 1. Create ECR Repository
|
||||
|
||||
```bash
|
||||
aws ecr create-repository \
|
||||
--repository-name paperclip-server \
|
||||
--image-scanning-configuration scanOnPush=true \
|
||||
--region $AWS_REGION
|
||||
```
|
||||
|
||||
## 2. Build and Push Docker Image
|
||||
|
||||
```bash
|
||||
cd /path/to/paperclip
|
||||
|
||||
# Authenticate Docker to ECR
|
||||
aws ecr get-login-password --region $AWS_REGION \
|
||||
| docker login --username AWS --password-stdin \
|
||||
$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
|
||||
|
||||
# Build
|
||||
docker build -t paperclip-server .
|
||||
|
||||
# Tag and push
|
||||
docker tag paperclip-server:latest \
|
||||
$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/paperclip-server:latest
|
||||
|
||||
docker push \
|
||||
$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/paperclip-server:latest
|
||||
```
|
||||
|
||||
## 3. Networking (VPC, Subnets, Security Groups)
|
||||
|
||||
Use the default VPC or create a dedicated one. The guide assumes the default VPC and selects two public subnets in different AZs.
|
||||
|
||||
```bash
|
||||
# Get default VPC
|
||||
VPC_ID=$(aws ec2 describe-vpcs \
|
||||
--filters Name=isDefault,Values=true \
|
||||
--query 'Vpcs[0].VpcId' --output text)
|
||||
|
||||
# Get two public subnets (for ALB)
|
||||
SUBNET_IDS=$(aws ec2 describe-subnets \
|
||||
--filters Name=vpc-id,Values=$VPC_ID \
|
||||
--query 'Subnets[?MapPublicIpOnLaunch==`true`] | [0:2].SubnetId' \
|
||||
--output text)
|
||||
SUBNET_1=$(echo $SUBNET_IDS | awk '{print $1}')
|
||||
SUBNET_2=$(echo $SUBNET_IDS | awk '{print $2}')
|
||||
```
|
||||
|
||||
Create security groups:
|
||||
|
||||
```bash
|
||||
# ALB security group — inbound HTTPS
|
||||
ALB_SG=$(aws ec2 create-security-group \
|
||||
--group-name paperclip-alb \
|
||||
--description "Paperclip ALB" \
|
||||
--vpc-id $VPC_ID \
|
||||
--query 'GroupId' --output text)
|
||||
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-id $ALB_SG \
|
||||
--protocol tcp --port 443 --cidr 0.0.0.0/0
|
||||
|
||||
# Also open port 80 so the ALB can accept HTTP and redirect to HTTPS
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-id $ALB_SG \
|
||||
--protocol tcp --port 80 --cidr 0.0.0.0/0
|
||||
|
||||
# ECS task security group — inbound from ALB only
|
||||
ECS_SG=$(aws ec2 create-security-group \
|
||||
--group-name paperclip-ecs \
|
||||
--description "Paperclip ECS tasks" \
|
||||
--vpc-id $VPC_ID \
|
||||
--query 'GroupId' --output text)
|
||||
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-id $ECS_SG \
|
||||
--protocol tcp --port 3100 \
|
||||
--source-group $ALB_SG
|
||||
|
||||
# RDS security group — inbound from ECS only
|
||||
RDS_SG=$(aws ec2 create-security-group \
|
||||
--group-name paperclip-rds \
|
||||
--description "Paperclip RDS" \
|
||||
--vpc-id $VPC_ID \
|
||||
--query 'GroupId' --output text)
|
||||
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-id $RDS_SG \
|
||||
--protocol tcp --port 5432 \
|
||||
--source-group $ECS_SG
|
||||
|
||||
# EFS security group — inbound NFS from ECS only
|
||||
EFS_SG=$(aws ec2 create-security-group \
|
||||
--group-name paperclip-efs \
|
||||
--description "Paperclip EFS" \
|
||||
--vpc-id $VPC_ID \
|
||||
--query 'GroupId' --output text)
|
||||
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-id $EFS_SG \
|
||||
--protocol tcp --port 2049 \
|
||||
--source-group $ECS_SG
|
||||
```
|
||||
|
||||
## 4. Create RDS Postgres Instance
|
||||
|
||||
```bash
|
||||
# Custom VPCs don't come with a default DB subnet group — create one
|
||||
# that spans our two subnets so RDS can place the instance.
|
||||
aws rds create-db-subnet-group \
|
||||
--db-subnet-group-name paperclip-db-subnet \
|
||||
--db-subnet-group-description "Paperclip RDS subnets" \
|
||||
--subnet-ids $SUBNET_1 $SUBNET_2
|
||||
|
||||
aws rds create-db-instance \
|
||||
--db-instance-identifier paperclip-db \
|
||||
--db-instance-class db.t4g.micro \
|
||||
--engine postgres \
|
||||
--engine-version 17 \
|
||||
--master-username paperclip \
|
||||
--master-user-password "$DB_PASSWORD" \
|
||||
--allocated-storage 20 \
|
||||
--storage-type gp3 \
|
||||
--vpc-security-group-ids $RDS_SG \
|
||||
--db-subnet-group-name paperclip-db-subnet \
|
||||
--no-publicly-accessible \
|
||||
--backup-retention-period 7 \
|
||||
--no-multi-az \
|
||||
--db-name paperclip \
|
||||
--region $AWS_REGION
|
||||
|
||||
# Wait for it to become available (takes 5-10 min)
|
||||
aws rds wait db-instance-available \
|
||||
--db-instance-identifier paperclip-db
|
||||
|
||||
# Get the endpoint
|
||||
RDS_ENDPOINT=$(aws rds describe-db-instances \
|
||||
--db-instance-identifier paperclip-db \
|
||||
--query 'DBInstances[0].Endpoint.Address' --output text)
|
||||
|
||||
DATABASE_URL="postgresql://paperclip:${DB_PASSWORD}@${RDS_ENDPOINT}:5432/paperclip"
|
||||
```
|
||||
|
||||
## 5. Create EFS Filesystem
|
||||
|
||||
```bash
|
||||
EFS_ID=$(aws efs create-file-system \
|
||||
--performance-mode generalPurpose \
|
||||
--throughput-mode bursting \
|
||||
--encrypted \
|
||||
--tags Key=Name,Value=paperclip-data \
|
||||
--query 'FileSystemId' --output text)
|
||||
|
||||
# Create mount targets in each subnet
|
||||
for SUBNET in $SUBNET_1 $SUBNET_2; do
|
||||
aws efs create-mount-target \
|
||||
--file-system-id $EFS_ID \
|
||||
--subnet-id $SUBNET \
|
||||
--security-groups $EFS_SG
|
||||
done
|
||||
|
||||
# Wait for mount targets
|
||||
aws efs describe-mount-targets --file-system-id $EFS_ID
|
||||
```
|
||||
|
||||
## 6. Store Secrets
|
||||
|
||||
```bash
|
||||
aws secretsmanager create-secret \
|
||||
--name paperclip/database-url \
|
||||
--secret-string "$DATABASE_URL"
|
||||
|
||||
aws secretsmanager create-secret \
|
||||
--name paperclip/anthropic-api-key \
|
||||
--secret-string "YOUR_ANTHROPIC_KEY"
|
||||
|
||||
aws secretsmanager create-secret \
|
||||
--name paperclip/better-auth-secret \
|
||||
--secret-string "$AUTH_SECRET"
|
||||
|
||||
aws secretsmanager create-secret \
|
||||
--name paperclip/openai-api-key \
|
||||
--secret-string "YOUR_OPENAI_KEY"
|
||||
|
||||
aws secretsmanager create-secret \
|
||||
--name paperclip/github-token \
|
||||
--secret-string "YOUR_GITHUB_PAT"
|
||||
```
|
||||
|
||||
## 7. IAM Roles
|
||||
|
||||
Create the ECS task execution role (pulls images, reads secrets) and the task role (application permissions).
|
||||
|
||||
```bash
|
||||
# Task execution role
|
||||
aws iam create-role \
|
||||
--role-name paperclip-ecs-execution \
|
||||
--assume-role-policy-document '{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [{
|
||||
"Effect": "Allow",
|
||||
"Principal": {"Service": "ecs-tasks.amazonaws.com"},
|
||||
"Action": "sts:AssumeRole"
|
||||
}]
|
||||
}'
|
||||
|
||||
aws iam attach-role-policy \
|
||||
--role-name paperclip-ecs-execution \
|
||||
--policy-arn arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy
|
||||
|
||||
# Allow reading secrets
|
||||
aws iam put-role-policy \
|
||||
--role-name paperclip-ecs-execution \
|
||||
--policy-name SecretsAccess \
|
||||
--policy-document '{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [{
|
||||
"Effect": "Allow",
|
||||
"Action": ["secretsmanager:GetSecretValue"],
|
||||
"Resource": "arn:aws:secretsmanager:'$AWS_REGION':'$AWS_ACCOUNT_ID':secret:paperclip/*"
|
||||
}]
|
||||
}'
|
||||
|
||||
# Task role (application — add permissions as needed)
|
||||
aws iam create-role \
|
||||
--role-name paperclip-ecs-task \
|
||||
--assume-role-policy-document '{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [{
|
||||
"Effect": "Allow",
|
||||
"Principal": {"Service": "ecs-tasks.amazonaws.com"},
|
||||
"Action": "sts:AssumeRole"
|
||||
}]
|
||||
}'
|
||||
```
|
||||
|
||||
## 8. ECS Cluster and Task Definition
|
||||
|
||||
```bash
|
||||
aws ecs create-cluster --cluster-name paperclip
|
||||
|
||||
aws logs create-log-group --log-group-name /ecs/paperclip
|
||||
```
|
||||
|
||||
Register the task definition using the template at `docker/ecs-task-definition.json`. Before registering, replace the placeholder values:
|
||||
|
||||
```bash
|
||||
sed -e "s|<ACCOUNT_ID>|$AWS_ACCOUNT_ID|g" \
|
||||
-e "s|<REGION>|$AWS_REGION|g" \
|
||||
-e "s|<EFS_ID>|$EFS_ID|g" \
|
||||
-e "s|<DOMAIN>|$PAPERCLIP_DOMAIN|g" \
|
||||
docker/ecs-task-definition.json > /tmp/paperclip-task-def.json
|
||||
|
||||
aws ecs register-task-definition \
|
||||
--cli-input-json file:///tmp/paperclip-task-def.json
|
||||
```
|
||||
|
||||
## 9. ALB and TLS Certificate
|
||||
|
||||
Request a certificate (you must validate via DNS):
|
||||
|
||||
```bash
|
||||
CERT_ARN=$(aws acm request-certificate \
|
||||
--domain-name $PAPERCLIP_DOMAIN \
|
||||
--validation-method DNS \
|
||||
--query 'CertificateArn' --output text)
|
||||
|
||||
# Get the CNAME record to add to your DNS
|
||||
aws acm describe-certificate \
|
||||
--certificate-arn $CERT_ARN \
|
||||
--query 'Certificate.DomainValidationOptions[0].ResourceRecord'
|
||||
```
|
||||
|
||||
Add the CNAME to your DNS provider, then wait for validation:
|
||||
|
||||
```bash
|
||||
aws acm wait certificate-validated --certificate-arn $CERT_ARN
|
||||
```
|
||||
|
||||
Create the ALB:
|
||||
|
||||
```bash
|
||||
ALB_ARN=$(aws elbv2 create-load-balancer \
|
||||
--name paperclip-alb \
|
||||
--subnets $SUBNET_1 $SUBNET_2 \
|
||||
--security-groups $ALB_SG \
|
||||
--scheme internet-facing \
|
||||
--type application \
|
||||
--query 'LoadBalancers[0].LoadBalancerArn' --output text)
|
||||
|
||||
ALB_DNS=$(aws elbv2 describe-load-balancers \
|
||||
--load-balancer-arns $ALB_ARN \
|
||||
--query 'LoadBalancers[0].DNSName' --output text)
|
||||
|
||||
# Target group
|
||||
TG_ARN=$(aws elbv2 create-target-group \
|
||||
--name paperclip-tg \
|
||||
--protocol HTTP \
|
||||
--port 3100 \
|
||||
--vpc-id $VPC_ID \
|
||||
--target-type ip \
|
||||
--health-check-path /api/health \
|
||||
--health-check-interval-seconds 30 \
|
||||
--healthy-threshold-count 2 \
|
||||
--unhealthy-threshold-count 3 \
|
||||
--query 'TargetGroups[0].TargetGroupArn' --output text)
|
||||
|
||||
# HTTPS listener
|
||||
LISTENER_ARN=$(aws elbv2 create-listener \
|
||||
--load-balancer-arn $ALB_ARN \
|
||||
--protocol HTTPS \
|
||||
--port 443 \
|
||||
--certificates CertificateArn=$CERT_ARN \
|
||||
--default-actions Type=forward,TargetGroupArn=$TG_ARN \
|
||||
--query 'Listeners[0].ListenerArn' --output text)
|
||||
|
||||
# HTTP listener — redirect all :80 traffic to :443
|
||||
HTTP_LISTENER_ARN=$(aws elbv2 create-listener \
|
||||
--load-balancer-arn $ALB_ARN \
|
||||
--protocol HTTP \
|
||||
--port 80 \
|
||||
--default-actions Type=redirect,RedirectConfig='{Protocol=HTTPS,Port=443,StatusCode=HTTP_301}' \
|
||||
--query 'Listeners[0].ListenerArn' --output text)
|
||||
```
|
||||
|
||||
Point your DNS to the ALB:
|
||||
- Create a CNAME or ALIAS record for `$PAPERCLIP_DOMAIN` -> `$ALB_DNS`
|
||||
|
||||
## 10. Create ECS Service
|
||||
|
||||
```bash
|
||||
aws ecs create-service \
|
||||
--cluster paperclip \
|
||||
--service-name paperclip-server \
|
||||
--task-definition paperclip-server \
|
||||
--desired-count 1 \
|
||||
--launch-type FARGATE \
|
||||
--deployment-configuration '{
|
||||
"deploymentCircuitBreaker": {"enable": true, "rollback": true},
|
||||
"maximumPercent": 200,
|
||||
"minimumHealthyPercent": 100
|
||||
}' \
|
||||
--network-configuration '{
|
||||
"awsvpcConfiguration": {
|
||||
"subnets": ["'$SUBNET_1'", "'$SUBNET_2'"],
|
||||
"securityGroups": ["'$ECS_SG'"],
|
||||
"assignPublicIp": "ENABLED"
|
||||
}
|
||||
}' \
|
||||
--load-balancers '[{
|
||||
"targetGroupArn": "'$TG_ARN'",
|
||||
"containerName": "paperclip-server",
|
||||
"containerPort": 3100
|
||||
}]'
|
||||
```
|
||||
|
||||
> **Note:** `assignPublicIp: ENABLED` is needed if using public subnets without a NAT Gateway. For private subnets, set to `DISABLED` and ensure a NAT Gateway is configured for outbound internet access.
|
||||
|
||||
## 11. Verify Deployment
|
||||
|
||||
```bash
|
||||
# Watch task come up
|
||||
aws ecs describe-services \
|
||||
--cluster paperclip \
|
||||
--services paperclip-server \
|
||||
--query 'services[0].{desired:desiredCount,running:runningCount,status:status}'
|
||||
|
||||
# Check task health
|
||||
aws ecs list-tasks --cluster paperclip --service-name paperclip-server
|
||||
TASK_ARN=$(aws ecs list-tasks --cluster paperclip --service-name paperclip-server --query 'taskArns[0]' --output text)
|
||||
aws ecs describe-tasks --cluster paperclip --tasks $TASK_ARN \
|
||||
--query 'tasks[0].{status:lastStatus,health:healthStatus}'
|
||||
|
||||
# Check logs
|
||||
aws logs tail /ecs/paperclip --since 10m --follow
|
||||
|
||||
# Hit the health endpoint
|
||||
curl -sf https://$PAPERCLIP_DOMAIN/api/health
|
||||
```
|
||||
|
||||
**Healthy indicators:**
|
||||
- ECS task status: `RUNNING`, health: `HEALTHY`
|
||||
- Logs show `plugin job coordinator started` and `plugin-loader: loadAll complete`
|
||||
- `/api/health` returns 200
|
||||
|
||||
## Post-Deploy Security Hardening
|
||||
|
||||
After the first user has signed up (which grants admin role), lock down the instance:
|
||||
|
||||
```bash
|
||||
# Disable public sign-up (prevents unauthorized users from creating accounts)
|
||||
# Add to the task definition environment section, then redeploy:
|
||||
# { "name": "PAPERCLIP_AUTH_DISABLE_SIGN_UP", "value": "true" }
|
||||
|
||||
# Or update via Secrets Manager / task def override, then force new deployment
|
||||
aws ecs update-service \
|
||||
--cluster paperclip \
|
||||
--service paperclip-server \
|
||||
--force-new-deployment
|
||||
```
|
||||
|
||||
Use the invite flow (added in v2026.416.0) to grant access to additional users after sign-up is disabled.
|
||||
|
||||
## Deploying Updates
|
||||
|
||||
Build, push, and force a new deployment:
|
||||
|
||||
```bash
|
||||
# Build and push new image
|
||||
docker build -t paperclip-server .
|
||||
docker tag paperclip-server:latest \
|
||||
$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/paperclip-server:latest
|
||||
docker push \
|
||||
$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/paperclip-server:latest
|
||||
|
||||
# Roll out
|
||||
aws ecs update-service \
|
||||
--cluster paperclip \
|
||||
--service paperclip-server \
|
||||
--force-new-deployment
|
||||
|
||||
# Watch the deployment
|
||||
aws ecs describe-services \
|
||||
--cluster paperclip \
|
||||
--services paperclip-server \
|
||||
--query 'services[0].deployments[*].{status:status,running:runningCount,desired:desiredCount,rollout:rolloutState}'
|
||||
```
|
||||
|
||||
ECS performs a rolling update: starts a new task, waits for it to pass health checks, then drains the old task.
|
||||
|
||||
## Rollback
|
||||
|
||||
If the new deployment is unhealthy:
|
||||
|
||||
```bash
|
||||
# ECS automatically rolls back if the new task fails health checks
|
||||
# (circuit breaker is enabled in the service configuration above).
|
||||
# To force rollback manually:
|
||||
|
||||
# 1. Find the previous task definition revision
|
||||
aws ecs list-task-definitions \
|
||||
--family-prefix paperclip-server \
|
||||
--sort DESC \
|
||||
--query 'taskDefinitionArns[0:3]'
|
||||
|
||||
# 2. Update service to the previous revision
|
||||
aws ecs update-service \
|
||||
--cluster paperclip \
|
||||
--service paperclip-server \
|
||||
--task-definition paperclip-server:<PREVIOUS_REVISION>
|
||||
```
|
||||
|
||||
## Scaling to Zero (Cost Savings)
|
||||
|
||||
Scale down when not in use:
|
||||
|
||||
```bash
|
||||
# Stop
|
||||
aws ecs update-service \
|
||||
--cluster paperclip \
|
||||
--service paperclip-server \
|
||||
--desired-count 0
|
||||
|
||||
# Start
|
||||
aws ecs update-service \
|
||||
--cluster paperclip \
|
||||
--service paperclip-server \
|
||||
--desired-count 1
|
||||
```
|
||||
|
||||
RDS can also be stopped (auto-restarts after 7 days):
|
||||
|
||||
```bash
|
||||
aws rds stop-db-instance --db-instance-identifier paperclip-db
|
||||
aws rds start-db-instance --db-instance-identifier paperclip-db
|
||||
```
|
||||
|
||||
## Teardown
|
||||
|
||||
Remove all resources in reverse order:
|
||||
|
||||
```bash
|
||||
# 1. ECS service and cluster
|
||||
aws ecs update-service --cluster paperclip --service paperclip-server --desired-count 0
|
||||
aws ecs delete-service --cluster paperclip --service paperclip-server --force
|
||||
aws ecs delete-cluster --cluster paperclip
|
||||
|
||||
# 2. ALB and ACM cert
|
||||
aws elbv2 delete-listener --listener-arn $HTTP_LISTENER_ARN
|
||||
aws elbv2 delete-listener --listener-arn $LISTENER_ARN
|
||||
aws elbv2 delete-target-group --target-group-arn $TG_ARN
|
||||
aws elbv2 delete-load-balancer --load-balancer-arn $ALB_ARN
|
||||
aws acm delete-certificate --certificate-arn $CERT_ARN
|
||||
|
||||
# 3. RDS (creates final snapshot)
|
||||
aws rds delete-db-instance \
|
||||
--db-instance-identifier paperclip-db \
|
||||
--final-db-snapshot-identifier paperclip-db-final
|
||||
aws rds wait db-instance-deleted --db-instance-identifier paperclip-db
|
||||
aws rds delete-db-subnet-group --db-subnet-group-name paperclip-db-subnet
|
||||
|
||||
# 4. EFS (mount targets must be deleted first)
|
||||
for MT in $(aws efs describe-mount-targets --file-system-id $EFS_ID --query 'MountTargets[*].MountTargetId' --output text); do
|
||||
aws efs delete-mount-target --mount-target-id $MT
|
||||
done
|
||||
# Mount-target deletion is async; poll until none remain before deleting
|
||||
# the filesystem, otherwise delete-file-system fails with FileSystemInUse.
|
||||
echo "Waiting for mount targets to delete..."
|
||||
while aws efs describe-mount-targets \
|
||||
--file-system-id $EFS_ID \
|
||||
--query 'MountTargets[0].MountTargetId' --output text 2>/dev/null | grep -q 'fsmt-'; do
|
||||
sleep 5
|
||||
done
|
||||
aws efs delete-file-system --file-system-id $EFS_ID
|
||||
|
||||
# 5. Secrets
|
||||
for s in database-url anthropic-api-key better-auth-secret openai-api-key github-token; do
|
||||
aws secretsmanager delete-secret --secret-id paperclip/$s --force-delete-without-recovery
|
||||
done
|
||||
|
||||
# 6. Security groups (after all dependents are gone)
|
||||
for sg in $EFS_SG $RDS_SG $ECS_SG $ALB_SG; do
|
||||
aws ec2 delete-security-group --group-id $sg
|
||||
done
|
||||
|
||||
# 7. ECR
|
||||
aws ecr delete-repository --repository-name paperclip-server --force
|
||||
|
||||
# 8. IAM roles
|
||||
aws iam delete-role-policy --role-name paperclip-ecs-execution --policy-name SecretsAccess
|
||||
aws iam detach-role-policy --role-name paperclip-ecs-execution \
|
||||
--policy-arn arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy
|
||||
aws iam delete-role --role-name paperclip-ecs-execution
|
||||
aws iam delete-role --role-name paperclip-ecs-task
|
||||
|
||||
# 9. Log group
|
||||
aws logs delete-log-group --log-group-name /ecs/paperclip
|
||||
```
|
||||
|
||||
## Cost Reference
|
||||
|
||||
| Service | Config | Monthly |
|
||||
|---------|--------|---------|
|
||||
| ECS Fargate | 2 vCPU, 4 GB, 24/7 | ~$70 |
|
||||
| RDS Postgres | db.t4g.micro, 20 GB | ~$15 |
|
||||
| ALB | 1 LCU average | ~$22 |
|
||||
| NAT Gateway | 1 AZ (if using private subnets) | ~$35 |
|
||||
| EFS | 1 GB Standard | ~$0.30 |
|
||||
| Secrets Manager | 5 secrets | ~$2 |
|
||||
| CloudWatch Logs | ~1 GB/mo | ~$0.50 |
|
||||
| ECR | ~1 GB | ~$0.10 |
|
||||
| **Total (public subnets, no NAT)** | | **~$110/mo** |
|
||||
| **Total (private subnets + NAT)** | | **~$145/mo** |
|
||||
|
||||
Use Fargate Spot and scheduled scaling to 0 during off-hours to reduce to ~$60-85/mo.
|
||||
@@ -40,7 +40,7 @@ Paperclip supports three deployment configurations, from zero-friction local to
|
||||
|
||||
- **Just trying Paperclip?** Use `local_trusted` (the default)
|
||||
- **Sharing with a team on private network?** Use `authenticated` + `private`
|
||||
- **Deploying to the cloud?** Use `authenticated` + `public`
|
||||
- **Deploying to the cloud?** Use `authenticated` + `public` — see [AWS ECS Fargate guide](aws-ecs.md)
|
||||
|
||||
Set the mode during onboarding:
|
||||
|
||||
|
||||
@@ -47,7 +47,7 @@ You do **not** need to tell the CEO to engage specific agents. After you approve
|
||||
- **Breaks goals into concrete tasks** with clear descriptions, priorities, and acceptance criteria
|
||||
- **Assigns tasks to the right agent** based on role and capabilities (e.g., engineering tasks go to the CTO or engineers, marketing tasks go to the CMO)
|
||||
- **Creates subtasks** when work needs to be decomposed further
|
||||
- **Hires new agents** when the team lacks capacity for a goal (subject to your approval)
|
||||
- **Hires new agents** when the team lacks capacity for a goal, with hire approvals available when enabled in company settings
|
||||
- **Monitors progress** on each heartbeat, checking task status and unblocking reports
|
||||
- **Escalates to you** when it encounters something it can't resolve — budget issues, blocked approvals, or strategic ambiguity
|
||||
|
||||
|
||||
BIN docs/pr-screenshots/pr-4616/sidebar-agent-actions.png (new binary file, 76 KiB, not shown)
BIN docs/pr-screenshots/pr-4616/sidebar-agent-row.png (new binary file, 74 KiB, not shown)
@@ -57,9 +57,9 @@ The CEO is the primary delegator. When you set company goals, the CEO:
|
||||
1. Creates a strategy and submits it for your approval
|
||||
2. Breaks approved goals into tasks
|
||||
3. Assigns tasks to agents based on their role and capabilities
|
||||
4. Hires new agents when needed (subject to your approval)
|
||||
4. Hires new agents when needed, with hire approvals available when you enable them
|
||||
|
||||
You don't need to manually assign every task — set the goals and let the CEO organize the work. You approve key decisions (strategy, hiring) and monitor progress. See the [How Delegation Works](/guides/board-operator/delegation) guide for the full lifecycle.
|
||||
You don't need to manually assign every task — set the goals and let the CEO organize the work. You approve key decisions such as strategy, enable hire approvals when you want a gate, and monitor progress. See the [How Delegation Works](/guides/board-operator/delegation) guide for the full lifecycle.
|
||||
|
||||
## Heartbeats
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"typecheck": "pnpm run preflight:workspace-links && pnpm -r typecheck",
|
||||
"test": "pnpm run test:run",
|
||||
"test:watch": "pnpm run preflight:workspace-links && vitest",
|
||||
"test:run": "pnpm run preflight:workspace-links && vitest run",
|
||||
"test:run": "pnpm run preflight:workspace-links && node scripts/run-vitest-stable.mjs",
|
||||
"db:generate": "pnpm --filter @paperclipai/db generate",
|
||||
"db:migrate": "pnpm --filter @paperclipai/db migrate",
|
||||
"issue-references:backfill": "pnpm run preflight:workspace-links && tsx scripts/backfill-issue-reference-mentions.ts",
|
||||
@@ -35,13 +35,16 @@
|
||||
"smoke:openclaw-join": "./scripts/smoke/openclaw-join.sh",
|
||||
"smoke:openclaw-docker-ui": "./scripts/smoke/openclaw-docker-ui.sh",
|
||||
"smoke:openclaw-sse-standalone": "./scripts/smoke/openclaw-sse-standalone.sh",
|
||||
"smoke:terminal-bench-loop-skill": "node scripts/smoke/terminal-bench-loop-skill-smoke.mjs",
|
||||
"test:release-registry": "node --test scripts/verify-release-registry-state.test.mjs",
|
||||
"test:e2e": "npx playwright test --config tests/e2e/playwright.config.ts",
|
||||
"test:e2e:headed": "npx playwright test --config tests/e2e/playwright.config.ts --headed",
|
||||
"test:e2e:multiuser-authenticated": "npx playwright test --config tests/e2e/playwright-multiuser-authenticated.config.ts",
|
||||
"evals:smoke": "cd evals/promptfoo && npx promptfoo@0.103.3 eval",
|
||||
"test:release-smoke": "npx playwright test --config tests/release-smoke/playwright.config.ts",
|
||||
"test:release-smoke:headed": "npx playwright test --config tests/release-smoke/playwright.config.ts --headed",
|
||||
"metrics:paperclip-commits": "tsx scripts/paperclip-commit-metrics.ts"
|
||||
"metrics:paperclip-commits": "tsx scripts/paperclip-commit-metrics.ts",
|
||||
"perf:issue-chat-long-thread": "node scripts/measure-issue-chat-long-thread.mjs"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@playwright/test": "^1.58.2",
|
||||
|
||||
128
packages/adapter-utils/src/command-managed-runtime.test.ts
Normal file
@@ -0,0 +1,128 @@
|
||||
import { mkdir, mkdtemp, readFile, rm, writeFile } from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { execFile as execFileCallback } from "node:child_process";
|
||||
import { promisify } from "node:util";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { prepareCommandManagedRuntime } from "./command-managed-runtime.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
|
||||
const execFile = promisify(execFileCallback);
|
||||
|
||||
describe("command managed runtime", () => {
|
||||
const cleanupDirs: string[] = [];
|
||||
|
||||
afterEach(async () => {
|
||||
while (cleanupDirs.length > 0) {
|
||||
const dir = cleanupDirs.pop();
|
||||
if (!dir) continue;
|
||||
await rm(dir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
});
|
||||
|
||||
it("keeps the runtime overlay out of sandbox workspace sync by default", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-command-runtime-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const localWorkspaceDir = path.join(rootDir, "local-workspace");
|
||||
const remoteWorkspaceDir = path.join(rootDir, "remote-workspace");
|
||||
await mkdir(path.join(localWorkspaceDir, ".paperclip-runtime"), { recursive: true });
|
||||
await mkdir(remoteWorkspaceDir, { recursive: true });
|
||||
await writeFile(path.join(localWorkspaceDir, "README.md"), "local workspace\n", "utf8");
|
||||
await writeFile(path.join(localWorkspaceDir, ".paperclip-runtime", "state.json"), "{\"keep\":true}\n", "utf8");
|
||||
|
||||
const calls: Array<{
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
}> = [];
|
||||
const runner = {
|
||||
execute: async (input: {
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
}): Promise<RunProcessResult> => {
|
||||
calls.push({ ...input });
|
||||
const startedAt = new Date().toISOString();
|
||||
const env = {
|
||||
...process.env,
|
||||
...input.env,
|
||||
};
|
||||
const command = input.command === "sh" ? "/bin/sh" : input.command;
|
||||
const args = [...(input.args ?? [])];
|
||||
if (input.stdin != null && input.command === "sh" && args[0] === "-lc" && typeof args[1] === "string") {
|
||||
env.PAPERCLIP_TEST_STDIN = input.stdin;
|
||||
args[1] = `printf '%s' \"$PAPERCLIP_TEST_STDIN\" | (${args[1]})`;
|
||||
}
|
||||
try {
|
||||
const result = await execFile(command, args, {
|
||||
cwd: input.cwd,
|
||||
env,
|
||||
maxBuffer: 32 * 1024 * 1024,
|
||||
timeout: input.timeoutMs,
|
||||
});
|
||||
return {
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: result.stdout,
|
||||
stderr: result.stderr,
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
} catch (error) {
|
||||
const err = error as NodeJS.ErrnoException & {
|
||||
stdout?: string;
|
||||
stderr?: string;
|
||||
code?: string | number | null;
|
||||
signal?: NodeJS.Signals | null;
|
||||
killed?: boolean;
|
||||
};
|
||||
return {
|
||||
exitCode: typeof err.code === "number" ? err.code : null,
|
||||
signal: err.signal ?? null,
|
||||
timedOut: Boolean(err.killed && input.timeoutMs),
|
||||
stdout: err.stdout ?? "",
|
||||
stderr: err.stderr ?? "",
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
const prepared = await prepareCommandManagedRuntime({
|
||||
runner,
|
||||
spec: {
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
adapterKey: "claude",
|
||||
workspaceLocalDir: localWorkspaceDir,
|
||||
});
|
||||
|
||||
await expect(readFile(path.join(remoteWorkspaceDir, "README.md"), "utf8")).resolves.toBe("local workspace\n");
|
||||
await expect(readFile(path.join(remoteWorkspaceDir, ".paperclip-runtime", "state.json"), "utf8")).rejects
|
||||
.toMatchObject({ code: "ENOENT" });
|
||||
expect(calls.every((call) => call.stdin == null)).toBe(true);
|
||||
|
||||
await mkdir(path.join(remoteWorkspaceDir, ".paperclip-runtime"), { recursive: true });
|
||||
await writeFile(path.join(remoteWorkspaceDir, "README.md"), "remote workspace\n", "utf8");
|
||||
await writeFile(path.join(remoteWorkspaceDir, ".paperclip-runtime", "remote-state.json"), "{\"remote\":true}\n", "utf8");
|
||||
await prepared.restoreWorkspace();
|
||||
|
||||
await expect(readFile(path.join(localWorkspaceDir, "README.md"), "utf8")).resolves.toBe("remote workspace\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, ".paperclip-runtime", "state.json"), "utf8")).resolves
|
||||
.toBe("{\"keep\":true}\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, ".paperclip-runtime", "remote-state.json"), "utf8")).rejects
|
||||
.toMatchObject({ code: "ENOENT" });
|
||||
expect(calls.every((call) => call.stdin == null)).toBe(true);
|
||||
});
|
||||
});
|
||||
182
packages/adapter-utils/src/command-managed-runtime.ts
Normal file
@@ -0,0 +1,182 @@
|
||||
import path from "node:path";
|
||||
import {
|
||||
prepareSandboxManagedRuntime,
|
||||
type PreparedSandboxManagedRuntime,
|
||||
type SandboxManagedRuntimeAsset,
|
||||
type SandboxManagedRuntimeClient,
|
||||
type SandboxRemoteExecutionSpec,
|
||||
} from "./sandbox-managed-runtime.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
|
||||
export interface CommandManagedRuntimeRunner {
|
||||
execute(input: {
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
onSpawn?: (meta: { pid: number; startedAt: string }) => Promise<void>;
|
||||
}): Promise<RunProcessResult>;
|
||||
}
|
||||
|
||||
export interface CommandManagedRuntimeSpec {
|
||||
providerKey?: string | null;
|
||||
leaseId?: string | null;
|
||||
remoteCwd: string;
|
||||
timeoutMs?: number | null;
|
||||
paperclipApiUrl?: string | null;
|
||||
}
|
||||
|
||||
export type CommandManagedRuntimeAsset = SandboxManagedRuntimeAsset;
|
||||
|
||||
function shellQuote(value: string) {
|
||||
return `'${value.replace(/'/g, `'"'"'`)}'`;
|
||||
}
|
||||
|
||||
function mergeRuntimeExcludes(entries: string[] | undefined): string[] {
|
||||
return [...new Set([".paperclip-runtime", ...(entries ?? [])])];
|
||||
}
|
||||
|
||||
const REMOTE_WRITE_BASE64_CHUNK_SIZE = 32 * 1024;
|
||||
|
||||
function toBuffer(bytes: Buffer | Uint8Array | ArrayBuffer): Buffer {
|
||||
if (Buffer.isBuffer(bytes)) return bytes;
|
||||
if (bytes instanceof ArrayBuffer) return Buffer.from(bytes);
|
||||
return Buffer.from(bytes.buffer, bytes.byteOffset, bytes.byteLength);
|
||||
}
|
||||
|
||||
function requireSuccessfulResult(result: RunProcessResult, action: string): void {
|
||||
if (result.exitCode === 0 && !result.timedOut) return;
|
||||
const stderr = result.stderr.trim();
|
||||
const detail = stderr.length > 0 ? `: ${stderr}` : "";
|
||||
throw new Error(`${action} failed with exit code ${result.exitCode ?? "null"}${detail}`);
|
||||
}
|
||||
|
||||
export function createCommandManagedRuntimeClient(input: {
|
||||
runner: CommandManagedRuntimeRunner;
|
||||
remoteCwd: string;
|
||||
timeoutMs: number;
|
||||
}): SandboxManagedRuntimeClient {
|
||||
const runShell = async (script: string, opts: { stdin?: string; timeoutMs?: number } = {}) => {
|
||||
const result = await input.runner.execute({
|
||||
command: "sh",
|
||||
args: ["-lc", script],
|
||||
cwd: input.remoteCwd,
|
||||
stdin: opts.stdin,
|
||||
timeoutMs: opts.timeoutMs ?? input.timeoutMs,
|
||||
});
|
||||
requireSuccessfulResult(result, script);
|
||||
return result;
|
||||
};
|
||||
|
||||
return {
|
||||
makeDir: async (remotePath) => {
|
||||
await runShell(`mkdir -p ${shellQuote(remotePath)}`);
|
||||
},
|
||||
writeFile: async (remotePath, bytes) => {
|
||||
const body = toBuffer(bytes).toString("base64");
|
||||
const remoteDir = path.posix.dirname(remotePath);
|
||||
const remoteTempPath = `${remotePath}.paperclip-upload.b64`;
|
||||
|
||||
await runShell(
|
||||
`mkdir -p ${shellQuote(remoteDir)} && rm -f ${shellQuote(remoteTempPath)} && : > ${shellQuote(remoteTempPath)}`,
|
||||
);
|
||||
for (let offset = 0; offset < body.length; offset += REMOTE_WRITE_BASE64_CHUNK_SIZE) {
|
||||
const chunk = body.slice(offset, offset + REMOTE_WRITE_BASE64_CHUNK_SIZE);
|
||||
await runShell(`printf '%s' ${shellQuote(chunk)} >> ${shellQuote(remoteTempPath)}`);
|
||||
}
|
||||
await runShell(
|
||||
`base64 -d < ${shellQuote(remoteTempPath)} > ${shellQuote(remotePath)} && rm -f ${shellQuote(remoteTempPath)}`,
|
||||
);
|
||||
},
|
||||
readFile: async (remotePath) => {
|
||||
const result = await runShell(`base64 < ${shellQuote(remotePath)}`);
|
||||
return Buffer.from(result.stdout.replace(/\s+/g, ""), "base64");
|
||||
},
|
||||
listFiles: async (remotePath) => {
|
||||
const result = await runShell(
|
||||
`if [ -d ${shellQuote(remotePath)} ]; then ` +
|
||||
`for entry in ${shellQuote(remotePath)}/*; do ` +
|
||||
`[ -f "$entry" ] || continue; ` +
|
||||
`basename "$entry"; ` +
|
||||
`done; ` +
|
||||
`fi`,
|
||||
);
|
||||
return result.stdout
|
||||
.split(/\r?\n/)
|
||||
.map((entry) => entry.trim())
|
||||
.filter((entry) => entry.length > 0)
|
||||
.sort((left, right) => left.localeCompare(right));
|
||||
},
|
||||
remove: async (remotePath) => {
|
||||
const result = await input.runner.execute({
|
||||
command: "sh",
|
||||
args: ["-lc", `rm -rf ${shellQuote(remotePath)}`],
|
||||
cwd: input.remoteCwd,
|
||||
timeoutMs: input.timeoutMs,
|
||||
});
|
||||
requireSuccessfulResult(result, `remove ${remotePath}`);
|
||||
},
|
||||
run: async (command, options) => {
|
||||
const result = await input.runner.execute({
|
||||
command: "sh",
|
||||
args: ["-lc", command],
|
||||
cwd: input.remoteCwd,
|
||||
timeoutMs: options.timeoutMs,
|
||||
});
|
||||
requireSuccessfulResult(result, command);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export async function prepareCommandManagedRuntime(input: {
|
||||
runner: CommandManagedRuntimeRunner;
|
||||
spec: CommandManagedRuntimeSpec;
|
||||
adapterKey: string;
|
||||
workspaceLocalDir: string;
|
||||
workspaceRemoteDir?: string;
|
||||
workspaceExclude?: string[];
|
||||
preserveAbsentOnRestore?: string[];
|
||||
assets?: CommandManagedRuntimeAsset[];
|
||||
installCommand?: string | null;
|
||||
}): Promise<PreparedSandboxManagedRuntime> {
|
||||
const timeoutMs = input.spec.timeoutMs && input.spec.timeoutMs > 0 ? input.spec.timeoutMs : 300_000;
|
||||
const workspaceRemoteDir = input.workspaceRemoteDir ?? input.spec.remoteCwd;
|
||||
const runtimeSpec: SandboxRemoteExecutionSpec = {
|
||||
transport: "sandbox",
|
||||
provider: input.spec.providerKey ?? "sandbox",
|
||||
sandboxId: input.spec.leaseId ?? "managed",
|
||||
remoteCwd: workspaceRemoteDir,
|
||||
timeoutMs,
|
||||
apiKey: null,
|
||||
paperclipApiUrl: input.spec.paperclipApiUrl ?? null,
|
||||
};
|
||||
const client = createCommandManagedRuntimeClient({
|
||||
runner: input.runner,
|
||||
remoteCwd: workspaceRemoteDir,
|
||||
timeoutMs,
|
||||
});
|
||||
|
||||
if (input.installCommand?.trim()) {
|
||||
const result = await input.runner.execute({
|
||||
command: "sh",
|
||||
args: ["-lc", input.installCommand.trim()],
|
||||
cwd: workspaceRemoteDir,
|
||||
timeoutMs,
|
||||
});
|
||||
requireSuccessfulResult(result, input.installCommand.trim());
|
||||
}
|
||||
|
||||
return await prepareSandboxManagedRuntime({
|
||||
spec: runtimeSpec,
|
||||
client,
|
||||
adapterKey: input.adapterKey,
|
||||
workspaceLocalDir: input.workspaceLocalDir,
|
||||
workspaceRemoteDir,
|
||||
workspaceExclude: mergeRuntimeExcludes(input.workspaceExclude),
|
||||
preserveAbsentOnRestore: input.preserveAbsentOnRestore,
|
||||
assets: input.assets,
|
||||
});
|
||||
}
|
||||
292
packages/adapter-utils/src/execution-target-sandbox.test.ts
Normal file
@@ -0,0 +1,292 @@
|
||||
import { createServer } from "node:http";
|
||||
import { mkdir, mkdtemp, rm } from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
import {
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetToRemoteSpec,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
type AdapterSandboxExecutionTarget,
|
||||
} from "./execution-target.js";
|
||||
import { runChildProcess } from "./server-utils.js";
|
||||
|
||||
describe("sandbox adapter execution targets", () => {
|
||||
const cleanupDirs: string[] = [];
|
||||
|
||||
afterEach(async () => {
|
||||
while (cleanupDirs.length > 0) {
|
||||
const dir = cleanupDirs.pop();
|
||||
if (!dir) continue;
|
||||
await rm(dir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
});
|
||||
|
||||
function createLocalSandboxRunner() {
|
||||
let counter = 0;
|
||||
return {
|
||||
execute: async (input: {
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
onSpawn?: (meta: { pid: number; startedAt: string }) => Promise<void>;
|
||||
}) => {
|
||||
counter += 1;
|
||||
return runChildProcess(`sandbox-run-${counter}`, input.command, input.args ?? [], {
|
||||
cwd: input.cwd ?? process.cwd(),
|
||||
env: input.env ?? {},
|
||||
stdin: input.stdin,
|
||||
timeoutSec: Math.max(1, Math.ceil((input.timeoutMs ?? 30_000) / 1000)),
|
||||
graceSec: 5,
|
||||
onLog: input.onLog ?? (async () => {}),
|
||||
onSpawn: input.onSpawn
|
||||
? async (meta) => input.onSpawn?.({ pid: meta.pid, startedAt: meta.startedAt })
|
||||
: undefined,
|
||||
});
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
it("executes through the provider-neutral runner without a remote spec", async () => {
|
||||
const runner = {
|
||||
execute: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "ok\n",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
})),
|
||||
};
|
||||
const target: AdapterSandboxExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
providerKey: "acme-sandbox",
|
||||
environmentId: "env-1",
|
||||
leaseId: "lease-1",
|
||||
remoteCwd: "/workspace",
|
||||
timeoutMs: 30_000,
|
||||
runner,
|
||||
};
|
||||
|
||||
expect(adapterExecutionTargetToRemoteSpec(target)).toBeNull();
|
||||
|
||||
const result = await runAdapterExecutionTargetProcess("run-1", target, "agent-cli", ["--json"], {
|
||||
cwd: "/local/workspace",
|
||||
env: { TOKEN: "token" },
|
||||
stdin: "prompt",
|
||||
timeoutSec: 5,
|
||||
graceSec: 1,
|
||||
onLog: async () => {},
|
||||
});
|
||||
|
||||
expect(result.stdout).toBe("ok\n");
|
||||
expect(runner.execute).toHaveBeenCalledWith(expect.objectContaining({
|
||||
command: "agent-cli",
|
||||
args: ["--json"],
|
||||
cwd: "/workspace",
|
||||
env: { TOKEN: "token" },
|
||||
stdin: "prompt",
|
||||
timeoutMs: 5000,
|
||||
}));
|
||||
expect(adapterExecutionTargetSessionIdentity(target)).toEqual({
|
||||
transport: "sandbox",
|
||||
providerKey: "acme-sandbox",
|
||||
environmentId: "env-1",
|
||||
leaseId: "lease-1",
|
||||
remoteCwd: "/workspace",
|
||||
paperclipTransport: "bridge",
|
||||
});
|
||||
});
|
||||
|
||||
it("runs shell commands through the same runner", async () => {
|
||||
const runner = {
|
||||
execute: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "/home/sandbox",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
})),
|
||||
};
|
||||
const target: AdapterSandboxExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
remoteCwd: "/workspace",
|
||||
runner,
|
||||
};
|
||||
|
||||
await runAdapterExecutionTargetShellCommand("run-2", target, 'printf %s "$HOME"', {
|
||||
cwd: "/local/workspace",
|
||||
env: {},
|
||||
timeoutSec: 7,
|
||||
});
|
||||
|
||||
expect(runner.execute).toHaveBeenCalledWith(expect.objectContaining({
|
||||
command: "sh",
|
||||
args: ["-lc", 'printf %s "$HOME"'],
|
||||
cwd: "/workspace",
|
||||
timeoutMs: 7000,
|
||||
}));
|
||||
});
|
||||
|
||||
it("starts a localhost Paperclip bridge for sandbox targets in bridge mode", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-execution-target-bridge-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const remoteCwd = path.join(rootDir, "workspace");
|
||||
const runtimeRootDir = path.join(remoteCwd, ".paperclip-runtime", "codex");
|
||||
await mkdir(runtimeRootDir, { recursive: true });
|
||||
|
||||
const requests: Array<{ method: string; url: string; auth: string | null; runId: string | null }> = [];
|
||||
const apiServer = createServer((req, res) => {
|
||||
requests.push({
|
||||
method: req.method ?? "GET",
|
||||
url: req.url ?? "/",
|
||||
auth: req.headers.authorization ?? null,
|
||||
runId: typeof req.headers["x-paperclip-run-id"] === "string" ? req.headers["x-paperclip-run-id"] : null,
|
||||
});
|
||||
res.writeHead(200, { "content-type": "application/json" });
|
||||
res.end(JSON.stringify({ ok: true }));
|
||||
});
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
apiServer.once("error", reject);
|
||||
apiServer.listen(0, "127.0.0.1", () => resolve());
|
||||
});
|
||||
const address = apiServer.address();
|
||||
if (!address || typeof address === "string") {
|
||||
throw new Error("Expected the bridge test API server to listen on a TCP port.");
|
||||
}
|
||||
|
||||
const target: AdapterSandboxExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
providerKey: "e2b",
|
||||
environmentId: "env-1",
|
||||
leaseId: "lease-1",
|
||||
remoteCwd,
|
||||
paperclipTransport: "bridge",
|
||||
runner: createLocalSandboxRunner(),
|
||||
timeoutMs: 30_000,
|
||||
};
|
||||
|
||||
const bridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId: "run-bridge",
|
||||
target,
|
||||
runtimeRootDir,
|
||||
adapterKey: "codex",
|
||||
hostApiToken: "real-run-jwt",
|
||||
hostApiUrl: `http://127.0.0.1:${address.port}`,
|
||||
});
|
||||
try {
|
||||
expect(bridge).not.toBeNull();
|
||||
expect(bridge?.env.PAPERCLIP_API_URL).toMatch(/^http:\/\/127\.0\.0\.1:\d+$/);
|
||||
expect(bridge?.env.PAPERCLIP_API_KEY).not.toBe("real-run-jwt");
|
||||
expect(bridge?.env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
|
||||
const response = await fetch(`${bridge!.env.PAPERCLIP_API_URL}/api/agents/me`, {
|
||||
headers: {
|
||||
authorization: `Bearer ${bridge!.env.PAPERCLIP_API_KEY}`,
|
||||
accept: "application/json",
|
||||
},
|
||||
});
|
||||
|
||||
expect(response.status).toBe(200);
|
||||
expect(await response.json()).toEqual({ ok: true });
|
||||
expect(requests).toEqual([{
|
||||
method: "GET",
|
||||
url: "/api/agents/me",
|
||||
auth: "Bearer real-run-jwt",
|
||||
runId: "run-bridge",
|
||||
}]);
|
||||
} finally {
|
||||
await bridge?.stop();
|
||||
await new Promise<void>((resolve) => apiServer.close(() => resolve()));
|
||||
}
|
||||
});
|
||||
|
||||
it("fails oversized host responses with a 502 before returning them to the sandbox client", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-execution-target-bridge-limit-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const remoteCwd = path.join(rootDir, "workspace");
|
||||
const runtimeRootDir = path.join(remoteCwd, ".paperclip-runtime", "codex");
|
||||
await mkdir(runtimeRootDir, { recursive: true });
|
||||
|
||||
const requests: Array<{ method: string; url: string; auth: string | null; runId: string | null }> = [];
|
||||
const largeBody = "x".repeat(64);
|
||||
const apiServer = createServer((req, res) => {
|
||||
requests.push({
|
||||
method: req.method ?? "GET",
|
||||
url: req.url ?? "/",
|
||||
auth: req.headers.authorization ?? null,
|
||||
runId: typeof req.headers["x-paperclip-run-id"] === "string" ? req.headers["x-paperclip-run-id"] : null,
|
||||
});
|
||||
res.writeHead(200, {
|
||||
"content-type": "application/json",
|
||||
"content-length": String(Buffer.byteLength(largeBody, "utf8")),
|
||||
});
|
||||
res.end(largeBody);
|
||||
});
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
apiServer.once("error", reject);
|
||||
apiServer.listen(0, "127.0.0.1", () => resolve());
|
||||
});
|
||||
const address = apiServer.address();
|
||||
if (!address || typeof address === "string") {
|
||||
throw new Error("Expected the bridge test API server to listen on a TCP port.");
|
||||
}
|
||||
|
||||
const target: AdapterSandboxExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
providerKey: "e2b",
|
||||
environmentId: "env-1",
|
||||
leaseId: "lease-1",
|
||||
remoteCwd,
|
||||
paperclipTransport: "bridge",
|
||||
runner: createLocalSandboxRunner(),
|
||||
timeoutMs: 30_000,
|
||||
};
|
||||
|
||||
const bridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId: "run-bridge-limit",
|
||||
target,
|
||||
runtimeRootDir,
|
||||
adapterKey: "codex",
|
||||
hostApiToken: "real-run-jwt",
|
||||
hostApiUrl: `http://127.0.0.1:${address.port}`,
|
||||
maxBodyBytes: 32,
|
||||
});
|
||||
try {
|
||||
const response = await fetch(`${bridge!.env.PAPERCLIP_API_URL}/api/agents/me`, {
|
||||
headers: {
|
||||
authorization: `Bearer ${bridge!.env.PAPERCLIP_API_KEY}`,
|
||||
accept: "application/json",
|
||||
},
|
||||
});
|
||||
|
||||
expect(response.status).toBe(502);
|
||||
await expect(response.json()).resolves.toEqual({
|
||||
error: "Bridge response body exceeded the configured size limit of 32 bytes.",
|
||||
});
|
||||
expect(requests).toEqual([{
|
||||
method: "GET",
|
||||
url: "/api/agents/me",
|
||||
auth: "Bearer real-run-jwt",
|
||||
runId: "run-bridge-limit",
|
||||
}]);
|
||||
} finally {
|
||||
await bridge?.stop();
|
||||
await new Promise<void>((resolve) => apiServer.close(() => resolve()));
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -2,6 +2,7 @@ import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import * as ssh from "./ssh.js";
|
||||
import {
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
} from "./execution-target.js";
|
||||
|
||||
@@ -159,3 +160,49 @@ describe("runAdapterExecutionTargetShellCommand", () => {
|
||||
})).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveAdapterExecutionTargetCwd", () => {
|
||||
const sshTarget = {
|
||||
kind: "remote" as const,
|
||||
transport: "ssh" as const,
|
||||
remoteCwd: "/srv/paperclip/workspace",
|
||||
spec: {
|
||||
host: "ssh.example.test",
|
||||
port: 22,
|
||||
username: "ssh-user",
|
||||
remoteCwd: "/srv/paperclip/workspace",
|
||||
remoteWorkspacePath: "/srv/paperclip/workspace",
|
||||
privateKey: null,
|
||||
knownHosts: null,
|
||||
strictHostKeyChecking: true,
|
||||
},
|
||||
};
|
||||
|
||||
it("falls back to the remote cwd when no adapter cwd is configured", () => {
|
||||
expect(resolveAdapterExecutionTargetCwd(sshTarget, "", "/Users/host/repo/server")).toBe(
|
||||
"/srv/paperclip/workspace",
|
||||
);
|
||||
expect(resolveAdapterExecutionTargetCwd(sshTarget, " ", "/Users/host/repo/server")).toBe(
|
||||
"/srv/paperclip/workspace",
|
||||
);
|
||||
expect(resolveAdapterExecutionTargetCwd(sshTarget, null, "/Users/host/repo/server")).toBe(
|
||||
"/srv/paperclip/workspace",
|
||||
);
|
||||
});
|
||||
|
||||
it("preserves an explicit adapter cwd when one is configured", () => {
|
||||
expect(
|
||||
resolveAdapterExecutionTargetCwd(
|
||||
sshTarget,
|
||||
"/srv/paperclip/custom-agent-dir",
|
||||
"/Users/host/repo/server",
|
||||
),
|
||||
).toBe("/srv/paperclip/custom-agent-dir");
|
||||
});
|
||||
|
||||
it("keeps the local fallback cwd for local targets", () => {
|
||||
expect(resolveAdapterExecutionTargetCwd(null, "", "/Users/host/repo/server")).toBe(
|
||||
"/Users/host/repo/server",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,11 +1,23 @@
|
||||
import path from "node:path";
|
||||
import type { SshRemoteExecutionSpec } from "./ssh.js";
|
||||
import {
|
||||
prepareCommandManagedRuntime,
|
||||
type CommandManagedRuntimeRunner,
|
||||
} from "./command-managed-runtime.js";
|
||||
import {
|
||||
buildRemoteExecutionSessionIdentity,
|
||||
prepareRemoteManagedRuntime,
|
||||
remoteExecutionSessionMatches,
|
||||
type RemoteManagedRuntimeAsset,
|
||||
} from "./remote-managed-runtime.js";
|
||||
import {
|
||||
createCommandManagedSandboxCallbackBridgeQueueClient,
|
||||
createSandboxCallbackBridgeAsset,
|
||||
createSandboxCallbackBridgeToken,
|
||||
DEFAULT_SANDBOX_CALLBACK_BRIDGE_MAX_BODY_BYTES,
|
||||
startSandboxCallbackBridgeServer,
|
||||
startSandboxCallbackBridgeWorker,
|
||||
} from "./sandbox-callback-bridge.js";
|
||||
import { parseSshRemoteExecutionSpec, runSshCommand, shellQuote } from "./ssh.js";
|
||||
import {
|
||||
ensureCommandResolvable,
|
||||
@@ -31,9 +43,23 @@ export interface AdapterSshExecutionTarget {
|
||||
spec: SshRemoteExecutionSpec;
|
||||
}
|
||||
|
||||
export interface AdapterSandboxExecutionTarget {
|
||||
kind: "remote";
|
||||
transport: "sandbox";
|
||||
providerKey?: string | null;
|
||||
environmentId?: string | null;
|
||||
leaseId?: string | null;
|
||||
remoteCwd: string;
|
||||
paperclipApiUrl?: string | null;
|
||||
paperclipTransport?: "direct" | "bridge";
|
||||
timeoutMs?: number | null;
|
||||
runner?: CommandManagedRuntimeRunner;
|
||||
}
|
||||
|
||||
export type AdapterExecutionTarget =
  | AdapterLocalExecutionTarget
-  | AdapterSshExecutionTarget;
+  | AdapterSshExecutionTarget
+  | AdapterSandboxExecutionTarget;
|
||||
|
||||
export type AdapterRemoteExecutionSpec = SshRemoteExecutionSpec;
|
||||
|
||||
@@ -65,6 +91,11 @@ export interface AdapterExecutionTargetShellOptions {
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
}
|
||||
|
||||
export interface AdapterExecutionTargetPaperclipBridgeHandle {
|
||||
env: Record<string, string>;
|
||||
stop(): Promise<void>;
|
||||
}
|
||||
|
||||
function parseObject(value: unknown): Record<string, unknown> {
|
||||
return value && typeof value === "object" && !Array.isArray(value)
|
||||
? (value as Record<string, unknown>)
|
||||
@@ -79,12 +110,38 @@ function readStringMeta(parsed: Record<string, unknown>, key: string): string |
|
||||
return readString(parsed[key]);
|
||||
}
|
||||
|
||||
function resolveHostForUrl(rawHost: string): string {
|
||||
const host = rawHost.trim();
|
||||
if (!host || host === "0.0.0.0" || host === "::") return "localhost";
|
||||
if (host.includes(":") && !host.startsWith("[") && !host.endsWith("]")) return `[${host}]`;
|
||||
return host;
|
||||
}
|
||||
|
||||
function resolveDefaultPaperclipApiUrl(): string {
|
||||
const runtimeHost = resolveHostForUrl(
|
||||
process.env.PAPERCLIP_LISTEN_HOST ?? process.env.HOST ?? "localhost",
|
||||
);
|
||||
// 3100 matches the default Paperclip dev server port when the runtime does not provide one.
|
||||
const runtimePort = process.env.PAPERCLIP_LISTEN_PORT ?? process.env.PORT ?? "3100";
|
||||
return `http://${runtimeHost}:${runtimePort}`;
|
||||
}
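// Illustrative sketch (not part of this change): how the fallback URL resolves under a few
// assumed environments. With PAPERCLIP_LISTEN_HOST="::" and PORT="4000" the host collapses to
// "localhost" and the port comes from PORT, giving "http://localhost:4000"; with none of the
// variables set it falls back to "http://localhost:3100".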
|
||||
|
||||
function resolveSandboxPaperclipTransport(
|
||||
target: Pick<AdapterSandboxExecutionTarget, "paperclipTransport" | "paperclipApiUrl">,
|
||||
): "direct" | "bridge" {
|
||||
if (target.paperclipTransport === "direct" || target.paperclipTransport === "bridge") {
|
||||
return target.paperclipTransport;
|
||||
}
|
||||
return target.paperclipApiUrl ? "direct" : "bridge";
|
||||
}
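// Illustrative sketch (not part of this change): an explicit paperclipTransport always wins;
// otherwise a target that carries a paperclipApiUrl resolves to "direct" and one without
// resolves to "bridge". The example inputs below are assumptions for the sketch.
// resolveSandboxPaperclipTransport({ paperclipApiUrl: "http://10.0.0.5:3100" }); // "direct"
// resolveSandboxPaperclipTransport({});                                          // "bridge"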
|
||||
|
||||
function isAdapterExecutionTargetInstance(value: unknown): value is AdapterExecutionTarget {
  const parsed = parseObject(value);
  if (parsed.kind === "local") return true;
  if (parsed.kind !== "remote") return false;
  if (parsed.transport === "ssh") return parseSshRemoteExecutionSpec(parseObject(parsed.spec)) !== null;
-  return false;
+  if (parsed.transport !== "sandbox") return false;
+  return readStringMeta(parsed, "remoteCwd") !== null;
}
|
||||
|
||||
export function adapterExecutionTargetToRemoteSpec(
|
||||
@@ -102,10 +159,7 @@ export function adapterExecutionTargetIsRemote(
|
||||
export function adapterExecutionTargetUsesManagedHome(
  target: AdapterExecutionTarget | null | undefined,
): boolean {
-  // SSH execution targets sync the runtime assets they need into the remote cwd today,
-  // so neither local nor remote targets provision a separate managed adapter home.
-  void target;
-  return false;
+  return target?.kind === "remote" && target.transport === "sandbox";
}
|
||||
|
||||
export function adapterExecutionTargetRemoteCwd(
|
||||
@@ -115,18 +169,49 @@ export function adapterExecutionTargetRemoteCwd(
|
||||
return target?.kind === "remote" ? target.remoteCwd : localCwd;
|
||||
}
|
||||
|
||||
export function resolveAdapterExecutionTargetCwd(
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
configuredCwd: string | null | undefined,
|
||||
localFallbackCwd: string,
|
||||
): string {
|
||||
if (typeof configuredCwd === "string" && configuredCwd.trim().length > 0) {
|
||||
return configuredCwd;
|
||||
}
|
||||
return adapterExecutionTargetRemoteCwd(target, localFallbackCwd);
|
||||
}
|
||||
|
||||
export function adapterExecutionTargetPaperclipApiUrl(
  target: AdapterExecutionTarget | null | undefined,
): string | null {
  if (target?.kind !== "remote") return null;
-  return target.paperclipApiUrl ?? target.spec.paperclipApiUrl ?? null;
+  if (target.transport === "ssh") return target.paperclipApiUrl ?? target.spec.paperclipApiUrl ?? null;
+  if (resolveSandboxPaperclipTransport(target) === "bridge") return null;
+  return target.paperclipApiUrl ?? null;
}
|
||||
|
||||
export function adapterExecutionTargetUsesPaperclipBridge(
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
): boolean {
|
||||
return target?.kind === "remote" &&
|
||||
target.transport === "sandbox" &&
|
||||
resolveSandboxPaperclipTransport(target) === "bridge";
|
||||
}
|
||||
|
||||
export function describeAdapterExecutionTarget(
  target: AdapterExecutionTarget | null | undefined,
): string {
  if (!target || target.kind === "local") return "local environment";
-  return `SSH environment ${target.spec.username}@${target.spec.host}:${target.spec.port}`;
+  if (target.transport === "ssh") {
+    return `SSH environment ${target.spec.username}@${target.spec.host}:${target.spec.port}`;
+  }
+  return `sandbox environment${target.providerKey ? ` (${target.providerKey})` : ""}`;
}
|
||||
|
||||
function requireSandboxRunner(target: AdapterSandboxExecutionTarget): CommandManagedRuntimeRunner {
|
||||
if (target.runner) return target.runner;
|
||||
throw new Error(
|
||||
"Sandbox execution target is missing its provider runtime runner. Sandbox commands must execute through the environment runtime.",
|
||||
);
|
||||
}
|
||||
|
||||
export async function ensureAdapterExecutionTargetCommandResolvable(
|
||||
@@ -135,6 +220,9 @@ export async function ensureAdapterExecutionTargetCommandResolvable(
|
||||
cwd: string,
|
||||
env: NodeJS.ProcessEnv,
|
||||
) {
|
||||
if (target?.kind === "remote" && target.transport === "sandbox") {
|
||||
return;
|
||||
}
|
||||
await ensureCommandResolvable(command, cwd, env, {
|
||||
remoteExecution: adapterExecutionTargetToRemoteSpec(target),
|
||||
});
|
||||
@@ -146,6 +234,9 @@ export async function resolveAdapterExecutionTargetCommandForLogs(
|
||||
cwd: string,
|
||||
env: NodeJS.ProcessEnv,
|
||||
): Promise<string> {
|
||||
if (target?.kind === "remote" && target.transport === "sandbox") {
|
||||
return `sandbox://${target.providerKey ?? "provider"}/${target.leaseId ?? "lease"}/${target.remoteCwd} :: ${command}`;
|
||||
}
|
||||
return await resolveCommandForLogs(command, cwd, env, {
|
||||
remoteExecution: adapterExecutionTargetToRemoteSpec(target),
|
||||
});
|
||||
@@ -158,6 +249,22 @@ export async function runAdapterExecutionTargetProcess(
|
||||
args: string[],
|
||||
options: AdapterExecutionTargetProcessOptions,
|
||||
): Promise<RunProcessResult> {
|
||||
if (target?.kind === "remote" && target.transport === "sandbox") {
|
||||
const runner = requireSandboxRunner(target);
|
||||
return await runner.execute({
|
||||
command,
|
||||
args,
|
||||
cwd: target.remoteCwd,
|
||||
env: options.env,
|
||||
stdin: options.stdin,
|
||||
timeoutMs: options.timeoutSec > 0 ? options.timeoutSec * 1000 : target.timeoutMs ?? undefined,
|
||||
onLog: options.onLog,
|
||||
onSpawn: options.onSpawn
|
||||
? async (meta) => options.onSpawn?.({ ...meta, processGroupId: null })
|
||||
: undefined,
|
||||
});
|
||||
}
|
||||
|
||||
return await runChildProcess(runId, command, args, {
|
||||
cwd: options.cwd,
|
||||
env: options.env,
|
||||
@@ -180,57 +287,68 @@ export async function runAdapterExecutionTargetShellCommand(
|
||||
const onLog = options.onLog ?? (async () => {});
|
||||
if (target?.kind === "remote") {
|
||||
const startedAt = new Date().toISOString();
|
||||
try {
|
||||
const result = await runSshCommand(target.spec, `sh -lc ${shellQuote(command)}`, {
|
||||
timeoutMs: (options.timeoutSec ?? 15) * 1000,
|
||||
});
|
||||
if (result.stdout) await onLog("stdout", result.stdout);
|
||||
if (result.stderr) await onLog("stderr", result.stderr);
|
||||
return {
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: result.stdout,
|
||||
stderr: result.stderr,
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
} catch (error) {
|
||||
const timedOutError = error as NodeJS.ErrnoException & {
|
||||
stdout?: string;
|
||||
stderr?: string;
|
||||
signal?: string | null;
|
||||
};
|
||||
const stdout = timedOutError.stdout ?? "";
|
||||
const stderr = timedOutError.stderr ?? "";
|
||||
if (typeof timedOutError.code === "number") {
|
||||
if (target.transport === "ssh") {
|
||||
try {
|
||||
const result = await runSshCommand(target.spec, `sh -lc ${shellQuote(command)}`, {
|
||||
timeoutMs: (options.timeoutSec ?? 15) * 1000,
|
||||
});
|
||||
if (result.stdout) await onLog("stdout", result.stdout);
|
||||
if (result.stderr) await onLog("stderr", result.stderr);
|
||||
return {
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: result.stdout,
|
||||
stderr: result.stderr,
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
} catch (error) {
|
||||
const timedOutError = error as NodeJS.ErrnoException & {
|
||||
stdout?: string;
|
||||
stderr?: string;
|
||||
signal?: string | null;
|
||||
};
|
||||
const stdout = timedOutError.stdout ?? "";
|
||||
const stderr = timedOutError.stderr ?? "";
|
||||
if (typeof timedOutError.code === "number") {
|
||||
if (stdout) await onLog("stdout", stdout);
|
||||
if (stderr) await onLog("stderr", stderr);
|
||||
return {
|
||||
exitCode: timedOutError.code,
|
||||
signal: timedOutError.signal ?? null,
|
||||
timedOut: false,
|
||||
stdout,
|
||||
stderr,
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
}
|
||||
if (timedOutError.code !== "ETIMEDOUT") {
|
||||
throw error;
|
||||
}
|
||||
if (stdout) await onLog("stdout", stdout);
|
||||
if (stderr) await onLog("stderr", stderr);
|
||||
return {
|
||||
exitCode: timedOutError.code,
|
||||
exitCode: null,
|
||||
signal: timedOutError.signal ?? null,
|
||||
timedOut: false,
|
||||
timedOut: true,
|
||||
stdout,
|
||||
stderr,
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
}
|
||||
if (timedOutError.code !== "ETIMEDOUT") {
|
||||
throw error;
|
||||
}
|
||||
if (stdout) await onLog("stdout", stdout);
|
||||
if (stderr) await onLog("stderr", stderr);
|
||||
return {
|
||||
exitCode: null,
|
||||
signal: timedOutError.signal ?? null,
|
||||
timedOut: true,
|
||||
stdout,
|
||||
stderr,
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
}
|
||||
|
||||
return await requireSandboxRunner(target).execute({
|
||||
command: "sh",
|
||||
args: ["-lc", command],
|
||||
cwd: target.remoteCwd,
|
||||
env: options.env,
|
||||
timeoutMs: (options.timeoutSec ?? 15) * 1000,
|
||||
onLog,
|
||||
});
|
||||
}
|
||||
|
||||
return await runAdapterExecutionTargetProcess(
|
||||
@@ -277,11 +395,79 @@ export async function ensureAdapterExecutionTargetFile(
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure a working directory exists (and is a directory) on the execution target.
|
||||
*
|
||||
* For local targets this delegates to the local `ensureAbsoluteDirectory` helper
|
||||
* (Node fs). For remote (SSH/sandbox) targets it shells out and runs
|
||||
* `mkdir -p` (when allowed) followed by a `[ -d ]` check so the result reflects
|
||||
* the directory state inside the environment, not on the Paperclip host.
|
||||
*
|
||||
* Throws an Error with a human-readable message on failure.
|
||||
*/
|
||||
export async function ensureAdapterExecutionTargetDirectory(
|
||||
runId: string,
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
cwd: string,
|
||||
options: AdapterExecutionTargetShellOptions & { createIfMissing?: boolean },
|
||||
): Promise<void> {
|
||||
const createIfMissing = options.createIfMissing ?? false;
|
||||
|
||||
if (!target || target.kind === "local") {
|
||||
const { ensureAbsoluteDirectory } = await import("./server-utils.js");
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing });
|
||||
return;
|
||||
}
|
||||
|
||||
// Remote (SSH or sandbox): both expect POSIX absolute paths inside the env.
|
||||
if (!cwd.startsWith("/")) {
|
||||
throw new Error(`Working directory must be an absolute POSIX path on the remote target: "${cwd}"`);
|
||||
}
|
||||
|
||||
const quoted = shellQuote(cwd);
|
||||
const script = createIfMissing
|
||||
? `mkdir -p ${quoted} && [ -d ${quoted} ]`
|
||||
: `[ -d ${quoted} ]`;
|
||||
|
||||
const result = await runAdapterExecutionTargetShellCommand(runId, target, script, {
|
||||
cwd: target.kind === "remote" ? target.remoteCwd : cwd,
|
||||
env: options.env,
|
||||
timeoutSec: options.timeoutSec ?? 15,
|
||||
graceSec: options.graceSec ?? 5,
|
||||
onLog: options.onLog,
|
||||
});
|
||||
|
||||
if (result.timedOut) {
|
||||
throw new Error(`Timed out checking working directory on remote target: "${cwd}"`);
|
||||
}
|
||||
if ((result.exitCode ?? 1) !== 0) {
|
||||
const detail = (result.stderr || result.stdout || "").trim();
|
||||
if (createIfMissing) {
|
||||
throw new Error(
|
||||
`Could not create working directory "${cwd}" on remote target${detail ? `: ${detail}` : "."}`,
|
||||
);
|
||||
}
|
||||
throw new Error(
|
||||
`Working directory does not exist on remote target: "${cwd}"${detail ? ` (${detail})` : ""}`,
|
||||
);
|
||||
}
|
||||
}
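// Illustrative usage sketch (not part of this change); the run id, target, path, and option
// values below are assumptions for the example. With createIfMissing the remote check becomes
// `mkdir -p <cwd> && [ -d <cwd> ]`, so the result reflects the sandbox, not the Paperclip host.
// await ensureAdapterExecutionTargetDirectory("run-42", sandboxTarget, "/workspace/repo", {
//   cwd: "/workspace",
//   env: {},
//   timeoutSec: 15,
//   graceSec: 5,
//   createIfMissing: true,
// });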
|
||||
|
||||
export function adapterExecutionTargetSessionIdentity(
  target: AdapterExecutionTarget | null | undefined,
): Record<string, unknown> | null {
  if (!target || target.kind === "local") return null;
-  return buildRemoteExecutionSessionIdentity(target.spec);
+  if (target.transport === "ssh") return buildRemoteExecutionSessionIdentity(target.spec);
+  const paperclipTransport = resolveSandboxPaperclipTransport(target);
+  return {
+    transport: "sandbox",
+    providerKey: target.providerKey ?? null,
+    environmentId: target.environmentId ?? null,
+    leaseId: target.leaseId ?? null,
+    remoteCwd: target.remoteCwd,
+    paperclipTransport,
+    ...(paperclipTransport === "direct" && target.paperclipApiUrl ? { paperclipApiUrl: target.paperclipApiUrl } : {}),
+  };
}
|
||||
|
||||
export function adapterExecutionTargetSessionMatches(
|
||||
@@ -291,7 +477,18 @@ export function adapterExecutionTargetSessionMatches(
|
||||
  if (!target || target.kind === "local") {
    return Object.keys(parseObject(saved)).length === 0;
  }
-  return remoteExecutionSessionMatches(saved, target.spec);
+  if (target.transport === "ssh") return remoteExecutionSessionMatches(saved, target.spec);
+  const current = adapterExecutionTargetSessionIdentity(target);
+  const parsedSaved = parseObject(saved);
+  return (
+    readStringMeta(parsedSaved, "transport") === current?.transport &&
+    readStringMeta(parsedSaved, "providerKey") === current?.providerKey &&
+    readStringMeta(parsedSaved, "environmentId") === current?.environmentId &&
+    readStringMeta(parsedSaved, "leaseId") === current?.leaseId &&
+    readStringMeta(parsedSaved, "remoteCwd") === current?.remoteCwd &&
+    readStringMeta(parsedSaved, "paperclipTransport") === (current?.paperclipTransport ?? null) &&
+    readStringMeta(parsedSaved, "paperclipApiUrl") === (current?.paperclipApiUrl ?? null)
+  );
}
|
||||
|
||||
export function parseAdapterExecutionTarget(value: unknown): AdapterExecutionTarget | null {
|
||||
@@ -320,6 +517,26 @@ export function parseAdapterExecutionTarget(value: unknown): AdapterExecutionTar
|
||||
};
|
||||
}
|
||||
|
||||
if (kind === "remote" && readStringMeta(parsed, "transport") === "sandbox") {
|
||||
const remoteCwd = readStringMeta(parsed, "remoteCwd");
|
||||
const paperclipTransport = readStringMeta(parsed, "paperclipTransport");
|
||||
if (!remoteCwd) return null;
|
||||
return {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
providerKey: readStringMeta(parsed, "providerKey"),
|
||||
environmentId: readStringMeta(parsed, "environmentId"),
|
||||
leaseId: readStringMeta(parsed, "leaseId"),
|
||||
remoteCwd,
|
||||
paperclipApiUrl: readStringMeta(parsed, "paperclipApiUrl"),
|
||||
paperclipTransport:
|
||||
paperclipTransport === "direct" || paperclipTransport === "bridge"
|
||||
? paperclipTransport
|
||||
: undefined,
|
||||
timeoutMs: typeof parsed.timeoutMs === "number" ? parsed.timeoutMs : null,
|
||||
};
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -376,11 +593,36 @@ export async function prepareAdapterExecutionTargetRuntime(input: {
|
||||
};
|
||||
}
|
||||
|
||||
-  const prepared = await prepareRemoteManagedRuntime({
-    spec: target.spec,
+  if (target.transport === "ssh") {
+    const prepared = await prepareRemoteManagedRuntime({
+      spec: target.spec,
|
||||
adapterKey: input.adapterKey,
|
||||
workspaceLocalDir: input.workspaceLocalDir,
|
||||
assets: input.assets,
|
||||
});
|
||||
return {
|
||||
target,
|
||||
runtimeRootDir: prepared.runtimeRootDir,
|
||||
assetDirs: prepared.assetDirs,
|
||||
restoreWorkspace: prepared.restoreWorkspace,
|
||||
};
|
||||
}
|
||||
|
||||
const prepared = await prepareCommandManagedRuntime({
|
||||
runner: requireSandboxRunner(target),
|
||||
spec: {
|
||||
providerKey: target.providerKey,
|
||||
leaseId: target.leaseId,
|
||||
remoteCwd: target.remoteCwd,
|
||||
timeoutMs: target.timeoutMs,
|
||||
paperclipApiUrl: target.paperclipApiUrl,
|
||||
},
|
||||
adapterKey: input.adapterKey,
|
||||
workspaceLocalDir: input.workspaceLocalDir,
|
||||
workspaceExclude: input.workspaceExclude,
|
||||
preserveAbsentOnRestore: input.preserveAbsentOnRestore,
|
||||
assets: input.assets,
|
||||
installCommand: input.installCommand,
|
||||
});
|
||||
return {
|
||||
target,
|
||||
@@ -397,3 +639,172 @@ export function runtimeAssetDir(
|
||||
): string {
|
||||
return prepared.assetDirs[key] ?? path.posix.join(fallbackRemoteCwd, ".paperclip-runtime", key);
|
||||
}
|
||||
|
||||
function buildBridgeResponseHeaders(response: Response): Record<string, string> {
|
||||
const out: Record<string, string> = {};
|
||||
for (const key of ["content-type", "etag", "last-modified"]) {
|
||||
const value = response.headers.get(key);
|
||||
if (value && value.trim().length > 0) out[key] = value.trim();
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
function buildBridgeForwardUrl(baseUrl: string, request: { path: string; query: string }): URL {
|
||||
const url = new URL(request.path, baseUrl);
|
||||
const query = request.query.trim();
|
||||
url.search = query.startsWith("?") ? query.slice(1) : query;
|
||||
return url;
|
||||
}
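// Illustrative sketch (not part of this change): a stored query of "?view=compact" has its
// leading "?" stripped before assignment, so
// buildBridgeForwardUrl("http://127.0.0.1:3100", { path: "/api/agents/me", query: "?view=compact" })
// forwards to http://127.0.0.1:3100/api/agents/me?view=compact.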
|
||||
|
||||
function bridgeResponseBodyLimitError(maxBodyBytes: number): Error {
|
||||
return new Error(`Bridge response body exceeded the configured size limit of ${maxBodyBytes} bytes.`);
|
||||
}
|
||||
|
||||
async function readBridgeForwardResponseBody(response: Response, maxBodyBytes: number): Promise<string> {
|
||||
const rawContentLength = response.headers.get("content-length");
|
||||
if (rawContentLength) {
|
||||
const contentLength = Number.parseInt(rawContentLength, 10);
|
||||
if (Number.isFinite(contentLength) && contentLength > maxBodyBytes) {
|
||||
throw bridgeResponseBodyLimitError(maxBodyBytes);
|
||||
}
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
return "";
|
||||
}
|
||||
|
||||
const reader = response.body.getReader();
|
||||
const chunks: Buffer[] = [];
|
||||
let totalBytes = 0;
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
if (!value) continue;
|
||||
totalBytes += value.byteLength;
|
||||
if (totalBytes > maxBodyBytes) {
|
||||
await reader.cancel().catch(() => undefined);
|
||||
throw bridgeResponseBodyLimitError(maxBodyBytes);
|
||||
}
|
||||
chunks.push(Buffer.from(value));
|
||||
}
|
||||
return Buffer.concat(chunks, totalBytes).toString("utf8");
|
||||
}
|
||||
|
||||
export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
runId: string;
|
||||
target: AdapterExecutionTarget | null | undefined;
|
||||
runtimeRootDir: string | null | undefined;
|
||||
adapterKey: string;
|
||||
hostApiToken: string | null | undefined;
|
||||
hostApiUrl?: string | null;
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
maxBodyBytes?: number | null;
|
||||
}): Promise<AdapterExecutionTargetPaperclipBridgeHandle | null> {
|
||||
if (!adapterExecutionTargetUsesPaperclipBridge(input.target)) {
|
||||
return null;
|
||||
}
|
||||
if (!input.target || input.target.kind !== "remote" || input.target.transport !== "sandbox") {
|
||||
return null;
|
||||
}
|
||||
|
||||
const target = input.target;
|
||||
const onLog = input.onLog ?? (async () => {});
|
||||
const hostApiToken = input.hostApiToken?.trim() ?? "";
|
||||
if (hostApiToken.length === 0) {
|
||||
throw new Error("Sandbox bridge mode requires a host-side Paperclip API token.");
|
||||
}
|
||||
|
||||
const runtimeRootDir =
|
||||
input.runtimeRootDir?.trim().length
|
||||
? input.runtimeRootDir.trim()
|
||||
: path.posix.join(target.remoteCwd, ".paperclip-runtime", input.adapterKey);
|
||||
const bridgeRuntimeDir = path.posix.join(runtimeRootDir, "paperclip-bridge");
|
||||
const queueDir = path.posix.join(bridgeRuntimeDir, "queue");
|
||||
const assetRemoteDir = path.posix.join(bridgeRuntimeDir, "server");
|
||||
const bridgeToken = createSandboxCallbackBridgeToken();
|
||||
const maxBodyBytes =
|
||||
typeof input.maxBodyBytes === "number" && Number.isFinite(input.maxBodyBytes) && input.maxBodyBytes > 0
|
||||
? Math.trunc(input.maxBodyBytes)
|
||||
: DEFAULT_SANDBOX_CALLBACK_BRIDGE_MAX_BODY_BYTES;
|
||||
const hostApiUrl =
|
||||
input.hostApiUrl?.trim() ||
|
||||
process.env.PAPERCLIP_RUNTIME_API_URL?.trim() ||
|
||||
process.env.PAPERCLIP_API_URL?.trim() ||
|
||||
resolveDefaultPaperclipApiUrl();
|
||||
|
||||
await onLog(
|
||||
"stdout",
|
||||
`[paperclip] Starting sandbox callback bridge for ${input.adapterKey} in ${bridgeRuntimeDir}.\n`,
|
||||
);
|
||||
|
||||
const bridgeAsset = await createSandboxCallbackBridgeAsset();
|
||||
let server: Awaited<ReturnType<typeof startSandboxCallbackBridgeServer>> | null = null;
|
||||
let worker: Awaited<ReturnType<typeof startSandboxCallbackBridgeWorker>> | null = null;
|
||||
try {
|
||||
const client = createCommandManagedSandboxCallbackBridgeQueueClient({
|
||||
runner: requireSandboxRunner(target),
|
||||
remoteCwd: target.remoteCwd,
|
||||
timeoutMs: target.timeoutMs,
|
||||
});
|
||||
worker = await startSandboxCallbackBridgeWorker({
|
||||
client,
|
||||
queueDir,
|
||||
maxBodyBytes,
|
||||
handleRequest: async (request) => {
|
||||
const headers = new Headers();
|
||||
for (const [key, value] of Object.entries(request.headers)) {
|
||||
if (value.trim().length === 0) continue;
|
||||
headers.set(key, value);
|
||||
}
|
||||
headers.set("authorization", `Bearer ${hostApiToken}`);
|
||||
headers.set("x-paperclip-run-id", input.runId);
|
||||
const method = request.method.trim().toUpperCase() || "GET";
|
||||
const response = await fetch(buildBridgeForwardUrl(hostApiUrl, request), {
|
||||
method,
|
||||
headers,
|
||||
...(method === "GET" || method === "HEAD" ? {} : { body: request.body }),
|
||||
signal: AbortSignal.timeout(30_000),
|
||||
});
|
||||
return {
|
||||
status: response.status,
|
||||
headers: buildBridgeResponseHeaders(response),
|
||||
body: await readBridgeForwardResponseBody(response, maxBodyBytes),
|
||||
};
|
||||
},
|
||||
});
|
||||
server = await startSandboxCallbackBridgeServer({
|
||||
runner: requireSandboxRunner(target),
|
||||
remoteCwd: target.remoteCwd,
|
||||
assetRemoteDir,
|
||||
queueDir,
|
||||
bridgeToken,
|
||||
bridgeAsset,
|
||||
timeoutMs: target.timeoutMs,
|
||||
maxBodyBytes,
|
||||
});
|
||||
} catch (error) {
|
||||
await Promise.allSettled([
|
||||
server?.stop(),
|
||||
worker?.stop(),
|
||||
bridgeAsset.cleanup(),
|
||||
]);
|
||||
throw error;
|
||||
}
|
||||
|
||||
return {
|
||||
env: {
|
||||
PAPERCLIP_API_URL: server.baseUrl,
|
||||
PAPERCLIP_API_KEY: bridgeToken,
|
||||
PAPERCLIP_API_BRIDGE_MODE: "queue_v1",
|
||||
},
|
||||
stop: async () => {
|
||||
await Promise.allSettled([
|
||||
server?.stop(),
|
||||
]);
|
||||
await Promise.allSettled([
|
||||
worker?.stop(),
|
||||
bridgeAsset.cleanup(),
|
||||
]);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
@@ -54,3 +54,15 @@ export {
|
||||
redactTranscriptEntryPaths,
|
||||
} from "./log-redaction.js";
|
||||
export { inferOpenAiCompatibleBiller } from "./billing.js";
|
||||
// Keep the root adapter-utils entry browser-safe because the UI imports it.
|
||||
// The sandbox callback bridge stays available via its dedicated subpath export.
|
||||
export type {
|
||||
SandboxCallbackBridgeRequest,
|
||||
SandboxCallbackBridgeResponse,
|
||||
SandboxCallbackBridgeAsset,
|
||||
SandboxCallbackBridgeDirectories,
|
||||
SandboxCallbackBridgeRouteRule,
|
||||
SandboxCallbackBridgeQueueClient,
|
||||
SandboxCallbackBridgeWorkerHandle,
|
||||
StartedSandboxCallbackBridgeServer,
|
||||
} from "./sandbox-callback-bridge.js";
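// Illustrative sketch (not part of this change): server-side code would pull the runtime pieces
// from the dedicated subpath export instead of the browser-safe root entry. The package name and
// subpath below are assumptions for the example.
// import { startSandboxCallbackBridgeWorker } from "@paperclip/adapter-utils/sandbox-callback-bridge";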
|
||||
|
||||
packages/adapter-utils/src/sandbox-callback-bridge.test.ts (new file, 610 lines)
@@ -0,0 +1,610 @@
|
||||
import { execFile as execFileCallback } from "node:child_process";
|
||||
import { mkdir, mkdtemp, readFile, readdir, rm, writeFile } from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { promisify } from "node:util";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { prepareCommandManagedRuntime } from "./command-managed-runtime.js";
|
||||
import {
|
||||
createFileSystemSandboxCallbackBridgeQueueClient,
|
||||
createSandboxCallbackBridgeAsset,
|
||||
createSandboxCallbackBridgeToken,
|
||||
sandboxCallbackBridgeDirectories,
|
||||
startSandboxCallbackBridgeServer,
|
||||
startSandboxCallbackBridgeWorker,
|
||||
} from "./sandbox-callback-bridge.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
|
||||
const execFile = promisify(execFileCallback);
|
||||
|
||||
describe("sandbox callback bridge", () => {
|
||||
const cleanupDirs: string[] = [];
|
||||
const cleanupFns: Array<() => Promise<void>> = [];
|
||||
|
||||
function createExecRunner() {
|
||||
return {
|
||||
execute: async (input: {
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
}): Promise<RunProcessResult> => {
|
||||
const startedAt = new Date().toISOString();
|
||||
const env = {
|
||||
...process.env,
|
||||
...input.env,
|
||||
};
|
||||
const command = input.command === "sh" ? "/bin/sh" : input.command;
|
||||
const args = [...(input.args ?? [])];
|
||||
if (input.stdin != null && input.command === "sh" && args[0] === "-lc" && typeof args[1] === "string") {
|
||||
env.PAPERCLIP_TEST_STDIN = input.stdin;
|
||||
args[1] = `printf '%s' \"$PAPERCLIP_TEST_STDIN\" | (${args[1]})`;
|
||||
}
|
||||
try {
|
||||
const result = await execFile(command, args, {
|
||||
cwd: input.cwd,
|
||||
env,
|
||||
maxBuffer: 32 * 1024 * 1024,
|
||||
timeout: input.timeoutMs,
|
||||
});
|
||||
return {
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: result.stdout,
|
||||
stderr: result.stderr,
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
} catch (error) {
|
||||
const err = error as NodeJS.ErrnoException & {
|
||||
stdout?: string;
|
||||
stderr?: string;
|
||||
code?: string | number | null;
|
||||
signal?: NodeJS.Signals | null;
|
||||
killed?: boolean;
|
||||
};
|
||||
return {
|
||||
exitCode: typeof err.code === "number" ? err.code : null,
|
||||
signal: err.signal ?? null,
|
||||
timedOut: Boolean(err.killed && input.timeoutMs),
|
||||
stdout: err.stdout ?? "",
|
||||
stderr: err.stderr ?? "",
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function waitForJsonFile(directory: string, timeoutMs = 2_000): Promise<string> {
|
||||
const deadline = Date.now() + timeoutMs;
|
||||
while (Date.now() < deadline) {
|
||||
const entries = await readdir(directory).catch(() => []);
|
||||
const match = entries.find((entry) => entry.endsWith(".json"));
|
||||
if (match) return match;
|
||||
await new Promise((resolve) => setTimeout(resolve, 10));
|
||||
}
|
||||
throw new Error(`Timed out waiting for a JSON file in ${directory}.`);
|
||||
}
|
||||
|
||||
afterEach(async () => {
|
||||
while (cleanupFns.length > 0) {
|
||||
const cleanup = cleanupFns.pop();
|
||||
if (!cleanup) continue;
|
||||
await cleanup().catch(() => undefined);
|
||||
}
|
||||
while (cleanupDirs.length > 0) {
|
||||
const dir = cleanupDirs.pop();
|
||||
if (!dir) continue;
|
||||
await rm(dir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
});
|
||||
|
||||
it("round-trips localhost bridge requests over the sandbox queue without forwarding the bridge token", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-runtime-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const localWorkspaceDir = path.join(rootDir, "local-workspace");
|
||||
const remoteWorkspaceDir = path.join(rootDir, "remote-workspace");
|
||||
await mkdir(localWorkspaceDir, { recursive: true });
|
||||
await mkdir(remoteWorkspaceDir, { recursive: true });
|
||||
await writeFile(path.join(localWorkspaceDir, "README.md"), "bridge test\n", "utf8");
|
||||
|
||||
const runner = createExecRunner();
|
||||
|
||||
const bridgeAsset = await createSandboxCallbackBridgeAsset();
|
||||
cleanupFns.push(bridgeAsset.cleanup);
|
||||
|
||||
const prepared = await prepareCommandManagedRuntime({
|
||||
runner,
|
||||
spec: {
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
adapterKey: "codex",
|
||||
workspaceLocalDir: localWorkspaceDir,
|
||||
assets: [
|
||||
{
|
||||
key: "bridge",
|
||||
localDir: bridgeAsset.localDir,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
const queueDir = path.posix.join(prepared.runtimeRootDir, "paperclip-bridge");
|
||||
const directories = sandboxCallbackBridgeDirectories(queueDir);
|
||||
const bridgeToken = createSandboxCallbackBridgeToken();
|
||||
const seenRequests: Array<{
|
||||
method: string;
|
||||
path: string;
|
||||
query: string;
|
||||
headers: Record<string, string>;
|
||||
body: string;
|
||||
}> = [];
|
||||
|
||||
const worker = await startSandboxCallbackBridgeWorker({
|
||||
client: createFileSystemSandboxCallbackBridgeQueueClient(),
|
||||
queueDir,
|
||||
authorizeRequest: async (request) =>
|
||||
request.path === "/api/agents/me" ? null : `Route not allowed: ${request.method} ${request.path}`,
|
||||
handleRequest: async (request) => {
|
||||
seenRequests.push({
|
||||
method: request.method,
|
||||
path: request.path,
|
||||
query: request.query,
|
||||
headers: request.headers,
|
||||
body: request.body,
|
||||
});
|
||||
return {
|
||||
status: 200,
|
||||
headers: {
|
||||
"content-type": "application/json",
|
||||
etag: '"bridge-rev-1"',
|
||||
"last-modified": "Tue, 01 Apr 2025 00:00:00 GMT",
|
||||
},
|
||||
body: JSON.stringify({
|
||||
ok: true,
|
||||
method: request.method,
|
||||
path: request.path,
|
||||
}),
|
||||
};
|
||||
},
|
||||
});
|
||||
cleanupFns.push(async () => {
|
||||
await worker.stop();
|
||||
});
|
||||
|
||||
const bridge = await startSandboxCallbackBridgeServer({
|
||||
runner,
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
assetRemoteDir: prepared.assetDirs.bridge,
|
||||
queueDir,
|
||||
bridgeToken,
|
||||
timeoutMs: 30_000,
|
||||
});
|
||||
cleanupFns.push(async () => {
|
||||
await bridge.stop();
|
||||
});
|
||||
|
||||
const okResponse = await fetch(`${bridge.baseUrl}/api/agents/me?view=compact`, {
|
||||
headers: {
|
||||
authorization: `Bearer ${bridgeToken}`,
|
||||
accept: "application/json",
|
||||
"if-none-match": '"client-cache-key"',
|
||||
"x-paperclip-run-id": "run-bridge-1",
|
||||
"x-bridge-debug": "drop-me",
|
||||
},
|
||||
});
|
||||
expect(okResponse.status).toBe(200);
|
||||
expect(okResponse.headers.get("content-type")).toContain("application/json");
|
||||
expect(okResponse.headers.get("etag")).toBe('"bridge-rev-1"');
|
||||
expect(okResponse.headers.get("last-modified")).toBe("Tue, 01 Apr 2025 00:00:00 GMT");
|
||||
await expect(okResponse.json()).resolves.toMatchObject({
|
||||
ok: true,
|
||||
method: "GET",
|
||||
path: "/api/agents/me",
|
||||
});
|
||||
|
||||
const deniedResponse = await fetch(`${bridge.baseUrl}/api/issues/issue-1`, {
|
||||
method: "PATCH",
|
||||
headers: {
|
||||
authorization: `Bearer ${bridgeToken}`,
|
||||
"content-type": "application/json",
|
||||
},
|
||||
body: JSON.stringify({ status: "in_progress" }),
|
||||
});
|
||||
expect(deniedResponse.status).toBe(403);
|
||||
await expect(deniedResponse.json()).resolves.toMatchObject({
|
||||
error: "Route not allowed: PATCH /api/issues/issue-1",
|
||||
});
|
||||
|
||||
const unauthorizedResponse = await fetch(`${bridge.baseUrl}/api/agents/me`, {
|
||||
headers: {
|
||||
authorization: "Bearer wrong-token",
|
||||
},
|
||||
});
|
||||
expect(unauthorizedResponse.status).toBe(401);
|
||||
await expect(unauthorizedResponse.json()).resolves.toMatchObject({
|
||||
error: "Invalid bridge token.",
|
||||
});
|
||||
|
||||
expect(seenRequests).toHaveLength(1);
|
||||
expect(seenRequests[0]).toMatchObject({
|
||||
method: "GET",
|
||||
path: "/api/agents/me",
|
||||
query: "?view=compact",
|
||||
body: "",
|
||||
headers: {
|
||||
accept: "application/json",
|
||||
"if-none-match": '"client-cache-key"',
|
||||
},
|
||||
});
|
||||
expect(seenRequests[0]?.headers.authorization).toBeUndefined();
|
||||
expect(seenRequests[0]?.headers["x-paperclip-run-id"]).toBeUndefined();
|
||||
|
||||
});
|
||||
|
||||
it("denies non-allowlisted requests by default", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-default-policy-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const queueDir = path.posix.join(rootDir, "queue");
|
||||
const directories = sandboxCallbackBridgeDirectories(queueDir);
|
||||
let handled = 0;
|
||||
|
||||
const worker = await startSandboxCallbackBridgeWorker({
|
||||
client: createFileSystemSandboxCallbackBridgeQueueClient(),
|
||||
queueDir,
|
||||
handleRequest: async () => {
|
||||
handled += 1;
|
||||
return {
|
||||
status: 200,
|
||||
body: "should not happen",
|
||||
};
|
||||
},
|
||||
});
|
||||
|
||||
await writeFile(
|
||||
path.posix.join(directories.requestsDir, "req-1.json"),
|
||||
`${JSON.stringify({
|
||||
id: "req-1",
|
||||
method: "DELETE",
|
||||
path: "/api/secrets",
|
||||
query: "",
|
||||
headers: {},
|
||||
body: "",
|
||||
createdAt: new Date().toISOString(),
|
||||
})}\n`,
|
||||
"utf8",
|
||||
);
|
||||
|
||||
await worker.stop({ drainTimeoutMs: 1_000 });
|
||||
|
||||
const response = JSON.parse(
|
||||
await readFile(path.posix.join(directories.responsesDir, "req-1.json"), "utf8"),
|
||||
) as { status: number; body: string };
|
||||
expect(handled).toBe(0);
|
||||
expect(response.status).toBe(403);
|
||||
expect(JSON.parse(response.body)).toEqual({
|
||||
error: "Route not allowed: DELETE /api/secrets",
|
||||
});
|
||||
});
|
||||
|
||||
it("drains already-queued requests on stop", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-drain-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const queueDir = path.posix.join(rootDir, "queue");
|
||||
const directories = sandboxCallbackBridgeDirectories(queueDir);
|
||||
const processed: string[] = [];
|
||||
|
||||
const worker = await startSandboxCallbackBridgeWorker({
|
||||
client: createFileSystemSandboxCallbackBridgeQueueClient(),
|
||||
queueDir,
|
||||
authorizeRequest: async () => null,
|
||||
handleRequest: async (request) => {
|
||||
processed.push(request.id);
|
||||
await new Promise((resolve) => setTimeout(resolve, 25));
|
||||
return {
|
||||
status: 200,
|
||||
body: request.id,
|
||||
};
|
||||
},
|
||||
});
|
||||
|
||||
await writeFile(
|
||||
path.posix.join(directories.requestsDir, "req-a.json"),
|
||||
`${JSON.stringify({
|
||||
id: "req-a",
|
||||
method: "GET",
|
||||
path: "/api/agents/me",
|
||||
query: "",
|
||||
headers: {},
|
||||
body: "",
|
||||
createdAt: new Date().toISOString(),
|
||||
})}\n`,
|
||||
"utf8",
|
||||
);
|
||||
await writeFile(
|
||||
path.posix.join(directories.requestsDir, "req-b.json"),
|
||||
`${JSON.stringify({
|
||||
id: "req-b",
|
||||
method: "GET",
|
||||
path: "/api/agents/me",
|
||||
query: "",
|
||||
headers: {},
|
||||
body: "",
|
||||
createdAt: new Date().toISOString(),
|
||||
})}\n`,
|
||||
"utf8",
|
||||
);
|
||||
|
||||
await worker.stop({ drainTimeoutMs: 1_000 });
|
||||
|
||||
expect(processed).toEqual(["req-a", "req-b"]);
|
||||
await expect(readFile(path.posix.join(directories.responsesDir, "req-a.json"), "utf8")).resolves.toContain("\"req-a\"");
|
||||
await expect(readFile(path.posix.join(directories.responsesDir, "req-b.json"), "utf8")).resolves.toContain("\"req-b\"");
|
||||
});
|
||||
|
||||
it("writes fast 503 responses for queued requests that miss the drain deadline", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-drain-timeout-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const queueDir = path.posix.join(rootDir, "queue");
|
||||
const directories = sandboxCallbackBridgeDirectories(queueDir);
|
||||
const processed: string[] = [];
|
||||
|
||||
const worker = await startSandboxCallbackBridgeWorker({
|
||||
client: createFileSystemSandboxCallbackBridgeQueueClient(),
|
||||
queueDir,
|
||||
authorizeRequest: async () => null,
|
||||
handleRequest: async (request) => {
|
||||
processed.push(request.id);
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
return {
|
||||
status: 200,
|
||||
body: request.id,
|
||||
};
|
||||
},
|
||||
});
|
||||
|
||||
await writeFile(
|
||||
path.posix.join(directories.requestsDir, "req-a.json"),
|
||||
`${JSON.stringify({
|
||||
id: "req-a",
|
||||
method: "GET",
|
||||
path: "/api/agents/me",
|
||||
query: "",
|
||||
headers: {},
|
||||
body: "",
|
||||
createdAt: new Date().toISOString(),
|
||||
})}\n`,
|
||||
"utf8",
|
||||
);
|
||||
await writeFile(
|
||||
path.posix.join(directories.requestsDir, "req-b.json"),
|
||||
`${JSON.stringify({
|
||||
id: "req-b",
|
||||
method: "GET",
|
||||
path: "/api/agents/me",
|
||||
query: "",
|
||||
headers: {},
|
||||
body: "",
|
||||
createdAt: new Date().toISOString(),
|
||||
})}\n`,
|
||||
"utf8",
|
||||
);
|
||||
|
||||
for (let attempt = 0; attempt < 50 && processed.length === 0; attempt += 1) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 5));
|
||||
}
|
||||
|
||||
await worker.stop({ drainTimeoutMs: 10 });
|
||||
|
||||
expect(processed).toEqual(["req-a"]);
|
||||
await expect(readFile(path.posix.join(directories.responsesDir, "req-a.json"), "utf8")).resolves.toContain("\"req-a\"");
|
||||
await expect(readFile(path.posix.join(directories.responsesDir, "req-b.json"), "utf8")).resolves.toContain(
|
||||
"Bridge worker stopped before request could be handled.",
|
||||
);
|
||||
});
|
||||
|
||||
it("rejects non-JSON request bodies and full queues at the bridge server", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-server-guards-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const localWorkspaceDir = path.join(rootDir, "local-workspace");
|
||||
const remoteWorkspaceDir = path.join(rootDir, "remote-workspace");
|
||||
await mkdir(localWorkspaceDir, { recursive: true });
|
||||
await mkdir(remoteWorkspaceDir, { recursive: true });
|
||||
await writeFile(path.join(localWorkspaceDir, "README.md"), "bridge guard test\n", "utf8");
|
||||
|
||||
const runner = createExecRunner();
|
||||
|
||||
const bridgeAsset = await createSandboxCallbackBridgeAsset();
|
||||
cleanupFns.push(bridgeAsset.cleanup);
|
||||
const prepared = await prepareCommandManagedRuntime({
|
||||
runner,
|
||||
spec: {
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
adapterKey: "codex",
|
||||
workspaceLocalDir: localWorkspaceDir,
|
||||
assets: [{ key: "bridge", localDir: bridgeAsset.localDir }],
|
||||
});
|
||||
|
||||
const queueDir = path.posix.join(prepared.runtimeRootDir, "paperclip-bridge");
|
||||
const directories = sandboxCallbackBridgeDirectories(queueDir);
|
||||
const bridgeToken = createSandboxCallbackBridgeToken();
|
||||
|
||||
const bridge = await startSandboxCallbackBridgeServer({
|
||||
runner,
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
assetRemoteDir: prepared.assetDirs.bridge,
|
||||
queueDir,
|
||||
bridgeToken,
|
||||
timeoutMs: 30_000,
|
||||
maxQueueDepth: 1,
|
||||
});
|
||||
cleanupFns.push(async () => {
|
||||
await bridge.stop();
|
||||
});
|
||||
|
||||
await writeFile(
|
||||
path.posix.join(directories.requestsDir, "existing.json"),
|
||||
`${JSON.stringify({
|
||||
id: "existing",
|
||||
method: "GET",
|
||||
path: "/api/agents/me",
|
||||
query: "",
|
||||
headers: {},
|
||||
body: "",
|
||||
createdAt: new Date().toISOString(),
|
||||
})}\n`,
|
||||
"utf8",
|
||||
);
|
||||
|
||||
const queueFullResponse = await fetch(`${bridge.baseUrl}/api/agents/me`, {
|
||||
headers: {
|
||||
authorization: `Bearer ${bridgeToken}`,
|
||||
},
|
||||
});
|
||||
expect(queueFullResponse.status).toBe(503);
|
||||
await expect(queueFullResponse.json()).resolves.toEqual({
|
||||
error: "Bridge request queue is full.",
|
||||
});
|
||||
|
||||
await rm(path.posix.join(directories.requestsDir, "existing.json"), { force: true });
|
||||
|
||||
const nonJsonResponse = await fetch(`${bridge.baseUrl}/api/issues/issue-1/comments`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
authorization: `Bearer ${bridgeToken}`,
|
||||
"content-type": "text/plain",
|
||||
},
|
||||
body: "not json",
|
||||
});
|
||||
expect(nonJsonResponse.status).toBe(415);
|
||||
await expect(nonJsonResponse.json()).resolves.toEqual({
|
||||
error: "Bridge only accepts JSON request bodies.",
|
||||
});
|
||||
});
|
||||
|
||||
it("returns a 502 when the host response times out", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-timeout-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const localWorkspaceDir = path.join(rootDir, "local-workspace");
|
||||
const remoteWorkspaceDir = path.join(rootDir, "remote-workspace");
|
||||
await mkdir(localWorkspaceDir, { recursive: true });
|
||||
await mkdir(remoteWorkspaceDir, { recursive: true });
|
||||
await writeFile(path.join(localWorkspaceDir, "README.md"), "bridge timeout test\n", "utf8");
|
||||
|
||||
const runner = createExecRunner();
|
||||
const bridgeAsset = await createSandboxCallbackBridgeAsset();
|
||||
cleanupFns.push(bridgeAsset.cleanup);
|
||||
const prepared = await prepareCommandManagedRuntime({
|
||||
runner,
|
||||
spec: {
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
adapterKey: "codex",
|
||||
workspaceLocalDir: localWorkspaceDir,
|
||||
assets: [{ key: "bridge", localDir: bridgeAsset.localDir }],
|
||||
});
|
||||
|
||||
const queueDir = path.posix.join(prepared.runtimeRootDir, "paperclip-bridge");
|
||||
const bridgeToken = createSandboxCallbackBridgeToken();
|
||||
const bridge = await startSandboxCallbackBridgeServer({
|
||||
runner,
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
assetRemoteDir: prepared.assetDirs.bridge,
|
||||
queueDir,
|
||||
bridgeToken,
|
||||
timeoutMs: 30_000,
|
||||
pollIntervalMs: 10,
|
||||
responseTimeoutMs: 75,
|
||||
});
|
||||
cleanupFns.push(async () => {
|
||||
await bridge.stop();
|
||||
});
|
||||
|
||||
const response = await fetch(`${bridge.baseUrl}/api/agents/me`, {
|
||||
headers: {
|
||||
authorization: `Bearer ${bridgeToken}`,
|
||||
},
|
||||
});
|
||||
|
||||
expect(response.status).toBe(502);
|
||||
await expect(response.json()).resolves.toEqual({
|
||||
error: "Timed out waiting for host bridge response.",
|
||||
});
|
||||
});
|
||||
|
||||
it("returns a 502 for malformed host response files", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-malformed-response-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const localWorkspaceDir = path.join(rootDir, "local-workspace");
|
||||
const remoteWorkspaceDir = path.join(rootDir, "remote-workspace");
|
||||
await mkdir(localWorkspaceDir, { recursive: true });
|
||||
await mkdir(remoteWorkspaceDir, { recursive: true });
|
||||
await writeFile(path.join(localWorkspaceDir, "README.md"), "bridge malformed response test\n", "utf8");
|
||||
|
||||
const runner = createExecRunner();
|
||||
const bridgeAsset = await createSandboxCallbackBridgeAsset();
|
||||
cleanupFns.push(bridgeAsset.cleanup);
|
||||
const prepared = await prepareCommandManagedRuntime({
|
||||
runner,
|
||||
spec: {
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
adapterKey: "codex",
|
||||
workspaceLocalDir: localWorkspaceDir,
|
||||
assets: [{ key: "bridge", localDir: bridgeAsset.localDir }],
|
||||
});
|
||||
|
||||
const queueDir = path.posix.join(prepared.runtimeRootDir, "paperclip-bridge");
|
||||
const directories = sandboxCallbackBridgeDirectories(queueDir);
|
||||
const bridgeToken = createSandboxCallbackBridgeToken();
|
||||
const bridge = await startSandboxCallbackBridgeServer({
|
||||
runner,
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
assetRemoteDir: prepared.assetDirs.bridge,
|
||||
queueDir,
|
||||
bridgeToken,
|
||||
timeoutMs: 30_000,
|
||||
pollIntervalMs: 10,
|
||||
responseTimeoutMs: 1_000,
|
||||
});
|
||||
cleanupFns.push(async () => {
|
||||
await bridge.stop();
|
||||
});
|
||||
|
||||
const responsePromise = fetch(`${bridge.baseUrl}/api/agents/me`, {
|
||||
headers: {
|
||||
authorization: `Bearer ${bridgeToken}`,
|
||||
},
|
||||
});
|
||||
|
||||
const requestFile = await waitForJsonFile(directories.requestsDir);
|
||||
await writeFile(
|
||||
path.posix.join(directories.responsesDir, requestFile),
|
||||
'{"status":200,"headers":{"content-type":"application/json"},"body"',
|
||||
"utf8",
|
||||
);
|
||||
|
||||
const response = await responsePromise;
|
||||
expect(response.status).toBe(502);
|
||||
await expect(response.json()).resolves.toMatchObject({
|
||||
error: expect.stringMatching(/JSON|Unexpected|Unterminated/i),
|
||||
});
|
||||
});
|
||||
});
|
||||
packages/adapter-utils/src/sandbox-callback-bridge.ts (new file, 822 lines)
@@ -0,0 +1,822 @@
import { randomBytes, randomUUID } from "node:crypto";
import { promises as fs } from "node:fs";
import os from "node:os";
import path from "node:path";

import type { CommandManagedRuntimeRunner } from "./command-managed-runtime.js";
import type { RunProcessResult } from "./server-utils.js";

const DEFAULT_BRIDGE_TOKEN_BYTES = 24;
const DEFAULT_BRIDGE_POLL_INTERVAL_MS = 100;
const DEFAULT_BRIDGE_RESPONSE_TIMEOUT_MS = 30_000;
const DEFAULT_BRIDGE_STOP_TIMEOUT_MS = 2_000;
const DEFAULT_BRIDGE_MAX_QUEUE_DEPTH = 64;
const DEFAULT_BRIDGE_MAX_BODY_BYTES = 256 * 1024;
const REMOTE_WRITE_BASE64_CHUNK_SIZE = 32 * 1024;
const SANDBOX_CALLBACK_BRIDGE_ENTRYPOINT = "paperclip-bridge-server.mjs";

export const DEFAULT_SANDBOX_CALLBACK_BRIDGE_MAX_BODY_BYTES = DEFAULT_BRIDGE_MAX_BODY_BYTES;

export interface SandboxCallbackBridgeRouteRule {
  method: string;
  path: RegExp;
}

export const DEFAULT_SANDBOX_CALLBACK_BRIDGE_ROUTE_ALLOWLIST: readonly SandboxCallbackBridgeRouteRule[] = [
  { method: "GET", path: /^\/api\/agents\/me$/ },
  { method: "GET", path: /^\/api\/issues\/[^/]+\/heartbeat-context$/ },
  { method: "GET", path: /^\/api\/issues\/[^/]+\/comments(?:\/[^/]+)?$/ },
  { method: "GET", path: /^\/api\/issues\/[^/]+\/documents(?:\/[^/]+)?$/ },
  { method: "POST", path: /^\/api\/issues\/[^/]+\/checkout$/ },
  { method: "POST", path: /^\/api\/issues\/[^/]+\/comments$/ },
  { method: "POST", path: /^\/api\/issues\/[^/]+\/interactions(?:\/[^/]+)?$/ },
  { method: "PATCH", path: /^\/api\/issues\/[^/]+$/ },
] as const;

export const DEFAULT_SANDBOX_CALLBACK_BRIDGE_HEADER_ALLOWLIST = [
  "accept",
  "content-type",
  "if-match",
  "if-none-match",
] as const;

export interface SandboxCallbackBridgeRequest {
|
||||
id: string;
|
||||
method: string;
|
||||
path: string;
|
||||
query: string;
|
||||
headers: Record<string, string>;
|
||||
/**
|
||||
* UTF-8 body contents. The bridge rejects non-JSON request bodies; binary
|
||||
* payloads are intentionally out of scope for this queue protocol.
|
||||
*/
|
||||
body: string;
|
||||
createdAt: string;
|
||||
}
|
||||
|
||||
export interface SandboxCallbackBridgeResponse {
|
||||
id: string;
|
||||
status: number;
|
||||
headers: Record<string, string>;
|
||||
body: string;
|
||||
completedAt: string;
|
||||
}
|
||||
|
||||
export interface SandboxCallbackBridgeAsset {
|
||||
localDir: string;
|
||||
entrypoint: string;
|
||||
cleanup(): Promise<void>;
|
||||
}
|
||||
|
||||
export interface SandboxCallbackBridgeDirectories {
|
||||
rootDir: string;
|
||||
requestsDir: string;
|
||||
responsesDir: string;
|
||||
logsDir: string;
|
||||
readyFile: string;
|
||||
pidFile: string;
|
||||
logFile: string;
|
||||
}
|
||||
|
||||
export interface SandboxCallbackBridgeQueueClient {
|
||||
makeDir(remotePath: string): Promise<void>;
|
||||
listJsonFiles(remotePath: string): Promise<string[]>;
|
||||
readTextFile(remotePath: string): Promise<string>;
|
||||
writeTextFile(remotePath: string, body: string): Promise<void>;
|
||||
rename(fromPath: string, toPath: string): Promise<void>;
|
||||
remove(remotePath: string): Promise<void>;
|
||||
}
|
||||
|
||||
export interface SandboxCallbackBridgeWorkerHandle {
|
||||
stop(options?: { drainTimeoutMs?: number }): Promise<void>;
|
||||
}
|
||||
|
||||
export interface StartedSandboxCallbackBridgeServer {
|
||||
baseUrl: string;
|
||||
host: string;
|
||||
port: number;
|
||||
pid: number;
|
||||
directories: SandboxCallbackBridgeDirectories;
|
||||
stop(): Promise<void>;
|
||||
}
|
||||
|
||||
function shellQuote(value: string) {
|
||||
return `'${value.replace(/'/g, `'"'"'`)}'`;
|
||||
}
|
||||
|
||||
function normalizeMethod(value: string | null | undefined): string {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim().toUpperCase() : "GET";
|
||||
}
|
||||
|
||||
function normalizeTimeoutMs(value: number | null | undefined, fallback: number): number {
|
||||
return typeof value === "number" && Number.isFinite(value) && value > 0 ? Math.trunc(value) : fallback;
|
||||
}
|
||||
|
||||
function toBuffer(bytes: Buffer | Uint8Array | ArrayBuffer): Buffer {
|
||||
if (Buffer.isBuffer(bytes)) return bytes;
|
||||
if (bytes instanceof ArrayBuffer) return Buffer.from(bytes);
|
||||
return Buffer.from(bytes.buffer, bytes.byteOffset, bytes.byteLength);
|
||||
}
|
||||
|
||||
function buildRunnerFailureMessage(action: string, result: RunProcessResult): string {
|
||||
const stderr = result.stderr.trim();
|
||||
const stdout = result.stdout.trim();
|
||||
const detail = stderr || stdout;
|
||||
if (result.timedOut) {
|
||||
return `${action} timed out${detail ? `: ${detail}` : ""}`;
|
||||
}
|
||||
return `${action} failed with exit code ${result.exitCode ?? "null"}${detail ? `: ${detail}` : ""}`;
|
||||
}
|
||||
|
||||
async function runShell(
|
||||
runner: CommandManagedRuntimeRunner,
|
||||
cwd: string,
|
||||
script: string,
|
||||
timeoutMs: number,
|
||||
): Promise<RunProcessResult> {
|
||||
return await runner.execute({
|
||||
command: "sh",
|
||||
args: ["-lc", script],
|
||||
cwd,
|
||||
timeoutMs,
|
||||
});
|
||||
}
|
||||
|
||||
function requireSuccessfulResult(action: string, result: RunProcessResult): RunProcessResult {
|
||||
if (!result.timedOut && result.exitCode === 0) return result;
|
||||
throw new Error(buildRunnerFailureMessage(action, result));
|
||||
}
|
||||
|
||||
function base64Chunks(body: string): string[] {
|
||||
const out: string[] = [];
|
||||
for (let offset = 0; offset < body.length; offset += REMOTE_WRITE_BASE64_CHUNK_SIZE) {
|
||||
out.push(body.slice(offset, offset + REMOTE_WRITE_BASE64_CHUNK_SIZE));
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
export function createSandboxCallbackBridgeToken(bytes = DEFAULT_BRIDGE_TOKEN_BYTES): string {
|
||||
return randomBytes(bytes).toString("base64url");
|
||||
}
|
||||
|
||||
export function authorizeSandboxCallbackBridgeRequestWithRoutes(
|
||||
request: Pick<SandboxCallbackBridgeRequest, "method" | "path">,
|
||||
routes: readonly SandboxCallbackBridgeRouteRule[] = DEFAULT_SANDBOX_CALLBACK_BRIDGE_ROUTE_ALLOWLIST,
|
||||
): string | null {
|
||||
const method = normalizeMethod(request.method);
|
||||
return routes.some((route) => route.method === method && route.path.test(request.path))
|
||||
? null
|
||||
: `Route not allowed: ${method} ${request.path}`;
|
||||
}
|
||||
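// Illustrative sketch (not part of the original diff): how the default route
// allowlist is expected to behave. Paths and identifiers below are made up.
//
//   // Allowed: "get" is upper-cased by normalizeMethod and /api/agents/me matches exactly.
//   authorizeSandboxCallbackBridgeRequestWithRoutes({ method: "get", path: "/api/agents/me" });
//   // => null
//
//   // Denied: DELETE is not in the allowlist, so a reason string is returned.
//   authorizeSandboxCallbackBridgeRequestWithRoutes({ method: "DELETE", path: "/api/issues/PAP-1" });
//   // => "Route not allowed: DELETE /api/issues/PAP-1"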
|
||||
export function sanitizeSandboxCallbackBridgeHeaders(
|
||||
headers: Record<string, string>,
|
||||
allowlist: readonly string[] = DEFAULT_SANDBOX_CALLBACK_BRIDGE_HEADER_ALLOWLIST,
|
||||
): Record<string, string> {
|
||||
const allowed = new Set(allowlist.map((header) => header.toLowerCase()));
|
||||
return Object.fromEntries(
|
||||
Object.entries(headers).filter(([key]) => allowed.has(key.toLowerCase())),
|
||||
);
|
||||
}
|
||||
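// Illustrative sketch (not part of the original diff): header sanitization keeps only
// allowlisted names (matched case-insensitively, original casing preserved) and drops
// everything else, including authorization.
//
//   sanitizeSandboxCallbackBridgeHeaders({
//     "Content-Type": "application/json",
//     authorization: "Bearer secret",   // dropped: not in the allowlist
//     "x-request-id": "abc",            // dropped: not in the allowlist
//   });
//   // => { "Content-Type": "application/json" }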
|
||||
export function sandboxCallbackBridgeDirectories(rootDir: string): SandboxCallbackBridgeDirectories {
|
||||
return {
|
||||
rootDir,
|
||||
requestsDir: path.posix.join(rootDir, "requests"),
|
||||
responsesDir: path.posix.join(rootDir, "responses"),
|
||||
logsDir: path.posix.join(rootDir, "logs"),
|
||||
readyFile: path.posix.join(rootDir, "ready.json"),
|
||||
pidFile: path.posix.join(rootDir, "server.pid"),
|
||||
logFile: path.posix.join(rootDir, "logs", "bridge.log"),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildSandboxCallbackBridgeEnv(input: {
|
||||
queueDir: string;
|
||||
bridgeToken: string;
|
||||
host?: string;
|
||||
port?: number | null;
|
||||
pollIntervalMs?: number | null;
|
||||
responseTimeoutMs?: number | null;
|
||||
maxQueueDepth?: number | null;
|
||||
maxBodyBytes?: number | null;
|
||||
}): Record<string, string> {
|
||||
return {
|
||||
PAPERCLIP_API_BRIDGE_MODE: "queue_v1",
|
||||
PAPERCLIP_BRIDGE_QUEUE_DIR: input.queueDir,
|
||||
PAPERCLIP_BRIDGE_TOKEN: input.bridgeToken,
|
||||
PAPERCLIP_BRIDGE_HOST: input.host?.trim() || "127.0.0.1",
|
||||
PAPERCLIP_BRIDGE_PORT: String(input.port && input.port > 0 ? Math.trunc(input.port) : 0),
|
||||
PAPERCLIP_BRIDGE_POLL_INTERVAL_MS: String(
|
||||
normalizeTimeoutMs(input.pollIntervalMs, DEFAULT_BRIDGE_POLL_INTERVAL_MS),
|
||||
),
|
||||
PAPERCLIP_BRIDGE_RESPONSE_TIMEOUT_MS: String(
|
||||
normalizeTimeoutMs(input.responseTimeoutMs, DEFAULT_BRIDGE_RESPONSE_TIMEOUT_MS),
|
||||
),
|
||||
PAPERCLIP_BRIDGE_MAX_QUEUE_DEPTH: String(
|
||||
normalizeTimeoutMs(input.maxQueueDepth, DEFAULT_BRIDGE_MAX_QUEUE_DEPTH),
|
||||
),
|
||||
PAPERCLIP_BRIDGE_MAX_BODY_BYTES: String(
|
||||
normalizeTimeoutMs(input.maxBodyBytes, DEFAULT_BRIDGE_MAX_BODY_BYTES),
|
||||
),
|
||||
};
|
||||
}
|
||||
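// Illustrative sketch (not part of the original diff): the env block the bridge process
// is launched with when only the required fields are supplied. The queue directory
// value below is an example path, not one taken from the code.
//
//   buildSandboxCallbackBridgeEnv({ queueDir: "/work/.paperclip-runtime/codex/paperclip-bridge", bridgeToken: "tok" });
//   // => {
//   //   PAPERCLIP_API_BRIDGE_MODE: "queue_v1",
//   //   PAPERCLIP_BRIDGE_QUEUE_DIR: "/work/.paperclip-runtime/codex/paperclip-bridge",
//   //   PAPERCLIP_BRIDGE_TOKEN: "tok",
//   //   PAPERCLIP_BRIDGE_HOST: "127.0.0.1",
//   //   PAPERCLIP_BRIDGE_PORT: "0",               // 0 = let the server pick a free port
//   //   PAPERCLIP_BRIDGE_POLL_INTERVAL_MS: "100",
//   //   PAPERCLIP_BRIDGE_RESPONSE_TIMEOUT_MS: "30000",
//   //   PAPERCLIP_BRIDGE_MAX_QUEUE_DEPTH: "64",
//   //   PAPERCLIP_BRIDGE_MAX_BODY_BYTES: "262144",
//   // }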
|
||||
export async function createSandboxCallbackBridgeAsset(): Promise<SandboxCallbackBridgeAsset> {
|
||||
const localDir = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-asset-"));
|
||||
const entrypoint = path.join(localDir, SANDBOX_CALLBACK_BRIDGE_ENTRYPOINT);
|
||||
await fs.writeFile(entrypoint, getSandboxCallbackBridgeServerSource(), "utf8");
|
||||
return {
|
||||
localDir,
|
||||
entrypoint,
|
||||
cleanup: async () => {
|
||||
await fs.rm(localDir, { recursive: true, force: true }).catch(() => undefined);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function createFileSystemSandboxCallbackBridgeQueueClient(): SandboxCallbackBridgeQueueClient {
|
||||
return {
|
||||
makeDir: async (remotePath) => {
|
||||
await fs.mkdir(remotePath, { recursive: true });
|
||||
},
|
||||
listJsonFiles: async (remotePath) => {
|
||||
const entries = await fs.readdir(remotePath, { withFileTypes: true }).catch(() => []);
|
||||
return entries
|
||||
.filter((entry) => entry.isFile() && entry.name.endsWith(".json"))
|
||||
.map((entry) => entry.name)
|
||||
.sort((left, right) => left.localeCompare(right));
|
||||
},
|
||||
readTextFile: async (remotePath) => await fs.readFile(remotePath, "utf8"),
|
||||
writeTextFile: async (remotePath, body) => {
|
||||
await fs.mkdir(path.posix.dirname(remotePath), { recursive: true });
|
||||
await fs.writeFile(remotePath, body, "utf8");
|
||||
},
|
||||
rename: async (fromPath, toPath) => {
|
||||
await fs.mkdir(path.posix.dirname(toPath), { recursive: true });
|
||||
await fs.rename(fromPath, toPath);
|
||||
},
|
||||
remove: async (remotePath) => {
|
||||
await fs.rm(remotePath, { recursive: true, force: true }).catch(() => undefined);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function createCommandManagedSandboxCallbackBridgeQueueClient(input: {
|
||||
runner: CommandManagedRuntimeRunner;
|
||||
remoteCwd: string;
|
||||
timeoutMs?: number | null;
|
||||
}): SandboxCallbackBridgeQueueClient {
|
||||
const timeoutMs = normalizeTimeoutMs(input.timeoutMs, DEFAULT_BRIDGE_RESPONSE_TIMEOUT_MS);
|
||||
const runChecked = async (action: string, script: string) =>
|
||||
requireSuccessfulResult(action, await runShell(input.runner, input.remoteCwd, script, timeoutMs));
|
||||
|
||||
return {
|
||||
makeDir: async (remotePath) => {
|
||||
await runChecked(`mkdir ${remotePath}`, `mkdir -p ${shellQuote(remotePath)}`);
|
||||
},
|
||||
listJsonFiles: async (remotePath) => {
|
||||
const result = await runShell(
|
||||
input.runner,
|
||||
input.remoteCwd,
|
||||
[
|
||||
`if [ -d ${shellQuote(remotePath)} ]; then`,
|
||||
` for file in ${shellQuote(remotePath)}/*.json; do`,
|
||||
` [ -f "$file" ] || continue`,
|
||||
" basename \"$file\"",
|
||||
" done",
|
||||
"fi",
|
||||
].join("\n"),
|
||||
timeoutMs,
|
||||
);
|
||||
requireSuccessfulResult(`list ${remotePath}`, result);
|
||||
return result.stdout
|
||||
.split(/\r?\n/)
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0)
|
||||
.sort((left, right) => left.localeCompare(right));
|
||||
},
|
||||
readTextFile: async (remotePath) => {
|
||||
const result = await runChecked(`read ${remotePath}`, `base64 < ${shellQuote(remotePath)}`);
|
||||
return Buffer.from(result.stdout.replace(/\s+/g, ""), "base64").toString("utf8");
|
||||
},
|
||||
writeTextFile: async (remotePath, body) => {
|
||||
const remoteDir = path.posix.dirname(remotePath);
|
||||
const tempPath = `${remotePath}.paperclip-upload.b64`;
|
||||
await runChecked(
|
||||
`prepare upload ${remotePath}`,
|
||||
`mkdir -p ${shellQuote(remoteDir)} && rm -f ${shellQuote(tempPath)} && : > ${shellQuote(tempPath)}`,
|
||||
);
|
||||
const base64Body = toBuffer(Buffer.from(body, "utf8")).toString("base64");
|
||||
for (const chunk of base64Chunks(base64Body)) {
|
||||
await runChecked(
|
||||
`append upload chunk ${remotePath}`,
|
||||
`printf '%s' ${shellQuote(chunk)} >> ${shellQuote(tempPath)}`,
|
||||
);
|
||||
}
|
||||
await runChecked(
|
||||
`finalize upload ${remotePath}`,
|
||||
`base64 -d < ${shellQuote(tempPath)} > ${shellQuote(remotePath)} && rm -f ${shellQuote(tempPath)}`,
|
||||
);
|
||||
},
|
||||
rename: async (fromPath, toPath) => {
|
||||
await runChecked(
|
||||
`rename ${fromPath}`,
|
||||
`mkdir -p ${shellQuote(path.posix.dirname(toPath))} && mv ${shellQuote(fromPath)} ${shellQuote(toPath)}`,
|
||||
);
|
||||
},
|
||||
remove: async (remotePath) => {
|
||||
await runChecked(`remove ${remotePath}`, `rm -rf ${shellQuote(remotePath)}`);
|
||||
},
|
||||
};
|
||||
}
|
||||
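// Worked example (not part of the original diff) for the chunked upload above:
// a 100 KiB UTF-8 body base64-encodes to 4 * ceil(102_400 / 3) = 136_536 characters,
// which splits into ceil(136_536 / 32_768) = 5 chunks of at most 32 KiB each.
// Each chunk is appended with its own `printf` call, presumably to keep every remote
// command comfortably below shell argument-length limits, and the final `base64 -d`
// step decodes the assembled temp file into the target path before removing it.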
|
||||
async function writeBridgeResponse(
|
||||
client: SandboxCallbackBridgeQueueClient,
|
||||
responsePath: string,
|
||||
response: SandboxCallbackBridgeResponse,
|
||||
) {
|
||||
const tempPath = `${responsePath}.tmp`;
|
||||
await client.writeTextFile(tempPath, `${JSON.stringify(response)}\n`);
|
||||
await client.rename(tempPath, responsePath);
|
||||
}
|
||||
|
||||
export async function startSandboxCallbackBridgeWorker(input: {
|
||||
client: SandboxCallbackBridgeQueueClient;
|
||||
queueDir: string;
|
||||
pollIntervalMs?: number | null;
|
||||
authorizeRequest?: (request: SandboxCallbackBridgeRequest) => string | null | Promise<string | null>;
|
||||
handleRequest: (request: SandboxCallbackBridgeRequest) => Promise<{
|
||||
status: number;
|
||||
headers?: Record<string, string>;
|
||||
body?: string;
|
||||
}>;
|
||||
maxBodyBytes?: number | null;
|
||||
}): Promise<SandboxCallbackBridgeWorkerHandle> {
|
||||
const pollIntervalMs = normalizeTimeoutMs(input.pollIntervalMs, DEFAULT_BRIDGE_POLL_INTERVAL_MS);
|
||||
const maxBodyBytes = normalizeTimeoutMs(input.maxBodyBytes, DEFAULT_BRIDGE_MAX_BODY_BYTES);
|
||||
const directories = sandboxCallbackBridgeDirectories(input.queueDir);
|
||||
await input.client.makeDir(directories.rootDir);
|
||||
await input.client.makeDir(directories.requestsDir);
|
||||
await input.client.makeDir(directories.responsesDir);
|
||||
await input.client.makeDir(directories.logsDir);
|
||||
|
||||
let stopping = false;
|
||||
let inFlight = 0;
|
||||
let settled = false;
|
||||
let stopDeadline = Number.POSITIVE_INFINITY;
|
||||
let settleResolve: (() => void) | null = null;
|
||||
const settledPromise = new Promise<void>((resolve) => {
|
||||
settleResolve = resolve;
|
||||
});
|
||||
const authorizeRequest = input.authorizeRequest ??
|
||||
((request: SandboxCallbackBridgeRequest) => authorizeSandboxCallbackBridgeRequestWithRoutes(request));
|
||||
|
||||
const processRequestFile = async (fileName: string) => {
|
||||
const requestPath = path.posix.join(directories.requestsDir, fileName);
|
||||
const responsePath = path.posix.join(directories.responsesDir, fileName);
|
||||
const raw = await input.client.readTextFile(requestPath);
|
||||
let request: SandboxCallbackBridgeRequest;
|
||||
try {
|
||||
request = JSON.parse(raw) as SandboxCallbackBridgeRequest;
|
||||
} catch {
|
||||
const requestId = fileName.replace(/\.json$/i, "") || randomUUID();
|
||||
await writeBridgeResponse(input.client, responsePath, {
|
||||
id: requestId,
|
||||
status: 400,
|
||||
headers: { "content-type": "application/json" },
|
||||
body: JSON.stringify({ error: "Invalid bridge request payload." }),
|
||||
completedAt: new Date().toISOString(),
|
||||
});
|
||||
await input.client.remove(requestPath);
|
||||
return;
|
||||
}
|
||||
|
||||
const denialReason = await authorizeRequest(request);
|
||||
if (denialReason) {
|
||||
await writeBridgeResponse(input.client, responsePath, {
|
||||
id: request.id,
|
||||
status: 403,
|
||||
headers: { "content-type": "application/json" },
|
||||
body: JSON.stringify({ error: denialReason }),
|
||||
completedAt: new Date().toISOString(),
|
||||
});
|
||||
await input.client.remove(requestPath);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await input.handleRequest(request);
|
||||
const responseBody = result.body ?? "";
|
||||
if (Buffer.byteLength(responseBody, "utf8") > maxBodyBytes) {
|
||||
throw new Error(`Bridge response body exceeded the configured size limit of ${maxBodyBytes} bytes.`);
|
||||
}
|
||||
await writeBridgeResponse(input.client, responsePath, {
|
||||
id: request.id,
|
||||
status: result.status,
|
||||
headers: result.headers ?? {},
|
||||
body: responseBody,
|
||||
completedAt: new Date().toISOString(),
|
||||
});
|
||||
} catch (error) {
|
||||
console.warn(
|
||||
`[paperclip] sandbox callback bridge handler failed for ${request.id}: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
await writeBridgeResponse(input.client, responsePath, {
|
||||
id: request.id,
|
||||
status: 502,
|
||||
headers: { "content-type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
}),
|
||||
completedAt: new Date().toISOString(),
|
||||
});
|
||||
} finally {
|
||||
await input.client.remove(requestPath);
|
||||
}
|
||||
};
|
||||
|
||||
const failPendingRequests = async (message: string) => {
|
||||
const fileNames = await input.client.listJsonFiles(directories.requestsDir).catch(() => []);
|
||||
for (const fileName of fileNames) {
|
||||
const requestPath = path.posix.join(directories.requestsDir, fileName);
|
||||
const responsePath = path.posix.join(directories.responsesDir, fileName);
|
||||
const requestId = fileName.replace(/\.json$/i, "") || randomUUID();
|
||||
try {
|
||||
const raw = await input.client.readTextFile(requestPath);
|
||||
const parsed = JSON.parse(raw) as Partial<SandboxCallbackBridgeRequest>;
|
||||
await writeBridgeResponse(input.client, responsePath, {
|
||||
id: typeof parsed.id === "string" && parsed.id.length > 0 ? parsed.id : requestId,
|
||||
status: 503,
|
||||
headers: { "content-type": "application/json" },
|
||||
body: JSON.stringify({ error: message }),
|
||||
completedAt: new Date().toISOString(),
|
||||
});
|
||||
} catch (error) {
|
||||
console.warn(
|
||||
`[paperclip] sandbox callback bridge failed to abort pending request ${requestId}: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
} finally {
|
||||
await input.client.remove(requestPath).catch(() => undefined);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const loop = (async () => {
|
||||
try {
|
||||
while (true) {
|
||||
const fileNames = await input.client.listJsonFiles(directories.requestsDir);
|
||||
if (fileNames.length === 0) {
|
||||
if (stopping) {
|
||||
break;
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
|
||||
continue;
|
||||
}
|
||||
for (const fileName of fileNames) {
|
||||
if (stopping && Date.now() >= stopDeadline) break;
|
||||
inFlight += 1;
|
||||
try {
|
||||
await processRequestFile(fileName);
|
||||
} finally {
|
||||
inFlight -= 1;
|
||||
}
|
||||
}
|
||||
if (stopping && Date.now() >= stopDeadline) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
settled = true;
|
||||
if (settleResolve) {
|
||||
settleResolve();
|
||||
}
|
||||
}
|
||||
})();
|
||||
|
||||
void loop;
|
||||
|
||||
return {
|
||||
stop: async (options = {}) => {
|
||||
stopping = true;
|
||||
const drainMs = normalizeTimeoutMs(options.drainTimeoutMs, DEFAULT_BRIDGE_STOP_TIMEOUT_MS);
|
||||
stopDeadline = Date.now() + drainMs;
|
||||
if (!settled) {
|
||||
await Promise.race([
|
||||
settledPromise,
|
||||
new Promise<void>((resolve) => setTimeout(resolve, drainMs)),
|
||||
]);
|
||||
}
|
||||
await failPendingRequests("Bridge worker stopped before request could be handled.");
|
||||
},
|
||||
};
|
||||
}
|
||||
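// Usage sketch (not part of the original diff): wiring the worker to the local
// filesystem queue client and forwarding allowlisted requests to a host API.
// The base URL, API key variable, and queue directory below are illustrative assumptions.
//
//   const worker = await startSandboxCallbackBridgeWorker({
//     client: createFileSystemSandboxCallbackBridgeQueueClient(),
//     queueDir: "/tmp/paperclip-bridge",
//     handleRequest: async (request) => {
//       const upstream = await fetch(`http://127.0.0.1:3000${request.path}${request.query}`, {
//         method: request.method,
//         headers: { ...request.headers, authorization: `Bearer ${process.env.PAPERCLIP_API_KEY}` },
//         body: request.method === "GET" ? undefined : request.body,
//       });
//       return {
//         status: upstream.status,
//         headers: { "content-type": upstream.headers.get("content-type") ?? "application/json" },
//         body: await upstream.text(),
//       };
//     },
//   });
//   // ...later, during teardown:
//   await worker.stop({ drainTimeoutMs: 2_000 });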
|
||||
export async function startSandboxCallbackBridgeServer(input: {
|
||||
runner: CommandManagedRuntimeRunner;
|
||||
remoteCwd: string;
|
||||
assetRemoteDir: string;
|
||||
queueDir: string;
|
||||
bridgeToken: string;
|
||||
bridgeAsset?: SandboxCallbackBridgeAsset | null;
|
||||
host?: string;
|
||||
port?: number | null;
|
||||
pollIntervalMs?: number | null;
|
||||
responseTimeoutMs?: number | null;
|
||||
timeoutMs?: number | null;
|
||||
nodeCommand?: string;
|
||||
maxQueueDepth?: number | null;
|
||||
maxBodyBytes?: number | null;
|
||||
}): Promise<StartedSandboxCallbackBridgeServer> {
|
||||
const timeoutMs = normalizeTimeoutMs(input.timeoutMs, DEFAULT_BRIDGE_RESPONSE_TIMEOUT_MS);
|
||||
const directories = sandboxCallbackBridgeDirectories(input.queueDir);
|
||||
const remoteEntrypoint = path.posix.join(input.assetRemoteDir, SANDBOX_CALLBACK_BRIDGE_ENTRYPOINT);
|
||||
if (input.bridgeAsset) {
|
||||
const assetClient = createCommandManagedSandboxCallbackBridgeQueueClient({
|
||||
runner: input.runner,
|
||||
remoteCwd: input.remoteCwd,
|
||||
timeoutMs,
|
||||
});
|
||||
await assetClient.makeDir(input.assetRemoteDir);
|
||||
const entrypointSource = await fs.readFile(input.bridgeAsset.entrypoint, "utf8");
|
||||
await assetClient.writeTextFile(remoteEntrypoint, entrypointSource);
|
||||
}
|
||||
const env = buildSandboxCallbackBridgeEnv({
|
||||
queueDir: input.queueDir,
|
||||
bridgeToken: input.bridgeToken,
|
||||
host: input.host,
|
||||
port: input.port,
|
||||
pollIntervalMs: input.pollIntervalMs,
|
||||
responseTimeoutMs: input.responseTimeoutMs,
|
||||
maxQueueDepth: input.maxQueueDepth,
|
||||
maxBodyBytes: input.maxBodyBytes,
|
||||
});
|
||||
const nodeCommand = input.nodeCommand?.trim() || "node";
|
||||
const startResult = await input.runner.execute({
|
||||
command: "sh",
|
||||
args: [
|
||||
"-lc",
|
||||
[
|
||||
`mkdir -p ${shellQuote(directories.requestsDir)} ${shellQuote(directories.responsesDir)} ${shellQuote(directories.logsDir)}`,
|
||||
`rm -f ${shellQuote(directories.readyFile)} ${shellQuote(directories.pidFile)}`,
|
||||
`nohup env ${Object.entries(env).map(([key, value]) => `${key}=${shellQuote(value)}`).join(" ")} ` +
|
||||
`${shellQuote(nodeCommand)} ${shellQuote(remoteEntrypoint)} ` +
|
||||
`>> ${shellQuote(directories.logFile)} 2>&1 < /dev/null &`,
|
||||
"pid=$!",
|
||||
`printf '%s\\n' \"$pid\" > ${shellQuote(directories.pidFile)}`,
|
||||
"printf '{\"pid\":%s}\\n' \"$pid\"",
|
||||
].join("\n"),
|
||||
],
|
||||
cwd: input.remoteCwd,
|
||||
timeoutMs,
|
||||
});
|
||||
requireSuccessfulResult("start sandbox callback bridge", startResult);
|
||||
|
||||
const readyResult = await runShell(
|
||||
input.runner,
|
||||
input.remoteCwd,
|
||||
[
|
||||
"i=0",
|
||||
`while [ \"$i\" -lt 200 ]; do`,
|
||||
` if [ -s ${shellQuote(directories.readyFile)} ]; then`,
|
||||
` cat ${shellQuote(directories.readyFile)}`,
|
||||
" exit 0",
|
||||
" fi",
|
||||
` if [ -s ${shellQuote(directories.logFile)} ] && ! kill -0 \"$(cat ${shellQuote(directories.pidFile)} 2>/dev/null)\" 2>/dev/null; then`,
|
||||
` cat ${shellQuote(directories.logFile)} >&2`,
|
||||
" exit 1",
|
||||
" fi",
|
||||
" i=$((i + 1))",
|
||||
" sleep 0.05",
|
||||
"done",
|
||||
`echo "Timed out waiting for bridge readiness." >&2`,
|
||||
`if [ -s ${shellQuote(directories.logFile)} ]; then cat ${shellQuote(directories.logFile)} >&2; fi`,
|
||||
"exit 1",
|
||||
].join("\n"),
|
||||
timeoutMs,
|
||||
);
|
||||
requireSuccessfulResult("wait for sandbox callback bridge readiness", readyResult);
|
||||
|
||||
let readyData: { host?: string; port?: number; baseUrl?: string; pid?: number };
|
||||
try {
|
||||
readyData = JSON.parse(readyResult.stdout.trim()) as { host?: string; port?: number; baseUrl?: string; pid?: number };
|
||||
} catch (error) {
|
||||
throw new Error(
|
||||
`Sandbox callback bridge wrote invalid readiness JSON: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
}
|
||||
|
||||
const host = typeof readyData.host === "string" && readyData.host.trim().length > 0
|
||||
? readyData.host.trim()
|
||||
: "127.0.0.1";
|
||||
const port = typeof readyData.port === "number" && Number.isFinite(readyData.port) ? readyData.port : 0;
|
||||
if (!port) {
|
||||
throw new Error("Sandbox callback bridge did not report a listening port.");
|
||||
}
|
||||
const baseUrl =
|
||||
typeof readyData.baseUrl === "string" && readyData.baseUrl.trim().length > 0
|
||||
? readyData.baseUrl.trim()
|
||||
: `http://${host}:${port}`;
|
||||
|
||||
return {
|
||||
baseUrl,
|
||||
host,
|
||||
port,
|
||||
pid: typeof readyData.pid === "number" && Number.isFinite(readyData.pid) ? readyData.pid : 0,
|
||||
directories,
|
||||
stop: async () => {
|
||||
const stopResult = await input.runner.execute({
|
||||
command: "sh",
|
||||
args: [
|
||||
"-lc",
|
||||
[
|
||||
`if [ -s ${shellQuote(directories.pidFile)} ]; then`,
|
||||
` pid="$(cat ${shellQuote(directories.pidFile)})"`,
|
||||
" kill \"$pid\" 2>/dev/null || true",
|
||||
" i=0",
|
||||
" while kill -0 \"$pid\" 2>/dev/null && [ \"$i\" -lt 40 ]; do",
|
||||
" i=$((i + 1))",
|
||||
" sleep 0.05",
|
||||
" done",
|
||||
"fi",
|
||||
`rm -f ${shellQuote(directories.pidFile)} ${shellQuote(directories.readyFile)}`,
|
||||
].join("\n"),
|
||||
],
|
||||
cwd: input.remoteCwd,
|
||||
timeoutMs,
|
||||
});
|
||||
if (stopResult.timedOut) {
|
||||
throw new Error(buildRunnerFailureMessage("stop sandbox callback bridge", stopResult));
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
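// End-to-end flow sketch (not part of the original diff): the in-sandbox process calls
// the bridge over HTTP, the bridge persists <queueDir>/requests/<uuid>.json, the host-side
// worker writes <queueDir>/responses/<uuid>.json (via an atomic .tmp rename), and the
// bridge replays that file as the HTTP response. The BRIDGE_BASE_URL variable below is an
// illustrative assumption; only PAPERCLIP_BRIDGE_TOKEN is set by buildSandboxCallbackBridgeEnv.
//
//   const res = await fetch(`${process.env.BRIDGE_BASE_URL}/api/agents/me`, {
//     headers: { authorization: `Bearer ${process.env.PAPERCLIP_BRIDGE_TOKEN}` },
//   });
//   // 401 = bad token, 403 = route not allowlisted, 502 = handler failure or response timeout.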
|
||||
function getSandboxCallbackBridgeServerSource(): string {
|
||||
return `import { randomUUID, timingSafeEqual } from "node:crypto";
|
||||
import { createServer } from "node:http";
|
||||
import { promises as fs } from "node:fs";
|
||||
import path from "node:path";
|
||||
|
||||
const queueDir = process.env.PAPERCLIP_BRIDGE_QUEUE_DIR;
|
||||
const bridgeToken = process.env.PAPERCLIP_BRIDGE_TOKEN;
|
||||
const host = process.env.PAPERCLIP_BRIDGE_HOST || "127.0.0.1";
|
||||
const port = Number(process.env.PAPERCLIP_BRIDGE_PORT || "0");
|
||||
const pollIntervalMs = Number(process.env.PAPERCLIP_BRIDGE_POLL_INTERVAL_MS || "100");
|
||||
const responseTimeoutMs = Number(process.env.PAPERCLIP_BRIDGE_RESPONSE_TIMEOUT_MS || "30000");
|
||||
const maxQueueDepth = Number(process.env.PAPERCLIP_BRIDGE_MAX_QUEUE_DEPTH || "${DEFAULT_BRIDGE_MAX_QUEUE_DEPTH}");
|
||||
const maxBodyBytes = Number(process.env.PAPERCLIP_BRIDGE_MAX_BODY_BYTES || "${DEFAULT_BRIDGE_MAX_BODY_BYTES}");
|
||||
const allowedHeaders = new Set(${JSON.stringify([...DEFAULT_SANDBOX_CALLBACK_BRIDGE_HEADER_ALLOWLIST])});
|
||||
|
||||
if (!queueDir || !bridgeToken) {
|
||||
throw new Error("PAPERCLIP_BRIDGE_QUEUE_DIR and PAPERCLIP_BRIDGE_TOKEN are required.");
|
||||
}
|
||||
|
||||
const requestsDir = path.posix.join(queueDir, "requests");
|
||||
const responsesDir = path.posix.join(queueDir, "responses");
|
||||
const logsDir = path.posix.join(queueDir, "logs");
|
||||
const readyFile = path.posix.join(queueDir, "ready.json");
|
||||
|
||||
function sleep(ms) {
|
||||
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
function normalizeHeaders(headers) {
|
||||
const out = {};
|
||||
for (const [key, value] of Object.entries(headers)) {
|
||||
if (value == null) continue;
|
||||
const normalizedKey = key.toLowerCase();
|
||||
if (!allowedHeaders.has(normalizedKey)) {
|
||||
continue;
|
||||
}
|
||||
out[normalizedKey] = Array.isArray(value) ? value.join(", ") : String(value);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
async function readBody(req) {
|
||||
const chunks = [];
|
||||
let totalBytes = 0;
|
||||
for await (const chunk of req) {
|
||||
const nextChunk = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
|
||||
chunks.push(nextChunk);
|
||||
totalBytes += nextChunk.byteLength;
|
||||
if (totalBytes > maxBodyBytes) {
|
||||
throw new Error("Bridge request body exceeded the configured size limit.");
|
||||
}
|
||||
}
|
||||
return Buffer.concat(chunks).toString("utf8");
|
||||
}
|
||||
|
||||
async function queueDepth() {
|
||||
const entries = await fs.readdir(requestsDir, { withFileTypes: true }).catch(() => []);
|
||||
return entries.filter((entry) => entry.isFile() && entry.name.endsWith(".json")).length;
|
||||
}
|
||||
|
||||
function tokensMatch(received) {
|
||||
const expected = Buffer.from(bridgeToken, "utf8");
|
||||
const actual = Buffer.from(typeof received === "string" ? received : "", "utf8");
|
||||
if (expected.length !== actual.length) return false;
|
||||
return timingSafeEqual(expected, actual);
|
||||
}
|
||||
|
||||
async function waitForResponse(requestId) {
|
||||
const responsePath = path.posix.join(responsesDir, \`\${requestId}.json\`);
|
||||
const deadline = Date.now() + responseTimeoutMs;
|
||||
while (Date.now() < deadline) {
|
||||
const body = await fs.readFile(responsePath, "utf8").catch(() => null);
|
||||
if (body != null) {
|
||||
await fs.rm(responsePath, { force: true }).catch(() => undefined);
|
||||
return JSON.parse(body);
|
||||
}
|
||||
await sleep(pollIntervalMs);
|
||||
}
|
||||
throw new Error("Timed out waiting for host bridge response.");
|
||||
}
|
||||
|
||||
const server = createServer(async (req, res) => {
|
||||
try {
|
||||
const auth = req.headers.authorization || "";
|
||||
const receivedToken = auth.startsWith("Bearer ") ? auth.slice("Bearer ".length) : "";
|
||||
if (!tokensMatch(receivedToken)) {
|
||||
res.statusCode = 401;
|
||||
res.setHeader("content-type", "application/json");
|
||||
res.end(JSON.stringify({ error: "Invalid bridge token." }));
|
||||
return;
|
||||
}
|
||||
|
||||
if (await queueDepth() >= maxQueueDepth) {
|
||||
res.statusCode = 503;
|
||||
res.setHeader("content-type", "application/json");
|
||||
res.end(JSON.stringify({ error: "Bridge request queue is full." }));
|
||||
return;
|
||||
}
|
||||
|
||||
const url = new URL(req.url || "/", "http://127.0.0.1");
|
||||
const contentType = typeof req.headers["content-type"] === "string" ? req.headers["content-type"] : "";
|
||||
if (req.method && req.method !== "GET" && req.method !== "HEAD" && !/json/i.test(contentType)) {
|
||||
res.statusCode = 415;
|
||||
res.setHeader("content-type", "application/json");
|
||||
res.end(JSON.stringify({ error: "Bridge only accepts JSON request bodies." }));
|
||||
return;
|
||||
}
|
||||
const requestId = randomUUID();
|
||||
const requestBody = await readBody(req);
|
||||
const payload = {
|
||||
id: requestId,
|
||||
method: req.method || "GET",
|
||||
path: url.pathname,
|
||||
query: url.search,
|
||||
headers: normalizeHeaders(req.headers),
|
||||
body: requestBody,
|
||||
createdAt: new Date().toISOString(),
|
||||
};
|
||||
const requestPath = path.posix.join(requestsDir, \`\${requestId}.json\`);
|
||||
const tempPath = \`\${requestPath}.tmp\`;
|
||||
await fs.writeFile(tempPath, \`\${JSON.stringify(payload)}\\n\`, "utf8");
|
||||
await fs.rename(tempPath, requestPath);
|
||||
|
||||
const response = await waitForResponse(requestId);
|
||||
res.statusCode = typeof response.status === "number" ? response.status : 200;
|
||||
for (const [key, value] of Object.entries(response.headers || {})) {
|
||||
if (typeof value !== "string" || key.toLowerCase() === "content-length") continue;
|
||||
res.setHeader(key, value);
|
||||
}
|
||||
res.end(typeof response.body === "string" ? response.body : "");
|
||||
} catch (error) {
|
||||
res.statusCode = 502;
|
||||
res.setHeader("content-type", "application/json");
|
||||
res.end(JSON.stringify({ error: error instanceof Error ? error.message : String(error) }));
|
||||
}
|
||||
});
|
||||
|
||||
async function shutdown() {
|
||||
server.close(() => {
|
||||
process.exit(0);
|
||||
});
|
||||
}
|
||||
|
||||
process.on("SIGINT", () => void shutdown());
|
||||
process.on("SIGTERM", () => void shutdown());
|
||||
|
||||
await fs.mkdir(requestsDir, { recursive: true });
|
||||
await fs.mkdir(responsesDir, { recursive: true });
|
||||
await fs.mkdir(logsDir, { recursive: true });
|
||||
|
||||
server.listen(port, host, async () => {
|
||||
const address = server.address();
|
||||
if (!address || typeof address === "string") {
|
||||
throw new Error("Bridge server did not expose a TCP address.");
|
||||
}
|
||||
const ready = {
|
||||
pid: process.pid,
|
||||
host,
|
||||
port: address.port,
|
||||
baseUrl: \`http://\${host}:\${address.port}\`,
|
||||
startedAt: new Date().toISOString(),
|
||||
};
|
||||
const tempReadyFile = \`\${readyFile}.tmp\`;
|
||||
await fs.writeFile(tempReadyFile, JSON.stringify(ready), "utf8");
|
||||
await fs.rename(tempReadyFile, readyFile);
|
||||
});`;
|
||||
}
|
||||
133
packages/adapter-utils/src/sandbox-managed-runtime.test.ts
Normal file
@@ -0,0 +1,133 @@
|
||||
import { lstat, mkdir, mkdtemp, readFile, readdir, rm, symlink, writeFile } from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { execFile as execFileCallback } from "node:child_process";
|
||||
import { promisify } from "node:util";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
|
||||
import {
|
||||
mirrorDirectory,
|
||||
prepareSandboxManagedRuntime,
|
||||
type SandboxManagedRuntimeClient,
|
||||
} from "./sandbox-managed-runtime.js";
|
||||
|
||||
const execFile = promisify(execFileCallback);
|
||||
|
||||
describe("sandbox managed runtime", () => {
|
||||
const cleanupDirs: string[] = [];
|
||||
|
||||
afterEach(async () => {
|
||||
while (cleanupDirs.length > 0) {
|
||||
const dir = cleanupDirs.pop();
|
||||
if (!dir) continue;
|
||||
await rm(dir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
});
|
||||
|
||||
it("preserves excluded local workspace artifacts during restore mirroring", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-sandbox-restore-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const sourceDir = path.join(rootDir, "source");
|
||||
const targetDir = path.join(rootDir, "target");
|
||||
await mkdir(path.join(sourceDir, "src"), { recursive: true });
|
||||
await mkdir(path.join(targetDir, ".claude"), { recursive: true });
|
||||
await mkdir(path.join(targetDir, ".paperclip-runtime"), { recursive: true });
|
||||
await writeFile(path.join(sourceDir, "src", "app.ts"), "export const value = 2;\n", "utf8");
|
||||
await writeFile(path.join(targetDir, "stale.txt"), "remove me\n", "utf8");
|
||||
await writeFile(path.join(targetDir, ".claude", "settings.json"), "{\"keep\":true}\n", "utf8");
|
||||
await writeFile(path.join(targetDir, ".claude.json"), "{\"keep\":true}\n", "utf8");
|
||||
await writeFile(path.join(targetDir, ".paperclip-runtime", "state.json"), "{}\n", "utf8");
|
||||
|
||||
await mirrorDirectory(sourceDir, targetDir, {
|
||||
preserveAbsent: [".paperclip-runtime", ".claude", ".claude.json"],
|
||||
});
|
||||
|
||||
await expect(readFile(path.join(targetDir, "src", "app.ts"), "utf8")).resolves.toBe("export const value = 2;\n");
|
||||
await expect(readFile(path.join(targetDir, ".claude", "settings.json"), "utf8")).resolves.toBe("{\"keep\":true}\n");
|
||||
await expect(readFile(path.join(targetDir, ".claude.json"), "utf8")).resolves.toBe("{\"keep\":true}\n");
|
||||
await expect(readFile(path.join(targetDir, ".paperclip-runtime", "state.json"), "utf8")).resolves.toBe("{}\n");
|
||||
await expect(readFile(path.join(targetDir, "stale.txt"), "utf8")).rejects.toMatchObject({ code: "ENOENT" });
|
||||
});
|
||||
|
||||
it("syncs workspace and assets through a provider-neutral sandbox client", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-sandbox-managed-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const localWorkspaceDir = path.join(rootDir, "local-workspace");
|
||||
const remoteWorkspaceDir = path.join(rootDir, "remote-workspace");
|
||||
const localAssetsDir = path.join(rootDir, "local-assets");
|
||||
const linkedAssetPath = path.join(rootDir, "linked-skill.md");
|
||||
await mkdir(path.join(localWorkspaceDir, ".claude"), { recursive: true });
|
||||
await mkdir(localAssetsDir, { recursive: true });
|
||||
await writeFile(path.join(localWorkspaceDir, "README.md"), "local workspace\n", "utf8");
|
||||
await writeFile(path.join(localWorkspaceDir, "._README.md"), "appledouble\n", "utf8");
|
||||
await writeFile(path.join(localWorkspaceDir, ".claude", "settings.json"), "{\"local\":true}\n", "utf8");
|
||||
await writeFile(linkedAssetPath, "skill body\n", "utf8");
|
||||
await symlink(linkedAssetPath, path.join(localAssetsDir, "skill.md"));
|
||||
|
||||
const client: SandboxManagedRuntimeClient = {
|
||||
makeDir: async (remotePath) => {
|
||||
await mkdir(remotePath, { recursive: true });
|
||||
},
|
||||
writeFile: async (remotePath, bytes) => {
|
||||
await mkdir(path.dirname(remotePath), { recursive: true });
|
||||
await writeFile(remotePath, Buffer.from(bytes));
|
||||
},
|
||||
readFile: async (remotePath) => await readFile(remotePath),
|
||||
listFiles: async (remotePath) => {
|
||||
const entries = await readdir(remotePath, { withFileTypes: true }).catch(() => []);
|
||||
return entries
|
||||
.filter((entry) => entry.isFile())
|
||||
.map((entry) => entry.name)
|
||||
.sort((left, right) => left.localeCompare(right));
|
||||
},
|
||||
remove: async (remotePath) => {
|
||||
await rm(remotePath, { recursive: true, force: true });
|
||||
},
|
||||
run: async (command) => {
|
||||
await execFile("sh", ["-lc", command], {
|
||||
maxBuffer: 32 * 1024 * 1024,
|
||||
});
|
||||
},
|
||||
};
|
||||
|
||||
const prepared = await prepareSandboxManagedRuntime({
|
||||
spec: {
|
||||
transport: "sandbox",
|
||||
provider: "test",
|
||||
sandboxId: "sandbox-1",
|
||||
remoteCwd: remoteWorkspaceDir,
|
||||
timeoutMs: 30_000,
|
||||
apiKey: null,
|
||||
},
|
||||
adapterKey: "test-adapter",
|
||||
client,
|
||||
workspaceLocalDir: localWorkspaceDir,
|
||||
workspaceExclude: [".claude"],
|
||||
preserveAbsentOnRestore: [".claude"],
|
||||
assets: [{
|
||||
key: "skills",
|
||||
localDir: localAssetsDir,
|
||||
followSymlinks: true,
|
||||
}],
|
||||
});
|
||||
|
||||
await expect(readFile(path.join(remoteWorkspaceDir, "README.md"), "utf8")).resolves.toBe("local workspace\n");
|
||||
await expect(readFile(path.join(remoteWorkspaceDir, "._README.md"), "utf8")).rejects.toMatchObject({ code: "ENOENT" });
|
||||
await expect(readFile(path.join(remoteWorkspaceDir, ".claude", "settings.json"), "utf8")).rejects.toMatchObject({ code: "ENOENT" });
|
||||
await expect(readFile(path.join(prepared.assetDirs.skills, "skill.md"), "utf8")).resolves.toBe("skill body\n");
|
||||
expect((await lstat(path.join(prepared.assetDirs.skills, "skill.md"))).isFile()).toBe(true);
|
||||
|
||||
await writeFile(path.join(remoteWorkspaceDir, "README.md"), "remote workspace\n", "utf8");
|
||||
await writeFile(path.join(remoteWorkspaceDir, "remote-only.txt"), "sync back\n", "utf8");
|
||||
await mkdir(path.join(localWorkspaceDir, ".paperclip-runtime"), { recursive: true });
|
||||
await writeFile(path.join(localWorkspaceDir, ".paperclip-runtime", "state.json"), "{}\n", "utf8");
|
||||
await writeFile(path.join(localWorkspaceDir, "local-stale.txt"), "remove\n", "utf8");
|
||||
await prepared.restoreWorkspace();
|
||||
|
||||
await expect(readFile(path.join(localWorkspaceDir, "README.md"), "utf8")).resolves.toBe("remote workspace\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, "remote-only.txt"), "utf8")).resolves.toBe("sync back\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, "local-stale.txt"), "utf8")).rejects.toMatchObject({ code: "ENOENT" });
|
||||
await expect(readFile(path.join(localWorkspaceDir, ".claude", "settings.json"), "utf8")).resolves.toBe("{\"local\":true}\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, ".paperclip-runtime", "state.json"), "utf8")).resolves.toBe("{}\n");
|
||||
});
|
||||
});
|
||||
339
packages/adapter-utils/src/sandbox-managed-runtime.ts
Normal file
@@ -0,0 +1,339 @@
|
||||
import { execFile as execFileCallback } from "node:child_process";
|
||||
import { constants as fsConstants, promises as fs } from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { promisify } from "node:util";
|
||||
|
||||
const execFile = promisify(execFileCallback);
|
||||
|
||||
export interface SandboxRemoteExecutionSpec {
|
||||
transport: "sandbox";
|
||||
provider: string;
|
||||
sandboxId: string;
|
||||
remoteCwd: string;
|
||||
timeoutMs: number;
|
||||
apiKey: string | null;
|
||||
paperclipApiUrl?: string | null;
|
||||
}
|
||||
|
||||
export interface SandboxManagedRuntimeAsset {
|
||||
key: string;
|
||||
localDir: string;
|
||||
followSymlinks?: boolean;
|
||||
exclude?: string[];
|
||||
}
|
||||
|
||||
export interface SandboxManagedRuntimeClient {
|
||||
makeDir(remotePath: string): Promise<void>;
|
||||
writeFile(remotePath: string, bytes: ArrayBuffer): Promise<void>;
|
||||
readFile(remotePath: string): Promise<Buffer | Uint8Array | ArrayBuffer>;
|
||||
listFiles(remotePath: string): Promise<string[]>;
|
||||
remove(remotePath: string): Promise<void>;
|
||||
run(command: string, options: { timeoutMs: number }): Promise<void>;
|
||||
}
|
||||
|
||||
export interface PreparedSandboxManagedRuntime {
|
||||
spec: SandboxRemoteExecutionSpec;
|
||||
workspaceLocalDir: string;
|
||||
workspaceRemoteDir: string;
|
||||
runtimeRootDir: string;
|
||||
assetDirs: Record<string, string>;
|
||||
restoreWorkspace(): Promise<void>;
|
||||
}
|
||||
|
||||
function asObject(value: unknown): Record<string, unknown> {
|
||||
return value && typeof value === "object" && !Array.isArray(value)
|
||||
? (value as Record<string, unknown>)
|
||||
: {};
|
||||
}
|
||||
|
||||
function asString(value: unknown): string {
|
||||
return typeof value === "string" ? value : "";
|
||||
}
|
||||
|
||||
function asNumber(value: unknown): number {
|
||||
return typeof value === "number" ? value : Number(value);
|
||||
}
|
||||
|
||||
function shellQuote(value: string) {
|
||||
return `'${value.replace(/'/g, `'\"'\"'`)}'`;
|
||||
}
|
||||
|
||||
export function parseSandboxRemoteExecutionSpec(value: unknown): SandboxRemoteExecutionSpec | null {
|
||||
const parsed = asObject(value);
|
||||
const transport = asString(parsed.transport).trim();
|
||||
const provider = asString(parsed.provider).trim();
|
||||
const sandboxId = asString(parsed.sandboxId).trim();
|
||||
const remoteCwd = asString(parsed.remoteCwd).trim();
|
||||
const timeoutMs = asNumber(parsed.timeoutMs);
|
||||
|
||||
if (
|
||||
transport !== "sandbox" ||
|
||||
provider.length === 0 ||
|
||||
sandboxId.length === 0 ||
|
||||
remoteCwd.length === 0 ||
|
||||
!Number.isFinite(timeoutMs) ||
|
||||
timeoutMs <= 0
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
transport: "sandbox",
|
||||
provider,
|
||||
sandboxId,
|
||||
remoteCwd,
|
||||
timeoutMs,
|
||||
apiKey: asString(parsed.apiKey).trim() || null,
|
||||
paperclipApiUrl: asString(parsed.paperclipApiUrl).trim() || null,
|
||||
};
|
||||
}
|
||||
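// Illustrative sketch (not part of the original diff): parsing accepts only a complete
// sandbox spec and returns null for anything else. Provider and path values are made up.
//
//   parseSandboxRemoteExecutionSpec({
//     transport: "sandbox",
//     provider: "e2b",
//     sandboxId: "sbx-123",
//     remoteCwd: "/home/user/workspace",
//     timeoutMs: 60_000,
//   });
//   // => { transport: "sandbox", provider: "e2b", sandboxId: "sbx-123",
//   //      remoteCwd: "/home/user/workspace", timeoutMs: 60_000, apiKey: null, paperclipApiUrl: null }
//
//   parseSandboxRemoteExecutionSpec({ transport: "ssh" }); // => null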
|
||||
export function buildSandboxExecutionSessionIdentity(spec: SandboxRemoteExecutionSpec | null) {
|
||||
if (!spec) return null;
|
||||
return {
|
||||
transport: "sandbox",
|
||||
provider: spec.provider,
|
||||
sandboxId: spec.sandboxId,
|
||||
remoteCwd: spec.remoteCwd,
|
||||
...(spec.paperclipApiUrl ? { paperclipApiUrl: spec.paperclipApiUrl } : {}),
|
||||
} as const;
|
||||
}
|
||||
|
||||
export function sandboxExecutionSessionMatches(saved: unknown, current: SandboxRemoteExecutionSpec | null): boolean {
|
||||
const currentIdentity = buildSandboxExecutionSessionIdentity(current);
|
||||
if (!currentIdentity) return false;
|
||||
const parsedSaved = asObject(saved);
|
||||
return (
|
||||
asString(parsedSaved.transport) === currentIdentity.transport &&
|
||||
asString(parsedSaved.provider) === currentIdentity.provider &&
|
||||
asString(parsedSaved.sandboxId) === currentIdentity.sandboxId &&
|
||||
asString(parsedSaved.remoteCwd) === currentIdentity.remoteCwd &&
|
||||
asString(parsedSaved.paperclipApiUrl) === asString(currentIdentity.paperclipApiUrl)
|
||||
);
|
||||
}
|
||||
|
||||
async function withTempDir<T>(prefix: string, fn: (dir: string) => Promise<T>): Promise<T> {
|
||||
const dir = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
try {
|
||||
return await fn(dir);
|
||||
} finally {
|
||||
await fs.rm(dir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
}
|
||||
|
||||
async function execTar(args: string[]): Promise<void> {
|
||||
await execFile("tar", args, {
|
||||
env: {
|
||||
...process.env,
|
||||
COPYFILE_DISABLE: "1",
|
||||
},
|
||||
maxBuffer: 32 * 1024 * 1024,
|
||||
});
|
||||
}
|
||||
|
||||
async function createTarballFromDirectory(input: {
|
||||
localDir: string;
|
||||
archivePath: string;
|
||||
exclude?: string[];
|
||||
followSymlinks?: boolean;
|
||||
}): Promise<void> {
|
||||
const excludeArgs = ["._*", ...(input.exclude ?? [])].flatMap((entry) => ["--exclude", entry]);
|
||||
await execTar([
|
||||
"-c",
|
||||
...(input.followSymlinks ? ["-h"] : []),
|
||||
"-f",
|
||||
input.archivePath,
|
||||
"-C",
|
||||
input.localDir,
|
||||
...excludeArgs,
|
||||
".",
|
||||
]);
|
||||
}
|
||||
|
||||
async function extractTarballToDirectory(input: {
|
||||
archivePath: string;
|
||||
localDir: string;
|
||||
}): Promise<void> {
|
||||
await fs.mkdir(input.localDir, { recursive: true });
|
||||
await execTar(["-xf", input.archivePath, "-C", input.localDir]);
|
||||
}
|
||||
|
||||
async function walkDirectory(root: string, relative = ""): Promise<string[]> {
|
||||
const current = path.join(root, relative);
|
||||
const entries = await fs.readdir(current, { withFileTypes: true }).catch(() => []);
|
||||
const out: string[] = [];
|
||||
for (const entry of entries) {
|
||||
const nextRelative = relative ? path.posix.join(relative, entry.name) : entry.name;
|
||||
out.push(nextRelative);
|
||||
if (entry.isDirectory()) {
|
||||
out.push(...(await walkDirectory(root, nextRelative)));
|
||||
}
|
||||
}
|
||||
return out.sort((left, right) => right.length - left.length);
|
||||
}
|
||||
|
||||
function isRelativePathOrDescendant(relative: string, candidate: string): boolean {
|
||||
return relative === candidate || relative.startsWith(`${candidate}/`);
|
||||
}
|
||||
|
||||
export async function mirrorDirectory(
|
||||
sourceDir: string,
|
||||
targetDir: string,
|
||||
options: { preserveAbsent?: string[] } = {},
|
||||
): Promise<void> {
|
||||
await fs.mkdir(targetDir, { recursive: true });
|
||||
const preserveAbsent = new Set(options.preserveAbsent ?? []);
|
||||
const shouldPreserveAbsent = (relative: string) =>
|
||||
[...preserveAbsent].some((candidate) => isRelativePathOrDescendant(relative, candidate));
|
||||
|
||||
const sourceEntries = new Set(await walkDirectory(sourceDir));
|
||||
const targetEntries = await walkDirectory(targetDir);
|
||||
for (const relative of targetEntries) {
|
||||
if (shouldPreserveAbsent(relative)) continue;
|
||||
if (!sourceEntries.has(relative)) {
|
||||
await fs.rm(path.join(targetDir, relative), { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
}
|
||||
|
||||
const copyEntry = async (relative: string) => {
|
||||
const sourcePath = path.join(sourceDir, relative);
|
||||
const targetPath = path.join(targetDir, relative);
|
||||
const stats = await fs.lstat(sourcePath);
|
||||
|
||||
if (stats.isDirectory()) {
|
||||
await fs.mkdir(targetPath, { recursive: true });
|
||||
return;
|
||||
}
|
||||
|
||||
await fs.mkdir(path.dirname(targetPath), { recursive: true });
|
||||
await fs.rm(targetPath, { recursive: true, force: true }).catch(() => undefined);
|
||||
if (stats.isSymbolicLink()) {
|
||||
const linkTarget = await fs.readlink(sourcePath);
|
||||
await fs.symlink(linkTarget, targetPath);
|
||||
return;
|
||||
}
|
||||
|
||||
await fs.copyFile(sourcePath, targetPath, fsConstants.COPYFILE_FICLONE).catch(async () => {
|
||||
await fs.copyFile(sourcePath, targetPath);
|
||||
});
|
||||
await fs.chmod(targetPath, stats.mode);
|
||||
};
|
||||
|
||||
const entries = (await walkDirectory(sourceDir)).sort((left, right) => left.localeCompare(right));
|
||||
for (const relative of entries) {
|
||||
await copyEntry(relative);
|
||||
}
|
||||
}
|
||||
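// Usage sketch (not part of the original diff): mirroring a restored snapshot back over a
// local workspace while keeping runtime/agent state the snapshot never contained. Paths
// are illustrative.
//
//   await mirrorDirectory("/tmp/restore/workspace", "/repos/my-project", {
//     preserveAbsent: [".paperclip-runtime", ".claude", ".claude.json"],
//   });
//   // Entries present only in the target are deleted unless they sit under a
//   // preserveAbsent prefix; everything else is copied with its original file mode.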
|
||||
function toArrayBuffer(bytes: Buffer): ArrayBuffer {
|
||||
return Uint8Array.from(bytes).buffer;
|
||||
}
|
||||
|
||||
function toBuffer(bytes: Buffer | Uint8Array | ArrayBuffer): Buffer {
|
||||
if (Buffer.isBuffer(bytes)) return bytes;
|
||||
if (bytes instanceof ArrayBuffer) return Buffer.from(bytes);
|
||||
return Buffer.from(bytes.buffer, bytes.byteOffset, bytes.byteLength);
|
||||
}
|
||||
|
||||
function tarExcludeFlags(exclude: string[] | undefined): string {
|
||||
return ["._*", ...(exclude ?? [])].map((entry) => `--exclude ${shellQuote(entry)}`).join(" ");
|
||||
}
|
||||
|
||||
export async function prepareSandboxManagedRuntime(input: {
|
||||
spec: SandboxRemoteExecutionSpec;
|
||||
adapterKey: string;
|
||||
client: SandboxManagedRuntimeClient;
|
||||
workspaceLocalDir: string;
|
||||
workspaceRemoteDir?: string;
|
||||
workspaceExclude?: string[];
|
||||
preserveAbsentOnRestore?: string[];
|
||||
assets?: SandboxManagedRuntimeAsset[];
|
||||
}): Promise<PreparedSandboxManagedRuntime> {
|
||||
const workspaceRemoteDir = input.workspaceRemoteDir ?? input.spec.remoteCwd;
|
||||
const runtimeRootDir = path.posix.join(workspaceRemoteDir, ".paperclip-runtime", input.adapterKey);
|
||||
|
||||
await withTempDir("paperclip-sandbox-sync-", async (tempDir) => {
|
||||
const workspaceTarPath = path.join(tempDir, "workspace.tar");
|
||||
await createTarballFromDirectory({
|
||||
localDir: input.workspaceLocalDir,
|
||||
archivePath: workspaceTarPath,
|
||||
exclude: input.workspaceExclude,
|
||||
});
|
||||
const workspaceTarBytes = await fs.readFile(workspaceTarPath);
|
||||
const remoteWorkspaceTar = path.posix.join(runtimeRootDir, "workspace-upload.tar");
|
||||
await input.client.makeDir(runtimeRootDir);
|
||||
await input.client.writeFile(remoteWorkspaceTar, toArrayBuffer(workspaceTarBytes));
|
||||
const preservedNames = new Set([".paperclip-runtime", ...(input.preserveAbsentOnRestore ?? [])]);
|
||||
const findPreserveArgs = [...preservedNames].map((entry) => `! -name ${shellQuote(entry)}`).join(" ");
|
||||
await input.client.run(
|
||||
`sh -lc ${shellQuote(
|
||||
`mkdir -p ${shellQuote(workspaceRemoteDir)} && ` +
|
||||
`find ${shellQuote(workspaceRemoteDir)} -mindepth 1 -maxdepth 1 ${findPreserveArgs} -exec rm -rf -- {} + && ` +
|
||||
`tar -xf ${shellQuote(remoteWorkspaceTar)} -C ${shellQuote(workspaceRemoteDir)} && ` +
|
||||
`rm -f ${shellQuote(remoteWorkspaceTar)}`,
|
||||
)}`,
|
||||
{ timeoutMs: input.spec.timeoutMs },
|
||||
);
|
||||
|
||||
for (const asset of input.assets ?? []) {
|
||||
const assetTarPath = path.join(tempDir, `${asset.key}.tar`);
|
||||
await createTarballFromDirectory({
|
||||
localDir: asset.localDir,
|
||||
archivePath: assetTarPath,
|
||||
followSymlinks: asset.followSymlinks,
|
||||
exclude: asset.exclude,
|
||||
});
|
||||
const assetTarBytes = await fs.readFile(assetTarPath);
|
||||
const remoteAssetDir = path.posix.join(runtimeRootDir, asset.key);
|
||||
const remoteAssetTar = path.posix.join(runtimeRootDir, `${asset.key}-upload.tar`);
|
||||
await input.client.writeFile(remoteAssetTar, toArrayBuffer(assetTarBytes));
|
||||
await input.client.run(
|
||||
`sh -lc ${shellQuote(
|
||||
`rm -rf ${shellQuote(remoteAssetDir)} && ` +
|
||||
`mkdir -p ${shellQuote(remoteAssetDir)} && ` +
|
||||
`tar -xf ${shellQuote(remoteAssetTar)} -C ${shellQuote(remoteAssetDir)} && ` +
|
||||
`rm -f ${shellQuote(remoteAssetTar)}`,
|
||||
)}`,
|
||||
{ timeoutMs: input.spec.timeoutMs },
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
const assetDirs = Object.fromEntries(
|
||||
(input.assets ?? []).map((asset) => [asset.key, path.posix.join(runtimeRootDir, asset.key)]),
|
||||
);
|
||||
|
||||
return {
|
||||
spec: input.spec,
|
||||
workspaceLocalDir: input.workspaceLocalDir,
|
||||
workspaceRemoteDir,
|
||||
runtimeRootDir,
|
||||
assetDirs,
|
||||
restoreWorkspace: async () => {
|
||||
await withTempDir("paperclip-sandbox-restore-", async (tempDir) => {
|
||||
const remoteWorkspaceTar = path.posix.join(runtimeRootDir, "workspace-download.tar");
|
||||
await input.client.run(
|
||||
`sh -lc ${shellQuote(
|
||||
`mkdir -p ${shellQuote(runtimeRootDir)} && ` +
|
||||
`tar -cf ${shellQuote(remoteWorkspaceTar)} -C ${shellQuote(workspaceRemoteDir)} ` +
|
||||
`${tarExcludeFlags(input.workspaceExclude)} .`,
|
||||
)}`,
|
||||
{ timeoutMs: input.spec.timeoutMs },
|
||||
);
|
||||
const archiveBytes = await input.client.readFile(remoteWorkspaceTar);
|
||||
await input.client.remove(remoteWorkspaceTar).catch(() => undefined);
|
||||
const localArchivePath = path.join(tempDir, "workspace.tar");
|
||||
const extractedDir = path.join(tempDir, "workspace");
|
||||
await fs.writeFile(localArchivePath, toBuffer(archiveBytes));
|
||||
await extractTarballToDirectory({
|
||||
archivePath: localArchivePath,
|
||||
localDir: extractedDir,
|
||||
});
|
||||
await mirrorDirectory(extractedDir, input.workspaceLocalDir, {
|
||||
preserveAbsent: [".paperclip-runtime", ...(input.preserveAbsentOnRestore ?? [])],
|
||||
});
|
||||
});
|
||||
},
|
||||
};
|
||||
}
|
||||
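// Usage sketch (not part of the original diff): a provider client adapted to the
// SandboxManagedRuntimeClient interface uploads the workspace and assets as tarballs,
// and restoreWorkspace() later mirrors remote edits back. The provider name, adapter key,
// and acmeSandboxClient object are assumptions about a hypothetical provider SDK.
//
//   const prepared = await prepareSandboxManagedRuntime({
//     spec: { transport: "sandbox", provider: "acme", sandboxId: "sbx-1",
//             remoteCwd: "/workspace", timeoutMs: 120_000, apiKey: null },
//     adapterKey: "claude",
//     client: acmeSandboxClient,            // implements SandboxManagedRuntimeClient
//     workspaceLocalDir: "/repos/my-project",
//     workspaceExclude: [".claude"],
//     preserveAbsentOnRestore: [".claude"],
//     assets: [{ key: "skills", localDir: "/repos/my-project/.skills", followSymlinks: true }],
//   });
//   // prepared.assetDirs.skills === "/workspace/.paperclip-runtime/claude/skills"
//   await prepared.restoreWorkspace();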
@@ -1,6 +1,7 @@
|
||||
import { randomUUID } from "node:crypto";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
applyPaperclipWorkspaceEnv,
|
||||
appendWithByteCap,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
renderPaperclipWakePrompt,
|
||||
@@ -254,6 +255,7 @@ describe("renderPaperclipWakePrompt", () => {
|
||||
it("keeps the default local-agent prompt action-oriented", () => {
|
||||
expect(DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE).toContain("Start actionable work in this heartbeat");
|
||||
expect(DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE).toContain("do not stop at a plan");
|
||||
expect(DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE).toContain("Prefer the smallest verification that proves the change");
|
||||
expect(DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE).toContain("Use child issues");
|
||||
expect(DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE).toContain("instead of polling agents, sessions, or processes");
|
||||
expect(DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE).toContain("Create child issues directly when you know what needs to be done");
|
||||
@@ -326,6 +328,34 @@ describe("renderPaperclipWakePrompt", () => {
|
||||
expect(prompt).toContain("PAP-1723 Finish blocker (todo)");
|
||||
});
|
||||
|
||||
it("renders loose review request instructions for execution handoffs", () => {
|
||||
const prompt = renderPaperclipWakePrompt({
|
||||
reason: "execution_review_requested",
|
||||
issue: {
|
||||
id: "issue-1",
|
||||
identifier: "PAP-2011",
|
||||
title: "Review request handoff",
|
||||
status: "in_review",
|
||||
},
|
||||
executionStage: {
|
||||
wakeRole: "reviewer",
|
||||
stageId: "stage-1",
|
||||
stageType: "review",
|
||||
currentParticipant: { type: "agent", agentId: "agent-1" },
|
||||
returnAssignee: { type: "agent", agentId: "agent-2" },
|
||||
reviewRequest: {
|
||||
instructions: "Please focus on edge cases and leave a short risk summary.",
|
||||
},
|
||||
allowedActions: ["approve", "request_changes"],
|
||||
},
|
||||
fallbackFetchNeeded: false,
|
||||
});
|
||||
|
||||
expect(prompt).toContain("Review request instructions:");
|
||||
expect(prompt).toContain("Please focus on edge cases and leave a short risk summary.");
|
||||
expect(prompt).toContain("You are waking as the active reviewer for this issue.");
|
||||
});
|
||||
|
||||
it("includes continuation and child issue summaries in structured wake context", () => {
|
||||
const payload = {
|
||||
reason: "issue_children_completed",
|
||||
@@ -396,6 +426,50 @@ describe("renderPaperclipWakePrompt", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("applyPaperclipWorkspaceEnv", () => {
|
||||
it("adds shared workspace env vars including AGENT_HOME", () => {
|
||||
const env = applyPaperclipWorkspaceEnv(
|
||||
{},
|
||||
{
|
||||
workspaceCwd: "/tmp/workspace",
|
||||
workspaceSource: "project_primary",
|
||||
workspaceStrategy: "git_worktree",
|
||||
workspaceId: "workspace-1",
|
||||
workspaceRepoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
workspaceRepoRef: "main",
|
||||
workspaceBranch: "feature/test",
|
||||
workspaceWorktreePath: "/tmp/worktree",
|
||||
agentHome: "/tmp/agent-home",
|
||||
},
|
||||
);
|
||||
|
||||
expect(env).toEqual({
|
||||
PAPERCLIP_WORKSPACE_CWD: "/tmp/workspace",
|
||||
PAPERCLIP_WORKSPACE_SOURCE: "project_primary",
|
||||
PAPERCLIP_WORKSPACE_STRATEGY: "git_worktree",
|
||||
PAPERCLIP_WORKSPACE_ID: "workspace-1",
|
||||
PAPERCLIP_WORKSPACE_REPO_URL: "https://github.com/paperclipai/paperclip.git",
|
||||
PAPERCLIP_WORKSPACE_REPO_REF: "main",
|
||||
PAPERCLIP_WORKSPACE_BRANCH: "feature/test",
|
||||
PAPERCLIP_WORKSPACE_WORKTREE_PATH: "/tmp/worktree",
|
||||
AGENT_HOME: "/tmp/agent-home",
|
||||
});
|
||||
});
|
||||
|
||||
it("skips empty workspace env values", () => {
|
||||
const env = applyPaperclipWorkspaceEnv(
|
||||
{},
|
||||
{
|
||||
workspaceCwd: "",
|
||||
workspaceSource: null,
|
||||
agentHome: "",
|
||||
},
|
||||
);
|
||||
|
||||
expect(env).toEqual({});
|
||||
});
|
||||
});
|
||||
|
||||
describe("appendWithByteCap", () => {
|
||||
it("keeps valid UTF-8 when trimming through multibyte text", () => {
|
||||
const output = appendWithByteCap("prefix ", "hello — world", 7);
|
||||
|
||||
@@ -87,10 +87,12 @@ export const DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE = [
|
||||
"Execution contract:",
|
||||
"- Start actionable work in this heartbeat; do not stop at a plan unless the issue asks for planning.",
|
||||
"- Leave durable progress in comments, documents, or work products with a clear next action.",
|
||||
"- Prefer the smallest verification that proves the change; do not default to full workspace typecheck/build/test on every heartbeat unless the task scope warrants it.",
|
||||
"- Use child issues for parallel or long delegated work instead of polling agents, sessions, or processes.",
|
||||
"- If woken by a human comment on a dependency-blocked issue, respond or triage the comment without treating the blocked deliverable work as unblocked.",
|
||||
"- Create child issues directly when you know what needs to be done; use issue-thread interactions when the board/user must choose suggested tasks, answer structured questions, or confirm a proposal.",
|
||||
"- To ask for that input, create an interaction on the current issue with POST /api/issues/{issueId}/interactions using kind suggest_tasks, ask_user_questions, or request_confirmation. Use continuationPolicy wake_assignee when you need to resume after a response; for request_confirmation this resumes only after acceptance.",
|
||||
"- When you intentionally restart follow-up work on a completed assigned issue, include structured `resume: true` with the POST /api/issues/{issueId}/comments or PATCH /api/issues/{issueId} comment payload. Generic agent comments on closed issues are inert by default.",
|
||||
"- For plan approval, update the plan document first, then create request_confirmation targeting the latest plan revision with idempotencyKey confirmation:{issueId}:plan:{revisionId}. Wait for acceptance before creating implementation subtasks, and create a fresh confirmation after superseding board/user comments if approval is still needed.",
|
||||
"- If blocked, mark the issue blocked and name the unblock owner and action.",
|
||||
"- Respect budget, pause/cancel, approval gates, and company boundaries.",
|
||||
@@ -282,6 +284,9 @@ type PaperclipWakeExecutionStage = {
|
||||
stageType: string | null;
|
||||
currentParticipant: PaperclipWakeExecutionPrincipal | null;
|
||||
returnAssignee: PaperclipWakeExecutionPrincipal | null;
|
||||
reviewRequest: {
|
||||
instructions: string;
|
||||
} | null;
|
||||
lastDecisionOutcome: string | null;
|
||||
allowedActions: string[];
|
||||
};
|
||||
@@ -484,11 +489,14 @@ function normalizePaperclipWakeExecutionStage(value: unknown): PaperclipWakeExec
|
||||
: [];
|
||||
const currentParticipant = normalizePaperclipWakeExecutionPrincipal(stage.currentParticipant);
|
||||
const returnAssignee = normalizePaperclipWakeExecutionPrincipal(stage.returnAssignee);
|
||||
const reviewRequestRaw = parseObject(stage.reviewRequest);
|
||||
const reviewInstructions = asString(reviewRequestRaw.instructions, "").trim();
|
||||
const reviewRequest = reviewInstructions ? { instructions: reviewInstructions } : null;
|
||||
const stageId = asString(stage.stageId, "").trim() || null;
|
||||
const stageType = asString(stage.stageType, "").trim() || null;
|
||||
const lastDecisionOutcome = asString(stage.lastDecisionOutcome, "").trim() || null;
|
||||
|
||||
if (!wakeRole && !stageId && !stageType && !currentParticipant && !returnAssignee && !lastDecisionOutcome && allowedActions.length === 0) {
|
||||
if (!wakeRole && !stageId && !stageType && !currentParticipant && !returnAssignee && !reviewRequest && !lastDecisionOutcome && allowedActions.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -498,6 +506,7 @@ function normalizePaperclipWakeExecutionStage(value: unknown): PaperclipWakeExec
|
||||
stageType,
|
||||
currentParticipant,
|
||||
returnAssignee,
|
||||
reviewRequest,
|
||||
lastDecisionOutcome,
|
||||
allowedActions,
|
||||
};
|
||||
@@ -664,6 +673,13 @@ export function renderPaperclipWakePrompt(
|
||||
if (executionStage.allowedActions.length > 0) {
|
||||
lines.push(`- allowed actions: ${executionStage.allowedActions.join(", ")}`);
|
||||
}
|
||||
if (executionStage.reviewRequest) {
|
||||
lines.push(
|
||||
"",
|
||||
"Review request instructions:",
|
||||
executionStage.reviewRequest.instructions,
|
||||
);
|
||||
}
|
||||
lines.push("");
|
||||
if (executionStage.wakeRole === "reviewer" || executionStage.wakeRole === "approver") {
|
||||
lines.push(
|
||||
@@ -819,6 +835,41 @@ export function buildPaperclipEnv(agent: { id: string; companyId: string }): Rec
|
||||
return vars;
|
||||
}
|
||||
|
||||
export function applyPaperclipWorkspaceEnv(
|
||||
env: Record<string, string>,
|
||||
input: {
|
||||
workspaceCwd?: string | null;
|
||||
workspaceSource?: string | null;
|
||||
workspaceStrategy?: string | null;
|
||||
workspaceId?: string | null;
|
||||
workspaceRepoUrl?: string | null;
|
||||
workspaceRepoRef?: string | null;
|
||||
workspaceBranch?: string | null;
|
||||
workspaceWorktreePath?: string | null;
|
||||
agentHome?: string | null;
|
||||
},
|
||||
): Record<string, string> {
|
||||
const mappings = [
|
||||
["PAPERCLIP_WORKSPACE_CWD", input.workspaceCwd],
|
||||
["PAPERCLIP_WORKSPACE_SOURCE", input.workspaceSource],
|
||||
["PAPERCLIP_WORKSPACE_STRATEGY", input.workspaceStrategy],
|
||||
["PAPERCLIP_WORKSPACE_ID", input.workspaceId],
|
||||
["PAPERCLIP_WORKSPACE_REPO_URL", input.workspaceRepoUrl],
|
||||
["PAPERCLIP_WORKSPACE_REPO_REF", input.workspaceRepoRef],
|
||||
["PAPERCLIP_WORKSPACE_BRANCH", input.workspaceBranch],
|
||||
["PAPERCLIP_WORKSPACE_WORKTREE_PATH", input.workspaceWorktreePath],
|
||||
["AGENT_HOME", input.agentHome],
|
||||
] as const;
|
||||
|
||||
for (const [key, value] of mappings) {
|
||||
if (typeof value === "string" && value.length > 0) {
|
||||
env[key] = value;
|
||||
}
|
||||
}
|
||||
|
||||
return env;
|
||||
}
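A short usage sketch of the helper above, matching the tests earlier in this diff; it mutates and returns the record it is given and silently skips empty or null values:

// Illustrative values; empty/null fields are skipped, so AGENT_HOME and the repo URL stay unset.
const env: Record<string, string> = {};
applyPaperclipWorkspaceEnv(env, {
  workspaceCwd: "/tmp/workspace",
  workspaceBranch: "feature/test",
  agentHome: "",
  workspaceRepoUrl: null,
});
// env is now { PAPERCLIP_WORKSPACE_CWD: "/tmp/workspace", PAPERCLIP_WORKSPACE_BRANCH: "feature/test" }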
|
||||
|
||||
export function sanitizeInheritedPaperclipEnv(baseEnv: NodeJS.ProcessEnv): NodeJS.ProcessEnv {
|
||||
const env: NodeJS.ProcessEnv = { ...baseEnv };
|
||||
for (const key of Object.keys(env)) {
|
||||
@@ -1021,6 +1072,20 @@ export async function resolvePaperclipSkillsDir(
  return null;
}

async function readSkillRequired(skillDir: string): Promise<boolean> {
  try {
    const content = await fs.readFile(path.join(skillDir, "SKILL.md"), "utf8");
    const normalized = content.replace(/\r\n/g, "\n");
    if (!normalized.startsWith("---\n")) return true;
    const closing = normalized.indexOf("\n---\n", 4);
    if (closing < 0) return true;
    const frontmatter = normalized.slice(4, closing);
    return !/^\s*required\s*:\s*false\s*$/m.test(frontmatter);
  } catch {
    return true;
  }
}
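A small illustration of the frontmatter readSkillRequired looks for; the file contents below are made up, and only the required: false line matters:

// Illustrative SKILL.md contents: only a literal "required: false" line inside the
// leading frontmatter block makes the skill optional.
const optionalSkillMd = [
  "---",
  "name: example-skill", // hypothetical key, included only to pad the frontmatter
  "required: false",
  "---",
  "Skill instructions go here.",
].join("\n");
// readSkillRequired() returns false for a directory whose SKILL.md matches this;
// missing frontmatter, a missing required key, or a read error all default to true.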
|
||||
|
||||
export async function listPaperclipSkillEntries(
|
||||
moduleDir: string,
|
||||
additionalCandidates: string[] = [],
|
||||
@@ -1030,15 +1095,20 @@ export async function listPaperclipSkillEntries(
|
||||
|
||||
try {
|
||||
const entries = await fs.readdir(root, { withFileTypes: true });
|
||||
return entries
|
||||
.filter((entry) => entry.isDirectory())
|
||||
.map((entry) => ({
|
||||
const dirs = entries.filter((entry) => entry.isDirectory());
|
||||
return Promise.all(dirs.map(async (entry) => {
|
||||
const skillDir = path.join(root, entry.name);
|
||||
const required = await readSkillRequired(skillDir);
|
||||
return {
|
||||
key: `paperclipai/paperclip/${entry.name}`,
|
||||
runtimeName: entry.name,
|
||||
source: path.join(root, entry.name),
|
||||
required: true,
|
||||
requiredReason: "Bundled Paperclip skills are always available for local adapters.",
|
||||
}));
|
||||
source: skillDir,
|
||||
required,
|
||||
requiredReason: required
|
||||
? "Bundled Paperclip skills are always available for local adapters."
|
||||
: null,
|
||||
};
|
||||
}));
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
|
||||
@@ -476,8 +476,8 @@ async function importGitWorkspaceToSsh(input: {
|
||||
`if [ ! -d ${shellQuote(path.posix.join(input.remoteDir, ".git"))} ]; then git init ${shellQuote(input.remoteDir)} >/dev/null; fi`,
|
||||
`git -C ${shellQuote(input.remoteDir)} fetch --force "$tmp_bundle" '${tempRef}:${tempRef}' >/dev/null`,
|
||||
input.snapshot.branchName
|
||||
? `git -C ${shellQuote(input.remoteDir)} checkout -B ${shellQuote(input.snapshot.branchName)} ${shellQuote(input.snapshot.headCommit)} >/dev/null`
|
||||
: `git -C ${shellQuote(input.remoteDir)} -c advice.detachedHead=false checkout --detach ${shellQuote(input.snapshot.headCommit)} >/dev/null`,
|
||||
? `git -C ${shellQuote(input.remoteDir)} checkout --force -B ${shellQuote(input.snapshot.branchName)} ${shellQuote(input.snapshot.headCommit)} >/dev/null`
|
||||
: `git -C ${shellQuote(input.remoteDir)} -c advice.detachedHead=false checkout --force --detach ${shellQuote(input.snapshot.headCommit)} >/dev/null`,
|
||||
`git -C ${shellQuote(input.remoteDir)} reset --hard ${shellQuote(input.snapshot.headCommit)} >/dev/null`,
|
||||
`git -C ${shellQuote(input.remoteDir)} clean -fdx -e .paperclip-runtime >/dev/null`,
|
||||
].join("\n");
|
||||
|
||||
@@ -64,12 +64,16 @@ export interface AdapterRuntimeServiceReport {
|
||||
healthStatus?: "unknown" | "healthy" | "unhealthy";
|
||||
}
|
||||
|
||||
export type AdapterExecutionErrorFamily = "transient_upstream";
|
||||
|
||||
export interface AdapterExecutionResult {
|
||||
exitCode: number | null;
|
||||
signal: string | null;
|
||||
timedOut: boolean;
|
||||
errorMessage?: string | null;
|
||||
errorCode?: string | null;
|
||||
errorFamily?: AdapterExecutionErrorFamily | null;
|
||||
retryNotBefore?: string | null;
|
||||
errorMeta?: Record<string, unknown>;
|
||||
usage?: UsageSummary;
|
||||
/**
|
||||
@@ -212,6 +216,20 @@ export interface AdapterEnvironmentTestContext {
|
||||
companyId: string;
|
||||
adapterType: string;
|
||||
config: Record<string, unknown>;
|
||||
/**
|
||||
* Optional execution target the adapter should run probes against.
|
||||
*
|
||||
* If omitted (or `kind === "local"`), the adapter tests on the Paperclip
|
||||
* host. For SSH/sandbox targets the adapter should run command/auth probes
|
||||
* inside the remote environment so the result reflects what an agent run
|
||||
* would actually see at execution time.
|
||||
*/
|
||||
executionTarget?: AdapterExecutionTarget | null;
|
||||
/**
|
||||
* Friendly name of the environment being tested (when `executionTarget` is set).
|
||||
* Surfaced in check messages so users see which environment the probe ran in.
|
||||
*/
|
||||
environmentName?: string | null;
|
||||
deployment?: {
|
||||
mode?: "local_trusted" | "authenticated";
|
||||
exposure?: "private" | "public";
|
||||
@@ -311,6 +329,13 @@ export interface ServerAdapterModule {
|
||||
supportsLocalAgentJwt?: boolean;
|
||||
models?: AdapterModel[];
|
||||
listModels?: () => Promise<AdapterModel[]>;
|
||||
/**
|
||||
* Optional explicit refresh hook for model discovery.
|
||||
* Use this when the adapter caches discovered models and needs a bypass path
|
||||
* so the UI can fetch newly released models without waiting for cache expiry
|
||||
* or a Paperclip code update.
|
||||
*/
|
||||
refreshModels?: () => Promise<AdapterModel[]>;
|
||||
agentConfigurationDoc?: string;
|
||||
/**
|
||||
* Optional lifecycle hook when an agent is approved/hired (join-request or hire_agent approval).
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
import * as fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { prepareClaudeConfigSeed } from "./claude-config.js";
|
||||
|
||||
describe("prepareClaudeConfigSeed", () => {
|
||||
const cleanupDirs: string[] = [];
|
||||
|
||||
afterEach(async () => {
|
||||
vi.restoreAllMocks();
|
||||
while (cleanupDirs.length > 0) {
|
||||
const dir = cleanupDirs.pop();
|
||||
if (!dir) continue;
|
||||
await fs.rm(dir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
});
|
||||
|
||||
function createEnv(root: string, sourceDir: string): NodeJS.ProcessEnv {
|
||||
return {
|
||||
HOME: root,
|
||||
PAPERCLIP_HOME: path.join(root, "paperclip-home"),
|
||||
PAPERCLIP_INSTANCE_ID: "test-instance",
|
||||
CLAUDE_CONFIG_DIR: sourceDir,
|
||||
};
|
||||
}
|
||||
|
||||
it("reuses the same snapshot path when the seeded files are unchanged", async () => {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-claude-config-seed-"));
|
||||
cleanupDirs.push(root);
|
||||
const sourceDir = path.join(root, "claude-source");
|
||||
await fs.mkdir(sourceDir, { recursive: true });
|
||||
await fs.writeFile(path.join(sourceDir, "settings.json"), JSON.stringify({ theme: "light" }), "utf8");
|
||||
|
||||
const onLog = vi.fn(async () => {});
|
||||
const env = createEnv(root, sourceDir);
|
||||
|
||||
const first = await prepareClaudeConfigSeed(env, onLog, "company-1");
|
||||
const second = await prepareClaudeConfigSeed(env, onLog, "company-1");
|
||||
|
||||
expect(first).toBe(second);
|
||||
await expect(fs.readFile(path.join(first, "settings.json"), "utf8"))
|
||||
.resolves.toBe(JSON.stringify({ theme: "light" }));
|
||||
});
|
||||
|
||||
it("keeps an existing snapshot intact when the seeded files change", async () => {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-claude-config-race-"));
|
||||
cleanupDirs.push(root);
|
||||
const sourceDir = path.join(root, "claude-source");
|
||||
await fs.mkdir(sourceDir, { recursive: true });
|
||||
await fs.writeFile(path.join(sourceDir, "settings.json"), JSON.stringify({ theme: "light" }), "utf8");
|
||||
|
||||
const onLog = vi.fn(async () => {});
|
||||
const env = createEnv(root, sourceDir);
|
||||
const first = await prepareClaudeConfigSeed(env, onLog, "company-1");
|
||||
|
||||
await fs.writeFile(path.join(sourceDir, "settings.json"), JSON.stringify({ theme: "dark" }), "utf8");
|
||||
const second = await prepareClaudeConfigSeed(env, onLog, "company-1");
|
||||
|
||||
expect(second).not.toBe(first);
|
||||
await expect(fs.readFile(path.join(first, "settings.json"), "utf8"))
|
||||
.resolves.toBe(JSON.stringify({ theme: "light" }));
|
||||
await expect(fs.readFile(path.join(second, "settings.json"), "utf8"))
|
||||
.resolves.toBe(JSON.stringify({ theme: "dark" }));
|
||||
});
|
||||
});
|
||||
packages/adapters/claude-local/src/server/claude-config.ts (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
import { createHash } from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { AdapterExecutionContext } from "@paperclipai/adapter-utils";
|
||||
|
||||
const DEFAULT_PAPERCLIP_INSTANCE_ID = "default";
|
||||
const SEEDED_SHARED_FILES = [
|
||||
".credentials.json",
|
||||
"credentials.json",
|
||||
"settings.json",
|
||||
"settings.local.json",
|
||||
"CLAUDE.md",
|
||||
] as const;
|
||||
|
||||
function nonEmpty(value: string | undefined): string | null {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
async function pathExists(candidate: string): Promise<boolean> {
|
||||
return fs.access(candidate).then(() => true).catch(() => false);
|
||||
}
|
||||
|
||||
function isAlreadyExistsError(error: unknown): boolean {
|
||||
if (!error || typeof error !== "object") return false;
|
||||
const code = "code" in error ? error.code : null;
|
||||
return code === "EEXIST" || code === "ENOTEMPTY";
|
||||
}
|
||||
|
||||
async function collectSeedFiles(sourceDir: string): Promise<Array<{ name: string; sourcePath: string }>> {
|
||||
const files: Array<{ name: string; sourcePath: string }> = [];
|
||||
for (const name of SEEDED_SHARED_FILES) {
|
||||
const sourcePath = path.join(sourceDir, name);
|
||||
if (!(await pathExists(sourcePath))) continue;
|
||||
files.push({ name, sourcePath });
|
||||
}
|
||||
return files;
|
||||
}
|
||||
|
||||
async function buildSeedSnapshotKey(files: Array<{ name: string; sourcePath: string }>): Promise<string> {
|
||||
if (files.length === 0) return "empty";
|
||||
const hash = createHash("sha256");
|
||||
for (const file of files) {
|
||||
hash.update(file.name);
|
||||
hash.update("\0");
|
||||
hash.update(await fs.readFile(file.sourcePath));
|
||||
hash.update("\0");
|
||||
}
|
||||
return hash.digest("hex").slice(0, 16);
|
||||
}
|
||||
|
||||
async function materializeSeedSnapshot(input: {
|
||||
rootDir: string;
|
||||
snapshotKey: string;
|
||||
files: Array<{ name: string; sourcePath: string }>;
|
||||
}): Promise<string> {
|
||||
const targetDir = path.join(input.rootDir, input.snapshotKey);
|
||||
if (await pathExists(targetDir)) {
|
||||
return targetDir;
|
||||
}
|
||||
|
||||
await fs.mkdir(input.rootDir, { recursive: true });
|
||||
const stagingDir = await fs.mkdtemp(path.join(input.rootDir, ".tmp-"));
|
||||
try {
|
||||
for (const file of input.files) {
|
||||
await fs.copyFile(file.sourcePath, path.join(stagingDir, file.name));
|
||||
}
|
||||
try {
|
||||
await fs.rename(stagingDir, targetDir);
|
||||
} catch (error) {
|
||||
if (!isAlreadyExistsError(error)) {
|
||||
throw error;
|
||||
}
|
||||
await fs.rm(stagingDir, { recursive: true, force: true });
|
||||
}
|
||||
} catch (error) {
|
||||
await fs.rm(stagingDir, { recursive: true, force: true }).catch(() => undefined);
|
||||
throw error;
|
||||
}
|
||||
|
||||
return targetDir;
|
||||
}
|
||||
|
||||
export function resolveSharedClaudeConfigDir(
  env: NodeJS.ProcessEnv = process.env,
): string {
  const fromEnv = nonEmpty(env.CLAUDE_CONFIG_DIR);
  return fromEnv ? path.resolve(fromEnv) : path.join(os.homedir(), ".claude");
}

export function resolveManagedClaudeConfigSeedDir(
  env: NodeJS.ProcessEnv,
  companyId?: string,
): string {
  const paperclipHome = nonEmpty(env.PAPERCLIP_HOME) ?? path.resolve(os.homedir(), ".paperclip");
  const instanceId = nonEmpty(env.PAPERCLIP_INSTANCE_ID) ?? DEFAULT_PAPERCLIP_INSTANCE_ID;
  return companyId
    ? path.resolve(paperclipHome, "instances", instanceId, "companies", companyId, "claude-config-seed")
    : path.resolve(paperclipHome, "instances", instanceId, "claude-config-seed");
}

export async function prepareClaudeConfigSeed(
  env: NodeJS.ProcessEnv,
  onLog: AdapterExecutionContext["onLog"],
  companyId?: string,
): Promise<string> {
  const sourceDir = resolveSharedClaudeConfigDir(env);
  const targetRootDir = resolveManagedClaudeConfigSeedDir(env, companyId);

  if (path.resolve(sourceDir) === path.resolve(targetRootDir)) {
    return targetRootDir;
  }

  const copiedFiles = await collectSeedFiles(sourceDir);
  const snapshotKey = await buildSeedSnapshotKey(copiedFiles);
  const targetDir = await materializeSeedSnapshot({
    rootDir: targetRootDir,
    snapshotKey,
    files: copiedFiles,
  });

  if (copiedFiles.length > 0) {
    await onLog(
      "stdout",
      `[paperclip] Prepared Claude config seed "${targetDir}" from "${sourceDir}" (${copiedFiles.map((file) => file.name).join(", ")}).\n`,
    );
  } else {
    await onLog(
      "stdout",
      `[paperclip] No local Claude config seed files were found in "${sourceDir}". Remote Claude auth may still require login.\n`,
    );
  }

  return targetDir;
}
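A hedged sketch of how this helper is driven (mirroring the call added to the claude-local execute path later in this diff); the logger below is a stand-in for the adapter's execution-context onLog:

// Sketch only: prepare the per-company Claude config snapshot before syncing it to a remote target.
const onLog = async (_stream: "stdout" | "stderr", text: string) => { console.log(text.trimEnd()); };
const seedDir = await prepareClaudeConfigSeed(process.env, onLog, "company-1");
// seedDir resolves to a content-addressed snapshot such as
//   ~/.paperclip/instances/<instanceId>/companies/company-1/claude-config-seed/<16-hex-key>
// so unchanged credentials/settings reuse the same directory and edited ones get a fresh one,
// which is exactly what claude-config.test.ts above asserts.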
|
||||
@@ -10,12 +10,15 @@ import {
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import {
|
||||
asString,
|
||||
@@ -24,6 +27,7 @@ import {
|
||||
asStringArray,
|
||||
parseObject,
|
||||
parseJson,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
readPaperclipRuntimeSkillEntries,
|
||||
joinPromptSections,
|
||||
@@ -35,13 +39,17 @@ import {
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import { shellQuote } from "@paperclipai/adapter-utils/ssh";
|
||||
import {
|
||||
parseClaudeStreamJson,
|
||||
describeClaudeFailure,
|
||||
detectClaudeLoginRequired,
|
||||
extractClaudeRetryNotBefore,
|
||||
isClaudeMaxTurnsResult,
|
||||
isClaudeTransientUpstreamError,
|
||||
isClaudeUnknownSessionError,
|
||||
} from "./parse.js";
|
||||
import { prepareClaudeConfigSeed } from "./claude-config.js";
|
||||
import { resolveClaudeDesiredSkillNames } from "./skills.js";
|
||||
import { isBedrockModelId } from "./models.js";
|
||||
import { prepareClaudePromptBundle } from "./prompt-cache.js";
|
||||
@@ -191,33 +199,17 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
if (wakePayloadJson) {
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
if (effectiveWorkspaceCwd) {
|
||||
env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
}
|
||||
if (workspaceSource) {
|
||||
env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
}
|
||||
if (workspaceStrategy) {
|
||||
env.PAPERCLIP_WORKSPACE_STRATEGY = workspaceStrategy;
|
||||
}
|
||||
if (workspaceId) {
|
||||
env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
}
|
||||
if (workspaceRepoUrl) {
|
||||
env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
|
||||
}
|
||||
if (workspaceRepoRef) {
|
||||
env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
|
||||
}
|
||||
if (workspaceBranch) {
|
||||
env.PAPERCLIP_WORKSPACE_BRANCH = workspaceBranch;
|
||||
}
|
||||
if (workspaceWorktreePath) {
|
||||
env.PAPERCLIP_WORKSPACE_WORKTREE_PATH = workspaceWorktreePath;
|
||||
}
|
||||
if (agentHome) {
|
||||
env.AGENT_HOME = agentHome;
|
||||
}
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceStrategy,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceBranch,
|
||||
workspaceWorktreePath,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
}
|
||||
@@ -329,6 +321,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const chrome = asBoolean(config.chrome, false);
|
||||
const maxTurns = asNumber(config.maxTurnsPerRun, 0);
|
||||
const dangerouslySkipPermissions = asBoolean(config.dangerouslySkipPermissions, true);
|
||||
const configEnv = parseObject(config.env);
|
||||
const hasExplicitClaudeConfigDir =
|
||||
typeof configEnv.CLAUDE_CONFIG_DIR === "string" && configEnv.CLAUDE_CONFIG_DIR.trim().length > 0;
|
||||
const instructionsFilePath = asString(config.instructionsFilePath, "").trim();
|
||||
const instructionsFileDir = instructionsFilePath ? `${path.dirname(instructionsFilePath)}/` : "";
|
||||
const runtimeConfig = await buildClaudeRuntimeConfig({
|
||||
@@ -347,11 +342,12 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
env,
|
||||
loggedEnv,
|
||||
loggedEnv: initialLoggedEnv,
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
extraArgs,
|
||||
} = runtimeConfig;
|
||||
let loggedEnv = initialLoggedEnv;
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const terminalResultCleanupGraceMs = Math.max(
|
||||
0,
|
||||
@@ -392,6 +388,13 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
instructionsContents: combinedInstructionsContents,
|
||||
onLog,
|
||||
});
|
||||
  const useManagedRemoteClaudeConfig =
    executionTargetIsRemote &&
    adapterExecutionTargetUsesManagedHome(executionTarget) &&
    !hasExplicitClaudeConfigDir;
  const claudeConfigSeedDir = useManagedRemoteClaudeConfig
    ? await prepareClaudeConfigSeed(process.env, onLog, agent.companyId)
    : null;
const preparedExecutionTargetRuntime = executionTargetIsRemote
|
||||
? await (async () => {
|
||||
await onLog(
|
||||
@@ -408,6 +411,13 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
localDir: promptBundle.addDir,
|
||||
followSymlinks: true,
|
||||
},
|
||||
...(claudeConfigSeedDir
|
||||
? [{
|
||||
key: "config-seed",
|
||||
localDir: claudeConfigSeedDir,
|
||||
followSymlinks: true,
|
||||
}]
|
||||
: []),
|
||||
],
|
||||
});
|
||||
})()
|
||||
@@ -424,6 +434,63 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
? path.posix.join(effectivePromptBundleAddDir, path.basename(promptBundle.instructionsFilePath))
|
||||
: promptBundle.instructionsFilePath
|
||||
: undefined;
|
||||
const remoteClaudeRuntimeRoot = executionTargetIsRemote
|
||||
? preparedExecutionTargetRuntime?.runtimeRootDir ??
|
||||
path.posix.join(effectiveExecutionCwd, ".paperclip-runtime", "claude")
|
||||
: null;
|
||||
const remoteClaudeConfigSeedDir = claudeConfigSeedDir && remoteClaudeRuntimeRoot
|
||||
? preparedExecutionTargetRuntime?.assetDirs["config-seed"] ??
|
||||
path.posix.join(remoteClaudeRuntimeRoot, "config-seed")
|
||||
: null;
|
||||
const remoteClaudeConfigDir = useManagedRemoteClaudeConfig && remoteClaudeRuntimeRoot
|
||||
? path.posix.join(remoteClaudeRuntimeRoot, "config")
|
||||
: null;
|
||||
if (remoteClaudeConfigDir && remoteClaudeConfigSeedDir) {
|
||||
env.CLAUDE_CONFIG_DIR = remoteClaudeConfigDir;
|
||||
loggedEnv.CLAUDE_CONFIG_DIR = remoteClaudeConfigDir;
|
||||
await onLog(
|
||||
"stdout",
|
||||
`[paperclip] Materializing Claude auth/config into ${remoteClaudeConfigDir}.\n`,
|
||||
);
|
||||
await runAdapterExecutionTargetShellCommand(
|
||||
runId,
|
||||
executionTarget,
|
||||
`mkdir -p ${shellQuote(remoteClaudeConfigDir)} && ` +
|
||||
`if [ -d ${shellQuote(remoteClaudeConfigSeedDir)} ]; then ` +
|
||||
`cp -R ${shellQuote(`${remoteClaudeConfigSeedDir}/.`)} ${shellQuote(remoteClaudeConfigDir)}/; ` +
|
||||
`fi`,
|
||||
{
|
||||
cwd,
|
||||
env,
|
||||
timeoutSec: Math.max(timeoutSec, 15),
|
||||
graceSec,
|
||||
onLog,
|
||||
},
|
||||
);
|
||||
}
|
||||
let paperclipBridge: Awaited<ReturnType<typeof startAdapterExecutionTargetPaperclipBridge>> = null;
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
runtimeRootDir: preparedExecutionTargetRuntime?.runtimeRootDir,
|
||||
adapterKey: "claude",
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
if (paperclipBridge) {
|
||||
Object.assign(env, paperclipBridge.env);
|
||||
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
|
||||
loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv,
|
||||
includeRuntimeKeys: ["HOME", "CLAUDE_CONFIG_DIR"],
|
||||
resolvedCommand,
|
||||
});
|
||||
if (remoteClaudeConfigDir) {
|
||||
loggedEnv.CLAUDE_CONFIG_DIR = remoteClaudeConfigDir;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const runtimeSessionParams = parseObject(runtime.sessionParams);
|
||||
const runtimeSessionId = asString(runtimeSessionParams.sessionId, runtime.sessionId ?? "");
|
||||
@@ -625,16 +692,48 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}
|
||||
|
||||
if (!parsed) {
|
||||
const fallbackErrorMessage = parseFallbackErrorMessage(proc);
|
||||
const transientUpstream =
|
||||
!loginMeta.requiresLogin &&
|
||||
(proc.exitCode ?? 0) !== 0 &&
|
||||
isClaudeTransientUpstreamError({
|
||||
parsed: null,
|
||||
stdout: proc.stdout,
|
||||
stderr: proc.stderr,
|
||||
errorMessage: fallbackErrorMessage,
|
||||
});
|
||||
const transientRetryNotBefore = transientUpstream
|
||||
? extractClaudeRetryNotBefore({
|
||||
parsed: null,
|
||||
stdout: proc.stdout,
|
||||
stderr: proc.stderr,
|
||||
errorMessage: fallbackErrorMessage,
|
||||
})
|
||||
: null;
|
||||
const errorCode = loginMeta.requiresLogin
|
||||
? "claude_auth_required"
|
||||
: transientUpstream
|
||||
? "claude_transient_upstream"
|
||||
: null;
|
||||
return {
|
||||
exitCode: proc.exitCode,
|
||||
signal: proc.signal,
|
||||
timedOut: false,
|
||||
errorMessage: parseFallbackErrorMessage(proc),
|
||||
errorCode: loginMeta.requiresLogin ? "claude_auth_required" : null,
|
||||
errorMessage: fallbackErrorMessage,
|
||||
errorCode,
|
||||
errorFamily: transientUpstream ? "transient_upstream" : null,
|
||||
retryNotBefore: transientRetryNotBefore ? transientRetryNotBefore.toISOString() : null,
|
||||
errorMeta,
|
||||
resultJson: {
|
||||
stdout: proc.stdout,
|
||||
stderr: proc.stderr,
|
||||
...(transientUpstream ? { errorFamily: "transient_upstream" } : {}),
|
||||
...(transientRetryNotBefore
|
||||
? { retryNotBefore: transientRetryNotBefore.toISOString() }
|
||||
: {}),
|
||||
...(transientRetryNotBefore
|
||||
? { transientRetryNotBefore: transientRetryNotBefore.toISOString() }
|
||||
: {}),
|
||||
},
|
||||
clearSession: Boolean(opts.clearSessionOnMissingSession),
|
||||
};
|
||||
@@ -670,16 +769,48 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
} as Record<string, unknown>)
|
||||
: null;
|
||||
const clearSessionForMaxTurns = isClaudeMaxTurnsResult(parsed);
|
||||
const parsedIsError = asBoolean(parsed.is_error, false);
|
||||
const failed = (proc.exitCode ?? 0) !== 0 || parsedIsError;
|
||||
const errorMessage = failed
|
||||
? describeClaudeFailure(parsed) ?? `Claude exited with code ${proc.exitCode ?? -1}`
|
||||
: null;
|
||||
const transientUpstream =
|
||||
failed &&
|
||||
!loginMeta.requiresLogin &&
|
||||
isClaudeTransientUpstreamError({
|
||||
parsed,
|
||||
stdout: proc.stdout,
|
||||
stderr: proc.stderr,
|
||||
errorMessage,
|
||||
});
|
||||
const transientRetryNotBefore = transientUpstream
|
||||
? extractClaudeRetryNotBefore({
|
||||
parsed,
|
||||
stdout: proc.stdout,
|
||||
stderr: proc.stderr,
|
||||
errorMessage,
|
||||
})
|
||||
: null;
|
||||
const resolvedErrorCode = loginMeta.requiresLogin
|
||||
? "claude_auth_required"
|
||||
: transientUpstream
|
||||
? "claude_transient_upstream"
|
||||
: null;
|
||||
const mergedResultJson: Record<string, unknown> = {
|
||||
...parsed,
|
||||
...(transientUpstream ? { errorFamily: "transient_upstream" } : {}),
|
||||
...(transientRetryNotBefore ? { retryNotBefore: transientRetryNotBefore.toISOString() } : {}),
|
||||
...(transientRetryNotBefore ? { transientRetryNotBefore: transientRetryNotBefore.toISOString() } : {}),
|
||||
};
|
||||
|
||||
return {
|
||||
exitCode: proc.exitCode,
|
||||
signal: proc.signal,
|
||||
timedOut: false,
|
||||
errorMessage:
|
||||
(proc.exitCode ?? 0) === 0
|
||||
? null
|
||||
: describeClaudeFailure(parsed) ?? `Claude exited with code ${proc.exitCode ?? -1}`,
|
||||
errorCode: loginMeta.requiresLogin ? "claude_auth_required" : null,
|
||||
errorMessage,
|
||||
errorCode: resolvedErrorCode,
|
||||
errorFamily: transientUpstream ? "transient_upstream" : null,
|
||||
retryNotBefore: transientRetryNotBefore ? transientRetryNotBefore.toISOString() : null,
|
||||
errorMeta,
|
||||
usage,
|
||||
sessionId: resolvedSessionId,
|
||||
@@ -690,7 +821,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
model: parsedStream.model || asString(parsed.model, model),
|
||||
billingType,
|
||||
costUsd: parsedStream.costUsd ?? asNumber(parsed.total_cost_usd, 0),
|
||||
resultJson: parsed,
|
||||
resultJson: mergedResultJson,
|
||||
summary: parsedStream.summary || asString(parsed.result, ""),
|
||||
clearSession: clearSessionForMaxTurns || Boolean(opts.clearSessionOnMissingSession && !resolvedSessionId),
|
||||
};
|
||||
@@ -715,6 +846,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
|
||||
return toAdapterResult(initial, { fallbackSessionId: runtimeSessionId || runtime.sessionId });
|
||||
} finally {
|
||||
if (paperclipBridge) {
|
||||
await paperclipBridge.stop();
|
||||
}
|
||||
if (restoreRemoteWorkspace) {
|
||||
await onLog(
|
||||
"stdout",
|
||||
|
||||
packages/adapters/claude-local/src/server/parse.test.ts (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
extractClaudeRetryNotBefore,
|
||||
isClaudeTransientUpstreamError,
|
||||
} from "./parse.js";
|
||||
|
||||
describe("isClaudeTransientUpstreamError", () => {
|
||||
it("classifies the 'out of extra usage' subscription window failure as transient", () => {
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
errorMessage: "You're out of extra usage · resets 4pm (America/Chicago)",
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
parsed: {
|
||||
is_error: true,
|
||||
result: "You're out of extra usage. Resets at 4pm (America/Chicago).",
|
||||
},
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("classifies Anthropic API rate_limit_error and overloaded_error as transient", () => {
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
parsed: {
|
||||
is_error: true,
|
||||
errors: [{ type: "rate_limit_error", message: "Rate limit reached for requests." }],
|
||||
},
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
parsed: {
|
||||
is_error: true,
|
||||
errors: [{ type: "overloaded_error", message: "Overloaded" }],
|
||||
},
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
stderr: "HTTP 429: Too Many Requests",
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
stderr: "Bedrock ThrottlingException: slow down",
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("classifies the subscription 5-hour / weekly limit wording", () => {
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
errorMessage: "Claude usage limit reached — weekly limit reached. Try again in 2 days.",
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
errorMessage: "5-hour limit reached.",
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("does not classify login/auth failures as transient", () => {
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
stderr: "Please log in. Run `claude login` first.",
|
||||
}),
|
||||
).toBe(false);
|
||||
});
|
||||
|
||||
it("does not classify max-turns or unknown-session as transient", () => {
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
parsed: { subtype: "error_max_turns", result: "Maximum turns reached." },
|
||||
}),
|
||||
).toBe(false);
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
parsed: {
|
||||
result: "No conversation found with session id abc-123",
|
||||
errors: [{ message: "No conversation found with session id abc-123" }],
|
||||
},
|
||||
}),
|
||||
).toBe(false);
|
||||
});
|
||||
|
||||
it("does not classify deterministic validation errors as transient", () => {
|
||||
expect(
|
||||
isClaudeTransientUpstreamError({
|
||||
errorMessage: "Invalid request_error: Unknown parameter 'foo'.",
|
||||
}),
|
||||
).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractClaudeRetryNotBefore", () => {
|
||||
it("parses the 'resets 4pm' hint in its explicit timezone", () => {
|
||||
const now = new Date("2026-04-22T15:15:00.000Z");
|
||||
const extracted = extractClaudeRetryNotBefore(
|
||||
{ errorMessage: "You're out of extra usage · resets 4pm (America/Chicago)" },
|
||||
now,
|
||||
);
|
||||
expect(extracted?.toISOString()).toBe("2026-04-22T21:00:00.000Z");
|
||||
});
|
||||
|
||||
it("rolls forward past midnight when the reset time has already passed today", () => {
|
||||
const now = new Date("2026-04-22T23:30:00.000Z");
|
||||
const extracted = extractClaudeRetryNotBefore(
|
||||
{ errorMessage: "Usage limit reached. Resets at 3:15 AM (UTC)." },
|
||||
now,
|
||||
);
|
||||
expect(extracted?.toISOString()).toBe("2026-04-23T03:15:00.000Z");
|
||||
});
|
||||
|
||||
it("returns null when no reset hint is present", () => {
|
||||
expect(
|
||||
extractClaudeRetryNotBefore({ errorMessage: "Overloaded. Try again later." }, new Date()),
|
||||
).toBeNull();
|
||||
});
|
||||
});
|
||||
@@ -1,9 +1,19 @@
|
||||
import type { UsageSummary } from "@paperclipai/adapter-utils";
|
||||
import { asString, asNumber, parseObject, parseJson } from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
asString,
|
||||
asNumber,
|
||||
parseObject,
|
||||
parseJson,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
const CLAUDE_AUTH_REQUIRED_RE = /(?:not\s+logged\s+in|please\s+log\s+in|please\s+run\s+`?claude\s+login`?|login\s+required|requires\s+login|unauthorized|authentication\s+required)/i;
|
||||
const URL_RE = /(https?:\/\/[^\s'"`<>()[\]{};,!?]+[^\s'"`<>()[\]{};,!.?:]+)/gi;
|
||||
|
||||
const CLAUDE_TRANSIENT_UPSTREAM_RE =
|
||||
/(?:rate[-\s]?limit(?:ed)?|rate_limit_error|too\s+many\s+requests|\b429\b|overloaded(?:_error)?|server\s+overloaded|service\s+unavailable|\b503\b|\b529\b|high\s+demand|try\s+again\s+later|temporarily\s+unavailable|throttl(?:ed|ing)|throttlingexception|servicequotaexceededexception|out\s+of\s+extra\s+usage|extra\s+usage\b|claude\s+usage\s+limit\s+reached|5[-\s]?hour\s+limit\s+reached|weekly\s+limit\s+reached|usage\s+limit\s+reached|usage\s+cap\s+reached)/i;
|
||||
const CLAUDE_EXTRA_USAGE_RESET_RE =
|
||||
/(?:out\s+of\s+extra\s+usage|extra\s+usage|usage\s+limit\s+reached|usage\s+cap\s+reached|5[-\s]?hour\s+limit\s+reached|weekly\s+limit\s+reached|claude\s+usage\s+limit\s+reached)[\s\S]{0,80}?\bresets?\s+(?:at\s+)?([^\n()]+?)(?:\s*\(([^)]+)\))?(?:[.!]|\n|$)/i;
|
||||
|
||||
export function parseClaudeStreamJson(stdout: string) {
|
||||
let sessionId: string | null = null;
|
||||
let model = "";
|
||||
@@ -177,3 +187,197 @@ export function isClaudeUnknownSessionError(parsed: Record<string, unknown>): bo
|
||||
/no conversation found with session id|unknown session|session .* not found/i.test(msg),
|
||||
);
|
||||
}
|
||||
|
||||
function buildClaudeTransientHaystack(input: {
|
||||
parsed?: Record<string, unknown> | null;
|
||||
stdout?: string | null;
|
||||
stderr?: string | null;
|
||||
errorMessage?: string | null;
|
||||
}): string {
|
||||
const parsed = input.parsed ?? null;
|
||||
const resultText = parsed ? asString(parsed.result, "") : "";
|
||||
const parsedErrors = parsed ? extractClaudeErrorMessages(parsed) : [];
|
||||
return [
|
||||
input.errorMessage ?? "",
|
||||
resultText,
|
||||
...parsedErrors,
|
||||
input.stdout ?? "",
|
||||
input.stderr ?? "",
|
||||
]
|
||||
.join("\n")
|
||||
.split(/\r?\n/)
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean)
|
||||
.join("\n");
|
||||
}
|
||||
|
||||
function readTimeZoneParts(date: Date, timeZone: string) {
|
||||
const values = new Map(
|
||||
new Intl.DateTimeFormat("en-US", {
|
||||
timeZone,
|
||||
hourCycle: "h23",
|
||||
year: "numeric",
|
||||
month: "2-digit",
|
||||
day: "2-digit",
|
||||
hour: "2-digit",
|
||||
minute: "2-digit",
|
||||
}).formatToParts(date).map((part) => [part.type, part.value]),
|
||||
);
|
||||
return {
|
||||
year: Number.parseInt(values.get("year") ?? "", 10),
|
||||
month: Number.parseInt(values.get("month") ?? "", 10),
|
||||
day: Number.parseInt(values.get("day") ?? "", 10),
|
||||
hour: Number.parseInt(values.get("hour") ?? "", 10),
|
||||
minute: Number.parseInt(values.get("minute") ?? "", 10),
|
||||
};
|
||||
}
|
||||
|
||||
function normalizeResetTimeZone(timeZoneHint: string | null | undefined): string | null {
|
||||
const normalized = timeZoneHint?.trim();
|
||||
if (!normalized) return null;
|
||||
if (/^(?:utc|gmt)$/i.test(normalized)) return "UTC";
|
||||
|
||||
try {
|
||||
new Intl.DateTimeFormat("en-US", { timeZone: normalized }).format(new Date(0));
|
||||
return normalized;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function dateFromTimeZoneWallClock(input: {
|
||||
year: number;
|
||||
month: number;
|
||||
day: number;
|
||||
hour: number;
|
||||
minute: number;
|
||||
timeZone: string;
|
||||
}): Date | null {
|
||||
let candidate = new Date(Date.UTC(input.year, input.month - 1, input.day, input.hour, input.minute, 0, 0));
|
||||
const targetUtc = Date.UTC(input.year, input.month - 1, input.day, input.hour, input.minute, 0, 0);
|
||||
|
||||
for (let attempt = 0; attempt < 4; attempt += 1) {
|
||||
const actual = readTimeZoneParts(candidate, input.timeZone);
|
||||
const actualUtc = Date.UTC(actual.year, actual.month - 1, actual.day, actual.hour, actual.minute, 0, 0);
|
||||
const offsetMs = targetUtc - actualUtc;
|
||||
if (offsetMs === 0) break;
|
||||
candidate = new Date(candidate.getTime() + offsetMs);
|
||||
}
|
||||
|
||||
const verified = readTimeZoneParts(candidate, input.timeZone);
|
||||
if (
|
||||
verified.year !== input.year ||
|
||||
verified.month !== input.month ||
|
||||
verified.day !== input.day ||
|
||||
verified.hour !== input.hour ||
|
||||
verified.minute !== input.minute
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return candidate;
|
||||
}
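The loop above resolves a wall-clock time in a named zone by guessing a UTC instant, reading it back through Intl.DateTimeFormat, and correcting by the observed offset; a standalone sketch of the same trick for a single conversion:

// Standalone sketch of the same fixed-point trick: find the UTC instant whose wall clock
// in America/Chicago reads 2026-04-22 16:00 (CDT is UTC-5, so the answer is 21:00Z).
const wantUtc = Date.UTC(2026, 3, 22, 16, 0);
let guess = new Date(wantUtc);
for (let i = 0; i < 4; i += 1) {
  const parts = new Intl.DateTimeFormat("en-US", {
    timeZone: "America/Chicago", hourCycle: "h23",
    year: "numeric", month: "2-digit", day: "2-digit", hour: "2-digit", minute: "2-digit",
  }).formatToParts(guess);
  const get = (t: string) => Number(parts.find((p) => p.type === t)?.value);
  const seenUtc = Date.UTC(get("year"), get("month") - 1, get("day"), get("hour"), get("minute"));
  if (seenUtc === wantUtc) break;
  guess = new Date(guess.getTime() + (wantUtc - seenUtc));
}
// guess.toISOString() === "2026-04-22T21:00:00.000Z", matching the parse.test.ts expectation above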
|
||||
|
||||
function nextClockTimeInTimeZone(input: {
|
||||
now: Date;
|
||||
hour: number;
|
||||
minute: number;
|
||||
timeZoneHint: string;
|
||||
}): Date | null {
|
||||
const timeZone = normalizeResetTimeZone(input.timeZoneHint);
|
||||
if (!timeZone) return null;
|
||||
|
||||
const nowParts = readTimeZoneParts(input.now, timeZone);
|
||||
let retryAt = dateFromTimeZoneWallClock({
|
||||
year: nowParts.year,
|
||||
month: nowParts.month,
|
||||
day: nowParts.day,
|
||||
hour: input.hour,
|
||||
minute: input.minute,
|
||||
timeZone,
|
||||
});
|
||||
if (!retryAt) return null;
|
||||
|
||||
if (retryAt.getTime() <= input.now.getTime()) {
|
||||
const nextDay = new Date(Date.UTC(nowParts.year, nowParts.month - 1, nowParts.day + 1, 0, 0, 0, 0));
|
||||
retryAt = dateFromTimeZoneWallClock({
|
||||
year: nextDay.getUTCFullYear(),
|
||||
month: nextDay.getUTCMonth() + 1,
|
||||
day: nextDay.getUTCDate(),
|
||||
hour: input.hour,
|
||||
minute: input.minute,
|
||||
timeZone,
|
||||
});
|
||||
}
|
||||
|
||||
return retryAt;
|
||||
}
|
||||
|
||||
function parseClaudeResetClockTime(clockText: string, now: Date, timeZoneHint?: string | null): Date | null {
|
||||
const normalized = clockText.trim().replace(/\s+/g, " ");
|
||||
const match = normalized.match(/^(\d{1,2})(?::(\d{2}))?\s*([ap])\.?\s*m\.?/i);
|
||||
if (!match) return null;
|
||||
|
||||
const hour12 = Number.parseInt(match[1] ?? "", 10);
|
||||
const minute = Number.parseInt(match[2] ?? "0", 10);
|
||||
if (!Number.isInteger(hour12) || hour12 < 1 || hour12 > 12) return null;
|
||||
if (!Number.isInteger(minute) || minute < 0 || minute > 59) return null;
|
||||
|
||||
let hour24 = hour12 % 12;
|
||||
if ((match[3] ?? "").toLowerCase() === "p") hour24 += 12;
|
||||
|
||||
if (timeZoneHint) {
|
||||
const explicitRetryAt = nextClockTimeInTimeZone({
|
||||
now,
|
||||
hour: hour24,
|
||||
minute,
|
||||
timeZoneHint,
|
||||
});
|
||||
if (explicitRetryAt) return explicitRetryAt;
|
||||
}
|
||||
|
||||
const retryAt = new Date(now);
|
||||
retryAt.setHours(hour24, minute, 0, 0);
|
||||
if (retryAt.getTime() <= now.getTime()) {
|
||||
retryAt.setDate(retryAt.getDate() + 1);
|
||||
}
|
||||
return retryAt;
|
||||
}
|
||||
|
||||
export function extractClaudeRetryNotBefore(
|
||||
input: {
|
||||
parsed?: Record<string, unknown> | null;
|
||||
stdout?: string | null;
|
||||
stderr?: string | null;
|
||||
errorMessage?: string | null;
|
||||
},
|
||||
now = new Date(),
|
||||
): Date | null {
|
||||
const haystack = buildClaudeTransientHaystack(input);
|
||||
const match = haystack.match(CLAUDE_EXTRA_USAGE_RESET_RE);
|
||||
if (!match) return null;
|
||||
return parseClaudeResetClockTime(match[1] ?? "", now, match[2]);
|
||||
}
|
||||
|
||||
export function isClaudeTransientUpstreamError(input: {
|
||||
parsed?: Record<string, unknown> | null;
|
||||
stdout?: string | null;
|
||||
stderr?: string | null;
|
||||
errorMessage?: string | null;
|
||||
}): boolean {
|
||||
const parsed = input.parsed ?? null;
|
||||
// Deterministic failures are handled by their own classifiers.
|
||||
if (parsed && (isClaudeMaxTurnsResult(parsed) || isClaudeUnknownSessionError(parsed))) {
|
||||
return false;
|
||||
}
|
||||
const loginMeta = detectClaudeLoginRequired({
|
||||
parsed,
|
||||
stdout: input.stdout ?? "",
|
||||
stderr: input.stderr ?? "",
|
||||
});
|
||||
if (loginMeta.requiresLogin) return false;
|
||||
|
||||
const haystack = buildClaudeTransientHaystack(input);
|
||||
if (!haystack) return false;
|
||||
return CLAUDE_TRANSIENT_UPSTREAM_RE.test(haystack);
|
||||
}
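Taken together, the two exports above feed the adapter changes shown earlier in this diff; a compressed sketch of that consumption (the input fields are stand-ins for values the adapter already has in scope):

// Compressed sketch of how the execute() changes consume these classifiers.
function classifyClaudeFailure(input: {
  parsed: Record<string, unknown> | null;
  stdout: string;
  stderr: string;
  errorMessage: string | null;
}) {
  const transient = isClaudeTransientUpstreamError(input);
  const retryNotBefore = transient ? extractClaudeRetryNotBefore(input) : null;
  return {
    errorCode: transient ? "claude_transient_upstream" : null,
    errorFamily: transient ? ("transient_upstream" as const) : null,
    retryNotBefore: retryNotBefore ? retryNotBefore.toISOString() : null,
  };
}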
|
||||
|
||||
@@ -9,11 +9,15 @@ import {
|
||||
asNumber,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
ensureAbsoluteDirectory,
|
||||
ensureCommandResolvable,
|
||||
ensurePathInEnv,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetDirectory,
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import path from "node:path";
|
||||
import { detectClaudeLoginRequired, parseClaudeStreamJson } from "./parse.js";
|
||||
import { isBedrockModelId } from "./models.js";
|
||||
@@ -56,10 +60,28 @@ export async function testEnvironment(
|
||||
const checks: AdapterEnvironmentCheck[] = [];
|
||||
const config = parseObject(ctx.config);
|
||||
const command = asString(config.command, "claude");
|
||||
const cwd = asString(config.cwd, process.cwd());
|
||||
const target = ctx.executionTarget ?? null;
|
||||
const targetIsRemote = target?.kind === "remote";
|
||||
const cwd = resolveAdapterExecutionTargetCwd(target, asString(config.cwd, ""), process.cwd());
|
||||
const targetLabel = targetIsRemote
|
||||
? ctx.environmentName ?? describeAdapterExecutionTarget(target)
|
||||
: null;
|
||||
const runId = `claude-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`;
|
||||
|
||||
if (targetLabel) {
|
||||
checks.push({
|
||||
code: "claude_environment_target",
|
||||
level: "info",
|
||||
message: `Probing inside environment: ${targetLabel}`,
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
await ensureAdapterExecutionTargetDirectory(runId, target, cwd, {
|
||||
cwd,
|
||||
env: {},
|
||||
createIfMissing: true,
|
||||
});
|
||||
checks.push({
|
||||
code: "claude_cwd_valid",
|
||||
level: "info",
|
||||
@@ -81,7 +103,7 @@ export async function testEnvironment(
|
||||
}
|
||||
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
|
||||
try {
|
||||
await ensureCommandResolvable(command, cwd, runtimeEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, target, cwd, runtimeEnv);
|
||||
checks.push({
|
||||
code: "claude_command_resolvable",
|
||||
level: "info",
|
||||
@@ -96,16 +118,21 @@ export async function testEnvironment(
|
||||
});
|
||||
}
|
||||
|
||||
// When probing a remote target, the Paperclip host's process.env does not
|
||||
// reflect what the agent will actually see at runtime. Only consider env
|
||||
// vars from the adapter config in that case; the probe itself will surface
|
||||
// any auth issues on the remote box.
|
||||
const considerHostEnv = !targetIsRemote;
|
||||
const hasBedrock =
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "1" ||
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "true" ||
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK === "1" ||
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK === "true" ||
|
||||
(considerHostEnv && process.env.CLAUDE_CODE_USE_BEDROCK === "1") ||
|
||||
(considerHostEnv && process.env.CLAUDE_CODE_USE_BEDROCK === "true") ||
|
||||
isNonEmpty(env.ANTHROPIC_BEDROCK_BASE_URL) ||
|
||||
isNonEmpty(process.env.ANTHROPIC_BEDROCK_BASE_URL);
|
||||
(considerHostEnv && isNonEmpty(process.env.ANTHROPIC_BEDROCK_BASE_URL));
|
||||
|
||||
const configApiKey = env.ANTHROPIC_API_KEY;
|
||||
const hostApiKey = process.env.ANTHROPIC_API_KEY;
|
||||
const hostApiKey = considerHostEnv ? process.env.ANTHROPIC_API_KEY : undefined;
|
||||
if (hasBedrock) {
|
||||
const source =
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "1" ||
|
||||
@@ -130,7 +157,7 @@ export async function testEnvironment(
|
||||
detail: `Detected in ${source}.`,
|
||||
hint: "Unset ANTHROPIC_API_KEY if you want subscription-based Claude login behavior.",
|
||||
});
|
||||
} else {
|
||||
} else if (!targetIsRemote) {
|
||||
checks.push({
|
||||
code: "claude_subscription_mode_possible",
|
||||
level: "info",
|
||||
@@ -172,8 +199,9 @@ export async function testEnvironment(
|
||||
if (maxTurns > 0) args.push("--max-turns", String(maxTurns));
|
||||
if (extraArgs.length > 0) args.push(...extraArgs);
|
||||
|
||||
const probe = await runChildProcess(
|
||||
`claude-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`,
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
args,
|
||||
{
|
||||
|
||||
@@ -66,8 +66,6 @@ export function buildClaudeLocalConfig(v: CreateConfigValues): Record<string, un
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
if (v.model) ac.model = v.model;
|
||||
if (v.thinkingEffort) ac.effort = v.thinkingEffort;
|
||||
if (v.chrome) ac.chrome = true;
|
||||
|
||||
@@ -4,7 +4,23 @@ export const DEFAULT_CODEX_LOCAL_MODEL = "gpt-5.3-codex";
|
||||
export const DEFAULT_CODEX_LOCAL_BYPASS_APPROVALS_AND_SANDBOX = true;
|
||||
export const CODEX_LOCAL_FAST_MODE_SUPPORTED_MODELS = ["gpt-5.4"] as const;
|
||||
|
||||
function normalizeModelId(model: string | null | undefined): string {
|
||||
return typeof model === "string" ? model.trim() : "";
|
||||
}
|
||||
|
||||
export function isCodexLocalKnownModel(model: string | null | undefined): boolean {
|
||||
const normalizedModel = normalizeModelId(model);
|
||||
if (!normalizedModel) return false;
|
||||
return models.some((entry) => entry.id === normalizedModel);
|
||||
}
|
||||
|
||||
export function isCodexLocalManualModel(model: string | null | undefined): boolean {
|
||||
const normalizedModel = normalizeModelId(model);
|
||||
return Boolean(normalizedModel) && !isCodexLocalKnownModel(normalizedModel);
|
||||
}
|
||||
|
||||
export function isCodexLocalFastModeSupported(model: string | null | undefined): boolean {
|
||||
if (isCodexLocalManualModel(model)) return true;
|
||||
const normalizedModel = typeof model === "string" ? model.trim() : "";
|
||||
return CODEX_LOCAL_FAST_MODE_SUPPORTED_MODELS.includes(
|
||||
normalizedModel as (typeof CODEX_LOCAL_FAST_MODE_SUPPORTED_MODELS)[number],
|
||||
@@ -35,7 +51,7 @@ Core fields:
|
||||
- modelReasoningEffort (string, optional): reasoning effort override (minimal|low|medium|high|xhigh) passed via -c model_reasoning_effort=...
|
||||
- promptTemplate (string, optional): run prompt template
|
||||
- search (boolean, optional): run codex with --search
|
||||
- fastMode (boolean, optional): enable Codex Fast mode; currently supported on GPT-5.4 only and consumes credits faster
|
||||
- fastMode (boolean, optional): enable Codex Fast mode; supported on GPT-5.4 and passed through for manual model IDs
|
||||
- dangerouslyBypassApprovalsAndSandbox (boolean, optional): run with bypass flag
|
||||
- command (string, optional): defaults to "codex"
|
||||
- extraArgs (string[], optional): additional CLI args
|
||||
@@ -54,6 +70,6 @@ Notes:
- Paperclip injects desired local skills into the effective CODEX_HOME/skills/ directory at execution time so Codex can discover "$paperclip" and related skills without polluting the project working directory. In managed-home mode (the default) this is ~/.paperclip/instances/<id>/companies/<companyId>/codex-home/skills/; when CODEX_HOME is explicitly overridden in adapter config, that override is used instead.
- Unless explicitly overridden in adapter config, Paperclip runs Codex with a per-company managed CODEX_HOME under the active Paperclip instance and seeds auth/config from the shared Codex home (the CODEX_HOME env var, when set, or ~/.codex).
- Some model/tool combinations reject certain effort levels (for example minimal with web search enabled).
- Fast mode is currently supported on GPT-5.4 only. When enabled, Paperclip applies \`service_tier="fast"\` and \`features.fast_mode=true\`.
- Fast mode is supported on GPT-5.4 and manual model IDs. When enabled for those models, Paperclip applies \`service_tier="fast"\` and \`features.fast_mode=true\`.
- When Paperclip realizes a workspace/runtime for a run, it injects PAPERCLIP_WORKSPACE_* and PAPERCLIP_RUNTIME_* env vars for agent-side tooling.
`;
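A hedged example of a codex-local adapter config that opts a manually configured model into fast mode; field names come from the core-fields list above and the values are illustrative:

// Illustrative codex-local adapter config; "gpt-5.5" is a manual model ID, so fast mode is
// passed through and buildCodexExecArgs adds -c service_tier="fast" -c features.fast_mode=true
// (see the buildCodexExecArgs test later in this diff).
const codexLocalConfig: Record<string, unknown> = {
  model: "gpt-5.5",
  fastMode: true,
  modelReasoningEffort: "high",
  search: false,
  command: "codex",
  extraArgs: [],
};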
|
||||
|
||||
@@ -26,6 +26,28 @@ describe("buildCodexExecArgs", () => {
|
||||
]);
|
||||
});
|
||||
|
||||
it("enables Codex fast mode overrides for manual models", () => {
|
||||
const result = buildCodexExecArgs({
|
||||
model: "gpt-5.5",
|
||||
fastMode: true,
|
||||
});
|
||||
|
||||
expect(result.fastModeRequested).toBe(true);
|
||||
expect(result.fastModeApplied).toBe(true);
|
||||
expect(result.fastModeIgnoredReason).toBeNull();
|
||||
expect(result.args).toEqual([
|
||||
"exec",
|
||||
"--json",
|
||||
"--model",
|
||||
"gpt-5.5",
|
||||
"-c",
|
||||
'service_tier="fast"',
|
||||
"-c",
|
||||
"features.fast_mode=true",
|
||||
"-",
|
||||
]);
|
||||
});
|
||||
|
||||
it("ignores fast mode for unsupported models", () => {
|
||||
const result = buildCodexExecArgs({
|
||||
model: "gpt-5.3-codex",
|
||||
@@ -34,7 +56,9 @@ describe("buildCodexExecArgs", () => {
|
||||
|
||||
expect(result.fastModeRequested).toBe(true);
|
||||
expect(result.fastModeApplied).toBe(false);
|
||||
expect(result.fastModeIgnoredReason).toContain("currently only supported on gpt-5.4");
|
||||
expect(result.fastModeIgnoredReason).toContain(
|
||||
"currently only supported on gpt-5.4 or manually configured model IDs",
|
||||
);
|
||||
expect(result.args).toEqual([
|
||||
"exec",
|
||||
"--json",
|
||||
|
||||
@@ -25,7 +25,7 @@ function asRecord(value: unknown): Record<string, unknown> {
}

function formatFastModeSupportedModels(): string {
  return CODEX_LOCAL_FAST_MODE_SUPPORTED_MODELS.join(", ");
  return `${CODEX_LOCAL_FAST_MODE_SUPPORTED_MODELS.join(", ")} or manually configured model IDs`;
}
|
||||
export function buildCodexExecArgs(
|
||||
|
||||
@@ -8,17 +8,20 @@ import {
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import {
|
||||
asString,
|
||||
asNumber,
|
||||
parseObject,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
buildInvocationEnvForLogs,
|
||||
ensureAbsoluteDirectory,
|
||||
@@ -34,6 +37,7 @@ import {
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
parseCodexJsonl,
|
||||
extractCodexRetryNotBefore,
|
||||
isCodexTransientUpstreamError,
|
||||
isCodexUnknownSessionError,
|
||||
} from "./parse.js";
|
||||
@@ -367,6 +371,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const restoreRemoteWorkspace = preparedExecutionTargetRuntime
|
||||
? () => preparedExecutionTargetRuntime.restoreWorkspace()
|
||||
: null;
|
||||
let paperclipBridge: Awaited<ReturnType<typeof startAdapterExecutionTargetPaperclipBridge>> = null;
|
||||
const remoteCodexHome = executionTargetIsRemote
|
||||
? preparedExecutionTargetRuntime?.assetDirs.home ??
|
||||
path.posix.join(effectiveExecutionCwd, ".paperclip-runtime", "codex", "home")
|
||||
@@ -420,33 +425,17 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (wakePayloadJson) {
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
if (effectiveWorkspaceCwd) {
|
||||
env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
}
|
||||
if (workspaceSource) {
|
||||
env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
}
|
||||
if (workspaceStrategy) {
|
||||
env.PAPERCLIP_WORKSPACE_STRATEGY = workspaceStrategy;
|
||||
}
|
||||
if (workspaceId) {
|
||||
env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
}
|
||||
if (workspaceRepoUrl) {
|
||||
env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
|
||||
}
|
||||
if (workspaceRepoRef) {
|
||||
env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
|
||||
}
|
||||
if (workspaceBranch) {
|
||||
env.PAPERCLIP_WORKSPACE_BRANCH = workspaceBranch;
|
||||
}
|
||||
if (workspaceWorktreePath) {
|
||||
env.PAPERCLIP_WORKSPACE_WORKTREE_PATH = workspaceWorktreePath;
|
||||
}
|
||||
if (agentHome) {
|
||||
env.AGENT_HOME = agentHome;
|
||||
}
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceStrategy,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceBranch,
|
||||
workspaceWorktreePath,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
}
|
||||
@@ -470,6 +459,19 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (!hasExplicitApiKey && authToken) {
|
||||
env.PAPERCLIP_API_KEY = authToken;
|
||||
}
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
runtimeRootDir: preparedExecutionTargetRuntime?.runtimeRootDir,
|
||||
adapterKey: "codex",
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
if (paperclipBridge) {
|
||||
Object.assign(env, paperclipBridge.env);
|
||||
}
|
||||
}
|
||||
const effectiveEnv = Object.fromEntries(
|
||||
Object.entries({ ...process.env, ...env }).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
@@ -725,6 +727,21 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
parsedError ||
|
||||
stderrLine ||
|
||||
`Codex exited with code ${attempt.proc.exitCode ?? -1}`;
|
||||
const transientRetryNotBefore =
|
||||
(attempt.proc.exitCode ?? 0) !== 0
|
||||
? extractCodexRetryNotBefore({
|
||||
stdout: attempt.proc.stdout,
|
||||
stderr: attempt.proc.stderr,
|
||||
errorMessage: fallbackErrorMessage,
|
||||
})
|
||||
: null;
|
||||
const transientUpstream =
|
||||
(attempt.proc.exitCode ?? 0) !== 0 &&
|
||||
isCodexTransientUpstreamError({
|
||||
stdout: attempt.proc.stdout,
|
||||
stderr: attempt.proc.stderr,
|
||||
errorMessage: fallbackErrorMessage,
|
||||
});
|
||||
|
||||
return {
|
||||
exitCode: attempt.proc.exitCode,
|
||||
@@ -735,14 +752,11 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
? null
|
||||
: fallbackErrorMessage,
|
||||
errorCode:
|
||||
(attempt.proc.exitCode ?? 0) !== 0 &&
|
||||
isCodexTransientUpstreamError({
|
||||
stdout: attempt.proc.stdout,
|
||||
stderr: attempt.proc.stderr,
|
||||
errorMessage: fallbackErrorMessage,
|
||||
})
|
||||
transientUpstream
|
||||
? "codex_transient_upstream"
|
||||
: null,
|
||||
errorFamily: transientUpstream ? "transient_upstream" : null,
|
||||
retryNotBefore: transientRetryNotBefore ? transientRetryNotBefore.toISOString() : null,
|
||||
usage: attempt.parsed.usage,
|
||||
sessionId: resolvedSessionId,
|
||||
sessionParams: resolvedSessionParams,
|
||||
@@ -755,6 +769,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
resultJson: {
|
||||
stdout: attempt.proc.stdout,
|
||||
stderr: attempt.proc.stderr,
|
||||
...(transientUpstream ? { errorFamily: "transient_upstream" } : {}),
|
||||
...(transientRetryNotBefore ? { retryNotBefore: transientRetryNotBefore.toISOString() } : {}),
|
||||
...(transientRetryNotBefore ? { transientRetryNotBefore: transientRetryNotBefore.toISOString() } : {}),
|
||||
},
|
||||
summary: attempt.parsed.summary,
|
||||
clearSession: Boolean((clearSessionOnMissingSession || forceFreshSession) && !resolvedSessionId),
|
||||
@@ -779,6 +796,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
|
||||
return toResult(initial, false, false);
|
||||
} finally {
|
||||
if (paperclipBridge) {
|
||||
await paperclipBridge.stop();
|
||||
}
|
||||
if (restoreRemoteWorkspace) {
|
||||
await onLog(
|
||||
"stdout",
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
extractCodexRetryNotBefore,
|
||||
isCodexTransientUpstreamError,
|
||||
isCodexUnknownSessionError,
|
||||
parseCodexJsonl,
|
||||
@@ -101,6 +102,25 @@ describe("isCodexTransientUpstreamError", () => {
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("classifies usage-limit windows as transient and extracts the retry time", () => {
|
||||
const errorMessage = "You've hit your usage limit for GPT-5.3-Codex-Spark. Switch to another model now, or try again at 11:31 PM.";
|
||||
const now = new Date(2026, 3, 22, 22, 29, 2);
|
||||
|
||||
expect(isCodexTransientUpstreamError({ errorMessage })).toBe(true);
|
||||
expect(extractCodexRetryNotBefore({ errorMessage }, now)?.getTime()).toBe(
|
||||
new Date(2026, 3, 22, 23, 31, 0, 0).getTime(),
|
||||
);
|
||||
});
|
||||
|
||||
it("parses explicit timezone hints on usage-limit retry windows", () => {
|
||||
const errorMessage = "You've hit your usage limit for GPT-5.3-Codex-Spark. Switch to another model now, or try again at 11:31 PM (America/Chicago).";
|
||||
const now = new Date("2026-04-23T03:29:02.000Z");
|
||||
|
||||
expect(extractCodexRetryNotBefore({ errorMessage }, now)?.toISOString()).toBe(
|
||||
"2026-04-23T04:31:00.000Z",
|
||||
);
|
||||
});
|
||||
|
||||
it("does not classify deterministic compaction errors as transient", () => {
|
||||
expect(
|
||||
isCodexTransientUpstreamError({
|
||||
|
||||
@@ -1,8 +1,15 @@
|
||||
import { asString, asNumber, parseObject, parseJson } from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
asString,
|
||||
asNumber,
|
||||
parseObject,
|
||||
parseJson,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
const CODEX_TRANSIENT_UPSTREAM_RE =
|
||||
/(?:we(?:'|’)re\s+currently\s+experiencing\s+high\s+demand|temporary\s+errors|rate[-\s]?limit(?:ed)?|too\s+many\s+requests|\b429\b|server\s+overloaded|service\s+unavailable|try\s+again\s+later)/i;
|
||||
const CODEX_REMOTE_COMPACTION_RE = /remote\s+compact\s+task/i;
|
||||
const CODEX_USAGE_LIMIT_RE =
|
||||
/you(?:'|’)ve hit your usage limit for .+\.\s+switch to another model now,\s+or try again at\s+([^.!\n]+)(?:[.!]|\n|$)/i;
|
||||
|
||||
export function parseCodexJsonl(stdout: string) {
|
||||
let sessionId: string | null = null;
|
||||
@@ -76,12 +83,12 @@ export function isCodexUnknownSessionError(stdout: string, stderr: string): bool
|
||||
);
|
||||
}
|
||||
|
||||
export function isCodexTransientUpstreamError(input: {
|
||||
function buildCodexErrorHaystack(input: {
|
||||
stdout?: string | null;
|
||||
stderr?: string | null;
|
||||
errorMessage?: string | null;
|
||||
}): boolean {
|
||||
const haystack = [
|
||||
}): string {
|
||||
return [
|
||||
input.errorMessage ?? "",
|
||||
input.stdout ?? "",
|
||||
input.stderr ?? "",
|
||||
@@ -91,9 +98,164 @@ export function isCodexTransientUpstreamError(input: {
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean)
|
||||
.join("\n");
|
||||
}
|
||||
|
||||
function readTimeZoneParts(date: Date, timeZone: string) {
|
||||
const values = new Map(
|
||||
new Intl.DateTimeFormat("en-US", {
|
||||
timeZone,
|
||||
hourCycle: "h23",
|
||||
year: "numeric",
|
||||
month: "2-digit",
|
||||
day: "2-digit",
|
||||
hour: "2-digit",
|
||||
minute: "2-digit",
|
||||
}).formatToParts(date).map((part) => [part.type, part.value]),
|
||||
);
|
||||
return {
|
||||
year: Number.parseInt(values.get("year") ?? "", 10),
|
||||
month: Number.parseInt(values.get("month") ?? "", 10),
|
||||
day: Number.parseInt(values.get("day") ?? "", 10),
|
||||
hour: Number.parseInt(values.get("hour") ?? "", 10),
|
||||
minute: Number.parseInt(values.get("minute") ?? "", 10),
|
||||
};
|
||||
}
|
||||
|
||||
function normalizeResetTimeZone(timeZoneHint: string | null | undefined): string | null {
|
||||
const normalized = timeZoneHint?.trim();
|
||||
if (!normalized) return null;
|
||||
if (/^(?:utc|gmt)$/i.test(normalized)) return "UTC";
|
||||
|
||||
try {
|
||||
new Intl.DateTimeFormat("en-US", { timeZone: normalized }).format(new Date(0));
|
||||
return normalized;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function dateFromTimeZoneWallClock(input: {
|
||||
year: number;
|
||||
month: number;
|
||||
day: number;
|
||||
hour: number;
|
||||
minute: number;
|
||||
timeZone: string;
|
||||
}): Date | null {
|
||||
let candidate = new Date(Date.UTC(input.year, input.month - 1, input.day, input.hour, input.minute, 0, 0));
|
||||
const targetUtc = Date.UTC(input.year, input.month - 1, input.day, input.hour, input.minute, 0, 0);
|
||||
|
||||
for (let attempt = 0; attempt < 4; attempt += 1) {
|
||||
const actual = readTimeZoneParts(candidate, input.timeZone);
|
||||
const actualUtc = Date.UTC(actual.year, actual.month - 1, actual.day, actual.hour, actual.minute, 0, 0);
|
||||
const offsetMs = targetUtc - actualUtc;
|
||||
if (offsetMs === 0) break;
|
||||
candidate = new Date(candidate.getTime() + offsetMs);
|
||||
}
|
||||
|
||||
const verified = readTimeZoneParts(candidate, input.timeZone);
|
||||
if (
|
||||
verified.year !== input.year ||
|
||||
verified.month !== input.month ||
|
||||
verified.day !== input.day ||
|
||||
verified.hour !== input.hour ||
|
||||
verified.minute !== input.minute
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return candidate;
|
||||
}
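
To make the wall-clock conversion above concrete, consider the test case below ("try again at 11:31 PM (America/Chicago)" on 2026-04-22): the first candidate treats 23:31 as UTC, reads back as 18:31 on the Chicago clock, and the loop shifts it forward by the 5-hour offset until the wall-clock fields agree. A small standalone check of the converged instant, using only the standard Intl API (no project code assumed):

```ts
// 2026-04-23T04:31:00Z should read back as 23:31 on the Chicago wall clock (CDT, UTC-5).
const candidate = new Date("2026-04-23T04:31:00.000Z");
const wallClock = new Intl.DateTimeFormat("en-US", {
  timeZone: "America/Chicago",
  hourCycle: "h23",
  hour: "2-digit",
  minute: "2-digit",
}).format(candidate);
console.log(wallClock); // "23:31"
```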
|
||||
|
||||
function nextClockTimeInTimeZone(input: {
|
||||
now: Date;
|
||||
hour: number;
|
||||
minute: number;
|
||||
timeZoneHint: string;
|
||||
}): Date | null {
|
||||
const timeZone = normalizeResetTimeZone(input.timeZoneHint);
|
||||
if (!timeZone) return null;
|
||||
|
||||
const nowParts = readTimeZoneParts(input.now, timeZone);
|
||||
let retryAt = dateFromTimeZoneWallClock({
|
||||
year: nowParts.year,
|
||||
month: nowParts.month,
|
||||
day: nowParts.day,
|
||||
hour: input.hour,
|
||||
minute: input.minute,
|
||||
timeZone,
|
||||
});
|
||||
if (!retryAt) return null;
|
||||
|
||||
if (retryAt.getTime() <= input.now.getTime()) {
|
||||
const nextDay = new Date(Date.UTC(nowParts.year, nowParts.month - 1, nowParts.day + 1, 0, 0, 0, 0));
|
||||
retryAt = dateFromTimeZoneWallClock({
|
||||
year: nextDay.getUTCFullYear(),
|
||||
month: nextDay.getUTCMonth() + 1,
|
||||
day: nextDay.getUTCDate(),
|
||||
hour: input.hour,
|
||||
minute: input.minute,
|
||||
timeZone,
|
||||
});
|
||||
}
|
||||
|
||||
return retryAt;
|
||||
}
|
||||
|
||||
function parseLocalClockTime(clockText: string, now: Date): Date | null {
|
||||
const normalized = clockText.trim();
|
||||
const match = normalized.match(/^(\d{1,2})(?::(\d{2}))?\s*([ap])\.?\s*m\.?(?:\s*\(([^)]+)\)|\s+([A-Z]{2,5}))?$/i);
|
||||
if (!match) return null;
|
||||
|
||||
const hour12 = Number.parseInt(match[1] ?? "", 10);
|
||||
const minute = Number.parseInt(match[2] ?? "0", 10);
|
||||
if (!Number.isInteger(hour12) || hour12 < 1 || hour12 > 12) return null;
|
||||
if (!Number.isInteger(minute) || minute < 0 || minute > 59) return null;
|
||||
|
||||
let hour24 = hour12 % 12;
|
||||
if ((match[3] ?? "").toLowerCase() === "p") hour24 += 12;
|
||||
|
||||
const timeZoneHint = match[4] ?? match[5];
|
||||
if (timeZoneHint) {
|
||||
const explicitRetryAt = nextClockTimeInTimeZone({
|
||||
now,
|
||||
hour: hour24,
|
||||
minute,
|
||||
timeZoneHint,
|
||||
});
|
||||
if (explicitRetryAt) return explicitRetryAt;
|
||||
}
|
||||
|
||||
const retryAt = new Date(now);
|
||||
retryAt.setHours(hour24, minute, 0, 0);
|
||||
if (retryAt.getTime() <= now.getTime()) {
|
||||
retryAt.setDate(retryAt.getDate() + 1);
|
||||
}
|
||||
return retryAt;
|
||||
}
|
||||
|
||||
export function extractCodexRetryNotBefore(input: {
|
||||
stdout?: string | null;
|
||||
stderr?: string | null;
|
||||
errorMessage?: string | null;
|
||||
}, now = new Date()): Date | null {
|
||||
const haystack = buildCodexErrorHaystack(input);
|
||||
const usageLimitMatch = haystack.match(CODEX_USAGE_LIMIT_RE);
|
||||
if (!usageLimitMatch) return null;
|
||||
return parseLocalClockTime(usageLimitMatch[1] ?? "", now);
|
||||
}
|
||||
|
||||
export function isCodexTransientUpstreamError(input: {
|
||||
stdout?: string | null;
|
||||
stderr?: string | null;
|
||||
errorMessage?: string | null;
|
||||
}): boolean {
|
||||
const haystack = buildCodexErrorHaystack(input);
|
||||
|
||||
if (extractCodexRetryNotBefore(input) != null) return true;
|
||||
if (!CODEX_TRANSIENT_UPSTREAM_RE.test(haystack)) return false;
|
||||
// Keep automatic retries scoped to the observed remote-compaction/high-demand
|
||||
// failure shape; broader 429s may be caused by user or account limits.
|
||||
// failure shape, plus explicit usage-limit windows that tell us when retrying
|
||||
// becomes safe again.
|
||||
return CODEX_REMOTE_COMPACTION_RE.test(haystack) || /high\s+demand|temporary\s+errors/i.test(haystack);
|
||||
}
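
A quick illustration of how narrow the classifier above stays, using only message shapes that the regexes in this file actually match (a sketch, not part of the diff):

```ts
// High-demand wording matches both the broad and the narrow patterns -> transient.
isCodexTransientUpstreamError({ errorMessage: "We're currently experiencing high demand." }); // true

// A bare 429 matches the broad pattern but not the compaction/high-demand/usage-limit
// shapes, so it is deliberately not auto-retried (it may be a user or account limit).
isCodexTransientUpstreamError({ errorMessage: "429 Too Many Requests" }); // false

// Usage-limit messages are transient and also carry a retry-not-before time.
extractCodexRetryNotBefore({
  errorMessage:
    "You've hit your usage limit for GPT-5.3-Codex-Spark. Switch to another model now, or try again at 11:31 PM.",
}); // -> Date of the next 11:31 PM (server-local time when no timezone hint is given)
```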
|
||||
|
||||
@@ -6,11 +6,15 @@ import type {
|
||||
import {
|
||||
asString,
|
||||
parseObject,
|
||||
ensureAbsoluteDirectory,
|
||||
ensureCommandResolvable,
|
||||
ensurePathInEnv,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetDirectory,
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import path from "node:path";
|
||||
import { parseCodexJsonl } from "./parse.js";
|
||||
import { codexHomeDir, readCodexAuthInfo } from "./quota.js";
|
||||
@@ -57,10 +61,28 @@ export async function testEnvironment(
|
||||
const checks: AdapterEnvironmentCheck[] = [];
|
||||
const config = parseObject(ctx.config);
|
||||
const command = asString(config.command, "codex");
|
||||
const cwd = asString(config.cwd, process.cwd());
|
||||
const target = ctx.executionTarget ?? null;
|
||||
const targetIsRemote = target?.kind === "remote";
|
||||
const cwd = resolveAdapterExecutionTargetCwd(target, asString(config.cwd, ""), process.cwd());
|
||||
const targetLabel = targetIsRemote
|
||||
? ctx.environmentName ?? describeAdapterExecutionTarget(target)
|
||||
: null;
|
||||
const runId = `codex-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`;
|
||||
|
||||
if (targetLabel) {
|
||||
checks.push({
|
||||
code: "codex_environment_target",
|
||||
level: "info",
|
||||
message: `Probing inside environment: ${targetLabel}`,
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
await ensureAdapterExecutionTargetDirectory(runId, target, cwd, {
|
||||
cwd,
|
||||
env: {},
|
||||
createIfMissing: true,
|
||||
});
|
||||
checks.push({
|
||||
code: "codex_cwd_valid",
|
||||
level: "info",
|
||||
@@ -82,7 +104,7 @@ export async function testEnvironment(
|
||||
}
|
||||
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
|
||||
try {
|
||||
await ensureCommandResolvable(command, cwd, runtimeEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, target, cwd, runtimeEnv);
|
||||
checks.push({
|
||||
code: "codex_command_resolvable",
|
||||
level: "info",
|
||||
@@ -98,7 +120,7 @@ export async function testEnvironment(
|
||||
}
|
||||
|
||||
const configOpenAiKey = env.OPENAI_API_KEY;
|
||||
const hostOpenAiKey = process.env.OPENAI_API_KEY;
|
||||
const hostOpenAiKey = targetIsRemote ? undefined : process.env.OPENAI_API_KEY;
|
||||
if (isNonEmpty(configOpenAiKey) || isNonEmpty(hostOpenAiKey)) {
|
||||
const source = isNonEmpty(configOpenAiKey) ? "adapter config env" : "server environment";
|
||||
checks.push({
|
||||
@@ -107,7 +129,9 @@ export async function testEnvironment(
|
||||
message: "OPENAI_API_KEY is set for Codex authentication.",
|
||||
detail: `Detected in ${source}.`,
|
||||
});
|
||||
} else {
|
||||
} else if (!targetIsRemote) {
|
||||
// Local-only auth file check. On remote targets, the probe will surface
|
||||
// any missing-auth errors directly from the remote `codex` invocation.
|
||||
const codexHome = isNonEmpty(env.CODEX_HOME) ? env.CODEX_HOME : undefined;
|
||||
const codexAuth = await readCodexAuthInfo(codexHome).catch(() => null);
|
||||
if (codexAuth) {
|
||||
@@ -146,12 +170,13 @@ export async function testEnvironment(
|
||||
code: "codex_fast_mode_unsupported_model",
|
||||
level: "warn",
|
||||
message: execArgs.fastModeIgnoredReason,
|
||||
hint: "Switch the agent model to GPT-5.4 to enable Codex Fast mode.",
|
||||
hint: "Switch the agent model to GPT-5.4 or enter a manual model ID to enable Codex Fast mode.",
|
||||
});
|
||||
}
|
||||
|
||||
const probe = await runChildProcess(
|
||||
`codex-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`,
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
args,
|
||||
{
|
||||
|
||||
@@ -70,8 +70,6 @@ export function buildCodexLocalConfig(v: CreateConfigValues): Record<string, unk
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
ac.model = v.model || DEFAULT_CODEX_LOCAL_MODEL;
|
||||
if (v.thinkingEffort) ac.modelReasoningEffort = v.thinkingEffort;
|
||||
ac.timeoutSec = 0;
|
||||
|
||||
@@ -80,4 +80,5 @@ Notes:
|
||||
- Sessions are resumed with --resume when stored session cwd matches current cwd.
|
||||
- Paperclip auto-injects local skills into "~/.cursor/skills" when missing, so Cursor can discover "$paperclip" and related skills on local runs.
|
||||
- Paperclip auto-adds --yolo unless one of --trust/--yolo/-f is already present in extraArgs.
|
||||
- Remote sandbox runs prepend "~/.local/bin" to PATH and prefer "~/.local/bin/cursor-agent" when the default Cursor entrypoint is requested, so standard E2B-style installs do not need hardcoded absolute command paths.
|
||||
`;
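
For illustration, a minimal adapter config sketch touching the behaviors in these notes (only keys this adapter is shown reading elsewhere in the diff; all values are placeholders):

```ts
// Placeholder values; the presence of "--trust" in extraArgs suppresses the auto-added --yolo.
const cursorLocalConfig = {
  command: "agent",
  cwd: "/workspace/repo",        // placeholder working directory
  model: "example-model-id",     // placeholder model id
  extraArgs: ["--trust"],
  timeoutSec: 0,
  graceSec: 20,
};
```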
|
||||
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
@@ -18,12 +19,14 @@ import {
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import {
|
||||
asString,
|
||||
asNumber,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
buildInvocationEnvForLogs,
|
||||
ensureAbsoluteDirectory,
|
||||
@@ -40,6 +43,7 @@ import {
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import { DEFAULT_CURSOR_LOCAL_MODEL } from "../index.js";
|
||||
import { parseCursorJsonl, isCursorUnknownSessionError } from "./parse.js";
|
||||
import { prepareCursorSandboxCommand } from "./remote-command.js";
|
||||
import { normalizeCursorStreamLine } from "../shared/stream.js";
|
||||
import { hasCursorTrustBypassArg } from "../shared/trust.js";
|
||||
|
||||
@@ -198,7 +202,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
config.promptTemplate,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
);
|
||||
const command = asString(config.command, "agent");
|
||||
let command = asString(config.command, "agent");
|
||||
const model = asString(config.model, DEFAULT_CURSOR_LOCAL_MODEL).trim();
|
||||
const mode = normalizeMode(asString(config.mode, ""));
|
||||
|
||||
@@ -230,7 +234,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const envConfig = parseObject(config.env);
|
||||
const hasExplicitApiKey =
|
||||
typeof envConfig.PAPERCLIP_API_KEY === "string" && envConfig.PAPERCLIP_API_KEY.trim().length > 0;
|
||||
const env: Record<string, string> = { ...buildPaperclipEnv(agent) };
|
||||
let env: Record<string, string> = { ...buildPaperclipEnv(agent) };
|
||||
env.PAPERCLIP_RUN_ID = runId;
|
||||
const wakeTaskId =
|
||||
(typeof context.taskId === "string" && context.taskId.trim().length > 0 && context.taskId.trim()) ||
|
||||
@@ -277,24 +281,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (wakePayloadJson) {
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
if (effectiveWorkspaceCwd) {
|
||||
env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
}
|
||||
if (workspaceSource) {
|
||||
env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
}
|
||||
if (workspaceId) {
|
||||
env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
}
|
||||
if (workspaceRepoUrl) {
|
||||
env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
|
||||
}
|
||||
if (workspaceRepoRef) {
|
||||
env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
|
||||
}
|
||||
if (agentHome) {
|
||||
env.AGENT_HOME = agentHome;
|
||||
}
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
}
|
||||
@@ -308,6 +302,22 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (!hasExplicitApiKey && authToken) {
|
||||
env.PAPERCLIP_API_KEY = authToken;
|
||||
}
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
// Probe the sandbox before the managed-home override so we discover
|
||||
// cursor-agent from the real system HOME (e.g. ~/.local/bin/cursor-agent).
|
||||
// The managed HOME set later is for runtime isolation, not for finding the CLI.
|
||||
const sandboxCommand = await prepareCursorSandboxCommand({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
command,
|
||||
cwd,
|
||||
env,
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
});
|
||||
command = sandboxCommand.command;
|
||||
env = sandboxCommand.env;
|
||||
const effectiveEnv = Object.fromEntries(
|
||||
Object.entries({ ...process.env, ...env }).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
@@ -317,14 +327,12 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const runtimeEnv = ensurePathInEnv(effectiveEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
const loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
let loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv,
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
});
|
||||
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
const extraArgs = (() => {
|
||||
const fromExtraArgs = asStringArray(config.extraArgs);
|
||||
if (fromExtraArgs.length > 0) return fromExtraArgs;
|
||||
@@ -334,6 +342,8 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
let restoreRemoteWorkspace: (() => Promise<void>) | null = null;
|
||||
let localSkillsDir: string | null = null;
|
||||
let remoteRuntimeRootDir: string | null = null;
|
||||
let paperclipBridge: Awaited<ReturnType<typeof startAdapterExecutionTargetPaperclipBridge>> = null;
|
||||
|
||||
if (executionTargetIsRemote) {
|
||||
try {
|
||||
@@ -353,6 +363,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}],
|
||||
});
|
||||
restoreRemoteWorkspace = () => preparedExecutionTargetRuntime.restoreWorkspace();
|
||||
remoteRuntimeRootDir = preparedExecutionTargetRuntime.runtimeRootDir;
|
||||
const managedHome = adapterExecutionTargetUsesManagedHome(executionTarget);
|
||||
if (managedHome && preparedExecutionTargetRuntime.runtimeRootDir) {
|
||||
env.HOME = preparedExecutionTargetRuntime.runtimeRootDir;
|
||||
@@ -383,6 +394,24 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
runtimeRootDir: remoteRuntimeRootDir,
|
||||
adapterKey: "cursor",
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
if (paperclipBridge) {
|
||||
Object.assign(env, paperclipBridge.env);
|
||||
loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv: ensurePathInEnv({ ...process.env, ...env }),
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const runtimeSessionParams = parseObject(runtime.sessionParams);
|
||||
const runtimeSessionId = asString(runtimeSessionParams.sessionId, runtime.sessionId ?? "");
|
||||
@@ -431,6 +460,12 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
notes.push("Auto-added --yolo to bypass interactive prompts.");
|
||||
}
|
||||
notes.push("Prompt is piped to Cursor via stdin.");
|
||||
if (sandboxCommand.addedPathEntry) {
|
||||
notes.push(`Remote sandbox runs prepend ${sandboxCommand.addedPathEntry} to PATH.`);
|
||||
}
|
||||
if (sandboxCommand.preferredCommandPath) {
|
||||
notes.push(`Remote sandbox runs prefer ${sandboxCommand.preferredCommandPath} when using the default Cursor entrypoint.`);
|
||||
}
|
||||
if (!instructionsFilePath) return notes;
|
||||
if (instructionsPrefix.length > 0) {
|
||||
notes.push(
|
||||
@@ -645,6 +680,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}
|
||||
return toResult(initial);
|
||||
} finally {
|
||||
if (paperclipBridge) {
|
||||
await paperclipBridge.stop();
|
||||
}
|
||||
if (restoreRemoteWorkspace) {
|
||||
await onLog(
|
||||
"stdout",
|
||||
|
||||
160
packages/adapters/cursor-local/src/server/remote-command.ts
Normal file
@@ -0,0 +1,160 @@
|
||||
import path from "node:path";
|
||||
import {
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
type AdapterExecutionTarget,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import { ensurePathInEnv } from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
const DEFAULT_CURSOR_COMMAND_BASENAMES = new Set(["agent", "cursor-agent"]);
|
||||
|
||||
function commandBasename(command: string): string {
|
||||
return command.trim().split(/[\\/]/).pop()?.toLowerCase() ?? "";
|
||||
}
|
||||
|
||||
function hasPathSeparator(command: string): boolean {
|
||||
return command.includes("/") || command.includes("\\");
|
||||
}
|
||||
|
||||
function prependPosixPathEntry(pathValue: string, entry: string): string {
|
||||
const parts = pathValue.split(":").filter(Boolean);
|
||||
if (parts.includes(entry)) return pathValue;
|
||||
const cleaned = parts.join(":");
|
||||
return cleaned.length > 0 ? `${entry}:${cleaned}` : entry;
|
||||
}
|
||||
|
||||
type SandboxCursorRuntimeInfo = {
|
||||
remoteSystemHomeDir: string | null;
|
||||
preferredCommandPath: string | null;
|
||||
};
|
||||
|
||||
function readMarkedValue(lines: string[], marker: string): string | null {
|
||||
const matchedLine = lines.find((line) => line.startsWith(marker));
|
||||
if (!matchedLine) return null;
|
||||
const value = matchedLine.slice(marker.length).trim();
|
||||
return value.length > 0 ? value : null;
|
||||
}
|
||||
|
||||
async function readSandboxCursorRuntimeInfo(input: {
|
||||
runId: string;
|
||||
target: AdapterExecutionTarget;
|
||||
command: string;
|
||||
cwd: string;
|
||||
env: Record<string, string>;
|
||||
timeoutSec: number;
|
||||
graceSec: number;
|
||||
}): Promise<SandboxCursorRuntimeInfo> {
|
||||
const shouldCheckPreferredCommand = isDefaultCursorCommand(input.command) && !hasPathSeparator(input.command);
|
||||
const homeMarker = "__PAPERCLIP_CURSOR_HOME__:";
|
||||
const preferredMarker = "__PAPERCLIP_CURSOR_AGENT__:";
|
||||
try {
|
||||
const result = await runAdapterExecutionTargetShellCommand(
|
||||
input.runId,
|
||||
input.target,
|
||||
[
|
||||
`printf ${JSON.stringify(`${homeMarker}%s\\n`)} "$HOME"`,
|
||||
shouldCheckPreferredCommand
|
||||
? `if [ -x "$HOME/.local/bin/cursor-agent" ]; then printf ${JSON.stringify(`${preferredMarker}%s\\n`)} "$HOME/.local/bin/cursor-agent"; fi`
|
||||
: "",
|
||||
].filter(Boolean).join("; "),
|
||||
{
|
||||
cwd: input.cwd,
|
||||
env: input.env,
|
||||
timeoutSec: input.timeoutSec,
|
||||
graceSec: input.graceSec,
|
||||
},
|
||||
);
|
||||
if (result.timedOut || (result.exitCode ?? 1) !== 0) {
|
||||
return {
|
||||
remoteSystemHomeDir: null,
|
||||
preferredCommandPath: null,
|
||||
};
|
||||
}
|
||||
const lines = result.stdout.split(/\r?\n/);
|
||||
return {
|
||||
remoteSystemHomeDir: readMarkedValue(lines, homeMarker),
|
||||
preferredCommandPath: readMarkedValue(lines, preferredMarker),
|
||||
};
|
||||
} catch {
|
||||
return {
|
||||
remoteSystemHomeDir: null,
|
||||
preferredCommandPath: null,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export function isDefaultCursorCommand(command: string): boolean {
|
||||
return DEFAULT_CURSOR_COMMAND_BASENAMES.has(commandBasename(command));
|
||||
}
|
||||
|
||||
export type PreparedCursorSandboxCommand = {
|
||||
command: string;
|
||||
env: Record<string, string>;
|
||||
remoteSystemHomeDir: string | null;
|
||||
addedPathEntry: string | null;
|
||||
preferredCommandPath: string | null;
|
||||
};
|
||||
|
||||
export async function prepareCursorSandboxCommand(input: {
|
||||
runId: string;
|
||||
target: AdapterExecutionTarget | null | undefined;
|
||||
command: string;
|
||||
cwd: string;
|
||||
env: Record<string, string>;
|
||||
timeoutSec: number;
|
||||
graceSec: number;
|
||||
}): Promise<PreparedCursorSandboxCommand> {
|
||||
if (input.target?.kind !== "remote" || input.target.transport !== "sandbox") {
|
||||
return {
|
||||
command: input.command,
|
||||
env: input.env,
|
||||
remoteSystemHomeDir: null,
|
||||
addedPathEntry: null,
|
||||
preferredCommandPath: null,
|
||||
};
|
||||
}
|
||||
|
||||
const runtimeInfo = await readSandboxCursorRuntimeInfo({
|
||||
runId: input.runId,
|
||||
target: input.target,
|
||||
command: input.command,
|
||||
cwd: input.cwd,
|
||||
env: input.env,
|
||||
timeoutSec: input.timeoutSec,
|
||||
graceSec: input.graceSec,
|
||||
});
|
||||
const remoteSystemHomeDir = runtimeInfo.remoteSystemHomeDir;
|
||||
|
||||
if (!remoteSystemHomeDir) {
|
||||
return {
|
||||
command: input.command,
|
||||
env: input.env,
|
||||
remoteSystemHomeDir: null,
|
||||
addedPathEntry: null,
|
||||
preferredCommandPath: null,
|
||||
};
|
||||
}
|
||||
|
||||
const remoteLocalBinDir = path.posix.join(remoteSystemHomeDir, ".local", "bin");
|
||||
const runtimeEnv = ensurePathInEnv(input.env);
|
||||
const currentPath = runtimeEnv.PATH ?? runtimeEnv.Path ?? "";
|
||||
const nextPath = prependPosixPathEntry(currentPath, remoteLocalBinDir);
|
||||
const env = nextPath === currentPath ? input.env : { ...input.env, PATH: nextPath };
|
||||
|
||||
if (!runtimeInfo.preferredCommandPath) {
|
||||
return {
|
||||
command: input.command,
|
||||
env,
|
||||
remoteSystemHomeDir,
|
||||
addedPathEntry: nextPath === currentPath ? null : remoteLocalBinDir,
|
||||
preferredCommandPath: null,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
command: runtimeInfo.preferredCommandPath,
|
||||
env,
|
||||
remoteSystemHomeDir,
|
||||
addedPathEntry: nextPath === currentPath ? null : remoteLocalBinDir,
|
||||
preferredCommandPath: runtimeInfo.preferredCommandPath,
|
||||
};
|
||||
}
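
A brief usage sketch of the helper defined above, mirroring how the execute path calls it (values are hypothetical; on non-sandbox targets the input command and env are returned unchanged):

```ts
const prepared = await prepareCursorSandboxCommand({
  runId: "cursor-run-example",   // hypothetical run id
  target: executionTarget,       // remote sandbox target from the run context
  command: "agent",
  cwd: "/workspace/repo",        // hypothetical working directory
  env,
  timeoutSec: 0,
  graceSec: 20,
});
// On a typical E2B-style sandbox with cursor-agent installed under ~/.local/bin:
//   prepared.command        -> "<remote $HOME>/.local/bin/cursor-agent"
//   prepared.env.PATH       -> starts with "<remote $HOME>/.local/bin"
//   prepared.addedPathEntry -> the prepended entry (or null if PATH already contained it)
```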
|
||||
@@ -7,16 +7,21 @@ import {
|
||||
asString,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
ensureAbsoluteDirectory,
|
||||
ensureCommandResolvable,
|
||||
ensurePathInEnv,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetDirectory,
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { DEFAULT_CURSOR_LOCAL_MODEL } from "../index.js";
|
||||
import { parseCursorJsonl } from "./parse.js";
|
||||
import { isDefaultCursorCommand, prepareCursorSandboxCommand } from "./remote-command.js";
|
||||
import { hasCursorTrustBypassArg } from "../shared/trust.js";
|
||||
|
||||
function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentTestResult["status"] {
|
||||
@@ -38,11 +43,6 @@ function firstNonEmptyLine(text: string): string {
|
||||
);
|
||||
}
|
||||
|
||||
function commandLooksLike(command: string, expected: string): boolean {
|
||||
const base = path.basename(command).toLowerCase();
|
||||
return base === expected || base === `${expected}.cmd` || base === `${expected}.exe`;
|
||||
}
|
||||
|
||||
function summarizeProbeDetail(stdout: string, stderr: string, parsedError: string | null): string | null {
|
||||
const raw = parsedError?.trim() || firstNonEmptyLine(stderr) || firstNonEmptyLine(stdout);
|
||||
if (!raw) return null;
|
||||
@@ -94,11 +94,29 @@ export async function testEnvironment(
|
||||
): Promise<AdapterEnvironmentTestResult> {
|
||||
const checks: AdapterEnvironmentCheck[] = [];
|
||||
const config = parseObject(ctx.config);
|
||||
const command = asString(config.command, "agent");
|
||||
const cwd = asString(config.cwd, process.cwd());
|
||||
let command = asString(config.command, "agent");
|
||||
const target = ctx.executionTarget ?? null;
|
||||
const targetIsRemote = target?.kind === "remote";
|
||||
const cwd = resolveAdapterExecutionTargetCwd(target, asString(config.cwd, ""), process.cwd());
|
||||
const targetLabel = targetIsRemote
|
||||
? ctx.environmentName ?? describeAdapterExecutionTarget(target)
|
||||
: null;
|
||||
const runId = `cursor-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`;
|
||||
|
||||
if (targetLabel) {
|
||||
checks.push({
|
||||
code: "cursor_environment_target",
|
||||
level: "info",
|
||||
message: `Probing inside environment: ${targetLabel}`,
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
await ensureAdapterExecutionTargetDirectory(runId, target, cwd, {
|
||||
cwd,
|
||||
env: {},
|
||||
createIfMissing: true,
|
||||
});
|
||||
checks.push({
|
||||
code: "cursor_cwd_valid",
|
||||
level: "info",
|
||||
@@ -114,13 +132,24 @@ export async function testEnvironment(
|
||||
}
|
||||
|
||||
const envConfig = parseObject(config.env);
|
||||
const env: Record<string, string> = {};
|
||||
let env: Record<string, string> = {};
|
||||
for (const [key, value] of Object.entries(envConfig)) {
|
||||
if (typeof value === "string") env[key] = value;
|
||||
}
|
||||
const sandboxCommand = await prepareCursorSandboxCommand({
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
cwd,
|
||||
env,
|
||||
timeoutSec: 45,
|
||||
graceSec: 5,
|
||||
});
|
||||
command = sandboxCommand.command;
|
||||
env = sandboxCommand.env;
|
||||
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
|
||||
try {
|
||||
await ensureCommandResolvable(command, cwd, runtimeEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, target, cwd, runtimeEnv);
|
||||
checks.push({
|
||||
code: "cursor_command_resolvable",
|
||||
level: "info",
|
||||
@@ -136,7 +165,7 @@ export async function testEnvironment(
|
||||
}
|
||||
|
||||
const configCursorApiKey = env.CURSOR_API_KEY;
|
||||
const hostCursorApiKey = process.env.CURSOR_API_KEY;
|
||||
const hostCursorApiKey = targetIsRemote ? undefined : process.env.CURSOR_API_KEY;
|
||||
if (isNonEmpty(configCursorApiKey) || isNonEmpty(hostCursorApiKey)) {
|
||||
const source = isNonEmpty(configCursorApiKey) ? "adapter config env" : "server environment";
|
||||
checks.push({
|
||||
@@ -145,7 +174,7 @@ export async function testEnvironment(
|
||||
message: "CURSOR_API_KEY is set for Cursor authentication.",
|
||||
detail: `Detected in ${source}.`,
|
||||
});
|
||||
} else {
|
||||
} else if (!targetIsRemote) {
|
||||
const cursorHome = isNonEmpty(env.CURSOR_HOME) ? env.CURSOR_HOME : undefined;
|
||||
const cursorAuth = await readCursorAuthInfo(cursorHome).catch(() => null);
|
||||
if (cursorAuth) {
|
||||
@@ -170,13 +199,13 @@ export async function testEnvironment(
|
||||
const canRunProbe =
|
||||
checks.every((check) => check.code !== "cursor_cwd_invalid" && check.code !== "cursor_command_unresolvable");
|
||||
if (canRunProbe) {
|
||||
if (!commandLooksLike(command, "agent")) {
|
||||
if (!isDefaultCursorCommand(command)) {
|
||||
checks.push({
|
||||
code: "cursor_hello_probe_skipped_custom_command",
|
||||
level: "info",
|
||||
message: "Skipped hello probe because command is not `agent`.",
|
||||
message: "Skipped hello probe because command is not a default Cursor CLI entrypoint.",
|
||||
detail: command,
|
||||
hint: "Use the `agent` CLI command to run the automatic installation and auth probe.",
|
||||
hint: "Use `agent` or `cursor-agent` to run the automatic installation and auth probe.",
|
||||
});
|
||||
} else {
|
||||
const model = asString(config.model, DEFAULT_CURSOR_LOCAL_MODEL).trim();
|
||||
@@ -192,8 +221,9 @@ export async function testEnvironment(
|
||||
if (extraArgs.length > 0) args.push(...extraArgs);
|
||||
args.push("Respond with hello.");
|
||||
|
||||
const probe = await runChildProcess(
|
||||
`cursor-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`,
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
args,
|
||||
{
|
||||
|
||||
@@ -61,8 +61,6 @@ export function buildCursorLocalConfig(v: CreateConfigValues): Record<string, un
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
ac.model = v.model || DEFAULT_CURSOR_LOCAL_MODEL;
|
||||
const mode = normalizeMode(v.thinkingEffort);
|
||||
if (mode) ac.mode = mode;
|
||||
|
||||
@@ -11,6 +11,7 @@ import {
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
@@ -19,12 +20,14 @@ import {
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import {
|
||||
asBoolean,
|
||||
asNumber,
|
||||
asString,
|
||||
asStringArray,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
buildInvocationEnvForLogs,
|
||||
ensureAbsoluteDirectory,
|
||||
@@ -240,12 +243,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
if (effectiveWorkspaceCwd) env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
if (workspaceSource) env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
if (workspaceId) env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
if (workspaceRepoUrl) env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
|
||||
if (workspaceRepoRef) env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
|
||||
if (agentHome) env.AGENT_HOME = agentHome;
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
const targetPaperclipApiUrl = adapterExecutionTargetPaperclipApiUrl(executionTarget);
|
||||
if (targetPaperclipApiUrl) env.PAPERCLIP_API_URL = targetPaperclipApiUrl;
|
||||
@@ -265,7 +270,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const runtimeEnv = ensurePathInEnv(effectiveEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
const loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
let loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv,
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
@@ -282,6 +287,8 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
let restoreRemoteWorkspace: (() => Promise<void>) | null = null;
|
||||
let remoteSkillsDir: string | null = null;
|
||||
let localSkillsDir: string | null = null;
|
||||
let remoteRuntimeRootDir: string | null = null;
|
||||
let paperclipBridge: Awaited<ReturnType<typeof startAdapterExecutionTargetPaperclipBridge>> = null;
|
||||
|
||||
if (executionTargetIsRemote) {
|
||||
try {
|
||||
@@ -301,6 +308,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}],
|
||||
});
|
||||
restoreRemoteWorkspace = () => preparedExecutionTargetRuntime.restoreWorkspace();
|
||||
remoteRuntimeRootDir = preparedExecutionTargetRuntime.runtimeRootDir;
|
||||
const managedHome = adapterExecutionTargetUsesManagedHome(executionTarget);
|
||||
if (managedHome && preparedExecutionTargetRuntime.runtimeRootDir) {
|
||||
env.HOME = preparedExecutionTargetRuntime.runtimeRootDir;
|
||||
@@ -331,6 +339,24 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
runtimeRootDir: remoteRuntimeRootDir,
|
||||
adapterKey: "gemini",
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
if (paperclipBridge) {
|
||||
Object.assign(env, paperclipBridge.env);
|
||||
loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv: ensurePathInEnv({ ...process.env, ...env }),
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const runtimeSessionParams = parseObject(runtime.sessionParams);
|
||||
const runtimeSessionId = asString(runtimeSessionParams.sessionId, runtime.sessionId ?? "");
|
||||
@@ -580,6 +606,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
return toResult(initial);
|
||||
} finally {
|
||||
await Promise.all([
|
||||
paperclipBridge?.stop(),
|
||||
restoreRemoteWorkspace?.(),
|
||||
localSkillsDir ? fs.rm(path.dirname(localSkillsDir), { recursive: true, force: true }).catch(() => undefined) : Promise.resolve(),
|
||||
]);
|
||||
|
||||
@@ -9,12 +9,16 @@ import {
|
||||
asNumber,
|
||||
asString,
|
||||
asStringArray,
|
||||
ensureAbsoluteDirectory,
|
||||
ensureCommandResolvable,
|
||||
ensurePathInEnv,
|
||||
parseObject,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetDirectory,
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import { DEFAULT_GEMINI_LOCAL_MODEL } from "../index.js";
|
||||
import { detectGeminiAuthRequired, detectGeminiQuotaExhausted, parseGeminiJsonl } from "./parse.js";
|
||||
import { firstNonEmptyLine } from "./utils.js";
|
||||
@@ -48,10 +52,28 @@ export async function testEnvironment(
|
||||
const checks: AdapterEnvironmentCheck[] = [];
|
||||
const config = parseObject(ctx.config);
|
||||
const command = asString(config.command, "gemini");
|
||||
const cwd = asString(config.cwd, process.cwd());
|
||||
const target = ctx.executionTarget ?? null;
|
||||
const targetIsRemote = target?.kind === "remote";
|
||||
const cwd = resolveAdapterExecutionTargetCwd(target, asString(config.cwd, ""), process.cwd());
|
||||
const targetLabel = targetIsRemote
|
||||
? ctx.environmentName ?? describeAdapterExecutionTarget(target)
|
||||
: null;
|
||||
const runId = `gemini-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`;
|
||||
|
||||
if (targetLabel) {
|
||||
checks.push({
|
||||
code: "gemini_environment_target",
|
||||
level: "info",
|
||||
message: `Probing inside environment: ${targetLabel}`,
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
await ensureAdapterExecutionTargetDirectory(runId, target, cwd, {
|
||||
cwd,
|
||||
env: {},
|
||||
createIfMissing: true,
|
||||
});
|
||||
checks.push({
|
||||
code: "gemini_cwd_valid",
|
||||
level: "info",
|
||||
@@ -73,7 +95,7 @@ export async function testEnvironment(
|
||||
}
|
||||
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
|
||||
try {
|
||||
await ensureCommandResolvable(command, cwd, runtimeEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, target, cwd, runtimeEnv);
|
||||
checks.push({
|
||||
code: "gemini_command_resolvable",
|
||||
level: "info",
|
||||
@@ -89,10 +111,10 @@ export async function testEnvironment(
|
||||
}
|
||||
|
||||
const configGeminiApiKey = env.GEMINI_API_KEY;
|
||||
const hostGeminiApiKey = process.env.GEMINI_API_KEY;
|
||||
const hostGeminiApiKey = targetIsRemote ? undefined : process.env.GEMINI_API_KEY;
|
||||
const configGoogleApiKey = env.GOOGLE_API_KEY;
|
||||
const hostGoogleApiKey = process.env.GOOGLE_API_KEY;
|
||||
const hasGca = env.GOOGLE_GENAI_USE_GCA === "true" || process.env.GOOGLE_GENAI_USE_GCA === "true";
|
||||
const hostGoogleApiKey = targetIsRemote ? undefined : process.env.GOOGLE_API_KEY;
|
||||
const hasGca = env.GOOGLE_GENAI_USE_GCA === "true" || (!targetIsRemote && process.env.GOOGLE_GENAI_USE_GCA === "true");
|
||||
if (
|
||||
isNonEmpty(configGeminiApiKey) ||
|
||||
isNonEmpty(hostGeminiApiKey) ||
|
||||
@@ -152,8 +174,9 @@ export async function testEnvironment(
|
||||
}
|
||||
if (extraArgs.length > 0) args.push(...extraArgs);
|
||||
|
||||
const probe = await runChildProcess(
|
||||
`gemini-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`,
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
args,
|
||||
{
|
||||
|
||||
@@ -55,8 +55,6 @@ export function buildGeminiLocalConfig(v: CreateConfigValues): Record<string, un
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
ac.model = v.model || DEFAULT_GEMINI_LOCAL_MODEL;
|
||||
ac.timeoutSec = 0;
|
||||
ac.graceSec = 15;
|
||||
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
@@ -18,12 +19,14 @@ import {
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import {
|
||||
asString,
|
||||
asNumber,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
joinPromptSections,
|
||||
buildInvocationEnvForLogs,
|
||||
@@ -199,12 +202,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
if (effectiveWorkspaceCwd) env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
if (workspaceSource) env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
if (workspaceId) env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
if (workspaceRepoUrl) env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
|
||||
if (workspaceRepoRef) env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
|
||||
if (agentHome) env.AGENT_HOME = agentHome;
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
const targetPaperclipApiUrl = adapterExecutionTargetPaperclipApiUrl(executionTarget);
|
||||
if (targetPaperclipApiUrl) env.PAPERCLIP_API_URL = targetPaperclipApiUrl;
|
||||
@@ -231,7 +236,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
const loggedEnv = buildInvocationEnvForLogs(preparedRuntimeConfig.env, {
|
||||
let loggedEnv = buildInvocationEnvForLogs(preparedRuntimeConfig.env, {
|
||||
runtimeEnv,
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
@@ -256,6 +261,8 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
let restoreRemoteWorkspace: (() => Promise<void>) | null = null;
|
||||
let localSkillsDir: string | null = null;
|
||||
let remoteRuntimeRootDir: string | null = null;
|
||||
let paperclipBridge: Awaited<ReturnType<typeof startAdapterExecutionTargetPaperclipBridge>> = null;
|
||||
|
||||
if (executionTargetIsRemote) {
|
||||
localSkillsDir = await buildOpenCodeSkillsDir(config);
|
||||
@@ -282,6 +289,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
],
|
||||
});
|
||||
restoreRemoteWorkspace = () => preparedExecutionTargetRuntime.restoreWorkspace();
|
||||
remoteRuntimeRootDir = preparedExecutionTargetRuntime.runtimeRootDir;
|
||||
const managedHome = adapterExecutionTargetUsesManagedHome(executionTarget);
|
||||
if (managedHome && preparedExecutionTargetRuntime.runtimeRootDir) {
|
||||
preparedRuntimeConfig.env.HOME = preparedExecutionTargetRuntime.runtimeRootDir;
|
||||
@@ -308,6 +316,28 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
);
|
||||
}
|
||||
}
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
runtimeRootDir: remoteRuntimeRootDir,
|
||||
adapterKey: "opencode",
|
||||
hostApiToken: preparedRuntimeConfig.env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
if (paperclipBridge) {
|
||||
Object.assign(preparedRuntimeConfig.env, paperclipBridge.env);
|
||||
loggedEnv = buildInvocationEnvForLogs(preparedRuntimeConfig.env, {
|
||||
runtimeEnv: Object.fromEntries(
|
||||
Object.entries(ensurePathInEnv({ ...process.env, ...preparedRuntimeConfig.env })).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
),
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const runtimeSessionParams = parseObject(runtime.sessionParams);
|
||||
const runtimeSessionId = asString(runtimeSessionParams.sessionId, runtime.sessionId ?? "");
|
||||
@@ -535,6 +565,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
return toResult(initial);
|
||||
} finally {
|
||||
await Promise.all([
|
||||
paperclipBridge?.stop(),
|
||||
restoreRemoteWorkspace?.(),
|
||||
localSkillsDir ? fs.rm(path.dirname(localSkillsDir), { recursive: true, force: true }).catch(() => undefined) : Promise.resolve(),
|
||||
]);
|
||||
|
||||
@@ -8,11 +8,15 @@ import {
|
||||
asString,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
ensureAbsoluteDirectory,
|
||||
ensureCommandResolvable,
|
||||
ensurePathInEnv,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetDirectory,
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import { discoverOpenCodeModels, ensureOpenCodeModelConfiguredAndAvailable } from "./models.js";
|
||||
import { parseOpenCodeJsonl } from "./parse.js";
|
||||
import { prepareOpenCodeRuntimeConfig } from "./runtime-config.js";
|
||||
@@ -58,10 +62,28 @@ export async function testEnvironment(
|
||||
const checks: AdapterEnvironmentCheck[] = [];
|
||||
const config = parseObject(ctx.config);
|
||||
const command = asString(config.command, "opencode");
|
||||
const cwd = asString(config.cwd, process.cwd());
|
||||
const target = ctx.executionTarget ?? null;
|
||||
const targetIsRemote = target?.kind === "remote";
|
||||
const cwd = resolveAdapterExecutionTargetCwd(target, asString(config.cwd, ""), process.cwd());
|
||||
const targetLabel = targetIsRemote
|
||||
? ctx.environmentName ?? describeAdapterExecutionTarget(target)
|
||||
: null;
|
||||
const runId = `opencode-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`;
|
||||
|
||||
if (targetLabel) {
|
||||
checks.push({
|
||||
code: "opencode_environment_target",
|
||||
level: "info",
|
||||
message: `Probing inside environment: ${targetLabel}`,
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: false });
|
||||
await ensureAdapterExecutionTargetDirectory(runId, target, cwd, {
|
||||
cwd,
|
||||
env: {},
|
||||
createIfMissing: false,
|
||||
});
|
||||
checks.push({
|
||||
code: "opencode_cwd_valid",
|
||||
level: "info",
|
||||
@@ -115,7 +137,7 @@ export async function testEnvironment(
|
||||
});
|
||||
} else {
|
||||
try {
|
||||
await ensureCommandResolvable(command, cwd, runtimeEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, target, cwd, runtimeEnv);
|
||||
checks.push({
|
||||
code: "opencode_command_resolvable",
|
||||
level: "info",
|
||||
@@ -137,7 +159,19 @@ export async function testEnvironment(
|
||||
let modelValidationPassed = false;
|
||||
const configuredModel = asString(config.model, "").trim();
|
||||
|
||||
if (canRunProbe && configuredModel) {
|
||||
// Model discovery and validation use local child processes against
|
||||
// OpenCode's `models` subcommand and JSON config; these are not yet
|
||||
// wired through the execution target. When probing a remote env, skip
|
||||
// discovery/validation and rely on the remote hello probe to surface
|
||||
// model/auth issues directly.
|
||||
if (targetIsRemote && configuredModel) {
|
||||
checks.push({
|
||||
code: "opencode_model_validation_skipped_remote",
|
||||
level: "info",
|
||||
message: `Skipped local model validation; will be validated by the hello probe inside ${targetLabel}.`,
|
||||
});
|
||||
modelValidationPassed = true;
|
||||
} else if (canRunProbe && configuredModel) {
|
||||
try {
|
||||
const discovered = await discoverOpenCodeModels({ command, cwd, env: runtimeEnv });
|
||||
if (discovered.length > 0) {
|
||||
@@ -173,7 +207,7 @@ export async function testEnvironment(
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if (canRunProbe && !configuredModel) {
|
||||
} else if (!targetIsRemote && canRunProbe && !configuredModel) {
|
||||
try {
|
||||
const discovered = await discoverOpenCodeModels({ command, cwd, env: runtimeEnv });
|
||||
if (discovered.length > 0) {
|
||||
@@ -207,7 +241,7 @@ export async function testEnvironment(
|
||||
const modelUnavailable = checks.some((check) => check.code === "opencode_hello_probe_model_unavailable");
|
||||
if (!configuredModel && !modelUnavailable) {
|
||||
// No model configured – skip model requirement if no model-related checks exist
|
||||
} else if (configuredModel && canRunProbe) {
|
||||
} else if (!targetIsRemote && configuredModel && canRunProbe) {
|
||||
try {
|
||||
await ensureOpenCodeModelConfiguredAndAvailable({
|
||||
model: configuredModel,
|
||||
@@ -246,8 +280,9 @@ export async function testEnvironment(
|
||||
if (extraArgs.length > 0) args.push(...extraArgs);
|
||||
|
||||
try {
|
||||
const probe = await runChildProcess(
|
||||
`opencode-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`,
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
args,
|
||||
{
|
||||
|
||||
@@ -54,8 +54,6 @@ export function buildOpenCodeLocalConfig(v: CreateConfigValues): Record<string,
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
if (v.model) ac.model = v.model;
|
||||
if (v.thinkingEffort) ac.variant = v.thinkingEffort;
|
||||
ac.dangerouslySkipPermissions = v.dangerouslySkipPermissions;
|
||||
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetFile,
|
||||
@@ -17,12 +18,14 @@ import {
|
||||
readAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import {
|
||||
asString,
|
||||
asNumber,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
joinPromptSections,
|
||||
buildInvocationEnvForLogs,
|
||||
@@ -228,12 +231,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
if (workspaceCwd) env.PAPERCLIP_WORKSPACE_CWD = workspaceCwd;
|
||||
if (workspaceSource) env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
if (workspaceId) env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
if (workspaceRepoUrl) env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
|
||||
if (workspaceRepoRef) env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
|
||||
if (agentHome) env.AGENT_HOME = agentHome;
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
const targetPaperclipApiUrl = adapterExecutionTargetPaperclipApiUrl(executionTarget);
|
||||
if (targetPaperclipApiUrl) env.PAPERCLIP_API_URL = targetPaperclipApiUrl;
|
||||
@@ -275,7 +280,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
const loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
let loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv,
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
@@ -301,6 +306,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
let remoteRuntimeRootDir: string | null = null;
|
||||
let localSkillsDir: string | null = null;
|
||||
let remoteSkillsDir: string | null = null;
|
||||
let paperclipBridge: Awaited<ReturnType<typeof startAdapterExecutionTargetPaperclipBridge>> = null;
|
||||
|
||||
if (executionTargetIsRemote) {
|
||||
try {
|
||||
@@ -335,6 +341,28 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
runtimeRootDir: remoteRuntimeRootDir,
|
||||
adapterKey: "pi",
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
if (paperclipBridge) {
|
||||
Object.assign(env, paperclipBridge.env);
|
||||
loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv: Object.fromEntries(
|
||||
Object.entries(ensurePathInEnv({ ...process.env, ...env })).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
),
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const runtimeSessionParams = parseObject(runtime.sessionParams);
|
||||
const runtimeSessionId = asString(runtimeSessionParams.sessionId, runtime.sessionId ?? "");
|
||||
@@ -651,6 +679,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
return toResult(initial);
|
||||
} finally {
|
||||
await Promise.all([
|
||||
paperclipBridge?.stop(),
|
||||
restoreRemoteWorkspace?.(),
|
||||
localSkillsDir ? fs.rm(path.dirname(localSkillsDir), { recursive: true, force: true }).catch(() => undefined) : Promise.resolve(),
|
||||
]);
|
||||
|
||||
@@ -6,14 +6,18 @@ import type {
|
||||
import {
|
||||
asString,
|
||||
parseObject,
|
||||
ensureAbsoluteDirectory,
|
||||
ensureCommandResolvable,
|
||||
ensurePathInEnv,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
asStringArray,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetDirectory,
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import { discoverPiModelsCached } from "./models.js";
|
||||
import { parsePiJsonl } from "./parse.js";
|
||||
|
||||
@@ -78,10 +82,28 @@ export async function testEnvironment(
|
||||
const checks: AdapterEnvironmentCheck[] = [];
|
||||
const config = parseObject(ctx.config);
|
||||
const command = asString(config.command, "pi");
|
||||
const cwd = asString(config.cwd, process.cwd());
|
||||
const target = ctx.executionTarget ?? null;
|
||||
const targetIsRemote = target?.kind === "remote";
|
||||
const cwd = resolveAdapterExecutionTargetCwd(target, asString(config.cwd, ""), process.cwd());
|
||||
const targetLabel = targetIsRemote
|
||||
? ctx.environmentName ?? describeAdapterExecutionTarget(target)
|
||||
: null;
|
||||
const runId = `pi-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`;
|
||||
|
||||
if (targetLabel) {
|
||||
checks.push({
|
||||
code: "pi_environment_target",
|
||||
level: "info",
|
||||
message: `Probing inside environment: ${targetLabel}`,
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: false });
|
||||
await ensureAdapterExecutionTargetDirectory(runId, target, cwd, {
|
||||
cwd,
|
||||
env: {},
|
||||
createIfMissing: false,
|
||||
});
|
||||
checks.push({
|
||||
code: "pi_cwd_valid",
|
||||
level: "info",
|
||||
@@ -113,7 +135,7 @@ export async function testEnvironment(
|
||||
});
|
||||
} else {
|
||||
try {
|
||||
await ensureCommandResolvable(command, cwd, runtimeEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, target, cwd, runtimeEnv);
|
||||
checks.push({
|
||||
code: "pi_command_resolvable",
|
||||
level: "info",
|
||||
@@ -132,7 +154,10 @@ export async function testEnvironment(
|
||||
const canRunProbe =
|
||||
checks.every((check) => check.code !== "pi_cwd_invalid" && check.code !== "pi_command_unresolvable");
|
||||
|
||||
if (canRunProbe) {
|
||||
// Pi model discovery shells out to `pi --list-models` locally; when probing a
|
||||
// remote target we skip discovery and let the remote hello probe surface
|
||||
// model/auth issues directly.
|
||||
if (!targetIsRemote && canRunProbe) {
|
||||
try {
|
||||
const discovered = await discoverPiModelsCached({ command, cwd, env: runtimeEnv });
|
||||
if (discovered.length > 0) {
|
||||
@@ -166,6 +191,12 @@ export async function testEnvironment(
|
||||
message: "Pi requires a configured model in provider/model format.",
|
||||
hint: "Set adapterConfig.model using an ID from `pi --list-models`.",
|
||||
});
|
||||
} else if (targetIsRemote) {
|
||||
checks.push({
|
||||
code: "pi_model_validation_skipped_remote",
|
||||
level: "info",
|
||||
message: `Skipped local model validation; will be validated by the hello probe inside ${targetLabel}.`,
|
||||
});
|
||||
} else if (canRunProbe) {
|
||||
// Verify model is in the list
|
||||
try {
|
||||
@@ -218,8 +249,9 @@ export async function testEnvironment(
|
||||
if (extraArgs.length > 0) args.push(...extraArgs);
|
||||
|
||||
try {
|
||||
const probe = await runChildProcess(
|
||||
`pi-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`,
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
args,
|
||||
{
|
||||
|
||||
@@ -47,8 +47,6 @@ export function buildPiLocalConfig(v: CreateConfigValues): Record<string, unknow
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
if (v.model) ac.model = v.model;
|
||||
if (v.thinkingEffort) ac.thinking = v.thinkingEffort;
|
||||
|
||||
|
||||
@@ -182,7 +182,93 @@ describeEmbeddedPostgres("runDatabaseBackup", () => {
|
||||
);
|
||||
|
||||
it(
|
||||
"restores statements incrementally when backup comments precede the first breakpoint",
|
||||
"backs up and restores non-public database schemas and migration history",
|
||||
async () => {
|
||||
const sourceConnectionString = await createTempDatabase();
|
||||
const restoreConnectionString = await createSiblingDatabase(
|
||||
sourceConnectionString,
|
||||
"paperclip_full_logical_restore_target",
|
||||
);
|
||||
const backupDir = createTempDir("paperclip-db-full-logical-backup-");
|
||||
const sourceSql = postgres(sourceConnectionString, { max: 1, onnotice: () => {} });
|
||||
const restoreSql = postgres(restoreConnectionString, { max: 1, onnotice: () => {} });
|
||||
|
||||
try {
|
||||
await sourceSql.unsafe(`
|
||||
CREATE SCHEMA IF NOT EXISTS "drizzle";
|
||||
CREATE TABLE IF NOT EXISTS "drizzle"."__drizzle_migrations" (
|
||||
"id" serial PRIMARY KEY,
|
||||
"hash" text NOT NULL,
|
||||
"created_at" bigint
|
||||
);
|
||||
INSERT INTO "drizzle"."__drizzle_migrations" ("hash", "created_at")
|
||||
VALUES ('paperclip-migration-history', 1770000000000);
|
||||
`);
|
||||
await sourceSql.unsafe(`
|
||||
CREATE TABLE "public"."backup_parent_records" (
|
||||
"id" uuid PRIMARY KEY,
|
||||
"name" text NOT NULL
|
||||
);
|
||||
INSERT INTO "public"."backup_parent_records" ("id", "name")
|
||||
VALUES ('11111111-1111-4111-8111-111111111111', 'parent');
|
||||
`);
|
||||
await sourceSql.unsafe(`
|
||||
CREATE SCHEMA "plugin_backup_scope";
|
||||
CREATE TYPE "plugin_backup_scope"."plugin_status" AS ENUM ('ready', 'done');
|
||||
CREATE TABLE "plugin_backup_scope"."plugin_rows" (
|
||||
"id" serial PRIMARY KEY,
|
||||
"parent_id" uuid NOT NULL REFERENCES "public"."backup_parent_records"("id") ON DELETE CASCADE,
|
||||
"status" "plugin_backup_scope"."plugin_status" NOT NULL,
|
||||
"note" text NOT NULL
|
||||
);
|
||||
CREATE UNIQUE INDEX "plugin_rows_note_uq" ON "plugin_backup_scope"."plugin_rows" ("note");
|
||||
INSERT INTO "plugin_backup_scope"."plugin_rows" ("parent_id", "status", "note")
|
||||
VALUES ('11111111-1111-4111-8111-111111111111', 'ready', 'first');
|
||||
`);
|
||||
|
||||
const result = await runDatabaseBackup({
|
||||
connectionString: sourceConnectionString,
|
||||
backupDir,
|
||||
retention: { dailyDays: 7, weeklyWeeks: 4, monthlyMonths: 1 },
|
||||
filenamePrefix: "paperclip-full-logical-test",
|
||||
backupEngine: "javascript",
|
||||
});
|
||||
|
||||
await runDatabaseRestore({
|
||||
connectionString: restoreConnectionString,
|
||||
backupFile: result.backupFile,
|
||||
});
|
||||
|
||||
const migrationRows = await restoreSql.unsafe<{ hash: string }[]>(`
|
||||
SELECT "hash"
|
||||
FROM "drizzle"."__drizzle_migrations"
|
||||
WHERE "hash" = 'paperclip-migration-history'
|
||||
`);
|
||||
expect(migrationRows).toEqual([{ hash: "paperclip-migration-history" }]);
|
||||
|
||||
const pluginRows = await restoreSql.unsafe<{ note: string; status: string; parent_name: string }[]>(`
|
||||
SELECT r."note", r."status"::text AS "status", p."name" AS "parent_name"
|
||||
FROM "plugin_backup_scope"."plugin_rows" r
|
||||
JOIN "public"."backup_parent_records" p ON p."id" = r."parent_id"
|
||||
`);
|
||||
expect(pluginRows).toEqual([{ note: "first", status: "ready", parent_name: "parent" }]);
|
||||
|
||||
await expect(
|
||||
restoreSql.unsafe(`
|
||||
INSERT INTO "plugin_backup_scope"."plugin_rows" ("parent_id", "status", "note")
|
||||
VALUES ('11111111-1111-4111-8111-111111111111', 'done', 'first')
|
||||
`),
|
||||
).rejects.toThrow();
|
||||
} finally {
|
||||
await sourceSql.end();
|
||||
await restoreSql.end();
|
||||
}
|
||||
},
|
||||
60_000,
|
||||
);
|
||||
|
||||
it(
|
||||
"restores legacy public-only backups without migration history",
|
||||
async () => {
|
||||
const restoreConnectionString = await createTempDatabase();
|
||||
const restoreSql = postgres(restoreConnectionString, { max: 1, onnotice: () => {} });
|
||||
|
||||
@@ -19,6 +19,11 @@ export type RunDatabaseBackupOptions = {
|
||||
retention: BackupRetentionPolicy;
|
||||
filenamePrefix?: string;
|
||||
connectTimeoutSeconds?: number;
|
||||
/**
|
||||
* @deprecated Migration-journal schemas are included with the normal backup
|
||||
* scope. This option is kept for compatibility and no longer changes backup
|
||||
* engine selection.
|
||||
*/
|
||||
includeMigrationJournal?: boolean;
|
||||
excludeTables?: string[];
|
||||
nullifyColumns?: Record<string, string[]>;
|
||||
@@ -61,8 +66,6 @@ type ExtensionDefinition = {
|
||||
schema_name: string;
|
||||
};
|
||||
|
||||
const DRIZZLE_SCHEMA = "drizzle";
|
||||
const DRIZZLE_MIGRATIONS_TABLE = "__drizzle_migrations";
|
||||
const DEFAULT_BACKUP_WRITE_BUFFER_BYTES = 1024 * 1024;
|
||||
const BACKUP_DATA_CURSOR_ROWS = 100;
|
||||
const BACKUP_CLI_STDERR_BYTES = 64 * 1024;
|
||||
@@ -229,9 +232,15 @@ function tableKey(schemaName: string, tableName: string): string {
|
||||
return `${schemaName}.${tableName}`;
|
||||
}
|
||||
|
||||
function nonSystemSchemaPredicate(identifier: string): string {
|
||||
return `${identifier} NOT IN ('pg_catalog', 'information_schema')
|
||||
AND ${identifier} NOT LIKE 'pg_toast%'
|
||||
AND ${identifier} NOT LIKE 'pg_temp_%'
|
||||
AND ${identifier} NOT LIKE 'pg_toast_temp_%'`;
|
||||
}
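For reference, this is what the predicate expands to when interpolated via sql.unsafe; the expected string below is derived directly from the function body above, with the identifier chosen by the caller:

const predicate = nonSystemSchemaPredicate("table_schema");
// predicate ===
//   `table_schema NOT IN ('pg_catalog', 'information_schema')
//    AND table_schema NOT LIKE 'pg_toast%'
//    AND table_schema NOT LIKE 'pg_temp_%'
//    AND table_schema NOT LIKE 'pg_toast_temp_%'`
// Every user-created schema (public, drizzle, plugin schemas, ...) therefore
// stays inside the backup scope while system and TOAST/temp schemas are excluded.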
|
||||
|
||||
function hasBackupTransforms(opts: RunDatabaseBackupOptions): boolean {
|
||||
return opts.includeMigrationJournal === true ||
|
||||
(opts.excludeTables?.length ?? 0) > 0 ||
|
||||
return (opts.excludeTables?.length ?? 0) > 0 ||
|
||||
Object.keys(opts.nullifyColumns ?? {}).length > 0;
|
||||
}
|
||||
|
||||
@@ -285,7 +294,6 @@ async function runPgDumpBackup(opts: {
|
||||
"--if-exists",
|
||||
"--no-owner",
|
||||
"--no-privileges",
|
||||
"--schema=public",
|
||||
],
|
||||
{
|
||||
stdio: ["ignore", "pipe", "pipe"],
|
||||
@@ -484,7 +492,6 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
const connectTimeout = Math.max(1, Math.trunc(opts.connectTimeoutSeconds ?? 5));
|
||||
const backupEngine = opts.backupEngine ?? "auto";
|
||||
const canUsePgDump = !hasBackupTransforms(opts);
|
||||
const includeMigrationJournal = opts.includeMigrationJournal === true;
|
||||
const excludedTableNames = normalizeTableNameSet(opts.excludeTables);
|
||||
const nullifiedColumnsByTable = normalizeNullifyColumnMap(opts.nullifyColumns);
|
||||
let sql = postgres(opts.connectionString, { max: 1, connect_timeout: connectTimeout });
|
||||
@@ -552,31 +559,24 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
SELECT table_schema AS schema_name, table_name AS tablename
|
||||
FROM information_schema.tables
|
||||
WHERE table_type = 'BASE TABLE'
|
||||
AND (
|
||||
table_schema = 'public'
|
||||
OR (${includeMigrationJournal}::boolean AND table_schema = ${DRIZZLE_SCHEMA} AND table_name = ${DRIZZLE_MIGRATIONS_TABLE})
|
||||
)
|
||||
AND ${sql.unsafe(nonSystemSchemaPredicate("table_schema"))}
|
||||
ORDER BY table_schema, table_name
|
||||
`;
|
||||
const tables = allTables;
|
||||
const includedTableNames = new Set(tables.map(({ schema_name, tablename }) => tableKey(schema_name, tablename)));
|
||||
const includedSchemas = new Set(tables.map(({ schema_name }) => schema_name));
|
||||
|
||||
// Get all enums
|
||||
const enums = await sql<{ typname: string; labels: string[] }[]>`
|
||||
SELECT t.typname, array_agg(e.enumlabel ORDER BY e.enumsortorder) AS labels
|
||||
const enums = await sql<{ schema_name: string; typname: string; labels: string[] }[]>`
|
||||
SELECT n.nspname AS schema_name, t.typname, array_agg(e.enumlabel ORDER BY e.enumsortorder) AS labels
|
||||
FROM pg_type t
|
||||
JOIN pg_enum e ON t.oid = e.enumtypid
|
||||
JOIN pg_namespace n ON t.typnamespace = n.oid
|
||||
WHERE n.nspname = 'public'
|
||||
GROUP BY t.typname
|
||||
ORDER BY t.typname
|
||||
WHERE ${sql.unsafe(nonSystemSchemaPredicate("n.nspname"))}
|
||||
GROUP BY n.nspname, t.typname
|
||||
ORDER BY n.nspname, t.typname
|
||||
`;
|
||||
|
||||
for (const e of enums) {
|
||||
const labels = e.labels.map((l) => `'${l.replace(/'/g, "''")}'`).join(", ");
|
||||
emitStatement(`CREATE TYPE "public"."${e.typname}" AS ENUM (${labels});`);
|
||||
}
|
||||
if (enums.length > 0) emit("");
|
||||
for (const e of enums) includedSchemas.add(e.schema_name);
|
||||
|
||||
const allSequences = await sql<SequenceDefinition[]>`
|
||||
SELECT
|
||||
@@ -598,15 +598,14 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
LEFT JOIN pg_class tbl ON tbl.oid = dep.refobjid
|
||||
LEFT JOIN pg_namespace tblns ON tblns.oid = tbl.relnamespace
|
||||
LEFT JOIN pg_attribute attr ON attr.attrelid = tbl.oid AND attr.attnum = dep.refobjsubid
|
||||
WHERE s.sequence_schema = 'public'
|
||||
OR (${includeMigrationJournal}::boolean AND s.sequence_schema = ${DRIZZLE_SCHEMA})
|
||||
WHERE ${sql.unsafe(nonSystemSchemaPredicate("s.sequence_schema"))}
|
||||
ORDER BY s.sequence_schema, s.sequence_name
|
||||
`;
|
||||
const sequences = allSequences.filter(
|
||||
(seq) => !seq.owner_table || includedTableNames.has(tableKey(seq.owner_schema ?? "public", seq.owner_table)),
|
||||
);
|
||||
|
||||
const schemas = new Set<string>();
|
||||
const schemas = new Set<string>(includedSchemas);
|
||||
for (const table of tables) schemas.add(table.schema_name);
|
||||
for (const seq of sequences) schemas.add(seq.sequence_schema);
|
||||
const extraSchemas = [...schemas].filter((schemaName) => schemaName !== "public");
|
||||
@@ -618,6 +617,12 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
emit("");
|
||||
}
|
||||
|
||||
for (const e of enums) {
|
||||
const labels = e.labels.map((l) => `'${l.replace(/'/g, "''")}'`).join(", ");
|
||||
emitStatement(`CREATE TYPE ${quoteQualifiedName(e.schema_name, e.typname)} AS ENUM (${labels});`);
|
||||
}
|
||||
if (enums.length > 0) emit("");
|
||||
|
||||
const extensions = await sql<ExtensionDefinition[]>`
|
||||
SELECT
|
||||
e.extname AS extension_name,
|
||||
@@ -655,6 +660,7 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
const columns = await sql<{
|
||||
column_name: string;
|
||||
data_type: string;
|
||||
udt_schema: string;
|
||||
udt_name: string;
|
||||
is_nullable: string;
|
||||
column_default: string | null;
|
||||
@@ -662,7 +668,7 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
numeric_precision: number | null;
|
||||
numeric_scale: number | null;
|
||||
}[]>`
|
||||
SELECT column_name, data_type, udt_name, is_nullable, column_default,
|
||||
SELECT column_name, data_type, udt_schema, udt_name, is_nullable, column_default,
|
||||
character_maximum_length, numeric_precision, numeric_scale
|
||||
FROM information_schema.columns
|
||||
WHERE table_schema = ${schema_name} AND table_name = ${tablename}
|
||||
@@ -676,9 +682,12 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
for (const col of columns) {
|
||||
let typeStr: string;
|
||||
if (col.data_type === "USER-DEFINED") {
|
||||
typeStr = `"${col.udt_name}"`;
|
||||
typeStr = quoteQualifiedName(col.udt_schema, col.udt_name);
|
||||
} else if (col.data_type === "ARRAY") {
|
||||
typeStr = `${col.udt_name.replace(/^_/, "")}[]`;
|
||||
const elementType = col.udt_name.replace(/^_/, "");
|
||||
typeStr = col.udt_schema === "pg_catalog"
|
||||
? `${elementType}[]`
|
||||
: `${quoteQualifiedName(col.udt_schema, elementType)}[]`;
|
||||
} else if (col.data_type === "character varying") {
|
||||
typeStr = col.character_maximum_length
|
||||
? `varchar(${col.character_maximum_length})`
|
||||
@@ -761,10 +770,8 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
JOIN pg_namespace tgtn ON tgtn.oid = tgt.relnamespace
|
||||
JOIN pg_attribute sa ON sa.attrelid = src.oid AND sa.attnum = ANY(c.conkey)
|
||||
JOIN pg_attribute ta ON ta.attrelid = tgt.oid AND ta.attnum = ANY(c.confkey)
|
||||
WHERE c.contype = 'f' AND (
|
||||
srcn.nspname = 'public'
|
||||
OR (${includeMigrationJournal}::boolean AND srcn.nspname = ${DRIZZLE_SCHEMA})
|
||||
)
|
||||
WHERE c.contype = 'f'
|
||||
AND ${sql.unsafe(nonSystemSchemaPredicate("srcn.nspname"))}
|
||||
GROUP BY c.conname, srcn.nspname, src.relname, tgtn.nspname, tgt.relname, c.confupdtype, c.confdeltype
|
||||
ORDER BY srcn.nspname, src.relname, c.conname
|
||||
`;
|
||||
@@ -800,10 +807,8 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
JOIN pg_class t ON t.oid = c.conrelid
|
||||
JOIN pg_namespace n ON n.oid = t.relnamespace
|
||||
JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(c.conkey)
|
||||
WHERE c.contype = 'u' AND (
|
||||
n.nspname = 'public'
|
||||
OR (${includeMigrationJournal}::boolean AND n.nspname = ${DRIZZLE_SCHEMA})
|
||||
)
|
||||
WHERE c.contype = 'u'
|
||||
AND ${sql.unsafe(nonSystemSchemaPredicate("n.nspname"))}
|
||||
GROUP BY c.conname, n.nspname, t.relname
|
||||
ORDER BY n.nspname, t.relname, c.conname
|
||||
`;
|
||||
@@ -822,10 +827,7 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
const allIndexes = await sql<{ schema_name: string; tablename: string; indexdef: string }[]>`
|
||||
SELECT schemaname AS schema_name, tablename, indexdef
|
||||
FROM pg_indexes
|
||||
WHERE (
|
||||
schemaname = 'public'
|
||||
OR (${includeMigrationJournal}::boolean AND schemaname = ${DRIZZLE_SCHEMA})
|
||||
)
|
||||
WHERE ${sql.unsafe(nonSystemSchemaPredicate("schemaname"))}
|
||||
AND indexname NOT IN (
|
||||
SELECT conname FROM pg_constraint c
|
||||
JOIN pg_namespace n ON n.oid = c.connamespace
|
||||
|
||||
packages/db/src/migrations/0069_liveness_recovery_dedupe.sql (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "issues_active_liveness_recovery_incident_uq"
|
||||
ON "issues" USING btree ("company_id","origin_kind","origin_id")
|
||||
WHERE "origin_kind" = 'harness_liveness_escalation'
|
||||
AND "origin_id" IS NOT NULL
|
||||
AND "hidden_at" IS NULL
|
||||
AND "status" NOT IN ('done', 'cancelled');
|
||||
--> statement-breakpoint
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "issues_active_liveness_recovery_leaf_uq"
|
||||
ON "issues" USING btree ("company_id","origin_kind","origin_fingerprint")
|
||||
WHERE "origin_kind" = 'harness_liveness_escalation'
|
||||
AND "origin_fingerprint" <> 'default'
|
||||
AND "hidden_at" IS NULL
|
||||
AND "status" NOT IN ('done', 'cancelled');
|
||||
@@ -0,0 +1,70 @@
|
||||
ALTER TABLE "heartbeat_runs" ADD COLUMN IF NOT EXISTS "last_output_at" timestamp with time zone;
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "heartbeat_runs" ADD COLUMN IF NOT EXISTS "last_output_seq" integer DEFAULT 0 NOT NULL;
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "heartbeat_runs" ADD COLUMN IF NOT EXISTS "last_output_stream" text;
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "heartbeat_runs" ADD COLUMN IF NOT EXISTS "last_output_bytes" bigint;
|
||||
--> statement-breakpoint
|
||||
CREATE INDEX IF NOT EXISTS "heartbeat_runs_company_status_last_output_idx"
|
||||
ON "heartbeat_runs" USING btree ("company_id","status","last_output_at");
|
||||
--> statement-breakpoint
|
||||
CREATE INDEX IF NOT EXISTS "heartbeat_runs_company_status_process_started_idx"
|
||||
ON "heartbeat_runs" USING btree ("company_id","status","process_started_at");
|
||||
--> statement-breakpoint
|
||||
CREATE TABLE IF NOT EXISTS "heartbeat_run_watchdog_decisions" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"company_id" uuid NOT NULL,
|
||||
"run_id" uuid NOT NULL,
|
||||
"evaluation_issue_id" uuid,
|
||||
"decision" text NOT NULL,
|
||||
"snoozed_until" timestamp with time zone,
|
||||
"reason" text,
|
||||
"created_by_agent_id" uuid,
|
||||
"created_by_user_id" text,
|
||||
"created_by_run_id" uuid,
|
||||
"created_at" timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
--> statement-breakpoint
|
||||
DO $$ BEGIN
|
||||
ALTER TABLE "heartbeat_run_watchdog_decisions" ADD CONSTRAINT "heartbeat_run_watchdog_decisions_company_id_companies_id_fk" FOREIGN KEY ("company_id") REFERENCES "public"."companies"("id") ON DELETE no action ON UPDATE no action;
|
||||
EXCEPTION
|
||||
WHEN duplicate_object THEN null;
|
||||
END $$;
|
||||
--> statement-breakpoint
|
||||
DO $$ BEGIN
|
||||
ALTER TABLE "heartbeat_run_watchdog_decisions" ADD CONSTRAINT "heartbeat_run_watchdog_decisions_run_id_heartbeat_runs_id_fk" FOREIGN KEY ("run_id") REFERENCES "public"."heartbeat_runs"("id") ON DELETE cascade ON UPDATE no action;
|
||||
EXCEPTION
|
||||
WHEN duplicate_object THEN null;
|
||||
END $$;
|
||||
--> statement-breakpoint
|
||||
DO $$ BEGIN
|
||||
ALTER TABLE "heartbeat_run_watchdog_decisions" ADD CONSTRAINT "heartbeat_run_watchdog_decisions_evaluation_issue_id_issues_id_fk" FOREIGN KEY ("evaluation_issue_id") REFERENCES "public"."issues"("id") ON DELETE set null ON UPDATE no action;
|
||||
EXCEPTION
|
||||
WHEN duplicate_object THEN null;
|
||||
END $$;
|
||||
--> statement-breakpoint
|
||||
DO $$ BEGIN
|
||||
ALTER TABLE "heartbeat_run_watchdog_decisions" ADD CONSTRAINT "heartbeat_run_watchdog_decisions_created_by_agent_id_agents_id_fk" FOREIGN KEY ("created_by_agent_id") REFERENCES "public"."agents"("id") ON DELETE set null ON UPDATE no action;
|
||||
EXCEPTION
|
||||
WHEN duplicate_object THEN null;
|
||||
END $$;
|
||||
--> statement-breakpoint
|
||||
DO $$ BEGIN
|
||||
ALTER TABLE "heartbeat_run_watchdog_decisions" ADD CONSTRAINT "heartbeat_run_watchdog_decisions_created_by_run_id_heartbeat_runs_id_fk" FOREIGN KEY ("created_by_run_id") REFERENCES "public"."heartbeat_runs"("id") ON DELETE set null ON UPDATE no action;
|
||||
EXCEPTION
|
||||
WHEN duplicate_object THEN null;
|
||||
END $$;
|
||||
--> statement-breakpoint
|
||||
CREATE INDEX IF NOT EXISTS "heartbeat_run_watchdog_decisions_company_run_created_idx"
|
||||
ON "heartbeat_run_watchdog_decisions" USING btree ("company_id","run_id","created_at");
|
||||
--> statement-breakpoint
|
||||
CREATE INDEX IF NOT EXISTS "heartbeat_run_watchdog_decisions_company_run_snooze_idx"
|
||||
ON "heartbeat_run_watchdog_decisions" USING btree ("company_id","run_id","snoozed_until");
|
||||
--> statement-breakpoint
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "issues_active_stale_run_evaluation_uq"
|
||||
ON "issues" USING btree ("company_id","origin_kind","origin_id")
|
||||
WHERE "origin_kind" = 'stale_active_run_evaluation'
|
||||
AND "origin_id" IS NOT NULL
|
||||
AND "hidden_at" IS NULL
|
||||
AND "status" NOT IN ('done', 'cancelled');
|
||||
@@ -0,0 +1 @@
|
||||
ALTER TABLE "companies" ALTER COLUMN "require_board_approval_for_new_agents" SET DEFAULT false;
|
||||
packages/db/src/migrations/0072_large_sandman.sql (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "issues_active_stranded_issue_recovery_uq"
|
||||
ON "issues" USING btree ("company_id","origin_kind","origin_id")
|
||||
WHERE "origin_kind" = 'stranded_issue_recovery'
|
||||
AND "origin_id" IS NOT NULL
|
||||
AND "hidden_at" IS NULL
|
||||
AND "status" NOT IN ('done', 'cancelled');
|
||||
packages/db/src/migrations/0073_shiny_salo.sql (new file, 1 line)
@@ -0,0 +1 @@
|
||||
ALTER TABLE "companies" ADD COLUMN "attachment_max_bytes" integer DEFAULT 10485760 NOT NULL;
|
||||
packages/db/src/migrations/0074_striped_genesis.sql (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
CREATE UNIQUE INDEX "issues_active_productivity_review_uq" ON "issues" USING btree ("company_id","origin_kind","origin_id") WHERE "issues"."origin_kind" = 'issue_productivity_review'
|
||||
and "issues"."origin_id" is not null
|
||||
and "issues"."hidden_at" is null
|
||||
and "issues"."status" not in ('done', 'cancelled');
|
||||
packages/db/src/migrations/meta/0072_snapshot.json (new file, 15852 lines; diff suppressed because it is too large)
packages/db/src/migrations/meta/0073_snapshot.json (new file, 15859 lines; diff suppressed because it is too large)
packages/db/src/migrations/meta/0074_snapshot.json (new file, 15887 lines; diff suppressed because it is too large)
@@ -484,6 +484,48 @@
|
||||
"when": 1776959400000,
|
||||
"tag": "0068_environment_local_driver_unique",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 69,
|
||||
"version": "7",
|
||||
"when": 1776780003000,
|
||||
"tag": "0069_liveness_recovery_dedupe",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 70,
|
||||
"version": "7",
|
||||
"when": 1776780004000,
|
||||
"tag": "0070_active_run_output_watchdog",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 71,
|
||||
"version": "7",
|
||||
"when": 1777131234000,
|
||||
"tag": "0071_default_hire_approval_off",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 72,
|
||||
"version": "7",
|
||||
"when": 1777305216238,
|
||||
"tag": "0072_large_sandman",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 73,
|
||||
"version": "7",
|
||||
"when": 1777382021347,
|
||||
"tag": "0073_shiny_salo",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 74,
|
||||
"version": "7",
|
||||
"when": 1777384535070,
|
||||
"tag": "0074_striped_genesis",
|
||||
"breakpoints": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -13,9 +13,12 @@ export const companies = pgTable(
|
||||
issueCounter: integer("issue_counter").notNull().default(0),
|
||||
budgetMonthlyCents: integer("budget_monthly_cents").notNull().default(0),
|
||||
spentMonthlyCents: integer("spent_monthly_cents").notNull().default(0),
|
||||
attachmentMaxBytes: integer("attachment_max_bytes")
|
||||
.notNull()
|
||||
.default(10 * 1024 * 1024),
|
||||
requireBoardApprovalForNewAgents: boolean("require_board_approval_for_new_agents")
|
||||
.notNull()
|
||||
.default(true),
|
||||
.default(false),
|
||||
feedbackDataSharingEnabled: boolean("feedback_data_sharing_enabled")
|
||||
.notNull()
|
||||
.default(false),
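For reference, the schema default 10 * 1024 * 1024 evaluates to 10485760 bytes (10 MiB), which matches the column default written by migration 0073_shiny_salo.sql above.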
|
||||
|
||||
packages/db/src/schema/heartbeat_run_watchdog_decisions.ts (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
import { index, pgTable, text, timestamp, uuid } from "drizzle-orm/pg-core";
|
||||
import { agents } from "./agents.js";
|
||||
import { companies } from "./companies.js";
|
||||
import { heartbeatRuns } from "./heartbeat_runs.js";
|
||||
import { issues } from "./issues.js";
|
||||
|
||||
export const heartbeatRunWatchdogDecisions = pgTable(
|
||||
"heartbeat_run_watchdog_decisions",
|
||||
{
|
||||
id: uuid("id").primaryKey().defaultRandom(),
|
||||
companyId: uuid("company_id").notNull().references(() => companies.id),
|
||||
runId: uuid("run_id").notNull().references(() => heartbeatRuns.id, { onDelete: "cascade" }),
|
||||
evaluationIssueId: uuid("evaluation_issue_id").references(() => issues.id, { onDelete: "set null" }),
|
||||
decision: text("decision").notNull(),
|
||||
snoozedUntil: timestamp("snoozed_until", { withTimezone: true }),
|
||||
reason: text("reason"),
|
||||
createdByAgentId: uuid("created_by_agent_id").references(() => agents.id, { onDelete: "set null" }),
|
||||
createdByUserId: text("created_by_user_id"),
|
||||
createdByRunId: uuid("created_by_run_id").references(() => heartbeatRuns.id, { onDelete: "set null" }),
|
||||
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
|
||||
},
|
||||
(table) => ({
|
||||
companyRunCreatedIdx: index("heartbeat_run_watchdog_decisions_company_run_created_idx").on(
|
||||
table.companyId,
|
||||
table.runId,
|
||||
table.createdAt,
|
||||
),
|
||||
companyRunSnoozeIdx: index("heartbeat_run_watchdog_decisions_company_run_snooze_idx").on(
|
||||
table.companyId,
|
||||
table.runId,
|
||||
table.snoozedUntil,
|
||||
),
|
||||
}),
|
||||
);
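A hedged usage sketch for the new table; the db handle, the surrounding identifiers, and the "snooze" decision value are illustrative assumptions rather than values taken from the diff:

await db.insert(heartbeatRunWatchdogDecisions).values({
  companyId,                                            // uuid of the company being watched
  runId,                                                // heartbeat run the decision applies to
  decision: "snooze",                                   // free-form text column
  snoozedUntil: new Date(Date.now() + 30 * 60 * 1000),  // re-evaluate in 30 minutes
  reason: "Run is still producing output",
});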
|
||||
@@ -34,6 +34,10 @@ export const heartbeatRuns = pgTable(
|
||||
processPid: integer("process_pid"),
|
||||
processGroupId: integer("process_group_id"),
|
||||
processStartedAt: timestamp("process_started_at", { withTimezone: true }),
|
||||
lastOutputAt: timestamp("last_output_at", { withTimezone: true }),
|
||||
lastOutputSeq: integer("last_output_seq").notNull().default(0),
|
||||
lastOutputStream: text("last_output_stream"),
|
||||
lastOutputBytes: bigint("last_output_bytes", { mode: "number" }),
|
||||
retryOfRunId: uuid("retry_of_run_id").references((): AnyPgColumn => heartbeatRuns.id, {
|
||||
onDelete: "set null",
|
||||
}),
|
||||
@@ -64,5 +68,15 @@ export const heartbeatRuns = pgTable(
|
||||
table.livenessState,
|
||||
table.createdAt,
|
||||
),
|
||||
companyStatusLastOutputIdx: index("heartbeat_runs_company_status_last_output_idx").on(
|
||||
table.companyId,
|
||||
table.status,
|
||||
table.lastOutputAt,
|
||||
),
|
||||
companyStatusProcessStartedIdx: index("heartbeat_runs_company_status_process_started_idx").on(
|
||||
table.companyId,
|
||||
table.status,
|
||||
table.processStartedAt,
|
||||
),
|
||||
}),
|
||||
);
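The new last_output columns and the (company_id, status, last_output_at) index support a watchdog query along these lines; the 15-minute threshold and the "running" status value are assumptions for illustration only:

import { and, eq, lt } from "drizzle-orm";

// Runs that are still marked active but have produced no output recently.
const silentSince = new Date(Date.now() - 15 * 60 * 1000);
const staleRuns = await db
  .select()
  .from(heartbeatRuns)
  .where(
    and(
      eq(heartbeatRuns.companyId, companyId),
      eq(heartbeatRuns.status, "running"),
      lt(heartbeatRuns.lastOutputAt, silentSince),
    ),
  );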
|
||||
|
||||
@@ -53,6 +53,7 @@ export { documentRevisions } from "./document_revisions.js";
|
||||
export { issueDocuments } from "./issue_documents.js";
|
||||
export { heartbeatRuns } from "./heartbeat_runs.js";
|
||||
export { heartbeatRunEvents } from "./heartbeat_run_events.js";
|
||||
export { heartbeatRunWatchdogDecisions } from "./heartbeat_run_watchdog_decisions.js";
|
||||
export { costEvents } from "./cost_events.js";
|
||||
export { financeEvents } from "./finance_events.js";
|
||||
export { approvals } from "./approvals.js";
|
||||
|
||||
@@ -91,5 +91,45 @@ export const issues = pgTable(
|
||||
and ${table.executionRunId} is not null
|
||||
and ${table.status} in ('backlog', 'todo', 'in_progress', 'in_review', 'blocked')`,
|
||||
),
|
||||
activeLivenessRecoveryIncidentIdx: uniqueIndex("issues_active_liveness_recovery_incident_uq")
|
||||
.on(table.companyId, table.originKind, table.originId)
|
||||
.where(
|
||||
sql`${table.originKind} = 'harness_liveness_escalation'
|
||||
and ${table.originId} is not null
|
||||
and ${table.hiddenAt} is null
|
||||
and ${table.status} not in ('done', 'cancelled')`,
|
||||
),
|
||||
activeLivenessRecoveryLeafIdx: uniqueIndex("issues_active_liveness_recovery_leaf_uq")
|
||||
.on(table.companyId, table.originKind, table.originFingerprint)
|
||||
.where(
|
||||
sql`${table.originKind} = 'harness_liveness_escalation'
|
||||
and ${table.originFingerprint} <> 'default'
|
||||
and ${table.hiddenAt} is null
|
||||
and ${table.status} not in ('done', 'cancelled')`,
|
||||
),
|
||||
activeStaleRunEvaluationIdx: uniqueIndex("issues_active_stale_run_evaluation_uq")
|
||||
.on(table.companyId, table.originKind, table.originId)
|
||||
.where(
|
||||
sql`${table.originKind} = 'stale_active_run_evaluation'
|
||||
and ${table.originId} is not null
|
||||
and ${table.hiddenAt} is null
|
||||
and ${table.status} not in ('done', 'cancelled')`,
|
||||
),
|
||||
activeProductivityReviewIdx: uniqueIndex("issues_active_productivity_review_uq")
|
||||
.on(table.companyId, table.originKind, table.originId)
|
||||
.where(
|
||||
sql`${table.originKind} = 'issue_productivity_review'
|
||||
and ${table.originId} is not null
|
||||
and ${table.hiddenAt} is null
|
||||
and ${table.status} not in ('done', 'cancelled')`,
|
||||
),
|
||||
activeStrandedIssueRecoveryIdx: uniqueIndex("issues_active_stranded_issue_recovery_uq")
|
||||
.on(table.companyId, table.originKind, table.originId)
|
||||
.where(
|
||||
sql`${table.originKind} = 'stranded_issue_recovery'
|
||||
and ${table.originId} is not null
|
||||
and ${table.hiddenAt} is null
|
||||
and ${table.status} not in ('done', 'cancelled')`,
|
||||
),
|
||||
}),
|
||||
);
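Because each of these partial unique indexes only covers active, non-hidden issues, a recovery job that fires twice for the same origin can lean on the database for deduplication. A hedged sketch; the column values beyond the indexed (companyId, originKind, originId) triple are assumptions:

await db
  .insert(issues)
  .values({
    companyId,
    originKind: "harness_liveness_escalation",
    originId: incidentId,
    title: "Recover unresponsive run",
    status: "todo",
  })
  .onConflictDoNothing(); // a second active issue for the same origin becomes a no-op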
|
||||
|
||||
@@ -33,77 +33,56 @@ export type EmbeddedPostgresTestDatabase = {
|
||||
|
||||
let embeddedPostgresSupportPromise: Promise<EmbeddedPostgresTestSupport> | null = null;
|
||||
|
||||
const DEFAULT_PAPERCLIP_EMBEDDED_POSTGRES_PORT = 54329;
|
||||
|
||||
function getReservedTestPorts(): Set<number> {
|
||||
const configuredPorts = [
|
||||
DEFAULT_PAPERCLIP_EMBEDDED_POSTGRES_PORT,
|
||||
Number.parseInt(process.env.PAPERCLIP_EMBEDDED_POSTGRES_PORT ?? "", 10),
|
||||
...String(process.env.PAPERCLIP_TEST_POSTGRES_RESERVED_PORTS ?? "")
|
||||
.split(",")
|
||||
.map((value) => Number.parseInt(value.trim(), 10)),
|
||||
];
|
||||
return new Set(configuredPorts.filter((port) => Number.isInteger(port) && port > 0 && port <= 65535));
|
||||
}
|
||||
|
||||
async function getEmbeddedPostgresCtor(): Promise<EmbeddedPostgresCtor> {
|
||||
const mod = await import("embedded-postgres");
|
||||
return mod.default as EmbeddedPostgresCtor;
|
||||
}
|
||||
|
||||
async function getAvailablePort(): Promise<number> {
|
||||
return await new Promise((resolve, reject) => {
|
||||
const server = net.createServer();
|
||||
server.unref();
|
||||
server.on("error", reject);
|
||||
server.listen(0, "127.0.0.1", () => {
|
||||
const address = server.address();
|
||||
if (!address || typeof address === "string") {
|
||||
server.close(() => reject(new Error("Failed to allocate test port")));
|
||||
return;
|
||||
}
|
||||
const { port } = address;
|
||||
server.close((error) => {
|
||||
if (error) reject(error);
|
||||
else resolve(port);
|
||||
const reservedPorts = getReservedTestPorts();
|
||||
for (let attempt = 0; attempt < 20; attempt += 1) {
|
||||
const port = await new Promise<number>((resolve, reject) => {
|
||||
const server = net.createServer();
|
||||
server.unref();
|
||||
server.on("error", reject);
|
||||
server.listen(0, "127.0.0.1", () => {
|
||||
const address = server.address();
|
||||
if (!address || typeof address === "string") {
|
||||
server.close(() => reject(new Error("Failed to allocate test port")));
|
||||
return;
|
||||
}
|
||||
const { port } = address;
|
||||
server.close((error) => {
|
||||
if (error) reject(error);
|
||||
else resolve(port);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function formatEmbeddedPostgresError(error: unknown): string {
|
||||
if (error instanceof Error && error.message.length > 0) return error.message;
|
||||
if (typeof error === "string" && error.length > 0) return error;
|
||||
return "embedded Postgres startup failed";
|
||||
}
|
||||
|
||||
async function probeEmbeddedPostgresSupport(): Promise<EmbeddedPostgresTestSupport> {
|
||||
const dataDir = fs.mkdtempSync(path.join(os.tmpdir(), "paperclip-embedded-postgres-probe-"));
|
||||
const port = await getAvailablePort();
|
||||
const EmbeddedPostgres = await getEmbeddedPostgresCtor();
|
||||
const instance = new EmbeddedPostgres({
|
||||
databaseDir: dataDir,
|
||||
user: "paperclip",
|
||||
password: "paperclip",
|
||||
port,
|
||||
persistent: true,
|
||||
initdbFlags: ["--encoding=UTF8", "--locale=C", "--lc-messages=C"],
|
||||
onLog: () => {},
|
||||
onError: () => {},
|
||||
});
|
||||
|
||||
try {
|
||||
await instance.initialise();
|
||||
await instance.start();
|
||||
return { supported: true };
|
||||
} catch (error) {
|
||||
return {
|
||||
supported: false,
|
||||
reason: formatEmbeddedPostgresError(error),
|
||||
};
|
||||
} finally {
|
||||
await instance.stop().catch(() => {});
|
||||
fs.rmSync(dataDir, { recursive: true, force: true });
|
||||
if (!reservedPorts.has(port)) return port;
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`Failed to allocate embedded Postgres test port outside reserved Paperclip ports: ${[
|
||||
...reservedPorts,
|
||||
].join(", ")}`,
|
||||
);
|
||||
}
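A hedged example of how the reserved-port list is meant to be supplied (the values are illustrative); with these set, the module-internal getAvailablePort keeps asking the OS for ephemeral ports until it finds one outside the reserved set, and throws after 20 attempts:

// Keep test databases away from a developer's long-lived local Postgres ports.
process.env.PAPERCLIP_EMBEDDED_POSTGRES_PORT = "54329";
process.env.PAPERCLIP_TEST_POSTGRES_RESERVED_PORTS = "5432, 54320";

const port = await getAvailablePort(); // never 5432, 54320, or 54329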
|
||||
|
||||
export async function getEmbeddedPostgresTestSupport(): Promise<EmbeddedPostgresTestSupport> {
|
||||
if (!embeddedPostgresSupportPromise) {
|
||||
embeddedPostgresSupportPromise = probeEmbeddedPostgresSupport();
|
||||
}
|
||||
return await embeddedPostgresSupportPromise;
|
||||
}
|
||||
|
||||
export async function startEmbeddedPostgresTestDatabase(
|
||||
tempDirPrefix: string,
|
||||
): Promise<EmbeddedPostgresTestDatabase> {
|
||||
async function createEmbeddedPostgresTestInstance(tempDirPrefix: string) {
|
||||
const dataDir = fs.mkdtempSync(path.join(os.tmpdir(), tempDirPrefix));
|
||||
const port = await getAvailablePort();
|
||||
const EmbeddedPostgres = await getEmbeddedPostgresCtor();
|
||||
@@ -118,6 +97,51 @@ export async function startEmbeddedPostgresTestDatabase(
|
||||
onError: () => {},
|
||||
});
|
||||
|
||||
return { dataDir, port, instance };
|
||||
}
|
||||
|
||||
function cleanupEmbeddedPostgresTestDirs(dataDir: string) {
|
||||
fs.rmSync(dataDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function formatEmbeddedPostgresError(error: unknown): string {
|
||||
if (error instanceof Error && error.message.length > 0) return error.message;
|
||||
if (typeof error === "string" && error.length > 0) return error;
|
||||
return "embedded Postgres startup failed";
|
||||
}
|
||||
|
||||
async function probeEmbeddedPostgresSupport(): Promise<EmbeddedPostgresTestSupport> {
|
||||
const { dataDir, instance } = await createEmbeddedPostgresTestInstance(
|
||||
"paperclip-embedded-postgres-probe-",
|
||||
);
|
||||
|
||||
try {
|
||||
await instance.initialise();
|
||||
await instance.start();
|
||||
return { supported: true };
|
||||
} catch (error) {
|
||||
return {
|
||||
supported: false,
|
||||
reason: formatEmbeddedPostgresError(error),
|
||||
};
|
||||
} finally {
|
||||
await instance.stop().catch(() => {});
|
||||
cleanupEmbeddedPostgresTestDirs(dataDir);
|
||||
}
|
||||
}
|
||||
|
||||
export async function getEmbeddedPostgresTestSupport(): Promise<EmbeddedPostgresTestSupport> {
|
||||
if (!embeddedPostgresSupportPromise) {
|
||||
embeddedPostgresSupportPromise = probeEmbeddedPostgresSupport();
|
||||
}
|
||||
return await embeddedPostgresSupportPromise;
|
||||
}
|
||||
|
||||
export async function startEmbeddedPostgresTestDatabase(
|
||||
tempDirPrefix: string,
|
||||
): Promise<EmbeddedPostgresTestDatabase> {
|
||||
const { dataDir, port, instance } = await createEmbeddedPostgresTestInstance(tempDirPrefix);
|
||||
|
||||
try {
|
||||
await instance.initialise();
|
||||
await instance.start();
|
||||
@@ -131,12 +155,12 @@ export async function startEmbeddedPostgresTestDatabase(
|
||||
connectionString,
|
||||
cleanup: async () => {
|
||||
await instance.stop().catch(() => {});
|
||||
fs.rmSync(dataDir, { recursive: true, force: true });
|
||||
cleanupEmbeddedPostgresTestDirs(dataDir);
|
||||
},
|
||||
};
|
||||
} catch (error) {
|
||||
await instance.stop().catch(() => {});
|
||||
fs.rmSync(dataDir, { recursive: true, force: true });
|
||||
cleanupEmbeddedPostgresTestDirs(dataDir);
|
||||
throw new Error(
|
||||
`Failed to start embedded PostgreSQL test database: ${formatEmbeddedPostgresError(error)}`,
|
||||
);
|
||||
|
||||
@@ -450,7 +450,7 @@ export function createToolDefinitions(client: PaperclipApiClient): ToolDefinitio
|
||||
),
|
||||
makeTool(
|
||||
"paperclipUpdateIssue",
|
||||
"Patch an issue, optionally including a comment",
|
||||
"Patch an issue, optionally including a comment; include resume=true when intentionally requesting follow-up on resumable closed work",
|
||||
updateIssueToolSchema,
|
||||
async ({ issueId, ...body }) =>
|
||||
client.requestJson("PATCH", `/issues/${encodeURIComponent(issueId)}`, { body }),
|
||||
@@ -475,7 +475,7 @@ export function createToolDefinitions(client: PaperclipApiClient): ToolDefinitio
|
||||
),
|
||||
makeTool(
|
||||
"paperclipAddComment",
|
||||
"Add a comment to an issue",
|
||||
"Add a comment to an issue; include resume=true when intentionally requesting follow-up on resumable closed work",
|
||||
addCommentToolSchema,
|
||||
async ({ issueId, ...body }) =>
|
||||
client.requestJson("POST", `/issues/${encodeURIComponent(issueId)}/comments`, { body }),
|
||||
|
||||
@@ -4,9 +4,9 @@ import fs from "node:fs";
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
|
||||
const VALID_TEMPLATES = ["default", "connector", "workspace"] as const;
|
||||
const VALID_TEMPLATES = ["default", "connector", "workspace", "environment"] as const;
|
||||
type PluginTemplate = (typeof VALID_TEMPLATES)[number];
|
||||
const VALID_CATEGORIES = new Set(["connector", "workspace", "automation", "ui"] as const);
|
||||
const VALID_CATEGORIES = new Set(["connector", "workspace", "automation", "ui", "environment"] as const);
|
||||
|
||||
export interface ScaffoldPluginOptions {
|
||||
pluginName: string;
|
||||
@@ -15,7 +15,7 @@ export interface ScaffoldPluginOptions {
|
||||
displayName?: string;
|
||||
description?: string;
|
||||
author?: string;
|
||||
category?: "connector" | "workspace" | "automation" | "ui";
|
||||
category?: "connector" | "workspace" | "automation" | "ui" | "environment";
|
||||
sdkPath?: string;
|
||||
}
|
||||
|
||||
@@ -138,7 +138,7 @@ export function scaffoldPluginProject(options: ScaffoldPluginOptions): string {
|
||||
const displayName = options.displayName ?? makeDisplayName(options.pluginName);
|
||||
const description = options.description ?? "A Paperclip plugin";
|
||||
const author = options.author ?? "Plugin Author";
|
||||
const category = options.category ?? (template === "workspace" ? "workspace" : "connector");
|
||||
const category = options.category ?? (template === "workspace" ? "workspace" : template === "environment" ? "environment" : "connector");
|
||||
const manifestId = packageToManifestId(options.pluginName);
|
||||
const localSdkPath = path.resolve(options.sdkPath ?? getLocalSdkPackagePath());
|
||||
const localSharedPath = getLocalSharedPackagePath(localSdkPath);
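A hedged usage sketch of the new template and category values; the exact options shape, including a template field, is assumed from the excerpt above:

// Scaffold an environment-driver plugin project (names are illustrative).
const outputDir = scaffoldPluginProject({
  pluginName: "@acme/paperclip-k8s-environment",
  template: "environment",   // newly accepted template value
  category: "environment",   // newly accepted category value
  displayName: "Kubernetes Environments",
  description: "Run Paperclip agents inside Kubernetes pods",
});
console.log(`Scaffolded environment plugin at ${outputDir}`);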
|
||||
@@ -296,9 +296,231 @@ export default defineConfig({
|
||||
`,
|
||||
);
|
||||
|
||||
writeFile(
|
||||
path.join(outputDir, "src", "manifest.ts"),
|
||||
`import type { PaperclipPluginManifestV1 } from "@paperclipai/plugin-sdk";
|
||||
if (template === "environment") {
|
||||
writeFile(
|
||||
path.join(outputDir, "src", "manifest.ts"),
|
||||
`import type { PaperclipPluginManifestV1 } from "@paperclipai/plugin-sdk";
|
||||
|
||||
const manifest: PaperclipPluginManifestV1 = {
|
||||
id: ${quote(manifestId)},
|
||||
apiVersion: 1,
|
||||
version: "0.1.0",
|
||||
displayName: ${quote(displayName)},
|
||||
description: ${quote(description)},
|
||||
author: ${quote(author)},
|
||||
categories: [${quote(category)}],
|
||||
capabilities: [
|
||||
"environment.drivers.register",
|
||||
"plugin.state.read",
|
||||
"plugin.state.write"
|
||||
],
|
||||
entrypoints: {
|
||||
worker: "./dist/worker.js",
|
||||
ui: "./dist/ui"
|
||||
},
|
||||
environmentDrivers: [
|
||||
{
|
||||
driverKey: ${quote(manifestId + "-driver")},
|
||||
displayName: ${quote(displayName + " Driver")}
|
||||
}
|
||||
],
|
||||
ui: {
|
||||
slots: [
|
||||
{
|
||||
type: "dashboardWidget",
|
||||
id: "health-widget",
|
||||
displayName: ${quote(`${displayName} Health`)},
|
||||
exportName: "DashboardWidget"
|
||||
}
|
||||
]
|
||||
}
|
||||
};
|
||||
|
||||
export default manifest;
|
||||
`,
|
||||
);
|
||||
|
||||
writeFile(
|
||||
path.join(outputDir, "src", "worker.ts"),
|
||||
`import { definePlugin, runWorker } from "@paperclipai/plugin-sdk";
|
||||
import type {
|
||||
PluginEnvironmentValidateConfigParams,
|
||||
PluginEnvironmentProbeParams,
|
||||
PluginEnvironmentAcquireLeaseParams,
|
||||
PluginEnvironmentResumeLeaseParams,
|
||||
PluginEnvironmentReleaseLeaseParams,
|
||||
PluginEnvironmentDestroyLeaseParams,
|
||||
PluginEnvironmentRealizeWorkspaceParams,
|
||||
PluginEnvironmentExecuteParams,
|
||||
} from "@paperclipai/plugin-sdk";
|
||||
|
||||
const plugin = definePlugin({
|
||||
async setup(ctx) {
|
||||
ctx.data.register("health", async () => {
|
||||
return { status: "ok", checkedAt: new Date().toISOString() };
|
||||
});
|
||||
},
|
||||
|
||||
async onHealth() {
|
||||
return { status: "ok", message: "Environment plugin worker is running" };
|
||||
},
|
||||
|
||||
async onEnvironmentValidateConfig(params: PluginEnvironmentValidateConfigParams) {
|
||||
if (!params.config || typeof params.config !== "object") {
|
||||
return { ok: false, errors: ["Config must be a non-null object"] };
|
||||
}
|
||||
return { ok: true, normalizedConfig: params.config };
|
||||
},
|
||||
|
||||
async onEnvironmentProbe(_params: PluginEnvironmentProbeParams) {
|
||||
return { ok: true, summary: "Environment is reachable" };
|
||||
},
|
||||
|
||||
async onEnvironmentAcquireLease(params: PluginEnvironmentAcquireLeaseParams) {
|
||||
const providerLeaseId = \`lease-\${params.runId}-\${Date.now()}\`;
|
||||
return {
|
||||
providerLeaseId,
|
||||
metadata: { acquiredAt: new Date().toISOString() },
|
||||
};
|
||||
},
|
||||
|
||||
async onEnvironmentResumeLease(params: PluginEnvironmentResumeLeaseParams) {
|
||||
return {
|
||||
providerLeaseId: params.providerLeaseId,
|
||||
metadata: { ...params.leaseMetadata, resumed: true },
|
||||
};
|
||||
},
|
||||
|
||||
async onEnvironmentReleaseLease(_params: PluginEnvironmentReleaseLeaseParams) {
|
||||
// Release provider-side resources here
|
||||
},
|
||||
|
||||
async onEnvironmentDestroyLease(_params: PluginEnvironmentDestroyLeaseParams) {
|
||||
// Destroy provider-side resources here
|
||||
},
|
||||
|
||||
async onEnvironmentRealizeWorkspace(params: PluginEnvironmentRealizeWorkspaceParams) {
|
||||
const cwd = params.workspace.remotePath ?? params.workspace.localPath ?? "/tmp/workspace";
|
||||
return { cwd, metadata: { realized: true } };
|
||||
},
|
||||
|
||||
async onEnvironmentExecute(params: PluginEnvironmentExecuteParams) {
|
||||
// Replace this with real command execution against your provider
|
||||
return {
|
||||
exitCode: 0,
|
||||
timedOut: false,
|
||||
stdout: \`Executed: \${params.command}\`,
|
||||
stderr: "",
|
||||
};
|
||||
},
|
||||
});
|
||||
|
||||
export default plugin;
|
||||
runWorker(plugin, import.meta.url);
|
||||
`,
|
||||
);
|
||||
|
||||
writeFile(
|
||||
path.join(outputDir, "src", "ui", "index.tsx"),
|
||||
`import { usePluginData, type PluginWidgetProps } from "@paperclipai/plugin-sdk/ui";
|
||||
|
||||
type HealthData = {
|
||||
status: "ok" | "degraded" | "error";
|
||||
checkedAt: string;
|
||||
};
|
||||
|
||||
export function DashboardWidget(_props: PluginWidgetProps) {
|
||||
const { data, loading, error } = usePluginData<HealthData>("health");
|
||||
|
||||
if (loading) return <div>Loading environment health...</div>;
|
||||
if (error) return <div>Plugin error: {error.message}</div>;
|
||||
|
||||
return (
|
||||
<div style={{ display: "grid", gap: "0.5rem" }}>
|
||||
<strong>${displayName}</strong>
|
||||
<div>Health: {data?.status ?? "unknown"}</div>
|
||||
<div>Checked: {data?.checkedAt ?? "never"}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
`,
|
||||
);
writeFile(
  path.join(outputDir, "tests", "plugin.spec.ts"),
  `import { describe, expect, it } from "vitest";
import {
  createEnvironmentTestHarness,
  createFakeEnvironmentDriver,
  assertEnvironmentEventOrder,
  assertLeaseLifecycle,
} from "@paperclipai/plugin-sdk/testing";
import manifest from "../src/manifest.js";
import plugin from "../src/worker.js";

const ENV_ID = "env-test-1";
const BASE_PARAMS = {
  driverKey: manifest.environmentDrivers![0].driverKey,
  companyId: "co-1",
  environmentId: ENV_ID,
  config: {},
};
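
// harness.environmentEvents captures the lease lifecycle calls, which the ordering
// and lifecycle assertions below verify after the run.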
describe("environment plugin scaffold", () => {
  it("validates config", async () => {
    const driver = createFakeEnvironmentDriver({ driverKey: BASE_PARAMS.driverKey });
    const harness = createEnvironmentTestHarness({ manifest, environmentDriver: driver });
    await plugin.definition.setup(harness.ctx);

    const result = await plugin.definition.onEnvironmentValidateConfig!({
      driverKey: BASE_PARAMS.driverKey,
      config: { host: "test" },
    });
    expect(result.ok).toBe(true);
  });

  it("probes the environment", async () => {
    const driver = createFakeEnvironmentDriver({ driverKey: BASE_PARAMS.driverKey });
    const harness = createEnvironmentTestHarness({ manifest, environmentDriver: driver });
    await plugin.definition.setup(harness.ctx);

    const result = await plugin.definition.onEnvironmentProbe!(BASE_PARAMS);
    expect(result.ok).toBe(true);
  });

  it("runs a full lease lifecycle through the harness", async () => {
    const driver = createFakeEnvironmentDriver({ driverKey: BASE_PARAMS.driverKey });
    const harness = createEnvironmentTestHarness({ manifest, environmentDriver: driver });
    await plugin.definition.setup(harness.ctx);

    const lease = await harness.acquireLease({ ...BASE_PARAMS, runId: "run-1" });
    expect(lease.providerLeaseId).toBeTruthy();

    await harness.realizeWorkspace({
      ...BASE_PARAMS,
      lease,
      workspace: { localPath: "/tmp/test" },
    });

    await harness.releaseLease({
      ...BASE_PARAMS,
      providerLeaseId: lease.providerLeaseId,
    });

    assertEnvironmentEventOrder(harness.environmentEvents, [
      "acquireLease",
      "realizeWorkspace",
      "releaseLease",
    ]);
    assertLeaseLifecycle(harness.environmentEvents, ENV_ID);
  });
});
`,
);
} else {
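// Non-environment plugins fall through to the generic scaffold: a plain manifest,
// worker, dashboard widget, and test suite without the environment-driver hooks.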
writeFile(
  path.join(outputDir, "src", "manifest.ts"),
  `import type { PaperclipPluginManifestV1 } from "@paperclipai/plugin-sdk";

const manifest: PaperclipPluginManifestV1 = {
  id: ${quote(manifestId)},
@@ -331,11 +553,11 @@ const manifest: PaperclipPluginManifestV1 = {

export default manifest;
`,
);

writeFile(
  path.join(outputDir, "src", "worker.ts"),
  `import { definePlugin, runWorker } from "@paperclipai/plugin-sdk";

const plugin = definePlugin({
  async setup(ctx) {
@@ -363,11 +585,11 @@ const plugin = definePlugin({
export default plugin;
runWorker(plugin, import.meta.url);
`,
);

writeFile(
  path.join(outputDir, "src", "ui", "index.tsx"),
  `import { usePluginAction, usePluginData, type PluginWidgetProps } from "@paperclipai/plugin-sdk/ui";

type HealthData = {
  status: "ok" | "degraded" | "error";
@@ -391,11 +613,11 @@ export function DashboardWidget(_props: PluginWidgetProps) {
  );
}
`,
);

writeFile(
  path.join(outputDir, "tests", "plugin.spec.ts"),
  `import { describe, expect, it } from "vitest";
import { createTestHarness } from "@paperclipai/plugin-sdk/testing";
import manifest from "../src/manifest.js";
import plugin from "../src/worker.js";
@@ -416,7 +638,8 @@ describe("plugin scaffold", () => {
  });
});
`,
);
}

writeFile(
  path.join(outputDir, "README.md"),

@@ -5,13 +5,13 @@
  "private": true,
  "description": "A Paperclip plugin",
  "scripts": {
-   "prebuild": "node ../../../../scripts/ensure-plugin-build-deps.mjs",
+   "prebuild": "pnpm --filter @paperclipai/plugin-sdk ensure-build-deps",
    "build": "node ./esbuild.config.mjs",
    "build:rollup": "rollup -c",
    "dev": "node ./esbuild.config.mjs --watch",
    "dev:ui": "paperclip-plugin-dev-server --root . --ui-dir dist/ui --port 4177",
    "test": "vitest run --config ./vitest.config.ts",
-   "typecheck": "pnpm --filter @paperclipai/plugin-sdk build && tsc --noEmit"
+   "typecheck": "pnpm --filter @paperclipai/plugin-sdk ensure-build-deps && tsc --noEmit"
  },
  "paperclipPlugin": {
    "manifest": "./dist/manifest.js",

@@ -13,10 +13,10 @@
    "ui": "./dist/ui/"
  },
  "scripts": {
-   "prebuild": "node ../../../../scripts/ensure-plugin-build-deps.mjs",
+   "prebuild": "pnpm --filter @paperclipai/plugin-sdk ensure-build-deps",
    "build": "tsc && node ./scripts/build-ui.mjs",
    "clean": "rm -rf dist",
-   "typecheck": "pnpm --filter @paperclipai/plugin-sdk build && tsc --noEmit"
+   "typecheck": "pnpm --filter @paperclipai/plugin-sdk ensure-build-deps && tsc --noEmit"
  },
  "dependencies": {
    "@codemirror/lang-javascript": "^6.2.2",

@@ -13,10 +13,10 @@
    "ui": "./dist/ui/"
  },
  "scripts": {
-   "prebuild": "node ../../../../scripts/ensure-plugin-build-deps.mjs",
+   "prebuild": "pnpm --filter @paperclipai/plugin-sdk ensure-build-deps",
    "build": "tsc",
    "clean": "rm -rf dist",
-   "typecheck": "pnpm --filter @paperclipai/plugin-sdk build && tsc --noEmit"
+   "typecheck": "pnpm --filter @paperclipai/plugin-sdk ensure-build-deps && tsc --noEmit"
  },
  "dependencies": {
    "@paperclipai/plugin-sdk": "workspace:*"