Compare commits
39 Commits
pap-9066-l
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0808b388ee | ||
|
|
c445e59256 | ||
|
|
9746dab4e8 | ||
|
|
563413ecd4 | ||
|
|
94ce7af715 | ||
|
|
508355b8fc | ||
|
|
ad0bb57350 | ||
|
|
eaa80cf88b | ||
|
|
8af38fb054 | ||
|
|
0c6f9bdcf8 | ||
|
|
21404e8a34 | ||
|
|
5a64cf52a1 | ||
|
|
74cb560c41 | ||
|
|
486fb88a15 | ||
|
|
4ad1c83b84 | ||
|
|
c0c58d6b01 | ||
|
|
0fe39a2d5c | ||
|
|
b24c6909e8 | ||
|
|
6e4fa78d86 | ||
|
|
534aee66ae | ||
|
|
0096b56a1c | ||
|
|
eb12c42009 | ||
|
|
a72731f118 | ||
|
|
a1b2875165 | ||
|
|
2f72cb29ea | ||
|
|
e3af7aa489 | ||
|
|
433dfed33d | ||
|
|
778e775c35 | ||
|
|
06e6ee25cd | ||
|
|
f784d8d90e | ||
|
|
0e1a582831 | ||
|
|
a904effb96 | ||
|
|
4269545b19 | ||
|
|
fe3904f434 | ||
|
|
12cb7b40fd | ||
|
|
824298f414 | ||
|
|
e400315cbf | ||
|
|
6f30003421 | ||
|
|
772fc92619 |
8
.github/workflows/pr.yml
vendored
@@ -83,7 +83,7 @@ jobs:
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Typecheck workspaces whose build scripts skip TypeScript
|
||||
run: pnpm run typecheck:build-gaps
|
||||
@@ -135,7 +135,7 @@ jobs:
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run serialized server test shard
|
||||
run: pnpm test:run:serialized -- --shard-index ${{ matrix.shard_index }} --shard-count ${{ matrix.shard_count }}
|
||||
@@ -162,7 +162,7 @@ jobs:
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
# `release.sh` always executes its Step 2/7 workspace build, even when
|
||||
# `--skip-verify` bypasses the initial verification gate.
|
||||
@@ -193,7 +193,7 @@ jobs:
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Install Playwright
|
||||
run: npx playwright install --with-deps chromium
|
||||
|
||||
@@ -25,15 +25,16 @@ COPY packages/mcp-server/package.json packages/mcp-server/
|
||||
COPY packages/adapters/acpx-local/package.json packages/adapters/acpx-local/
|
||||
COPY packages/adapters/claude-local/package.json packages/adapters/claude-local/
|
||||
COPY packages/adapters/codex-local/package.json packages/adapters/codex-local/
|
||||
COPY packages/adapters/cursor-cloud/package.json packages/adapters/cursor-cloud/
|
||||
COPY packages/adapters/cursor-local/package.json packages/adapters/cursor-local/
|
||||
COPY packages/adapters/gemini-local/package.json packages/adapters/gemini-local/
|
||||
COPY packages/adapters/openclaw-gateway/package.json packages/adapters/openclaw-gateway/
|
||||
COPY packages/adapters/opencode-local/package.json packages/adapters/opencode-local/
|
||||
COPY packages/adapters/pi-local/package.json packages/adapters/pi-local/
|
||||
COPY packages/plugins/sdk/package.json packages/plugins/sdk/
|
||||
COPY packages/plugins/plugin-llm-wiki/package.json packages/plugins/plugin-llm-wiki/
|
||||
COPY --parents packages/plugins/sandbox-providers/./*/package.json packages/plugins/sandbox-providers/
|
||||
COPY packages/plugins/paperclip-plugin-fake-sandbox/package.json packages/plugins/paperclip-plugin-fake-sandbox/
|
||||
COPY packages/plugins/plugin-llm-wiki/package.json packages/plugins/plugin-llm-wiki/
|
||||
COPY patches/ patches/
|
||||
|
||||
RUN pnpm install --frozen-lockfile
|
||||
|
||||
@@ -40,6 +40,7 @@
|
||||
"@paperclipai/adapter-acpx-local": "workspace:*",
|
||||
"@paperclipai/adapter-claude-local": "workspace:*",
|
||||
"@paperclipai/adapter-codex-local": "workspace:*",
|
||||
"@paperclipai/adapter-cursor-cloud": "workspace:*",
|
||||
"@paperclipai/adapter-cursor-local": "workspace:*",
|
||||
"@paperclipai/adapter-gemini-local": "workspace:*",
|
||||
"@paperclipai/adapter-opencode-local": "workspace:*",
|
||||
@@ -49,7 +50,7 @@
|
||||
"@paperclipai/db": "workspace:*",
|
||||
"@paperclipai/server": "workspace:*",
|
||||
"@paperclipai/shared": "workspace:*",
|
||||
"drizzle-orm": "0.38.4",
|
||||
"drizzle-orm": "0.45.2",
|
||||
"dotenv": "^17.0.1",
|
||||
"commander": "^13.1.0",
|
||||
"embedded-postgres": "^18.1.0-beta.16",
|
||||
|
||||
@@ -24,8 +24,7 @@ describe("home path resolution", () => {
|
||||
const paths = describeLocalInstancePaths();
|
||||
expect(paths.homeDir).toBe(home);
|
||||
expect(paths.instanceId).toBe("default");
|
||||
expect(paths.activeSpaceId).toBe("default");
|
||||
expect(paths.configPath).toBe(path.resolve(home, "instances", "default", "spaces", "default", "config.json"));
|
||||
expect(paths.configPath).toBe(path.resolve(home, "instances", "default", "config.json"));
|
||||
});
|
||||
|
||||
it("supports PAPERCLIP_HOME and explicit instance ids", () => {
|
||||
|
||||
@@ -89,7 +89,6 @@ describe("onboard", () => {
|
||||
delete process.env.PAPERCLIP_HOME;
|
||||
delete process.env.PAPERCLIP_CONFIG;
|
||||
delete process.env.PAPERCLIP_INSTANCE_ID;
|
||||
delete process.env.PAPERCLIP_SPACE_ID;
|
||||
delete process.env.PAPERCLIP_BIND;
|
||||
delete process.env.PAPERCLIP_BIND_HOST;
|
||||
delete process.env.PAPERCLIP_TAILNET_BIND_HOST;
|
||||
@@ -135,8 +134,8 @@ describe("onboard", () => {
|
||||
expect(raw.server.host).toBe("127.0.0.1");
|
||||
});
|
||||
|
||||
it("creates default-space config and data paths for a fresh PAPERCLIP_HOME", async () => {
|
||||
const home = fs.mkdtempSync(path.join(os.tmpdir(), "paperclip-onboard-space-"));
|
||||
it("creates instance-root config and data paths for a fresh PAPERCLIP_HOME", async () => {
|
||||
const home = fs.mkdtempSync(path.join(os.tmpdir(), "paperclip-onboard-home-"));
|
||||
const cwd = fs.mkdtempSync(path.join(os.tmpdir(), "paperclip-onboard-cwd-"));
|
||||
process.chdir(cwd);
|
||||
process.env.PAPERCLIP_HOME = home;
|
||||
@@ -144,24 +143,16 @@ describe("onboard", () => {
|
||||
await onboard({ yes: true, invokedByRun: true });
|
||||
|
||||
const instanceRoot = path.join(home, "instances", "default");
|
||||
const spaceRoot = path.join(instanceRoot, "spaces", "default");
|
||||
const configPath = path.join(spaceRoot, "config.json");
|
||||
const registryPath = path.join(instanceRoot, "config.json");
|
||||
const configPath = path.join(instanceRoot, "config.json");
|
||||
const raw = JSON.parse(fs.readFileSync(configPath, "utf8")) as PaperclipConfig;
|
||||
const registry = JSON.parse(fs.readFileSync(registryPath, "utf8")) as { activeSpaceId?: string };
|
||||
|
||||
expect(registry.activeSpaceId).toBe("default");
|
||||
expect(raw.database.embeddedPostgresDataDir).toBe(path.join(spaceRoot, "db"));
|
||||
expect(raw.database.backup.dir).toBe(path.join(spaceRoot, "data", "backups"));
|
||||
expect(raw.logging.logDir).toBe(path.join(spaceRoot, "logs"));
|
||||
expect(raw.storage.localDisk.baseDir).toBe(path.join(spaceRoot, "data", "storage"));
|
||||
expect(raw.secrets.localEncrypted.keyFilePath).toBe(path.join(spaceRoot, "secrets", "master.key"));
|
||||
expect(fs.existsSync(path.join(spaceRoot, ".env"))).toBe(true);
|
||||
expect(fs.existsSync(path.join(spaceRoot, "secrets", "master.key"))).toBe(true);
|
||||
|
||||
for (const rootName of ["db", "data", "secrets", "logs", "workspaces", "projects", "companies"]) {
|
||||
expect(fs.existsSync(path.join(instanceRoot, rootName))).toBe(false);
|
||||
}
|
||||
expect(raw.database.embeddedPostgresDataDir).toBe(path.join(instanceRoot, "db"));
|
||||
expect(raw.database.backup.dir).toBe(path.join(instanceRoot, "data", "backups"));
|
||||
expect(raw.logging.logDir).toBe(path.join(instanceRoot, "logs"));
|
||||
expect(raw.storage.localDisk.baseDir).toBe(path.join(instanceRoot, "data", "storage"));
|
||||
expect(raw.secrets.localEncrypted.keyFilePath).toBe(path.join(instanceRoot, "secrets", "master.key"));
|
||||
expect(fs.existsSync(path.join(instanceRoot, ".env"))).toBe(true);
|
||||
expect(fs.existsSync(path.join(instanceRoot, "secrets", "master.key"))).toBe(true);
|
||||
});
|
||||
|
||||
it("supports authenticated/private quickstart bind presets", async () => {
|
||||
|
||||
257
cli/src/__tests__/secrets.test.ts
Normal file
@@ -0,0 +1,257 @@
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
import type { Agent, CompanySecret } from "@paperclipai/shared";
|
||||
import type { PaperclipConfig } from "../config/schema.js";
|
||||
import { secretsCheck } from "../checks/secrets-check.js";
|
||||
import {
|
||||
buildInlineMigrationSecretName,
|
||||
buildMigratedAgentEnv,
|
||||
collectInlineSecretMigrationCandidates,
|
||||
parseSecretsInclude,
|
||||
toPlainEnvValue,
|
||||
} from "../commands/client/secrets.js";
|
||||
|
||||
function agent(partial: Partial<Agent>): Agent {
|
||||
return {
|
||||
id: "agent-12345678",
|
||||
companyId: "company-1",
|
||||
name: "Coder",
|
||||
urlKey: "coder",
|
||||
role: "engineer",
|
||||
title: null,
|
||||
icon: null,
|
||||
status: "idle",
|
||||
reportsTo: null,
|
||||
capabilities: null,
|
||||
adapterType: "codex_local",
|
||||
adapterConfig: {},
|
||||
runtimeConfig: {},
|
||||
budgetMonthlyCents: 0,
|
||||
spentMonthlyCents: 0,
|
||||
pauseReason: null,
|
||||
pausedAt: null,
|
||||
permissions: {
|
||||
canCreateAgents: false,
|
||||
},
|
||||
lastHeartbeatAt: null,
|
||||
metadata: null,
|
||||
createdAt: new Date("2026-04-26T00:00:00.000Z"),
|
||||
updatedAt: new Date("2026-04-26T00:00:00.000Z"),
|
||||
...partial,
|
||||
};
|
||||
}
|
||||
|
||||
function secret(partial: Partial<CompanySecret>): CompanySecret {
|
||||
return {
|
||||
id: "secret-1",
|
||||
companyId: "company-1",
|
||||
key: "agent_agent-12_anthropic_api_key",
|
||||
name: "agent_agent-12_anthropic_api_key",
|
||||
provider: "local_encrypted",
|
||||
status: "active",
|
||||
managedMode: "paperclip_managed",
|
||||
externalRef: null,
|
||||
providerConfigId: null,
|
||||
providerMetadata: null,
|
||||
latestVersion: 1,
|
||||
description: null,
|
||||
lastResolvedAt: null,
|
||||
lastRotatedAt: null,
|
||||
deletedAt: null,
|
||||
createdByAgentId: null,
|
||||
createdByUserId: null,
|
||||
createdAt: new Date("2026-04-26T00:00:00.000Z"),
|
||||
updatedAt: new Date("2026-04-26T00:00:00.000Z"),
|
||||
...partial,
|
||||
};
|
||||
}
|
||||
|
||||
function configWithSecretsProvider(provider: PaperclipConfig["secrets"]["provider"]): PaperclipConfig {
|
||||
return {
|
||||
$meta: {
|
||||
version: 1,
|
||||
updatedAt: "2026-05-02T00:00:00.000Z",
|
||||
source: "configure",
|
||||
},
|
||||
database: {
|
||||
mode: "embedded-postgres",
|
||||
embeddedPostgresDataDir: "/tmp/paperclip/db",
|
||||
embeddedPostgresPort: 55432,
|
||||
backup: {
|
||||
enabled: true,
|
||||
intervalMinutes: 60,
|
||||
retentionDays: 30,
|
||||
dir: "/tmp/paperclip/backups",
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
mode: "file",
|
||||
logDir: "/tmp/paperclip/logs",
|
||||
},
|
||||
server: {
|
||||
deploymentMode: "local_trusted",
|
||||
exposure: "private",
|
||||
host: "127.0.0.1",
|
||||
port: 3100,
|
||||
allowedHostnames: [],
|
||||
serveUi: true,
|
||||
},
|
||||
auth: {
|
||||
baseUrlMode: "auto",
|
||||
disableSignUp: false,
|
||||
},
|
||||
telemetry: {
|
||||
enabled: true,
|
||||
},
|
||||
storage: {
|
||||
provider: "local_disk",
|
||||
localDisk: {
|
||||
baseDir: "/tmp/paperclip/storage",
|
||||
},
|
||||
s3: {
|
||||
bucket: "paperclip",
|
||||
region: "us-east-1",
|
||||
prefix: "",
|
||||
forcePathStyle: false,
|
||||
},
|
||||
},
|
||||
secrets: {
|
||||
provider,
|
||||
strictMode: true,
|
||||
localEncrypted: {
|
||||
keyFilePath: "/tmp/paperclip/secrets/master.key",
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
describe("secrets CLI helpers", () => {
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
beforeEach(() => {
|
||||
process.env = { ...originalEnv };
|
||||
delete process.env.PAPERCLIP_SECRETS_AWS_REGION;
|
||||
delete process.env.AWS_REGION;
|
||||
delete process.env.AWS_DEFAULT_REGION;
|
||||
delete process.env.PAPERCLIP_SECRETS_AWS_DEPLOYMENT_ID;
|
||||
delete process.env.PAPERCLIP_SECRETS_AWS_KMS_KEY_ID;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
it("parses declaration include filters", () => {
|
||||
expect(parseSecretsInclude("agents,projects,tasks")).toEqual({
|
||||
company: false,
|
||||
agents: true,
|
||||
projects: true,
|
||||
issues: true,
|
||||
skills: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("detects inline sensitive env values that need migration", () => {
|
||||
const rows = collectInlineSecretMigrationCandidates(
|
||||
[
|
||||
agent({
|
||||
id: "agent-12345678",
|
||||
adapterConfig: {
|
||||
env: {
|
||||
ANTHROPIC_API_KEY: "sk-ant-test",
|
||||
GH_TOKEN: {
|
||||
type: "plain",
|
||||
value: "ghp-test",
|
||||
},
|
||||
PATH: {
|
||||
type: "plain",
|
||||
value: "/usr/bin",
|
||||
},
|
||||
OPENAI_API_KEY: {
|
||||
type: "secret_ref",
|
||||
secretId: "secret-existing",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
],
|
||||
[
|
||||
secret({
|
||||
id: "secret-gh-token",
|
||||
name: buildInlineMigrationSecretName("agent-12345678", "GH_TOKEN"),
|
||||
}),
|
||||
],
|
||||
);
|
||||
|
||||
expect(rows).toEqual([
|
||||
{
|
||||
agentId: "agent-12345678",
|
||||
agentName: "Coder",
|
||||
envKey: "ANTHROPIC_API_KEY",
|
||||
secretName: "agent_agent-12_anthropic_api_key",
|
||||
existingSecretId: null,
|
||||
},
|
||||
{
|
||||
agentId: "agent-12345678",
|
||||
agentName: "Coder",
|
||||
envKey: "GH_TOKEN",
|
||||
secretName: "agent_agent-12_gh_token",
|
||||
existingSecretId: "secret-gh-token",
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it("builds migrated env bindings without preserving secret values", () => {
|
||||
const next = buildMigratedAgentEnv(
|
||||
{
|
||||
ANTHROPIC_API_KEY: "sk-ant-test",
|
||||
NODE_ENV: {
|
||||
type: "plain",
|
||||
value: "development",
|
||||
},
|
||||
},
|
||||
new Map([["ANTHROPIC_API_KEY", "secret-1"]]),
|
||||
);
|
||||
|
||||
expect(next).toEqual({
|
||||
ANTHROPIC_API_KEY: {
|
||||
type: "secret_ref",
|
||||
secretId: "secret-1",
|
||||
version: "latest",
|
||||
},
|
||||
NODE_ENV: {
|
||||
type: "plain",
|
||||
value: "development",
|
||||
},
|
||||
});
|
||||
expect(JSON.stringify(next)).not.toContain("sk-ant-test");
|
||||
});
|
||||
|
||||
it("reads only explicit plain env values", () => {
|
||||
expect(toPlainEnvValue("plain-value")).toBe("plain-value");
|
||||
expect(toPlainEnvValue({ type: "plain", value: "wrapped" })).toBe("wrapped");
|
||||
expect(toPlainEnvValue({ type: "secret_ref", secretId: "secret-1" })).toBeNull();
|
||||
});
|
||||
|
||||
it("reports the AWS bootstrap config required by doctor", () => {
|
||||
const result = secretsCheck(configWithSecretsProvider("aws_secrets_manager"));
|
||||
|
||||
expect(result.status).toBe("fail");
|
||||
expect(result.message).toContain("PAPERCLIP_SECRETS_AWS_DEPLOYMENT_ID");
|
||||
expect(result.repairHint).toContain("AWS SDK default credential chain");
|
||||
expect(result.repairHint).toContain("Do not store AWS root credentials");
|
||||
});
|
||||
|
||||
it("passes AWS doctor checks when non-secret provider config is present", () => {
|
||||
process.env.PAPERCLIP_SECRETS_AWS_REGION = "us-east-1";
|
||||
process.env.PAPERCLIP_SECRETS_AWS_DEPLOYMENT_ID = "prod-us-1";
|
||||
process.env.PAPERCLIP_SECRETS_AWS_KMS_KEY_ID =
|
||||
"arn:aws:kms:us-east-1:123456789012:key/test";
|
||||
process.env.AWS_PROFILE = "paperclip-prod";
|
||||
|
||||
const result = secretsCheck(configWithSecretsProvider("aws_secrets_manager"));
|
||||
|
||||
expect(result.status).toBe("pass");
|
||||
expect(result.message).toContain("prod-us-1");
|
||||
expect(result.message).toContain("AWS_PROFILE/shared config");
|
||||
});
|
||||
});
|
||||
@@ -1,186 +0,0 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
import { migrateDefaultSpaceInstall } from "../commands/spaces.js";
|
||||
import { readConfig, resolveConfigPath } from "../config/store.js";
|
||||
import type { PaperclipConfig } from "../config/schema.js";
|
||||
|
||||
const ORIGINAL_ENV = { ...process.env };
|
||||
const ORIGINAL_CWD = process.cwd();
|
||||
|
||||
function writeJson(filePath: string, value: unknown): void {
|
||||
fs.mkdirSync(path.dirname(filePath), { recursive: true });
|
||||
fs.writeFileSync(filePath, `${JSON.stringify(value, null, 2)}\n`, { mode: 0o600 });
|
||||
}
|
||||
|
||||
function writeText(filePath: string, value: string): void {
|
||||
fs.mkdirSync(path.dirname(filePath), { recursive: true });
|
||||
fs.writeFileSync(filePath, value);
|
||||
}
|
||||
|
||||
function createLegacyInstallFixture() {
|
||||
const home = fs.mkdtempSync(path.join(os.tmpdir(), "paperclip-space-migrate-home-"));
|
||||
const cwd = fs.mkdtempSync(path.join(os.tmpdir(), "paperclip-space-migrate-cwd-"));
|
||||
process.chdir(cwd);
|
||||
process.env.PAPERCLIP_HOME = home;
|
||||
delete process.env.PAPERCLIP_CONFIG;
|
||||
delete process.env.PAPERCLIP_INSTANCE_ID;
|
||||
delete process.env.PAPERCLIP_SPACE_ID;
|
||||
|
||||
const instanceRoot = path.join(home, "instances", "default");
|
||||
const spaceRoot = path.join(instanceRoot, "spaces", "default");
|
||||
const config: PaperclipConfig = {
|
||||
$meta: {
|
||||
version: 1,
|
||||
updatedAt: "2026-05-09T00:00:00.000Z",
|
||||
source: "onboard",
|
||||
},
|
||||
database: {
|
||||
mode: "embedded-postgres",
|
||||
embeddedPostgresDataDir: path.join(instanceRoot, "db"),
|
||||
embeddedPostgresPort: 54329,
|
||||
backup: {
|
||||
enabled: true,
|
||||
intervalMinutes: 60,
|
||||
retentionDays: 30,
|
||||
dir: path.join(instanceRoot, "data", "backups"),
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
mode: "file",
|
||||
logDir: path.join(instanceRoot, "logs"),
|
||||
},
|
||||
server: {
|
||||
deploymentMode: "local_trusted",
|
||||
exposure: "private",
|
||||
host: "127.0.0.1",
|
||||
port: 3100,
|
||||
allowedHostnames: [],
|
||||
serveUi: true,
|
||||
},
|
||||
auth: {
|
||||
baseUrlMode: "auto",
|
||||
disableSignUp: false,
|
||||
},
|
||||
telemetry: {
|
||||
enabled: true,
|
||||
},
|
||||
storage: {
|
||||
provider: "local_disk",
|
||||
localDisk: {
|
||||
baseDir: path.join(instanceRoot, "data", "storage"),
|
||||
},
|
||||
s3: {
|
||||
bucket: "paperclip",
|
||||
region: "us-east-1",
|
||||
prefix: "",
|
||||
forcePathStyle: false,
|
||||
},
|
||||
},
|
||||
secrets: {
|
||||
provider: "local_encrypted",
|
||||
strictMode: false,
|
||||
localEncrypted: {
|
||||
keyFilePath: path.join(instanceRoot, "secrets", "master.key"),
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
writeJson(path.join(instanceRoot, "config.json"), config);
|
||||
writeText(path.join(instanceRoot, ".env"), "PAPERCLIP_AGENT_JWT_SECRET=test-secret\n");
|
||||
writeText(path.join(instanceRoot, "db", "PG_VERSION"), "17\n");
|
||||
writeText(path.join(instanceRoot, "data", "storage", "hello.txt"), "hello\n");
|
||||
writeText(path.join(instanceRoot, "logs", "server.log"), "log\n");
|
||||
writeText(path.join(instanceRoot, "secrets", "master.key"), "01234567890123456789012345678901");
|
||||
writeText(path.join(instanceRoot, "workspaces", "agent-1", "README.md"), "workspace\n");
|
||||
writeText(path.join(instanceRoot, "projects", "company", "project", "repo", "README.md"), "project\n");
|
||||
writeText(path.join(instanceRoot, "companies", "company-1", "codex-home", "config.toml"), "model=\"x\"\n");
|
||||
writeText(path.join(instanceRoot, "codex-home", "config.toml"), "model=\"shared\"\n");
|
||||
|
||||
return { instanceRoot, spaceRoot };
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
process.env = { ...ORIGINAL_ENV };
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = { ...ORIGINAL_ENV };
|
||||
process.chdir(ORIGINAL_CWD);
|
||||
});
|
||||
|
||||
describe("spaces migrate-default", () => {
|
||||
it("moves a legacy root-shaped install into spaces/default and rewrites config paths", async () => {
|
||||
const fixture = createLegacyInstallFixture();
|
||||
|
||||
const result = await migrateDefaultSpaceInstall({
|
||||
serverRunningCheck: async () => false,
|
||||
});
|
||||
|
||||
expect(result.status).toBe("migrated");
|
||||
expect(result.movedPaths).toEqual([
|
||||
"config.json",
|
||||
".env",
|
||||
"db",
|
||||
"data",
|
||||
"logs",
|
||||
"secrets",
|
||||
"workspaces",
|
||||
"projects",
|
||||
"companies",
|
||||
"codex-home",
|
||||
]);
|
||||
|
||||
expect(fs.existsSync(path.join(fixture.instanceRoot, "db"))).toBe(false);
|
||||
expect(fs.readFileSync(path.join(fixture.spaceRoot, ".env"), "utf8")).toContain("PAPERCLIP_AGENT_JWT_SECRET");
|
||||
expect(fs.readFileSync(path.join(fixture.spaceRoot, "db", "PG_VERSION"), "utf8")).toBe("17\n");
|
||||
expect(fs.readFileSync(path.join(fixture.spaceRoot, "data", "storage", "hello.txt"), "utf8")).toBe("hello\n");
|
||||
expect(fs.readFileSync(path.join(fixture.spaceRoot, "companies", "company-1", "codex-home", "config.toml"), "utf8")).toContain("model");
|
||||
expect(fs.readFileSync(path.join(fixture.spaceRoot, "codex-home", "config.toml"), "utf8")).toContain("shared");
|
||||
|
||||
const migratedConfig = JSON.parse(fs.readFileSync(path.join(fixture.spaceRoot, "config.json"), "utf8")) as PaperclipConfig;
|
||||
expect(migratedConfig.database.embeddedPostgresDataDir).toBe(path.join(fixture.spaceRoot, "db"));
|
||||
expect(migratedConfig.database.backup.dir).toBe(path.join(fixture.spaceRoot, "data", "backups"));
|
||||
expect(migratedConfig.logging.logDir).toBe(path.join(fixture.spaceRoot, "logs"));
|
||||
expect(migratedConfig.storage.localDisk.baseDir).toBe(path.join(fixture.spaceRoot, "data", "storage"));
|
||||
expect(migratedConfig.secrets.localEncrypted.keyFilePath).toBe(path.join(fixture.spaceRoot, "secrets", "master.key"));
|
||||
|
||||
const marker = JSON.parse(fs.readFileSync(path.join(fixture.instanceRoot, "config.json"), "utf8")) as {
|
||||
activeSpaceId?: string;
|
||||
defaultSpaceMigration?: { sourceRoot?: string; destinationRoot?: string; movedPaths?: string[] };
|
||||
};
|
||||
expect(marker.activeSpaceId).toBe("default");
|
||||
expect(marker.defaultSpaceMigration).toMatchObject({
|
||||
sourceRoot: fixture.instanceRoot,
|
||||
destinationRoot: fixture.spaceRoot,
|
||||
movedPaths: result.movedPaths,
|
||||
});
|
||||
|
||||
expect(resolveConfigPath()).toBe(path.join(fixture.spaceRoot, "config.json"));
|
||||
expect(readConfig()?.database.embeddedPostgresDataDir).toBe(path.join(fixture.spaceRoot, "db"));
|
||||
});
|
||||
|
||||
it("refuses to merge when destination data already exists", async () => {
|
||||
const fixture = createLegacyInstallFixture();
|
||||
writeText(path.join(fixture.spaceRoot, "db", "PG_VERSION"), "existing\n");
|
||||
|
||||
await expect(migrateDefaultSpaceInstall({
|
||||
serverRunningCheck: async () => false,
|
||||
})).rejects.toThrow(/destination paths already exist:[\s\S]*db/);
|
||||
|
||||
expect(fs.readFileSync(path.join(fixture.instanceRoot, "db", "PG_VERSION"), "utf8")).toBe("17\n");
|
||||
expect(fs.readFileSync(path.join(fixture.spaceRoot, "db", "PG_VERSION"), "utf8")).toBe("existing\n");
|
||||
});
|
||||
|
||||
it("refuses to migrate when the server appears to be running", async () => {
|
||||
const fixture = createLegacyInstallFixture();
|
||||
|
||||
await expect(migrateDefaultSpaceInstall({
|
||||
serverRunningCheck: async () => true,
|
||||
})).rejects.toThrow(/server appears to be running/);
|
||||
|
||||
expect(fs.existsSync(path.join(fixture.instanceRoot, "config.json"))).toBe(true);
|
||||
expect(fs.existsSync(path.join(fixture.spaceRoot, "config.json"))).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -3,6 +3,7 @@ import { printAcpxStreamEvent } from "@paperclipai/adapter-acpx-local/cli";
|
||||
import { printClaudeStreamEvent } from "@paperclipai/adapter-claude-local/cli";
|
||||
import { printCodexStreamEvent } from "@paperclipai/adapter-codex-local/cli";
|
||||
import { printCursorStreamEvent } from "@paperclipai/adapter-cursor-local/cli";
|
||||
import { printCursorCloudEvent } from "@paperclipai/adapter-cursor-cloud/cli";
|
||||
import { printGeminiStreamEvent } from "@paperclipai/adapter-gemini-local/cli";
|
||||
import { printOpenCodeStreamEvent } from "@paperclipai/adapter-opencode-local/cli";
|
||||
import { printPiStreamEvent } from "@paperclipai/adapter-pi-local/cli";
|
||||
@@ -40,6 +41,11 @@ const cursorLocalCLIAdapter: CLIAdapterModule = {
|
||||
formatStdoutEvent: printCursorStreamEvent,
|
||||
};
|
||||
|
||||
const cursorCloudCLIAdapter: CLIAdapterModule = {
|
||||
type: "cursor_cloud",
|
||||
formatStdoutEvent: printCursorCloudEvent,
|
||||
};
|
||||
|
||||
const geminiLocalCLIAdapter: CLIAdapterModule = {
|
||||
type: "gemini_local",
|
||||
formatStdoutEvent: printGeminiStreamEvent,
|
||||
@@ -58,6 +64,7 @@ const adaptersByType = new Map<string, CLIAdapterModule>(
|
||||
openCodeLocalCLIAdapter,
|
||||
piLocalCLIAdapter,
|
||||
cursorLocalCLIAdapter,
|
||||
cursorCloudCLIAdapter,
|
||||
geminiLocalCLIAdapter,
|
||||
openclawGatewayCLIAdapter,
|
||||
processCLIAdapter,
|
||||
|
||||
@@ -5,6 +5,9 @@ import type { PaperclipConfig } from "../config/schema.js";
|
||||
import type { CheckResult } from "./index.js";
|
||||
import { resolveRuntimeLikePath } from "./path-resolver.js";
|
||||
|
||||
const AWS_CREDENTIAL_SOURCE_HINT =
|
||||
"Provide AWS runtime credentials through the AWS SDK default credential chain: IAM role/workload identity, AWS_PROFILE/SSO/shared credentials, web identity, container/instance metadata, or short-lived shell credentials";
|
||||
|
||||
function decodeMasterKey(raw: string): Buffer | null {
|
||||
const trimmed = raw.trim();
|
||||
if (!trimmed) return null;
|
||||
@@ -47,13 +50,16 @@ function withStrictModeNote(
|
||||
|
||||
export function secretsCheck(config: PaperclipConfig, configPath?: string): CheckResult {
|
||||
const provider = config.secrets.provider;
|
||||
if (provider === "aws_secrets_manager") {
|
||||
return withStrictModeNote(awsSecretsManagerCheck(), config);
|
||||
}
|
||||
if (provider !== "local_encrypted") {
|
||||
return {
|
||||
name: "Secrets adapter",
|
||||
status: "fail",
|
||||
message: `${provider} is configured, but this build only supports local_encrypted`,
|
||||
message: `${provider} is configured, but this build only supports local_encrypted and aws_secrets_manager`,
|
||||
canRepair: false,
|
||||
repairHint: "Run `paperclipai configure --section secrets` and set provider to local_encrypted",
|
||||
repairHint: "Run `paperclipai configure --section secrets` and choose local_encrypted or aws_secrets_manager",
|
||||
};
|
||||
}
|
||||
|
||||
@@ -135,12 +141,100 @@ export function secretsCheck(config: PaperclipConfig, configPath?: string): Chec
|
||||
};
|
||||
}
|
||||
|
||||
const keyMode = fs.statSync(keyFilePath).mode & 0o777;
|
||||
const permissionWarning =
|
||||
(keyMode & 0o077) !== 0
|
||||
? `; key file permissions are ${keyMode.toString(8)} (run chmod 600 ${keyFilePath})`
|
||||
: "";
|
||||
|
||||
return withStrictModeNote(
|
||||
{
|
||||
name: "Secrets adapter",
|
||||
status: "pass",
|
||||
message: `Local encrypted provider configured with key file ${keyFilePath}`,
|
||||
status: permissionWarning ? "warn" : "pass",
|
||||
message: `Local encrypted provider configured with key file ${keyFilePath}${permissionWarning}`,
|
||||
repairHint: permissionWarning
|
||||
? "Restrict the local encrypted secrets key file to owner read/write permissions"
|
||||
: undefined,
|
||||
},
|
||||
config,
|
||||
);
|
||||
}
|
||||
|
||||
function awsSecretsManagerCheck(): CheckResult {
|
||||
const missingConfig = missingAwsSecretsManagerConfig();
|
||||
if (missingConfig.length > 0) {
|
||||
return {
|
||||
name: "Secrets adapter",
|
||||
status: "fail",
|
||||
message: `AWS Secrets Manager provider is missing non-secret config: ${missingConfig.join(", ")}`,
|
||||
canRepair: false,
|
||||
repairHint:
|
||||
`Set ${missingConfig.join(", ")} in the Paperclip server runtime. ${AWS_CREDENTIAL_SOURCE_HINT}. Do not store AWS root credentials or long-lived IAM user keys in Paperclip secrets.`,
|
||||
};
|
||||
}
|
||||
|
||||
const staticEnvCredentials =
|
||||
process.env.AWS_ACCESS_KEY_ID?.trim() && process.env.AWS_SECRET_ACCESS_KEY?.trim();
|
||||
const credentialSource = detectedAwsCredentialSources().join(", ");
|
||||
const message =
|
||||
`AWS Secrets Manager provider configured for deployment ${process.env.PAPERCLIP_SECRETS_AWS_DEPLOYMENT_ID}; ` +
|
||||
`runtime credentials source: ${credentialSource || "AWS SDK default credential chain"}`;
|
||||
|
||||
if (staticEnvCredentials) {
|
||||
return {
|
||||
name: "Secrets adapter",
|
||||
status: "warn",
|
||||
message,
|
||||
canRepair: false,
|
||||
repairHint:
|
||||
"AWS static environment credentials are visible. Use only short-lived shell credentials locally; prefer IAM role/workload identity for hosted deployments and never store AWS access keys in Paperclip company secrets.",
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
name: "Secrets adapter",
|
||||
status: "pass",
|
||||
message,
|
||||
};
|
||||
}
|
||||
|
||||
function missingAwsSecretsManagerConfig(): string[] {
|
||||
const missing: string[] = [];
|
||||
if (
|
||||
!(
|
||||
process.env.PAPERCLIP_SECRETS_AWS_REGION?.trim() ||
|
||||
process.env.AWS_REGION?.trim() ||
|
||||
process.env.AWS_DEFAULT_REGION?.trim()
|
||||
)
|
||||
) {
|
||||
missing.push("PAPERCLIP_SECRETS_AWS_REGION or AWS_REGION/AWS_DEFAULT_REGION");
|
||||
}
|
||||
if (!process.env.PAPERCLIP_SECRETS_AWS_DEPLOYMENT_ID?.trim()) {
|
||||
missing.push("PAPERCLIP_SECRETS_AWS_DEPLOYMENT_ID");
|
||||
}
|
||||
if (!process.env.PAPERCLIP_SECRETS_AWS_KMS_KEY_ID?.trim()) {
|
||||
missing.push("PAPERCLIP_SECRETS_AWS_KMS_KEY_ID");
|
||||
}
|
||||
return missing;
|
||||
}
|
||||
|
||||
function detectedAwsCredentialSources(): string[] {
|
||||
const sources: string[] = [];
|
||||
if (process.env.AWS_PROFILE?.trim()) sources.push("AWS_PROFILE/shared config");
|
||||
if (process.env.AWS_ACCESS_KEY_ID?.trim() && process.env.AWS_SECRET_ACCESS_KEY?.trim()) {
|
||||
sources.push("temporary AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY environment credentials");
|
||||
}
|
||||
if (process.env.AWS_WEB_IDENTITY_TOKEN_FILE?.trim() && process.env.AWS_ROLE_ARN?.trim()) {
|
||||
sources.push("AWS web identity token");
|
||||
}
|
||||
if (
|
||||
process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI?.trim() ||
|
||||
process.env.AWS_CONTAINER_CREDENTIALS_FULL_URI?.trim()
|
||||
) {
|
||||
sources.push("AWS container credentials endpoint");
|
||||
}
|
||||
if (process.env.AWS_SHARED_CREDENTIALS_FILE?.trim() || process.env.AWS_CONFIG_FILE?.trim()) {
|
||||
sources.push("custom AWS shared credentials/config file");
|
||||
}
|
||||
return sources;
|
||||
}
|
||||
|
||||
501
cli/src/commands/client/secrets.ts
Normal file
@@ -0,0 +1,501 @@
|
||||
import { Command } from "commander";
|
||||
import pc from "picocolors";
|
||||
import type {
|
||||
Agent,
|
||||
AgentEnvConfig,
|
||||
CompanyPortabilityEnvInput,
|
||||
CompanyPortabilityExportPreviewResult,
|
||||
CompanyPortabilityInclude,
|
||||
CompanySecret,
|
||||
EnvBinding,
|
||||
SecretProvider,
|
||||
SecretProviderDescriptor,
|
||||
} from "@paperclipai/shared";
|
||||
import {
|
||||
addCommonClientOptions,
|
||||
formatInlineRecord,
|
||||
handleCommandError,
|
||||
printOutput,
|
||||
resolveCommandContext,
|
||||
type BaseClientOptions,
|
||||
} from "./common.js";
|
||||
|
||||
/** Options for `secrets list`. */
interface SecretListOptions extends BaseClientOptions {
  companyId?: string;
}

/** Options for `secrets declarations`. */
interface SecretDeclarationsOptions extends BaseClientOptions {
  companyId?: string;
  // Comma-separated include set; parsed by parseSecretsInclude.
  include?: string;
  kind?: "all" | "secret" | "plain";
}

/** Options for `secrets create` (Paperclip-managed secret). */
interface SecretCreateOptions extends BaseClientOptions {
  companyId?: string;
  name?: string;
  key?: string;
  provider?: SecretProvider;
  // Exactly one of value / valueEnv may be supplied; see readValueFromOptions.
  value?: string;
  valueEnv?: string;
  description?: string;
}

/** Options for `secrets link` (external provider-owned secret). */
interface SecretLinkOptions extends BaseClientOptions {
  companyId?: string;
  name?: string;
  key?: string;
  provider?: SecretProvider;
  // Provider-side ARN/name/path identifying the external secret.
  externalRef?: string;
  providerVersionRef?: string;
  description?: string;
}

/** Options for `secrets doctor` (also reused by `secrets providers`). */
interface SecretDoctorOptions extends BaseClientOptions {
  companyId?: string;
}

/** Options for `secrets migrate-inline-env`. */
interface SecretMigrateInlineEnvOptions extends BaseClientOptions {
  companyId?: string;
  // When false/absent the command is a dry run.
  apply?: boolean;
}

/** One provider row returned by the server-side health check. */
interface SecretProviderHealth {
  provider: SecretProvider;
  status: "ok" | "warn" | "error";
  message: string;
  warnings?: string[];
  backupGuidance?: string[];
  // Free-form extras; keys read here: missingConfig, credentialSource,
  // detectedCredentialSources.
  details?: Record<string, unknown>;
}

/** Response shape of GET .../secret-providers/health. */
interface SecretProviderHealthResponse {
  providers: SecretProviderHealth[];
}

/** One inline agent env value eligible for migration to a secret reference. */
export interface InlineSecretMigrationCandidate {
  agentId: string;
  agentName: string;
  envKey: string;
  // Derived via buildInlineMigrationSecretName.
  secretName: string;
  // Set when a secret with that name already exists (rotate instead of create).
  existingSecretId: string | null;
}
|
||||
|
||||
// Heuristic match for env var names that likely hold credentials:
// tokens, API keys, passwords, private keys, cookies, connection strings.
const SENSITIVE_ENV_KEY_RE =
  /(^token$|[-_]?token$|api[-_]?key|access[-_]?token|auth(?:_?token)?|authorization|bearer|secret|passwd|password|credential|jwt|private[-_]?key|cookie|connectionstring)/i;

// Include set used by `secrets declarations` when --include is omitted or blank.
const DEFAULT_DECLARATION_INCLUDE: CompanyPortabilityInclude = {
  company: true,
  agents: true,
  projects: true,
  issues: false,
  skills: false,
};
|
||||
|
||||
export function parseSecretsInclude(input: string | undefined): CompanyPortabilityInclude {
|
||||
if (!input?.trim()) return { ...DEFAULT_DECLARATION_INCLUDE };
|
||||
const values = input.split(",").map((part) => part.trim().toLowerCase()).filter(Boolean);
|
||||
const include = {
|
||||
company: values.includes("company"),
|
||||
agents: values.includes("agents"),
|
||||
projects: values.includes("projects"),
|
||||
issues: values.includes("issues") || values.includes("tasks"),
|
||||
skills: values.includes("skills"),
|
||||
};
|
||||
if (!Object.values(include).some(Boolean)) {
|
||||
throw new Error("Invalid --include value. Use one or more of: company,agents,projects,issues,tasks,skills");
|
||||
}
|
||||
return include;
|
||||
}
|
||||
|
||||
export function isSensitiveEnvKey(key: string): boolean {
|
||||
return SENSITIVE_ENV_KEY_RE.test(key);
|
||||
}
|
||||
|
||||
export function toPlainEnvValue(binding: unknown): string | null {
|
||||
if (typeof binding === "string") return binding;
|
||||
if (typeof binding !== "object" || binding === null || Array.isArray(binding)) return null;
|
||||
const record = binding as Record<string, unknown>;
|
||||
if (record.type === "plain" && typeof record.value === "string") return record.value;
|
||||
return null;
|
||||
}
|
||||
|
||||
export function buildInlineMigrationSecretName(agentId: string, key: string): string {
|
||||
return `agent_${agentId.slice(0, 8)}_${key.toLowerCase()}`;
|
||||
}
|
||||
|
||||
export function collectInlineSecretMigrationCandidates(
|
||||
agents: Agent[],
|
||||
existingSecrets: CompanySecret[],
|
||||
): InlineSecretMigrationCandidate[] {
|
||||
const secretByName = new Map(existingSecrets.map((secret) => [secret.name, secret]));
|
||||
const candidates: InlineSecretMigrationCandidate[] = [];
|
||||
|
||||
for (const agent of agents) {
|
||||
const env = asRecord(agent.adapterConfig.env);
|
||||
if (!env) continue;
|
||||
for (const [envKey, binding] of Object.entries(env)) {
|
||||
if (!isSensitiveEnvKey(envKey)) continue;
|
||||
const plain = toPlainEnvValue(binding);
|
||||
if (plain === null || plain.trim().length === 0) continue;
|
||||
const secretName = buildInlineMigrationSecretName(agent.id, envKey);
|
||||
candidates.push({
|
||||
agentId: agent.id,
|
||||
agentName: agent.name,
|
||||
envKey,
|
||||
secretName,
|
||||
existingSecretId: secretByName.get(secretName)?.id ?? null,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return candidates;
|
||||
}
|
||||
|
||||
export function buildMigratedAgentEnv(
|
||||
env: Record<string, unknown>,
|
||||
secretIdByEnvKey: Map<string, string>,
|
||||
): AgentEnvConfig {
|
||||
const next: AgentEnvConfig = { ...(env as Record<string, EnvBinding>) };
|
||||
for (const [envKey, secretId] of secretIdByEnvKey) {
|
||||
next[envKey] = {
|
||||
type: "secret_ref",
|
||||
secretId,
|
||||
version: "latest",
|
||||
};
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function readValueFromOptions(opts: SecretCreateOptions): string {
|
||||
if (opts.value !== undefined && opts.valueEnv !== undefined) {
|
||||
throw new Error("Use only one of --value or --value-env.");
|
||||
}
|
||||
if (opts.valueEnv !== undefined) {
|
||||
const value = process.env[opts.valueEnv];
|
||||
if (!value) throw new Error(`Environment variable ${opts.valueEnv} is empty or unset.`);
|
||||
return value;
|
||||
}
|
||||
if (opts.value !== undefined) return opts.value;
|
||||
throw new Error("Secret value is required. Pass --value or --value-env.");
|
||||
}
|
||||
|
||||
function renderDeclaration(input: CompanyPortabilityEnvInput): Record<string, unknown> {
|
||||
const scope = input.agentSlug
|
||||
? `agent:${input.agentSlug}`
|
||||
: input.projectSlug
|
||||
? `project:${input.projectSlug}`
|
||||
: "company";
|
||||
return {
|
||||
key: input.key,
|
||||
scope,
|
||||
kind: input.kind,
|
||||
requirement: input.requirement,
|
||||
portability: input.portability,
|
||||
hasDefault: input.defaultValue !== null && input.defaultValue.length > 0,
|
||||
description: input.description,
|
||||
};
|
||||
}
|
||||
|
||||
function renderSecret(secret: CompanySecret): Record<string, unknown> {
|
||||
return {
|
||||
id: secret.id,
|
||||
name: secret.name,
|
||||
key: secret.key,
|
||||
provider: secret.provider,
|
||||
status: secret.status,
|
||||
managedMode: secret.managedMode,
|
||||
latestVersion: secret.latestVersion,
|
||||
externalRef: secret.externalRef ? "yes" : "no",
|
||||
};
|
||||
}
|
||||
|
||||
function printProviderHealth(rows: SecretProviderHealth[], json: boolean): void {
|
||||
if (json) {
|
||||
printOutput(rows, { json: true });
|
||||
return;
|
||||
}
|
||||
if (rows.length === 0) {
|
||||
printOutput([], { json: false });
|
||||
return;
|
||||
}
|
||||
for (const row of rows) {
|
||||
console.log(
|
||||
formatInlineRecord({
|
||||
id: row.provider,
|
||||
status: row.status,
|
||||
message: row.message,
|
||||
}),
|
||||
);
|
||||
for (const warning of row.warnings ?? []) {
|
||||
console.log(pc.yellow(`warning=${warning}`));
|
||||
}
|
||||
const missingConfig = asStringArray(row.details?.missingConfig);
|
||||
if (missingConfig.length > 0) {
|
||||
console.log(pc.dim(`missingConfig=${missingConfig.join(",")}`));
|
||||
}
|
||||
const credentialSource = typeof row.details?.credentialSource === "string"
|
||||
? row.details.credentialSource
|
||||
: null;
|
||||
if (credentialSource) {
|
||||
console.log(pc.dim(`credentialSource=${credentialSource}`));
|
||||
}
|
||||
const detectedCredentialSources = asStringArray(row.details?.detectedCredentialSources);
|
||||
if (detectedCredentialSources.length > 0) {
|
||||
console.log(pc.dim(`detectedCredentialSources=${detectedCredentialSources.join(",")}`));
|
||||
}
|
||||
for (const guidance of row.backupGuidance ?? []) {
|
||||
console.log(pc.dim(`backup=${guidance}`));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function asStringArray(value: unknown): string[] {
|
||||
return Array.isArray(value)
|
||||
? value.filter((entry): entry is string => typeof entry === "string" && entry.length > 0)
|
||||
: [];
|
||||
}
|
||||
|
||||
/**
 * Dry-run or apply the inline-env -> secret-ref migration for a company.
 *
 * Without --apply, prints a summary (agents touched, secrets to create
 * vs rotate) plus the full candidate list, changing nothing. With
 * --apply, two sequential passes run:
 *   1. per candidate, rotate the existing secret or create a new
 *      local_encrypted secret holding the current inline value;
 *   2. per agent, rewrite migrated env keys to secret_ref bindings.
 * API calls are awaited one at a time, so a mid-run failure leaves a
 * consistent partial state that a re-run can pick up.
 */
async function migrateInlineEnv(opts: SecretMigrateInlineEnvOptions): Promise<void> {
  const ctx = resolveCommandContext(opts, { requireCompany: true });
  // requireCompany guarantees companyId is present here.
  const companyId = ctx.companyId!;
  const agents = (await ctx.api.get<Agent[]>(`/api/companies/${companyId}/agents`)) ?? [];
  const secrets = (await ctx.api.get<CompanySecret[]>(`/api/companies/${companyId}/secrets`)) ?? [];
  const candidates = collectInlineSecretMigrationCandidates(agents, secrets);

  if (!opts.apply) {
    // Dry run: report what would change without mutating anything.
    printOutput(
      {
        apply: false,
        agentsToUpdate: new Set(candidates.map((candidate) => candidate.agentId)).size,
        secretsToCreate: candidates.filter((candidate) => !candidate.existingSecretId).length,
        secretsToRotate: candidates.filter((candidate) => candidate.existingSecretId).length,
        candidates,
      },
      { json: ctx.json },
    );
    if (!ctx.json) {
      console.log(pc.dim("Re-run with --apply to create/rotate secrets and update agent env bindings."));
    }
    return;
  }

  // Map of "<agentId>:<envKey>" -> secretId, consumed by the second pass.
  const createdOrRotated = new Map<string, string>();
  let createdSecrets = 0;
  let rotatedSecrets = 0;

  for (const candidate of candidates) {
    // Re-read the value from the fetched agent rather than trusting a
    // stale candidate snapshot.
    const agent = agents.find((row) => row.id === candidate.agentId);
    const env = asRecord(agent?.adapterConfig.env);
    const value = env ? toPlainEnvValue(env[candidate.envKey]) : null;
    if (!value) continue;

    if (candidate.existingSecretId) {
      // A secret with the derived name already exists: rotate it in place.
      await ctx.api.post(`/api/secrets/${candidate.existingSecretId}/rotate`, { value });
      createdOrRotated.set(`${candidate.agentId}:${candidate.envKey}`, candidate.existingSecretId);
      rotatedSecrets += 1;
      continue;
    }

    const created = await ctx.api.post<CompanySecret>(`/api/companies/${companyId}/secrets`, {
      name: candidate.secretName,
      provider: "local_encrypted",
      value,
      description: `Migrated from agent ${candidate.agentId} env ${candidate.envKey}`,
    });
    if (!created) throw new Error(`Secret create returned no data for ${candidate.secretName}`);
    createdOrRotated.set(`${candidate.agentId}:${candidate.envKey}`, created.id);
    createdSecrets += 1;
  }

  // Second pass: rewrite each affected agent's env bindings.
  let updatedAgents = 0;
  for (const agent of agents) {
    const env = asRecord(agent.adapterConfig.env);
    if (!env) continue;
    const secretIdByEnvKey = new Map<string, string>();
    for (const [key] of Object.entries(env)) {
      const secretId = createdOrRotated.get(`${agent.id}:${key}`);
      if (secretId) secretIdByEnvKey.set(key, secretId);
    }
    if (secretIdByEnvKey.size === 0) continue;
    const adapterConfig = {
      ...agent.adapterConfig,
      env: buildMigratedAgentEnv(env, secretIdByEnvKey),
    };
    await ctx.api.patch(`/api/agents/${agent.id}`, {
      adapterConfig,
      // NOTE(review): presumably tells the server to replace rather than
      // merge adapterConfig so the inline values do not linger — confirm
      // the PATCH handler's semantics.
      replaceAdapterConfig: true,
    });
    updatedAgents += 1;
  }

  printOutput(
    {
      apply: true,
      updatedAgents,
      createdSecrets,
      rotatedSecrets,
    },
    { json: ctx.json },
  );
}
|
||||
|
||||
/**
 * Register the `secrets` command group on the CLI program: list,
 * declarations, create, link, doctor, providers, and migrate-inline-env.
 * Every subcommand requires a company id and routes failures through
 * handleCommandError.
 */
export function registerSecretCommands(program: Command): void {
  const secrets = program.command("secrets").description("Secret declaration and provider operations");

  // secrets list — metadata only; secret values are never returned.
  addCommonClientOptions(
    secrets
      .command("list")
      .description("List secret metadata for a company")
      .requiredOption("-C, --company-id <id>", "Company ID")
      .action(async (opts: SecretListOptions) => {
        try {
          const ctx = resolveCommandContext(opts, { requireCompany: true });
          const rows = (await ctx.api.get<CompanySecret[]>(`/api/companies/${ctx.companyId}/secrets`)) ?? [];
          printOutput(ctx.json ? rows : rows.map(renderSecret), { json: ctx.json });
        } catch (err) {
          handleCommandError(err);
        }
      }),
  );

  // secrets declarations — derives env declarations from an export preview.
  addCommonClientOptions(
    secrets
      .command("declarations")
      .description("List portable env declarations emitted by company export")
      .requiredOption("-C, --company-id <id>", "Company ID")
      .option("--include <values>", "Comma-separated include set: company,agents,projects,issues,tasks,skills", "company,agents,projects")
      .option("--kind <kind>", "Filter declarations: all | secret | plain", "all")
      .action(async (opts: SecretDeclarationsOptions) => {
        try {
          const ctx = resolveCommandContext(opts, { requireCompany: true });
          const kind = opts.kind ?? "all";
          if (!["all", "secret", "plain"].includes(kind)) {
            throw new Error("Invalid --kind value. Use: all, secret, plain");
          }
          const preview = await ctx.api.post<CompanyPortabilityExportPreviewResult>(
            `/api/companies/${ctx.companyId}/exports/preview`,
            { include: parseSecretsInclude(opts.include) },
          );
          const declarations = (preview?.manifest.envInputs ?? [])
            .filter((entry) => kind === "all" || entry.kind === kind);
          printOutput(ctx.json ? declarations : declarations.map(renderDeclaration), { json: ctx.json });
        } catch (err) {
          handleCommandError(err);
        }
      }),
  );

  // secrets create — Paperclip stores the value (from --value or --value-env).
  addCommonClientOptions(
    secrets
      .command("create")
      .description("Create a Paperclip-managed secret")
      .requiredOption("-C, --company-id <id>", "Company ID")
      .requiredOption("--name <name>", "Secret display name")
      .option("--key <key>", "Portable secret key")
      .option("--provider <provider>", "Secret provider id")
      .option("--value <value>", "Secret value")
      .option("--value-env <name>", "Read secret value from an environment variable")
      .option("--description <text>", "Description")
      .action(async (opts: SecretCreateOptions) => {
        try {
          const ctx = resolveCommandContext(opts, { requireCompany: true });
          const created = await ctx.api.post<CompanySecret>(`/api/companies/${ctx.companyId}/secrets`, {
            name: opts.name,
            key: opts.key,
            provider: opts.provider,
            value: readValueFromOptions(opts),
            description: opts.description,
          });
          printOutput(ctx.json ? created : renderSecret(created!), { json: ctx.json });
        } catch (err) {
          handleCommandError(err);
        }
      }),
  );

  // secrets link — reference an external secret; no value passes through Paperclip.
  addCommonClientOptions(
    secrets
      .command("link")
      .description("Link an external provider-owned secret without storing its value in Paperclip")
      .requiredOption("-C, --company-id <id>", "Company ID")
      .requiredOption("--name <name>", "Secret display name")
      .requiredOption("--provider <provider>", "Secret provider id")
      .requiredOption("--external-ref <ref>", "Provider secret ARN/name/path/reference")
      .option("--key <key>", "Portable secret key")
      .option("--provider-version-ref <ref>", "Provider version id or label")
      .option("--description <text>", "Description")
      .action(async (opts: SecretLinkOptions) => {
        try {
          const ctx = resolveCommandContext(opts, { requireCompany: true });
          const created = await ctx.api.post<CompanySecret>(`/api/companies/${ctx.companyId}/secrets`, {
            name: opts.name,
            key: opts.key,
            provider: opts.provider,
            managedMode: "external_reference",
            externalRef: opts.externalRef,
            providerVersionRef: opts.providerVersionRef,
            description: opts.description,
          });
          printOutput(ctx.json ? created : renderSecret(created!), { json: ctx.json });
        } catch (err) {
          handleCommandError(err);
        }
      }),
  );

  // secrets doctor — server-side provider health checks.
  addCommonClientOptions(
    secrets
      .command("doctor")
      .description("Run secret provider health checks through the Paperclip API")
      .requiredOption("-C, --company-id <id>", "Company ID")
      .action(async (opts: SecretDoctorOptions) => {
        try {
          const ctx = resolveCommandContext(opts, { requireCompany: true });
          const health = await ctx.api.get<SecretProviderHealthResponse>(
            `/api/companies/${ctx.companyId}/secret-providers/health`,
          );
          printProviderHealth(health?.providers ?? [], ctx.json);
        } catch (err) {
          handleCommandError(err);
        }
      }),
  );

  // secrets providers — descriptor listing (reuses SecretDoctorOptions).
  addCommonClientOptions(
    secrets
      .command("providers")
      .description("List configured secret provider descriptors")
      .requiredOption("-C, --company-id <id>", "Company ID")
      .action(async (opts: SecretDoctorOptions) => {
        try {
          const ctx = resolveCommandContext(opts, { requireCompany: true });
          const rows = (await ctx.api.get<SecretProviderDescriptor[]>(
            `/api/companies/${ctx.companyId}/secret-providers`,
          )) ?? [];
          printOutput(rows, { json: ctx.json });
        } catch (err) {
          handleCommandError(err);
        }
      }),
  );

  // secrets migrate-inline-env — dry run unless --apply; see migrateInlineEnv.
  addCommonClientOptions(
    secrets
      .command("migrate-inline-env")
      .description("Migrate inline sensitive agent env values into secret references")
      .requiredOption("-C, --company-id <id>", "Company ID")
      .option("--apply", "Persist changes; default is a dry run", false)
      .action(async (opts: SecretMigrateInlineEnvOptions) => {
        try {
          await migrateInlineEnv(opts);
        } catch (err) {
          handleCommandError(err);
        }
      }),
  );
}
|
||||
@@ -15,7 +15,6 @@ import {
|
||||
resolveDefaultLogsDir,
|
||||
resolvePaperclipInstanceId,
|
||||
} from "../config/home.js";
|
||||
import { ensureDefaultSpaceRegistry } from "@paperclipai/shared/space-paths";
|
||||
import { printPaperclipCliBanner } from "../utils/banner.js";
|
||||
|
||||
type Section = "llm" | "database" | "logging" | "server" | "storage" | "secrets";
|
||||
@@ -178,9 +177,6 @@ export async function configure(opts: {
|
||||
config.$meta.updatedAt = new Date().toISOString();
|
||||
config.$meta.source = "configure";
|
||||
|
||||
if (configPath === resolveConfigPath()) {
|
||||
ensureDefaultSpaceRegistry({ source: "configure" });
|
||||
}
|
||||
writeConfig(config, opts.config);
|
||||
p.log.success(`${SECTION_LABELS[section]} configuration updated.`);
|
||||
|
||||
|
||||
@@ -36,7 +36,6 @@ import {
|
||||
resolveDefaultLogsDir,
|
||||
resolvePaperclipInstanceId,
|
||||
} from "../config/home.js";
|
||||
import { ensureDefaultSpaceRegistry } from "@paperclipai/shared/space-paths";
|
||||
import { bootstrapCeoInvite } from "./auth-bootstrap-ceo.js";
|
||||
import { printPaperclipCliBanner } from "../utils/banner.js";
|
||||
import {
|
||||
@@ -334,7 +333,7 @@ export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
const instance = describeLocalInstancePaths(resolvePaperclipInstanceId());
|
||||
p.log.message(
|
||||
pc.dim(
|
||||
`Local home: ${instance.homeDir} | instance: ${instance.instanceId} | space: ${instance.activeSpaceId} | config: ${configPath}`,
|
||||
`Local home: ${instance.homeDir} | instance: ${instance.instanceId} | config: ${configPath}`,
|
||||
),
|
||||
);
|
||||
|
||||
@@ -626,10 +625,6 @@ export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
p.log.message(pc.dim(`Using existing local secrets key file at ${keyResult.path}`));
|
||||
}
|
||||
|
||||
if (configPath === resolveConfigPath()) {
|
||||
ensureDefaultSpaceRegistry({ source: "onboard" });
|
||||
}
|
||||
|
||||
writeConfig(config, opts.config);
|
||||
|
||||
if (tc) trackInstallCompleted(tc, {
|
||||
|
||||
@@ -1,267 +0,0 @@
|
||||
import fs from "node:fs";
|
||||
import path from "node:path";
|
||||
import { Command } from "commander";
|
||||
import pc from "picocolors";
|
||||
import {
|
||||
DEFAULT_PAPERCLIP_SPACE_ID,
|
||||
DEFAULT_SPACE_ADAPTER_LOCAL_PATH_NAMES,
|
||||
DEFAULT_SPACE_OWNED_PATH_NAMES,
|
||||
createDefaultSpaceRegistry,
|
||||
expandHomePrefix,
|
||||
isPaperclipRuntimeConfig,
|
||||
resolvePaperclipInstanceConfigPath,
|
||||
resolvePaperclipInstanceId,
|
||||
resolvePaperclipInstanceRoot,
|
||||
resolvePaperclipSpacesRoot,
|
||||
} from "@paperclipai/shared/space-paths";
|
||||
|
||||
/** Generic parsed-JSON object. */
type JsonObject = Record<string, unknown>;

/** Everything needed to describe (and preflight) a default-space migration. */
export interface DefaultSpaceMigrationPlan {
  instanceId: string;
  // Legacy instance root holding the data to move.
  sourceRoot: string;
  // spaces/default root the data moves into.
  destinationRoot: string;
  // True when the source config.json still holds a legacy runtime config.
  legacyRuntimeConfig: boolean;
  // Path names (relative to sourceRoot) that exist and would be moved.
  sourcePathNames: string[];
  // Moves that would overwrite something already at the destination.
  conflicts: Array<{ pathName: string; sourcePath: string; destinationPath: string }>;
}

/** Outcome of migrateDefaultSpaceInstall. */
export interface DefaultSpaceMigrationResult {
  status: "migrated" | "noop" | "dry_run";
  plan: DefaultSpaceMigrationPlan;
  movedPaths: string[];
  // Registry marker written after a real migration; null for noop/dry-run.
  markerPath: string | null;
}

/** Caller-tunable knobs for the migration. */
export interface MigrateDefaultSpaceOptions {
  instanceId?: string;
  dryRun?: boolean;
  skipServerCheck?: boolean;
  // Injectable for tests; defaults to defaultServerRunningCheck.
  serverRunningCheck?: (plan: DefaultSpaceMigrationPlan) => Promise<boolean>;
}

// All path names considered for migration: space-owned data plus
// adapter-local paths.
const MIGRATION_PATH_NAMES = [
  ...DEFAULT_SPACE_OWNED_PATH_NAMES,
  ...DEFAULT_SPACE_ADAPTER_LOCAL_PATH_NAMES,
] as const;
|
||||
|
||||
function readJsonIfPresent(filePath: string): unknown | null {
|
||||
if (!fs.existsSync(filePath)) return null;
|
||||
try {
|
||||
return JSON.parse(fs.readFileSync(filePath, "utf8"));
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function isObject(value: unknown): value is JsonObject {
|
||||
return typeof value === "object" && value !== null && !Array.isArray(value);
|
||||
}
|
||||
|
||||
function isPathWithin(rootPath: string, candidatePath: string): boolean {
|
||||
const root = path.resolve(rootPath);
|
||||
const candidate = path.resolve(candidatePath);
|
||||
const relative = path.relative(root, candidate);
|
||||
return relative === "" || (!relative.startsWith("..") && !path.isAbsolute(relative));
|
||||
}
|
||||
|
||||
function rewriteSpaceRootString(value: string, sourceRoot: string, destinationRoot: string): string {
|
||||
const resolved = path.resolve(expandHomePrefix(value));
|
||||
if (!isPathWithin(sourceRoot, resolved)) return value;
|
||||
const relative = path.relative(path.resolve(sourceRoot), resolved);
|
||||
return path.resolve(destinationRoot, relative);
|
||||
}
|
||||
|
||||
function rewriteConfigPaths(value: unknown, sourceRoot: string, destinationRoot: string): unknown {
|
||||
if (typeof value === "string") return rewriteSpaceRootString(value, sourceRoot, destinationRoot);
|
||||
if (Array.isArray(value)) return value.map((entry) => rewriteConfigPaths(entry, sourceRoot, destinationRoot));
|
||||
if (!isObject(value)) return value;
|
||||
return Object.fromEntries(
|
||||
Object.entries(value).map(([key, entry]) => [key, rewriteConfigPaths(entry, sourceRoot, destinationRoot)]),
|
||||
);
|
||||
}
|
||||
|
||||
function writeJson(filePath: string, value: unknown, mode = 0o600): void {
|
||||
fs.mkdirSync(path.dirname(filePath), { recursive: true });
|
||||
fs.writeFileSync(filePath, `${JSON.stringify(value, null, 2)}\n`, { mode });
|
||||
}
|
||||
|
||||
function atomicWriteJson(filePath: string, value: unknown, mode = 0o600): void {
|
||||
fs.mkdirSync(path.dirname(filePath), { recursive: true });
|
||||
const tmpPath = path.join(path.dirname(filePath), `.${path.basename(filePath)}.${process.pid}.tmp`);
|
||||
writeJson(tmpPath, value, mode);
|
||||
fs.renameSync(tmpPath, filePath);
|
||||
}
|
||||
|
||||
export function buildDefaultSpaceMigrationPlan(options: Pick<MigrateDefaultSpaceOptions, "instanceId"> = {}): DefaultSpaceMigrationPlan {
|
||||
const instanceId = resolvePaperclipInstanceId(options.instanceId);
|
||||
const sourceRoot = resolvePaperclipInstanceRoot({ instanceId });
|
||||
const destinationRoot = path.resolve(resolvePaperclipSpacesRoot({ instanceId }), DEFAULT_PAPERCLIP_SPACE_ID);
|
||||
const instanceConfigPath = resolvePaperclipInstanceConfigPath({ instanceId });
|
||||
const legacyRuntimeConfig = isPaperclipRuntimeConfig(readJsonIfPresent(instanceConfigPath));
|
||||
|
||||
const sourcePathNames = MIGRATION_PATH_NAMES.filter((pathName) => {
|
||||
if (pathName === "config.json" && !legacyRuntimeConfig) return false;
|
||||
return fs.existsSync(path.join(sourceRoot, pathName));
|
||||
});
|
||||
|
||||
const conflicts = sourcePathNames
|
||||
.map((pathName) => ({
|
||||
pathName,
|
||||
sourcePath: path.join(sourceRoot, pathName),
|
||||
destinationPath: path.join(destinationRoot, pathName),
|
||||
}))
|
||||
.filter((entry) => fs.existsSync(entry.destinationPath));
|
||||
|
||||
return {
|
||||
instanceId,
|
||||
sourceRoot,
|
||||
destinationRoot,
|
||||
legacyRuntimeConfig,
|
||||
sourcePathNames,
|
||||
conflicts,
|
||||
};
|
||||
}
|
||||
|
||||
function readServerEndpointFromConfig(configPath: string): { host: string; port: number } | null {
|
||||
const parsed = readJsonIfPresent(configPath);
|
||||
if (!isObject(parsed)) return null;
|
||||
const server = parsed.server;
|
||||
if (!isObject(server)) return null;
|
||||
const rawHost = typeof server.host === "string" && server.host.trim().length > 0
|
||||
? server.host.trim()
|
||||
: "127.0.0.1";
|
||||
const host = rawHost === "0.0.0.0" || rawHost === "::" ? "127.0.0.1" : rawHost;
|
||||
const port = typeof server.port === "number" && Number.isInteger(server.port) ? server.port : 3100;
|
||||
if (port <= 0 || port > 65535) return null;
|
||||
return { host, port };
|
||||
}
|
||||
|
||||
export async function defaultServerRunningCheck(plan: DefaultSpaceMigrationPlan): Promise<boolean> {
|
||||
const endpoint = readServerEndpointFromConfig(path.join(plan.sourceRoot, "config.json"));
|
||||
if (!endpoint) return false;
|
||||
|
||||
const host = endpoint.host.includes(":") ? `[${endpoint.host}]` : endpoint.host;
|
||||
const controller = new AbortController();
|
||||
const timeout = setTimeout(() => controller.abort(), 500);
|
||||
try {
|
||||
const response = await fetch(`http://${host}:${endpoint.port}/api/health`, {
|
||||
signal: controller.signal,
|
||||
});
|
||||
return response.status >= 200 && response.status < 500;
|
||||
} catch {
|
||||
return false;
|
||||
} finally {
|
||||
clearTimeout(timeout);
|
||||
}
|
||||
}
|
||||
|
||||
function assertMigrationPreflight(plan: DefaultSpaceMigrationPlan): void {
|
||||
if (plan.conflicts.length === 0) return;
|
||||
const details = plan.conflicts
|
||||
.map((conflict) => `- ${conflict.pathName}: ${conflict.sourcePath} -> ${conflict.destinationPath}`)
|
||||
.join("\n");
|
||||
throw new Error(`Cannot migrate default space because destination paths already exist:\n${details}`);
|
||||
}
|
||||
|
||||
function moveConfigWithRewrittenPaths(sourcePath: string, destinationPath: string, sourceRoot: string, destinationRoot: string): void {
|
||||
const parsed = JSON.parse(fs.readFileSync(sourcePath, "utf8")) as unknown;
|
||||
const rewritten = rewriteConfigPaths(parsed, sourceRoot, destinationRoot);
|
||||
atomicWriteJson(destinationPath, rewritten);
|
||||
fs.unlinkSync(sourcePath);
|
||||
}
|
||||
|
||||
/**
 * Replace the legacy config.json with a default-space registry document
 * recording what was migrated, when, and where it went. Returns the
 * marker file's path.
 */
function writeMigrationMarker(plan: DefaultSpaceMigrationPlan, movedPaths: string[]): string {
  const migratedAt = new Date().toISOString();
  const registry = createDefaultSpaceRegistry("system");
  registry.$meta.updatedAt = migratedAt;
  registry.defaultSpaceMigration = {
    migratedAt,
    sourceRoot: plan.sourceRoot,
    destinationRoot: plan.destinationRoot,
    movedPaths,
  };

  // The marker deliberately lives where the legacy config used to be, so
  // a later plan build sees a non-runtime config and skips config.json.
  const markerPath = path.join(plan.sourceRoot, "config.json");
  writeJson(markerPath, registry);
  return markerPath;
}
|
||||
|
||||
/**
 * Offline migration of legacy root-shaped instance data into
 * spaces/default. Steps, in order:
 *   1. Build the plan; return "noop" when nothing needs moving.
 *   2. Fail fast if any destination path already exists.
 *   3. Unless skipped, refuse to run while the local server responds.
 *   4. Dry-run returns the plan without touching the filesystem.
 *   5. Move each path (config.json gets embedded paths rewritten),
 *      then write the registry marker recording the migration.
 */
export async function migrateDefaultSpaceInstall(
  options: MigrateDefaultSpaceOptions = {},
): Promise<DefaultSpaceMigrationResult> {
  const plan = buildDefaultSpaceMigrationPlan(options);

  if (plan.sourcePathNames.length === 0) {
    return { status: "noop", plan, movedPaths: [], markerPath: null };
  }

  assertMigrationPreflight(plan);

  const serverRunningCheck = options.serverRunningCheck ?? defaultServerRunningCheck;
  if (!options.skipServerCheck && await serverRunningCheck(plan)) {
    throw new Error(
      `Cannot migrate default space while a Paperclip server appears to be running for ${plan.sourceRoot}. Stop Paperclip and retry.`,
    );
  }

  if (options.dryRun) {
    return { status: "dry_run", plan, movedPaths: plan.sourcePathNames, markerPath: null };
  }

  fs.mkdirSync(plan.destinationRoot, { recursive: true });
  const movedPaths: string[] = [];

  for (const pathName of plan.sourcePathNames) {
    const sourcePath = path.join(plan.sourceRoot, pathName);
    const destinationPath = path.join(plan.destinationRoot, pathName);
    fs.mkdirSync(path.dirname(destinationPath), { recursive: true });

    if (pathName === "config.json") {
      // config.json may embed absolute paths under the old root; rewrite them.
      moveConfigWithRewrittenPaths(sourcePath, destinationPath, plan.sourceRoot, plan.destinationRoot);
    } else {
      // NOTE(review): renameSync fails across filesystems — assumes source
      // and destination share a volume; confirm for custom spaces roots.
      fs.renameSync(sourcePath, destinationPath);
    }
    movedPaths.push(pathName);
  }

  const markerPath = writeMigrationMarker(plan, movedPaths);
  return { status: "migrated", plan, movedPaths, markerPath };
}
|
||||
|
||||
function printMigrationResult(result: DefaultSpaceMigrationResult): void {
|
||||
if (result.status === "noop") {
|
||||
console.log(pc.dim(`No legacy default-space data found under ${result.plan.sourceRoot}.`));
|
||||
return;
|
||||
}
|
||||
|
||||
const verb = result.status === "dry_run" ? "Would migrate" : "Migrated";
|
||||
console.log(`${pc.green(verb)} default space:`);
|
||||
console.log(` ${pc.dim("from")} ${result.plan.sourceRoot}`);
|
||||
console.log(` ${pc.dim("to")} ${result.plan.destinationRoot}`);
|
||||
for (const pathName of result.movedPaths) {
|
||||
console.log(` ${pc.dim("-")} ${pathName}`);
|
||||
}
|
||||
if (result.markerPath) {
|
||||
console.log(` ${pc.dim("marker")} ${result.markerPath}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Register the `spaces` command group (currently only `migrate-default`).
 * Unlike the secrets commands there is no try/catch here — errors from
 * the migration propagate to commander's default error handling.
 */
export function registerSpacesCommands(program: Command): void {
  const spaces = program.command("spaces").description("Manage local Paperclip spaces");

  spaces
    .command("migrate-default")
    .description("Offline migration from legacy root-shaped instance data into spaces/default")
    .option("-i, --instance <id>", "Local instance id (default: default)")
    .option("--dry-run", "Show the migration plan without moving files", false)
    .option("--skip-server-check", "Skip the local /api/health preflight", false)
    .action(async (opts: { instance?: string; dryRun?: boolean; skipServerCheck?: boolean }) => {
      const result = await migrateDefaultSpaceInstall({
        instanceId: opts.instance,
        dryRun: opts.dryRun,
        skipServerCheck: opts.skipServerCheck,
      });
      printMigrationResult(result);
    });
}
|
||||
@@ -11,26 +11,19 @@ import {
|
||||
resolvePaperclipHomeDir,
|
||||
resolvePaperclipInstanceId,
|
||||
resolvePaperclipInstanceRoot as resolveSharedPaperclipInstanceRoot,
|
||||
resolvePaperclipSpaceId,
|
||||
resolvePaperclipSpaceRoot as resolveSharedPaperclipSpaceRoot,
|
||||
} from "@paperclipai/shared/space-paths";
|
||||
} from "@paperclipai/shared/home-paths";
|
||||
|
||||
export {
|
||||
expandHomePrefix,
|
||||
resolveHomeAwarePath,
|
||||
resolvePaperclipHomeDir,
|
||||
resolvePaperclipInstanceId,
|
||||
resolvePaperclipSpaceId,
|
||||
};
|
||||
|
||||
/** Legacy-signature wrapper: accepts a bare instanceId string instead of an options object. */
export function resolvePaperclipInstanceRoot(instanceId?: string): string {
  return resolveSharedPaperclipInstanceRoot({ instanceId });
}
|
||||
|
||||
export function resolvePaperclipSpaceRoot(instanceId?: string): string {
|
||||
return resolveSharedPaperclipSpaceRoot({ instanceId });
|
||||
}
|
||||
|
||||
export function resolveDefaultConfigPath(instanceId?: string): string {
|
||||
return resolvePaperclipConfigPathForInstance({ instanceId });
|
||||
}
|
||||
@@ -66,17 +59,10 @@ export function resolveDefaultBackupDir(instanceId?: string): string {
|
||||
export function describeLocalInstancePaths(instanceId?: string) {
|
||||
const resolvedInstanceId = resolvePaperclipInstanceId(instanceId);
|
||||
const instanceRoot = resolvePaperclipInstanceRoot(resolvedInstanceId);
|
||||
const activeSpaceId = resolvePaperclipSpaceId({ instanceId: resolvedInstanceId });
|
||||
const activeSpaceRoot = resolveSharedPaperclipSpaceRoot({
|
||||
instanceId: resolvedInstanceId,
|
||||
spaceId: activeSpaceId,
|
||||
});
|
||||
return {
|
||||
homeDir: resolvePaperclipHomeDir(),
|
||||
instanceId: resolvedInstanceId,
|
||||
instanceRoot,
|
||||
activeSpaceId,
|
||||
activeSpaceRoot,
|
||||
configPath: resolveDefaultConfigPath(resolvedInstanceId),
|
||||
embeddedPostgresDataDir: resolveDefaultEmbeddedPostgresDir(resolvedInstanceId),
|
||||
backupDir: resolveDefaultBackupDir(resolvedInstanceId),
|
||||
|
||||
@@ -18,13 +18,13 @@ import { registerActivityCommands } from "./commands/client/activity.js";
|
||||
import { registerDashboardCommands } from "./commands/client/dashboard.js";
|
||||
import { registerRoutineCommands } from "./commands/routines.js";
|
||||
import { registerFeedbackCommands } from "./commands/client/feedback.js";
|
||||
import { registerSecretCommands } from "./commands/client/secrets.js";
|
||||
import { applyDataDirOverride, type DataDirOptionLike } from "./config/data-dir.js";
|
||||
import { loadPaperclipEnvFile } from "./config/env.js";
|
||||
import { initTelemetryFromConfigFile, flushTelemetry } from "./telemetry.js";
|
||||
import { registerWorktreeCommands } from "./commands/worktree.js";
|
||||
import { registerPluginCommands } from "./commands/client/plugin.js";
|
||||
import { registerClientAuthCommands } from "./commands/client/auth.js";
|
||||
import { registerSpacesCommands } from "./commands/spaces.js";
|
||||
import { cliVersion } from "./version.js";
|
||||
|
||||
const program = new Command();
|
||||
@@ -148,10 +148,10 @@ registerActivityCommands(program);
|
||||
registerDashboardCommands(program);
|
||||
registerRoutineCommands(program);
|
||||
registerFeedbackCommands(program);
|
||||
registerSecretCommands(program);
|
||||
registerWorktreeCommands(program);
|
||||
registerEnvLabCommands(program);
|
||||
registerPluginCommands(program);
|
||||
registerSpacesCommands(program);
|
||||
|
||||
const auth = program.command("auth").description("Authentication and bootstrap utilities");
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ export async function promptSecrets(current?: SecretsConfig): Promise<SecretsCon
|
||||
{
|
||||
value: "aws_secrets_manager" as const,
|
||||
label: "AWS Secrets Manager",
|
||||
hint: "requires external adapter integration",
|
||||
hint: "requires runtime AWS credentials and provider env config",
|
||||
},
|
||||
{
|
||||
value: "gcp_secret_manager" as const,
|
||||
@@ -84,7 +84,9 @@ export async function promptSecrets(current?: SecretsConfig): Promise<SecretsCon
|
||||
|
||||
if (provider !== "local_encrypted") {
|
||||
p.note(
|
||||
`${provider} is not fully wired in this build yet. Keep local_encrypted unless you are actively implementing that adapter.`,
|
||||
provider === "aws_secrets_manager"
|
||||
? "AWS credentials must come from the Paperclip server runtime (IAM role/workload identity, AWS_PROFILE/SSO/shared credentials, or short-lived shell env), not from Paperclip company secrets."
|
||||
: `${provider} is not fully wired in this build yet. Keep local_encrypted unless you are actively implementing that adapter.`,
|
||||
"Heads up",
|
||||
);
|
||||
}
|
||||
|
||||
109
doc/CLI.md
@@ -143,6 +143,32 @@ pnpm paperclipai agent local-cli codexcoder --company-id <company-id>
|
||||
pnpm paperclipai agent local-cli claudecoder --company-id <company-id>
|
||||
```
|
||||
|
||||
## Secrets Commands
|
||||
|
||||
```sh
|
||||
pnpm paperclipai secrets list --company-id <company-id>
|
||||
pnpm paperclipai secrets declarations --company-id <company-id> [--include agents,projects] [--kind secret]
|
||||
pnpm paperclipai secrets create --company-id <company-id> --name anthropic-api-key --value-env ANTHROPIC_API_KEY
|
||||
pnpm paperclipai secrets link --company-id <company-id> --name prod-stripe-key --provider aws_secrets_manager --external-ref <provider-ref>
|
||||
pnpm paperclipai secrets doctor --company-id <company-id>
|
||||
pnpm paperclipai secrets migrate-inline-env --company-id <company-id> [--apply]
|
||||
```
|
||||
|
||||
Secret listing and declarations never print secret values. `create` accepts
|
||||
`--value-env` so shell history does not capture the value. `link` records
|
||||
provider-owned references without copying the secret value into Paperclip.
|
||||
For AWS-backed secrets, `secrets doctor` reports missing non-secret provider
|
||||
env and the expected AWS SDK runtime credential source; do not store AWS
|
||||
bootstrap credentials in Paperclip secrets.
|
||||
|
||||
Per-company provider vaults (multiple vault instances per provider, default
|
||||
vault selection, coming-soon GCP/Vault) are configured from the board UI under
|
||||
`Company Settings → Secrets → Provider vaults` or through
|
||||
`/api/companies/{companyId}/secret-provider-configs`. There is no CLI surface
|
||||
for vault management today. See the
|
||||
[secrets deploy guide](../docs/deploy/secrets.md#provider-vaults) and
|
||||
[API reference](../docs/api/secrets.md#provider-vaults) for the contract.
|
||||
|
||||
## Approval Commands
|
||||
|
||||
```sh
|
||||
@@ -178,84 +204,41 @@ pnpm paperclipai heartbeat run --agent-id <agent-id> [--api-base http://localhos
|
||||
|
||||
## Local Storage Defaults
|
||||
|
||||
Local Paperclip data is split between an instance root and one or more spaces inside it. The default install creates a single space called `default` under `spaces/default`.
|
||||
Local Paperclip data lives under the selected instance root. `PAPERCLIP_HOME` chooses the home directory and `PAPERCLIP_INSTANCE_ID` chooses the instance.
|
||||
|
||||
```text
|
||||
~/.paperclip/ # PAPERCLIP_HOME
|
||||
└── instances/
|
||||
└── default/ # instance root (PAPERCLIP_INSTANCE_ID)
|
||||
├── config.json # space registry (activeSpaceId, known spaces)
|
||||
└── spaces/
|
||||
└── default/ # active space root (PAPERCLIP_SPACE_ID)
|
||||
├── config.json # runtime config for this space
|
||||
├── .env # space-scoped env file
|
||||
├── db/ # embedded PostgreSQL data
|
||||
├── data/
|
||||
│ ├── storage/ # local_disk uploads
|
||||
│ └── backups/ # automatic DB backups
|
||||
├── logs/
|
||||
├── secrets/
|
||||
│ └── master.key # local_encrypted master key
|
||||
├── workspaces/ # default agent workspaces
|
||||
├── projects/ # project execution workspaces
|
||||
├── companies/ # per-company adapter homes (e.g. codex-home)
|
||||
└── codex-home/ # per-instance codex home (when not company-scoped)
|
||||
├── config.json # runtime config
|
||||
├── .env # instance env file
|
||||
├── db/ # embedded PostgreSQL data
|
||||
├── data/
|
||||
│ ├── storage/ # local_disk uploads
|
||||
│ └── backups/ # automatic DB backups
|
||||
├── logs/
|
||||
├── secrets/
|
||||
│ └── master.key # local_encrypted master key
|
||||
├── workspaces/ # default agent workspaces
|
||||
├── projects/ # project execution workspaces
|
||||
├── companies/ # per-company adapter homes (e.g. codex-home)
|
||||
└── codex-home/ # per-instance codex home (when not company-scoped)
|
||||
```
|
||||
|
||||
Default paths for the canonical install:
|
||||
|
||||
- instance registry: `~/.paperclip/instances/default/config.json`
|
||||
- space config: `~/.paperclip/instances/default/spaces/default/config.json`
|
||||
- embedded db: `~/.paperclip/instances/default/spaces/default/db`
|
||||
- logs: `~/.paperclip/instances/default/spaces/default/logs`
|
||||
- storage: `~/.paperclip/instances/default/spaces/default/data/storage`
|
||||
- secrets key: `~/.paperclip/instances/default/spaces/default/secrets/master.key`
|
||||
- config: `~/.paperclip/instances/default/config.json`
|
||||
- embedded db: `~/.paperclip/instances/default/db`
|
||||
- logs: `~/.paperclip/instances/default/logs`
|
||||
- storage: `~/.paperclip/instances/default/data/storage`
|
||||
- secrets key: `~/.paperclip/instances/default/secrets/master.key`
|
||||
|
||||
The instance root holds only cross-space metadata. All runtime state — database, storage, secrets, logs, agent workspaces — lives inside the active space.
|
||||
|
||||
Override base home, instance, or active space with env vars:
|
||||
Override base home or instance with env vars:
|
||||
|
||||
```sh
|
||||
PAPERCLIP_HOME=/custom/home PAPERCLIP_INSTANCE_ID=dev pnpm paperclipai run
|
||||
PAPERCLIP_SPACE_ID=staging pnpm paperclipai run # use an alternate space inside the same instance
|
||||
```
|
||||
|
||||
`PAPERCLIP_SPACE_ID` overrides whatever `activeSpaceId` is recorded in the instance registry. When unset, Paperclip resolves the active space from `instances/<id>/config.json` and falls back to `default`.
|
||||
|
||||
## Migrating a Legacy Default-Space Install
|
||||
|
||||
Earlier Paperclip versions stored default-space data directly at the instance root (for example `~/.paperclip/instances/default/db`). Those installs continue to start through a compatibility resolver, but new code paths assume `spaces/default`. Migrate explicitly with:
|
||||
|
||||
```sh
|
||||
# Stop Paperclip first; the migration refuses to run while a local server is up.
|
||||
pnpm paperclipai spaces migrate-default
|
||||
|
||||
# Preview the move without touching files.
|
||||
pnpm paperclipai spaces migrate-default --dry-run
|
||||
|
||||
# Operate on a non-default instance.
|
||||
pnpm paperclipai spaces migrate-default --instance dev
|
||||
```
|
||||
|
||||
The command:
|
||||
|
||||
- detects a legacy root-shaped install by inspecting `instances/<id>/config.json`
|
||||
- preflight-checks `http://<server.host>:<server.port>/api/health` and refuses to run when the server responds
|
||||
- refuses to merge if the destination already contains conflicting paths under `spaces/default/`
|
||||
- moves only known space-owned paths: `config.json`, `.env`, `db`, `data`, `logs`, `secrets`, `workspaces`, `projects`, `companies`, and the legacy top-level `codex-home`
|
||||
- rewrites absolute paths inside the migrated `config.json` from the instance root to the new space root (`embeddedPostgresDataDir`, backup `dir`, `logging.logDir`, `storage.localDisk.baseDir`, `secrets.localEncrypted.keyFilePath`)
|
||||
- writes the registry/marker back at `instances/<id>/config.json` recording `activeSpaceId`, the migration timestamp, and the moved paths
|
||||
|
||||
Available flags:
|
||||
|
||||
| Option | Description |
|
||||
|---|---|
|
||||
| `-i, --instance <id>` | Local instance id (default: `default`) |
|
||||
| `--dry-run` | Show the migration plan without moving files |
|
||||
| `--skip-server-check` | Skip the `/api/health` preflight (only for offline installs) |
|
||||
|
||||
Restart Paperclip after a successful migration. Subsequent commands resolve the active space root from the new registry automatically.
|
||||
|
||||
## Storage Configuration
|
||||
|
||||
Configure storage provider and settings:
|
||||
|
||||
@@ -12,12 +12,12 @@ pnpm dev
|
||||
|
||||
That's it. On first start the server:
|
||||
|
||||
1. Creates a `~/.paperclip/instances/default/spaces/default/db/` directory for storage
|
||||
1. Creates a `~/.paperclip/instances/default/db/` directory for storage
|
||||
2. Ensures the `paperclip` database exists
|
||||
3. Runs migrations automatically for empty databases
|
||||
4. Starts serving requests
|
||||
|
||||
Data persists across restarts in `~/.paperclip/instances/default/spaces/default/db/`. To reset local dev data, delete that directory. Older installs that still keep data at `~/.paperclip/instances/default/db/` continue to start through the legacy resolver — see [Migrating a Legacy Default-Space Install](CLI.md#migrating-a-legacy-default-space-install).
|
||||
Data persists across restarts in `~/.paperclip/instances/default/db/`. To reset local dev data, delete that directory.
|
||||
|
||||
If you need to apply pending migrations manually, run:
|
||||
|
||||
@@ -137,7 +137,7 @@ The database mode is controlled by `DATABASE_URL`:
|
||||
|
||||
| `DATABASE_URL` | Mode |
|
||||
|---|---|
|
||||
| Not set | Embedded PostgreSQL (`~/.paperclip/instances/default/spaces/default/db/`) |
|
||||
| Not set | Embedded PostgreSQL (`~/.paperclip/instances/default/db/`) |
|
||||
| `postgres://...localhost...` | Local Docker PostgreSQL |
|
||||
| `postgres://...supabase.com...` | Hosted Supabase |
|
||||
|
||||
@@ -169,8 +169,10 @@ Paperclip stores secret metadata and versions in:
|
||||
For local/default installs, the active provider is `local_encrypted`:
|
||||
|
||||
- Secret material is encrypted at rest with a local master key.
|
||||
- Default key file: `~/.paperclip/instances/default/spaces/default/secrets/master.key` (auto-created if missing).
|
||||
- CLI config location: `~/.paperclip/instances/default/spaces/default/config.json` under `secrets.localEncrypted.keyFilePath`. (The instance-level `~/.paperclip/instances/default/config.json` is the cross-space registry that records which space is active.)
|
||||
- Default key file: `~/.paperclip/instances/default/secrets/master.key` (auto-created if missing).
|
||||
- CLI config location: `~/.paperclip/instances/default/config.json` under `secrets.localEncrypted.keyFilePath`.
|
||||
- Backup/restore requires both the database metadata and the local master key file; either artifact alone is insufficient.
|
||||
- The server best-effort enforces `0600` key file permissions and provider health reports permission warnings.
|
||||
|
||||
Optional overrides:
|
||||
|
||||
@@ -192,5 +194,10 @@ pnpm paperclipai configure --section secrets
|
||||
Inline secret migration command:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai secrets migrate-inline-env --company-id <company-id> --apply
|
||||
|
||||
# direct database maintenance fallback
|
||||
pnpm secrets:migrate-inline-env --apply
|
||||
```
|
||||
|
||||
Hosted AWS provider notes live in [SECRETS-AWS-PROVIDER.md](./SECRETS-AWS-PROVIDER.md).
|
||||
|
||||
@@ -159,42 +159,36 @@ For a separate review-oriented container that keeps `codex`/`claude` login state
|
||||
|
||||
## Local Instance Layout
|
||||
|
||||
Every local install nests instance metadata, the active space, and runtime state under `~/.paperclip/`:
|
||||
Every local install keeps runtime state directly under the selected instance root:
|
||||
|
||||
```text
|
||||
~/.paperclip/instances/default/ # instance root (cross-space metadata only)
|
||||
config.json # space registry (activeSpaceId, known spaces)
|
||||
spaces/
|
||||
default/ # active space root
|
||||
config.json # space runtime config
|
||||
.env # space-scoped env file
|
||||
db/ # embedded PostgreSQL data
|
||||
data/
|
||||
storage/ # local_disk uploads
|
||||
backups/ # automatic DB backups
|
||||
logs/
|
||||
secrets/master.key # local_encrypted master key
|
||||
workspaces/<agent-id>/ # default agent workspaces
|
||||
projects/ # project execution workspaces
|
||||
companies/<company-id>/codex-home/ # per-company codex_local home
|
||||
~/.paperclip/instances/default/ # instance root
|
||||
config.json # runtime config
|
||||
.env # instance env file
|
||||
db/ # embedded PostgreSQL data
|
||||
data/
|
||||
storage/ # local_disk uploads
|
||||
backups/ # automatic DB backups
|
||||
logs/
|
||||
secrets/master.key # local_encrypted master key
|
||||
workspaces/<agent-id>/ # default agent workspaces
|
||||
projects/ # project execution workspaces
|
||||
companies/<company-id>/codex-home/ # per-company codex_local home
|
||||
```
|
||||
|
||||
The instance root no longer doubles as the default space. New installs created by `paperclipai onboard`, `paperclipai run`, and `paperclipai configure` always write the registry at `instances/<id>/config.json` and put runtime data under `spaces/default/`. To migrate an older root-shaped install in place, see [Migrating a Legacy Default-Space Install](CLI.md#migrating-a-legacy-default-space-install).
|
||||
|
||||
`PAPERCLIP_HOME`, `PAPERCLIP_INSTANCE_ID`, and `PAPERCLIP_SPACE_ID` override the home root, instance id, and active space respectively. `paperclipai onboard` echoes the resolved values in its banner (`Local home: <home> | instance: <id> | space: <id> | config: <path>`) so you can confirm where state will land before continuing.
|
||||
`PAPERCLIP_HOME` and `PAPERCLIP_INSTANCE_ID` override the home root and instance id respectively. `paperclipai onboard` echoes the resolved values in its banner (`Local home: <home> | instance: <id> | config: <path>`) so you can confirm where state will land before continuing.
|
||||
|
||||
## Database in Dev (Auto-Handled)
|
||||
|
||||
For local development, leave `DATABASE_URL` unset.
|
||||
The server will automatically use embedded PostgreSQL and persist data at:
|
||||
|
||||
- `~/.paperclip/instances/default/spaces/default/db`
|
||||
- `~/.paperclip/instances/default/db`
|
||||
|
||||
Override home, instance, or space:
|
||||
Override home or instance:
|
||||
|
||||
```sh
|
||||
PAPERCLIP_HOME=/custom/path PAPERCLIP_INSTANCE_ID=dev pnpm paperclipai run
|
||||
PAPERCLIP_SPACE_ID=staging pnpm paperclipai run
|
||||
```
|
||||
|
||||
No Docker or external database is required for this mode.
|
||||
@@ -203,7 +197,7 @@ No Docker or external database is required for this mode.
|
||||
|
||||
For local development, the default storage provider is `local_disk`, which persists uploaded images/attachments at:
|
||||
|
||||
- `~/.paperclip/instances/default/spaces/default/data/storage`
|
||||
- `~/.paperclip/instances/default/data/storage`
|
||||
|
||||
Configure storage provider/settings:
|
||||
|
||||
@@ -213,15 +207,15 @@ pnpm paperclipai configure --section storage
|
||||
|
||||
## Default Agent Workspaces
|
||||
|
||||
When a local agent run has no resolved project/session workspace, Paperclip falls back to an agent home workspace under the active space:
|
||||
When a local agent run has no resolved project/session workspace, Paperclip falls back to an agent home workspace under the instance root:
|
||||
|
||||
- `~/.paperclip/instances/default/spaces/default/workspaces/<agent-id>`
|
||||
- `~/.paperclip/instances/default/workspaces/<agent-id>`
|
||||
|
||||
This path honors `PAPERCLIP_HOME`, `PAPERCLIP_INSTANCE_ID`, and `PAPERCLIP_SPACE_ID` in non-default setups.
|
||||
This path honors `PAPERCLIP_HOME` and `PAPERCLIP_INSTANCE_ID` in non-default setups.
|
||||
|
||||
For `codex_local`, Paperclip also manages a per-company Codex home under the active space and seeds it from the shared Codex login/config home (`$CODEX_HOME` or `~/.codex`):
|
||||
For `codex_local`, Paperclip also manages a per-company Codex home under the instance root and seeds it from the shared Codex login/config home (`$CODEX_HOME` or `~/.codex`):
|
||||
|
||||
- `~/.paperclip/instances/default/spaces/default/companies/<company-id>/codex-home`
|
||||
- `~/.paperclip/instances/default/companies/<company-id>/codex-home`
|
||||
|
||||
If the `codex` CLI is not installed or not on `PATH`, `codex_local` agent runs fail at execution time with a clear adapter error. Quota polling uses a short-lived `codex app-server` subprocess: when `codex` cannot be spawned, that provider reports `ok: false` in aggregated quota results and the API server keeps running (it must not exit on a missing binary).
|
||||
|
||||
@@ -307,13 +301,13 @@ paperclipai worktree init --from-data-dir ~/.paperclip
|
||||
paperclipai worktree init --force
|
||||
```
|
||||
|
||||
Repair an already-created repo-managed worktree and reseed its isolated instance from the main default install. Point `--from-config` at the active space config (`spaces/default/config.json` for post-migration installs; the legacy `~/.paperclip/instances/default/config.json` still works on root-shaped installs that haven't run `paperclipai spaces migrate-default` yet):
|
||||
Repair an already-created repo-managed worktree and reseed its isolated instance from the main default install. Point `--from-config` at the instance config:
|
||||
|
||||
```sh
|
||||
cd /path/to/paperclip/.paperclip/worktrees/PAP-884-ai-commits-component
|
||||
pnpm paperclipai worktree init --force --seed-mode minimal \
|
||||
--name PAP-884-ai-commits-component \
|
||||
--from-config ~/.paperclip/instances/default/spaces/default/config.json
|
||||
--from-config ~/.paperclip/instances/default/config.json
|
||||
```
|
||||
|
||||
That rewrites the worktree-local `.paperclip/config.json` + `.paperclip/.env`, recreates the isolated instance under `~/.paperclip-worktrees/instances/<worktree-id>/`, and preserves the git worktree contents themselves.
|
||||
@@ -438,12 +432,10 @@ Expected:
|
||||
To wipe local dev data and start fresh:
|
||||
|
||||
```sh
|
||||
rm -rf ~/.paperclip/instances/default/spaces/default/db
|
||||
rm -rf ~/.paperclip/instances/default/db
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
If you migrated from a legacy root-shaped install and still have a stale `~/.paperclip/instances/default/db`, remove that too — but only after confirming `paperclipai spaces migrate-default` ran and the active database lives under `spaces/default/`.
|
||||
|
||||
## Optional: Use External Postgres
|
||||
|
||||
If you set `DATABASE_URL`, the server will use that instead of embedded PostgreSQL.
|
||||
@@ -457,7 +449,7 @@ schemas. Defaults:
|
||||
- enabled
|
||||
- every 60 minutes
|
||||
- retain 30 days
|
||||
- backup dir: `~/.paperclip/instances/default/spaces/default/data/backups`
|
||||
- backup dir: `~/.paperclip/instances/default/data/backups`
|
||||
|
||||
Configure these in:
|
||||
|
||||
@@ -488,9 +480,10 @@ those providers are enabled.
|
||||
|
||||
Agent env vars now support secret references. By default, secret values are stored with local encryption and only secret refs are persisted in agent config.
|
||||
|
||||
- Default local key path: `~/.paperclip/instances/default/spaces/default/secrets/master.key`
|
||||
- Default local key path: `~/.paperclip/instances/default/secrets/master.key`
|
||||
- Override key material directly: `PAPERCLIP_SECRETS_MASTER_KEY`
|
||||
- Override key file path: `PAPERCLIP_SECRETS_MASTER_KEY_FILE`
|
||||
- Back up the key file and database together; either one alone is not enough to restore local encrypted secrets.
|
||||
|
||||
Strict mode (recommended outside local trusted machines):
|
||||
|
||||
@@ -499,12 +492,20 @@ PAPERCLIP_SECRETS_STRICT_MODE=true
|
||||
```
|
||||
|
||||
When strict mode is enabled, sensitive env keys (for example `*_API_KEY`, `*_TOKEN`, `*_SECRET`) must use secret references instead of inline plain values.
|
||||
Authenticated deployments default strict mode on unless explicitly overridden.
|
||||
|
||||
CLI configuration support:
|
||||
|
||||
- `pnpm paperclipai onboard` writes a default `secrets` config section (`local_encrypted`, strict mode off, key file path set) and creates a local key file when needed.
|
||||
- `pnpm paperclipai configure --section secrets` lets you update provider/strict mode/key path and creates the local key file when needed.
|
||||
- `pnpm paperclipai doctor` validates secrets adapter configuration and can create a missing local key file with `--repair`.
|
||||
- `pnpm paperclipai doctor` validates secrets adapter configuration, can create a missing local key file with `--repair`, and reports missing AWS Secrets Manager bootstrap env when that provider is selected.
|
||||
- Provider health is available at `GET /api/companies/:companyId/secret-providers/health` and reports local key permission warnings plus backup guidance.
|
||||
|
||||
Per-company provider vaults are configured in the board UI under
|
||||
`Company Settings → Secrets → Provider vaults`, backed by
|
||||
`/api/companies/{companyId}/secret-provider-configs`. The CLI does not own
|
||||
vault lifecycle today. See `docs/deploy/secrets.md` (`Provider Vaults` section)
|
||||
for the operator model.
|
||||
|
||||
Migration helper for existing inline env secrets:
|
||||
|
||||
|
||||
368
doc/SECRETS-AWS-PROVIDER.md
Normal file
@@ -0,0 +1,368 @@
|
||||
# AWS Secrets Manager Provider
|
||||
|
||||
Operational contract for the hosted `aws_secrets_manager` secret provider used by Paperclip Cloud.
|
||||
|
||||
## Scope
|
||||
|
||||
- Hosted provider for Paperclip-managed secrets when Paperclip Cloud runs on AWS.
|
||||
- Source of truth for secret values is AWS Secrets Manager, not Postgres.
|
||||
- Paperclip stores only metadata needed for ownership, bindings, version selection, audit, and runtime resolution.
|
||||
- AWS provider bootstrap credentials are deployment/runtime credentials, not Paperclip-managed company secrets.
|
||||
- Remote import for existing AWS secrets is metadata-only. Preview/import uses
|
||||
AWS inventory metadata and creates Paperclip external references; it does not
|
||||
copy plaintext into Paperclip.
|
||||
- Per-company AWS provider vaults (named instances of `aws_secrets_manager`
|
||||
with their own region, namespace, prefix, KMS key id, and tags) are managed
|
||||
in the board UI under `Company Settings → Secrets → Provider vaults`. See
|
||||
[Provider Vaults](../docs/deploy/secrets.md#provider-vaults) for the operator
|
||||
model and [Provider Vaults API](../docs/api/secrets.md#provider-vaults) for
|
||||
the routes. The bootstrap trust model in this document still applies — vault
|
||||
config carries non-sensitive routing metadata only, never AWS credentials.
|
||||
|
||||
## Bootstrap Trust Model
|
||||
|
||||
The AWS provider has a chicken-and-egg boundary: Paperclip cannot use
|
||||
`company_secrets` to unlock the AWS provider that stores those secrets. The
|
||||
initial AWS trust must exist before the Paperclip server starts.
|
||||
|
||||
Allowed bootstrap locations:
|
||||
|
||||
- Infrastructure IAM or workload identity attached to the Paperclip server
|
||||
runtime.
|
||||
- Process environment or orchestrator secret store used to start the Paperclip
|
||||
server.
|
||||
- Local AWS SDK sources such as `AWS_PROFILE`, AWS SSO/shared config, web
|
||||
identity, container metadata, or instance metadata.
|
||||
- Short-lived shell credentials for local development only.
|
||||
|
||||
Do not ask operators to paste AWS root credentials or long-lived IAM user access
|
||||
keys into the Paperclip board UI. Do not store those bootstrap keys in
|
||||
`company_secrets`.
|
||||
|
||||
## Paperclip Cloud Bootstrap
|
||||
|
||||
Paperclip Cloud must provision the AWS backing resources before any board user
|
||||
can create AWS-backed company secrets:
|
||||
|
||||
1. Create or select the deployment KMS key.
|
||||
2. Create the Paperclip server runtime role for the deployment.
|
||||
3. Attach a minimum IAM policy scoped to the deployment Secrets Manager prefix
|
||||
and the configured KMS key.
|
||||
4. Configure the server runtime with the non-secret provider environment
|
||||
variables below.
|
||||
5. Run `paperclipai doctor` or the provider health endpoint from the deployed
|
||||
runtime and confirm that the provider reports the expected region, prefix,
|
||||
deployment id, KMS setting, and AWS SDK credential source.
|
||||
|
||||
Once this is in place, the board UI can create Paperclip-managed AWS secrets and
|
||||
Paperclip will write them under the deployment/company namespace.
|
||||
|
||||
## Self-Hosted And Local Bootstrap
|
||||
|
||||
Self-hosted AWS deployments should use the AWS SDK default credential provider
|
||||
chain. Preferred sources are role-based:
|
||||
|
||||
- EC2 instance profile.
|
||||
- ECS task role.
|
||||
- EKS IRSA or another OIDC web identity role.
|
||||
- AWS SSO/shared config via `AWS_PROFILE`.
|
||||
|
||||
Local development can use:
|
||||
|
||||
```sh
|
||||
aws sso login --profile paperclip-dev
|
||||
AWS_PROFILE=paperclip-dev \
|
||||
PAPERCLIP_SECRETS_PROVIDER=aws_secrets_manager \
|
||||
PAPERCLIP_SECRETS_AWS_REGION=us-east-1 \
|
||||
PAPERCLIP_SECRETS_AWS_DEPLOYMENT_ID=dev-local \
|
||||
PAPERCLIP_SECRETS_AWS_KMS_KEY_ID=arn:aws:kms:us-east-1:123456789012:key/abcd-... \
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
Temporary `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` environment credentials
|
||||
are acceptable only as a local break-glass or short-lived test source. They
|
||||
should not be written to Paperclip config, committed to `.env` files, stored in
|
||||
`company_secrets`, or used as the default Paperclip Cloud bootstrap path.
|
||||
|
||||
## Deployment Config
|
||||
|
||||
Required environment variables:
|
||||
|
||||
```sh
|
||||
PAPERCLIP_SECRETS_PROVIDER=aws_secrets_manager
|
||||
PAPERCLIP_SECRETS_AWS_REGION=us-east-1
|
||||
PAPERCLIP_SECRETS_AWS_DEPLOYMENT_ID=prod-us-1
|
||||
PAPERCLIP_SECRETS_AWS_KMS_KEY_ID=arn:aws:kms:us-east-1:123456789012:key/abcd-...
|
||||
```
|
||||
|
||||
Optional environment variables:
|
||||
|
||||
```sh
|
||||
PAPERCLIP_SECRETS_AWS_PREFIX=paperclip
|
||||
PAPERCLIP_SECRETS_AWS_ENVIRONMENT=production
|
||||
PAPERCLIP_SECRETS_AWS_PROVIDER_OWNER=paperclip
|
||||
PAPERCLIP_SECRETS_AWS_ENDPOINT=
|
||||
PAPERCLIP_SECRETS_AWS_DELETE_RECOVERY_DAYS=30
|
||||
```
|
||||
|
||||
Naming convention for Paperclip-managed secrets:
|
||||
|
||||
```text
|
||||
paperclip/{deploymentId}/{companyId}/{secretKey}
|
||||
```
|
||||
|
||||
Tag set for Paperclip-managed secrets:
|
||||
|
||||
- `paperclip:managed-by=paperclip`
|
||||
- `paperclip:provider-owner=<owner tag>`
|
||||
- `paperclip:deployment-id=<deployment id>`
|
||||
- `paperclip:company-id=<company id>`
|
||||
- `paperclip:secret-key=<secret key>`
|
||||
- `paperclip:environment=<environment tag>`
|
||||
|
||||
## IAM And KMS Assumptions
|
||||
|
||||
Launch posture:
|
||||
|
||||
- One Paperclip app role per deployment.
|
||||
- One deployment-scoped KMS key per deployment at launch.
|
||||
- Future per-company KMS keys remain compatible because Paperclip stores provider refs and version metadata separately from values.
|
||||
|
||||
Minimum IAM boundary:
|
||||
|
||||
- Allow `secretsmanager:CreateSecret`, `PutSecretValue`, `GetSecretValue`, and `DeleteSecret`.
|
||||
- Scope resources to the deployment prefix:
|
||||
|
||||
```text
|
||||
arn:aws:secretsmanager:<region>:<account-id>:secret:paperclip/<deployment-id>/*
|
||||
```
|
||||
|
||||
- Allow `kms:Encrypt`, `kms:Decrypt`, `kms:GenerateDataKey`, and `kms:DescribeKey` for the configured deployment CMK.
|
||||
- Deny wildcard access outside the deployment prefix.
|
||||
- Prefer workload identity / role-based auth. Do not store AWS credentials inline in Paperclip config.
|
||||
|
||||
Example minimum policy shape:
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "PaperclipDeploymentSecrets",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"secretsmanager:CreateSecret",
|
||||
"secretsmanager:PutSecretValue",
|
||||
"secretsmanager:GetSecretValue",
|
||||
"secretsmanager:DeleteSecret"
|
||||
],
|
||||
"Resource": "arn:aws:secretsmanager:<region>:<account-id>:secret:paperclip/<deployment-id>/*"
|
||||
},
|
||||
{
|
||||
"Sid": "PaperclipDeploymentKms",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"kms:Encrypt",
|
||||
"kms:Decrypt",
|
||||
"kms:GenerateDataKey",
|
||||
"kms:DescribeKey"
|
||||
],
|
||||
"Resource": "arn:aws:kms:<region>:<account-id>:key/<key-id>"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Operational expectation:
|
||||
|
||||
- Paperclip-managed secrets may be deleted only by Paperclip or an operator with equivalent break-glass access.
|
||||
- External references may resolve through Paperclip runtime, but Paperclip should not delete the external secret resource.
|
||||
|
||||
## Remote Import Inventory IAM
|
||||
|
||||
Remote import preview needs one additional AWS permission:
|
||||
|
||||
```json
|
||||
{
|
||||
"Sid": "PaperclipRemoteSecretInventory",
|
||||
"Effect": "Allow",
|
||||
"Action": "secretsmanager:ListSecrets",
|
||||
"Resource": "*"
|
||||
}
|
||||
```
|
||||
|
||||
This is intentionally separate from the managed create/rotate/delete policy.
|
||||
AWS treats `ListSecrets` as an account/Region inventory action; do not document
|
||||
secret ARNs, names, tags, or AWS request filters as an IAM boundary for it. Use
|
||||
`Resource: "*"` and decide whether inventory exposure is acceptable for the AWS
|
||||
account and Region behind each provider vault.
|
||||
|
||||
Remote import preview/import must not call:
|
||||
|
||||
- `secretsmanager:GetSecretValue`
|
||||
- `secretsmanager:BatchGetSecretValue`
|
||||
- `kms:Decrypt`
|
||||
|
||||
Those permissions are only needed later when a bound runtime resolves an
|
||||
imported external reference. For imported refs, scope read permissions to the
|
||||
operator-approved external prefixes that Paperclip is allowed to consume:
|
||||
|
||||
```json
|
||||
{
|
||||
"Sid": "PaperclipResolveImportedExternalReferences",
|
||||
"Effect": "Allow",
|
||||
"Action": "secretsmanager:GetSecretValue",
|
||||
"Resource": [
|
||||
"arn:aws:secretsmanager:<region>:<account-id>:secret:<approved-external-prefix>/*"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
If selected external secrets use customer-managed KMS keys, also grant
|
||||
`kms:Decrypt` and `kms:DescribeKey` on those keys. Keep managed write/delete
|
||||
permissions scoped to `paperclip/<deployment-id>/*`; do not broaden them for
|
||||
remote import.
|
||||
|
||||
Safe scoping guidance:
|
||||
|
||||
- Prefer one Paperclip runtime role per environment/account.
|
||||
- Point provider vaults at the intended AWS account and Region instead of a
|
||||
broad central admin role.
|
||||
- Enable `ListSecrets` only in AWS accounts and Regions where inventory exposure is acceptable.
|
||||
- Keep preview/import board-only; agent API keys must not call these routes.
|
||||
- Treat AWS tag/name filters as search UX only, not permission enforcement.
|
||||
|
||||
Paperclip also blocks importing refs under its own managed namespace as
|
||||
external references. Use the Paperclip-managed flow for
|
||||
`paperclip/{deploymentId}/{companyId}/{secretKey}` resources.
|
||||
|
||||
## Existing AWS Secrets
|
||||
|
||||
V1 keeps existing AWS Secrets Manager entries as **linked external references**, not adopted
|
||||
Paperclip-managed resources.
|
||||
|
||||
Use the Paperclip-managed flow when Paperclip should create and rotate the value. The AWS
|
||||
secret name is derived from deployment and company scope:
|
||||
|
||||
```text
|
||||
paperclip/{deploymentId}/{companyId}/{secretKey}
|
||||
```
|
||||
|
||||
Use the external-reference flow when the secret already exists at an operator-owned path such
|
||||
as:
|
||||
|
||||
```text
|
||||
/paperclip-bench/anthropic_api_key
|
||||
```
|
||||
|
||||
In that mode Paperclip stores only the path or ARN, resolves it at runtime, and records
|
||||
redacted access events. Operators rotate the actual value in AWS. Update the Paperclip
|
||||
reference only when the AWS path, ARN, or pinned provider version changes.
|
||||
|
||||
Paperclip does not currently offer an "adopt existing AWS secret" flow that takes over future
|
||||
`PutSecretValue` writes for an arbitrary existing secret. Adding that later requires explicit
|
||||
confirmation UX, scope validation, expected Paperclip tags, and security/cloud-ops review.
|
||||
|
||||
## Data Custody
|
||||
|
||||
- Paperclip stores `externalRef`, `providerVersionRef`, provider id, fingerprint hash, status, and binding metadata.
|
||||
- Paperclip does not store AWS secret plaintext in `company_secret_versions.material`.
|
||||
- Runtime resolution fetches the value from AWS only when a bound consumer needs it.
|
||||
|
||||
## Rotation Runbook
|
||||
|
||||
Manual Paperclip-managed rotation:
|
||||
|
||||
1. Write the new value through the Paperclip secret rotate flow.
|
||||
2. Paperclip creates a new AWS secret version with `PutSecretValue`.
|
||||
3. Paperclip records the new `providerVersionRef` in `company_secret_versions`.
|
||||
4. Re-run or restart affected workloads that consume `latest`, or pin consumers to a specific Paperclip version before rollout when you need staged release safety.
|
||||
|
||||
Guidance:
|
||||
|
||||
- Prefer pinned Paperclip secret versions for risky rollouts.
|
||||
- Treat provider-native automatic rotation as a later enhancement; current V1 flow is explicit create-new-version plus controlled rollout.
|
||||
|
||||
## Backup And Restore Runbook
|
||||
|
||||
What must survive:
|
||||
|
||||
- Paperclip database metadata for secret ownership, bindings, status, and provider version refs.
|
||||
- AWS Secrets Manager namespace under the configured deployment prefix.
|
||||
- The configured KMS key and its decrypt permissions.
|
||||
|
||||
Restore checklist:
|
||||
|
||||
1. Restore Paperclip database metadata.
|
||||
2. Confirm the same AWS Secrets Manager namespace still exists.
|
||||
3. Confirm the Paperclip runtime role can call `GetSecretValue` on the restored prefix.
|
||||
4. Confirm the role still has decrypt access to the CMK referenced by `PAPERCLIP_SECRETS_AWS_KMS_KEY_ID`.
|
||||
5. Run the live smoke below or a targeted runtime secret resolution test.
|
||||
|
||||
## Provider Outage Runbook
|
||||
|
||||
Symptoms:
|
||||
|
||||
- Secret create/rotate/resolve operations fail with AWS provider errors.
|
||||
- Agent runs fail before adapter invocation on required secret resolution.
|
||||
- Remote import preview fails to list AWS inventory.
|
||||
|
||||
Immediate actions:
|
||||
|
||||
1. Confirm AWS regional health and Secrets Manager availability.
|
||||
2. Confirm the runtime role still has `GetSecretValue` and KMS decrypt permissions.
|
||||
3. Check for accidental prefix, region, deployment id, or KMS key config drift.
|
||||
4. Retry a single resolution after AWS service health is green.
|
||||
5. If outage persists, pause high-risk runs that require secret access rather than churning retries.
|
||||
|
||||
Remote import-specific actions:
|
||||
|
||||
- Missing list permission: add `secretsmanager:ListSecrets` with
|
||||
`Resource: "*"` only when inventory import is approved for that vault's
|
||||
AWS account and Region.
|
||||
- Throttling: narrow the search, wait briefly, and retry with backoff. Avoid
|
||||
full-account enumeration.
|
||||
- Invalid or stale cursor: refresh the preview and discard the old
|
||||
`NextToken`.
|
||||
- Large account: load pages intentionally, keep one in-flight preview request
|
||||
per vault/search, and do not run background full-account crawls.
|
||||
- Runtime read failure after import: verify `GetSecretValue` and KMS decrypt
|
||||
on the selected external secret. Visibility in `ListSecrets` does not prove
|
||||
read permission.
|
||||
|
||||
## Incident Response Runbook
|
||||
|
||||
Potential incidents:
|
||||
|
||||
- Cross-company access caused by IAM scoping drift.
|
||||
- KMS policy drift causing decrypt failures or over-broad access.
|
||||
- Suspected secret exposure in logs, transcripts, or downstream agent output.
|
||||
|
||||
Response steps:
|
||||
|
||||
1. Stop or pause affected Paperclip runs.
|
||||
2. Audit recent Paperclip secret access events for impacted secret ids and consumers.
|
||||
3. Audit AWS CloudTrail for `ListSecrets`, `GetSecretValue`,
|
||||
`PutSecretValue`, and `DeleteSecret` calls on the relevant vault account,
|
||||
Region, deployment prefix, and approved external prefixes.
|
||||
4. Rotate impacted secrets in AWS through Paperclip-managed versioning.
|
||||
5. Re-scope IAM and KMS policies before resuming normal traffic.
|
||||
6. If a value may have reached an agent transcript or external system, treat it as exposed and rotate immediately.
|
||||
|
||||
## Optional Live Smoke
|
||||
|
||||
This is safe to skip locally. Run it only against a dedicated AWS test namespace.
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- AWS credentials or workload identity with the deployment-scoped IAM permissions above.
|
||||
- `PAPERCLIP_SECRETS_PROVIDER=aws_secrets_manager`
|
||||
- The required `PAPERCLIP_SECRETS_AWS_*` environment variables set.
|
||||
|
||||
Suggested smoke:
|
||||
|
||||
1. Create a test secret through the Paperclip board or API under a throwaway company.
|
||||
2. Confirm the resulting AWS secret name matches `paperclip/{deploymentId}/{companyId}/{secretKey}`.
|
||||
3. Rotate the secret once and confirm a new `providerVersionRef` appears in Paperclip metadata.
|
||||
4. Resolve the secret through a bound runtime path, not by adding a general-purpose reveal endpoint.
|
||||
5. Delete the throwaway secret and confirm AWS schedules deletion with the configured recovery window.
|
||||
@@ -37,7 +37,7 @@ These decisions close open questions from `SPEC.md` for V1.
|
||||
| Visibility | Full visibility to board and all agents in same company |
|
||||
| Communication | Tasks + comments only (no separate chat system) |
|
||||
| Task ownership | Single assignee; atomic checkout required for `in_progress` transition |
|
||||
| Recovery | Liveness/watchdog recovery preserves explicit ownership: retry lost execution continuity where safe, otherwise create visible recovery issues or require human escalation (see `doc/execution-semantics.md`) |
|
||||
| Recovery | Liveness/watchdog recovery preserves explicit ownership: retry lost execution continuity where safe, otherwise open visible source-scoped recovery actions by default, use issue-backed recovery only for independent repair work, or require human escalation (see `doc/execution-semantics.md`) |
|
||||
| Agent adapters | Built-in `process`, `http`, local CLI/session adapters, and OpenClaw gateway support; external adapters can also be loaded through the adapter plugin flow |
|
||||
| Plugin framework | Local/self-hosted early plugin runtime is in scope; cloud marketplace and packaged public distribution remain out of scope |
|
||||
| Auth | Mode-dependent human auth (`local_trusted` implicit board in current code; authenticated mode uses sessions), API keys for agents |
|
||||
@@ -93,11 +93,11 @@ V1 implementation extends this baseline into a company-centric, governance-aware
|
||||
## 6.2 Data Stores
|
||||
|
||||
- Primary: PostgreSQL
|
||||
- Local default: embedded PostgreSQL at `~/.paperclip/instances/default/spaces/default/db`
|
||||
- Local default: embedded PostgreSQL at `~/.paperclip/instances/default/db`
|
||||
- Optional local prod-like: Docker Postgres
|
||||
- Optional hosted: Supabase/Postgres-compatible
|
||||
- File/object storage:
|
||||
- local default: `~/.paperclip/instances/default/spaces/default/data/storage` (`local_disk`)
|
||||
- local default: `~/.paperclip/instances/default/data/storage` (`local_disk`)
|
||||
- cloud: S3-compatible object storage (`s3`)
|
||||
|
||||
## 6.3 Background Processing
|
||||
@@ -434,9 +434,10 @@ Side effects:
|
||||
V1 non-terminal liveness rule:
|
||||
|
||||
- agent-owned `todo`, `in_progress`, `in_review`, and `blocked` issues must have a live execution path, an explicit waiting path, or an explicit recovery path
|
||||
- `in_review` is healthy only when a typed execution participant, pending issue-thread interaction or approval, user owner, active run, queued wake, or explicit recovery issue owns the next action
|
||||
- `in_review` is healthy only when a typed execution participant, pending issue-thread interaction or approval, user owner, active run, queued wake, or explicit recovery action owns the next action
|
||||
- a blocked chain is covered only when each unresolved leaf issue is live or explicitly waiting
|
||||
- when Paperclip cannot safely infer the next action, it surfaces the problem through visible blocked/recovery work instead of silently completing or reassigning work
|
||||
- explicit recovery actions are the liveness primitive; source-scoped actions are the default form, issue-backed recovery is a fallback for independent repair work or safety boundaries, and comments alone are evidence rather than a healthy liveness path
|
||||
|
||||
Detailed ownership, execution, blocker, active-run watchdog, crash-recovery, and non-terminal liveness semantics are documented in `doc/execution-semantics.md`.
|
||||
|
||||
@@ -785,7 +786,7 @@ Required UX behaviors:
|
||||
|
||||
- Node 20+
|
||||
- `DATABASE_URL` optional
|
||||
- if unset, auto-use embedded PostgreSQL under `~/.paperclip/instances/default/spaces/default/db`
|
||||
- if unset, auto-use embedded PostgreSQL under `~/.paperclip/instances/default/db`
|
||||
|
||||
## 15.2 Migrations
|
||||
|
||||
|
||||
@@ -156,7 +156,7 @@ If a parent is truly waiting on a child, model that with blockers. Do not rely o
|
||||
|
||||
For agent-owned, non-terminal issues, Paperclip should never leave work in a state where nobody is responsible for the next move and nothing will wake or surface it.
|
||||
|
||||
This is a visibility contract, not an auto-completion contract. If Paperclip cannot safely infer the next action, it should surface the ambiguity with a blocked state, a visible comment, or an explicit recovery issue. It must not silently mark work done from prose comments or guess that a dependency is complete.
|
||||
This is a visibility contract, not an auto-completion contract. If Paperclip cannot safely infer the next action, it should surface the ambiguity with a blocked state, a visible notice, or an explicit recovery action. It must not silently mark work done from prose comments or guess that a dependency is complete.
|
||||
|
||||
An issue is healthy when the product can answer "what moves this forward next?" without requiring a human to reconstruct intent from the whole thread. An issue is stalled when it is non-terminal but has no live execution path, no explicit waiting path, and no recovery path.
|
||||
|
||||
@@ -169,7 +169,32 @@ The valid action-path primitives are:
|
||||
- a one-shot issue monitor (`executionPolicy.monitor.nextCheckAt`) that will wake the assignee for a future check
|
||||
- a human owner via `assigneeUserId`
|
||||
- a first-class blocker chain whose unresolved leaf issues are themselves healthy
|
||||
- an open explicit recovery issue that names the owner and action needed to restore liveness
|
||||
- an open explicit recovery action that names the owner and action needed to restore liveness
|
||||
|
||||
### Explicit recovery actions
|
||||
|
||||
An explicit recovery action is a typed liveness repair path for a source issue. It is the recovery primitive; the action can be rendered directly on the source issue or backed by a separate recovery issue when the repair needs its own work item.
|
||||
|
||||
A valid recovery action must name:
|
||||
|
||||
- the source issue and company
|
||||
- the recovery kind and idempotency fingerprint
|
||||
- the recovery owner, plus previous or return owner when ownership may temporarily shift
|
||||
- the cause, bounded evidence, and next action
|
||||
- the wake, monitor, timeout, retry, or escalation policy that will move the action forward
|
||||
- the resolution outcome when closed, such as restored, delegated, false positive, blocked, escalated, or cancelled
|
||||
|
||||
A source-scoped recovery action is the default form. Use it when the next safe move is to repair the source issue's liveness directly: restore a wake path, clarify disposition, re-establish a monitor, record a false positive, or delegate real follow-up work from the source issue.
|
||||
|
||||
Use an issue-backed recovery action only when the recovery is genuinely independent work or when source-scoped handling would be unsafe or unclear. Examples include:
|
||||
|
||||
- long or cross-agent repair work with its own assignee, subtasks, or blockers
|
||||
- real delegated follow-up that should block the source issue as a first-class dependency
|
||||
- active-run watchdog work that must observe a still-running source process without interfering with it
|
||||
- recovery that needs separate review, approval, security handling, or escalation ownership
|
||||
- cases where source issue ownership cannot be changed or restored safely
|
||||
|
||||
A comment or system notice can be evidence for a recovery action, but it is not a recovery action by itself. Comment-only recovery is not a healthy liveness path because it does not define a typed owner, wake or monitor policy, retry bound, timeout, escalation path, or resolution outcome.
|
||||
|
||||
### Agent-assigned `todo`
|
||||
|
||||
@@ -183,6 +208,16 @@ A healthy dispatch state means at least one of these is true:
|
||||
|
||||
An assigned `todo` issue is stalled when dispatch was interrupted, no wake remains queued or running, and no recovery path has been opened.
|
||||
|
||||
### Agent-assigned `backlog`
|
||||
|
||||
This is parked state, not dispatch state.
|
||||
|
||||
Assigning an issue normally implies executable intent. When create APIs receive an assignee and no explicit status, Paperclip defaults the issue to `todo` so the assignee has a wake path instead of silently inheriting the unassigned `backlog` default.
|
||||
|
||||
An explicit assigned `backlog` issue remains valid when the creator is deliberately parking the work. It must not wake the assignee just because it has an assignee. Paperclip should make that choice visible in activity and UI so operators can distinguish intentional parking from a missed handoff.
|
||||
|
||||
An assigned `backlog` issue becomes a liveness problem when another issue is blocked on it and there is no explicit waiting path such as a human owner, active run, queued wake, pending interaction or approval, monitor, or open recovery action. In that case the blocked parent should surface "blocked by parked work" rather than treating the dependency chain as healthy.
|
||||
|
||||
### Agent-assigned `in_progress`
|
||||
|
||||
This is active-work state.
|
||||
@@ -192,7 +227,7 @@ A healthy active-work state means at least one of these is true:
|
||||
- there is an active run for the issue
|
||||
- there is already a queued continuation wake
|
||||
- there is an active one-shot monitor that will wake the assignee for a future check
|
||||
- there is an open explicit recovery issue for the lost execution path
|
||||
- there is an open explicit recovery action for the lost execution path
|
||||
|
||||
An agent-owned `in_progress` issue is stalled when it has no active run, no queued continuation, and no explicit recovery surface. A still-running but silent process is not automatically stalled; it is handled by the active-run watchdog contract.
|
||||
|
||||
@@ -207,11 +242,11 @@ A healthy `in_review` issue has at least one valid action path:
|
||||
- a human owner via `assigneeUserId`
|
||||
- an active run or queued wake that is expected to process the review state
|
||||
- an active one-shot monitor for an external service or async review loop that the assignee owns
|
||||
- an open explicit recovery issue for an ambiguous review handoff
|
||||
- an open explicit recovery action for an ambiguous review handoff
|
||||
|
||||
Agent-assigned `in_review` with no typed participant is only healthy when one of the other paths exists. Assignment to the same agent that produced the handoff is not, by itself, a review path.
|
||||
|
||||
An `in_review` issue is stalled when it has no typed participant, no pending interaction or approval, no user owner, no active monitor, no active run, no queued wake, and no explicit recovery issue. Paperclip should surface that state as recovery work rather than silently completing the issue or leaving blocker chains parked indefinitely.
|
||||
An `in_review` issue is stalled when it has no typed participant, no pending interaction or approval, no user owner, no active monitor, no active run, no queued wake, and no explicit recovery action. Paperclip should surface that state as recovery work rather than silently completing the issue or leaving blocker chains parked indefinitely.
|
||||
|
||||
### Issue monitors
|
||||
|
||||
@@ -231,7 +266,7 @@ Monitors are not recurring intervals. When a monitor fires, Paperclip clears the
|
||||
|
||||
Because `serviceName` and `notes` remain visible in issue activity and wake context, operators should keep them short and non-secret. Put enough context for the assignee to know what to inspect, but do not include signed URLs, bearer tokens, customer secrets, tenant-private identifiers, or provider links with embedded credentials.
|
||||
|
||||
Monitor bounds are enforced. Paperclip rejects attempts to re-arm a monitor whose `timeoutAt` or `maxAttempts` is already exhausted. When a scheduled monitor reaches an exhausted bound at trigger time, Paperclip clears it and follows `recoveryPolicy`: `wake_owner` queues a bounded recovery wake for the assignee, `create_recovery_issue` opens visible recovery work, and `escalate_to_board` records a board-visible escalation comment/activity.
|
||||
Monitor bounds are enforced. Paperclip rejects attempts to re-arm a monitor whose `timeoutAt` or `maxAttempts` is already exhausted. When a scheduled monitor reaches an exhausted bound at trigger time, Paperclip clears it and follows `recoveryPolicy`: `wake_owner` queues a bounded recovery wake for the assignee, `create_recovery_issue` opens visible issue-backed recovery work, and `escalate_to_board` records a board-visible escalation comment/activity.
|
||||
|
||||
Use `blocked` instead of a monitor when no Paperclip assignee owns a responsible polling path. In that case, name the external owner/action or create first-class recovery/blocker work.
|
||||
|
||||
@@ -242,12 +277,12 @@ This is explicit waiting state.
|
||||
A healthy `blocked` issue has an explicit waiting path:
|
||||
|
||||
- first-class blockers exist, and each unresolved leaf has a valid action path under this contract
|
||||
- the issue is blocked on an explicit recovery issue that itself has a live or waiting path
|
||||
- the issue has an explicit recovery action that itself has a live or waiting path
|
||||
- the issue is waiting on a pending interaction, linked approval, human owner, or clearly named external owner/action
|
||||
|
||||
A blocker chain is covered only when its unresolved leaf is live or explicitly waiting. An intermediate `blocked` issue does not make the chain healthy by itself.
|
||||
|
||||
A `blocked` issue is stalled when the unresolved blocker leaf has no active run, queued wake, typed participant, pending interaction or approval, user owner, external owner/action, or recovery issue. In that case the parent should show the first stalled leaf instead of presenting the dependency as calmly covered.
|
||||
A `blocked` issue is stalled when the unresolved blocker leaf has no active run, queued wake, typed participant, pending interaction or approval, user owner, external owner/action, or recovery action. In that case the parent should show the first stalled leaf instead of presenting the dependency as calmly covered.
|
||||
|
||||
## 8. Crash and Restart Recovery
|
||||
|
||||
@@ -267,7 +302,7 @@ Example:
|
||||
Recovery rule:
|
||||
|
||||
- if the latest issue-linked run failed/timed out/cancelled and no live execution path remains, Paperclip queues one automatic assignment recovery wake
|
||||
- if that recovery wake also finishes and the issue is still stranded, Paperclip moves the issue to `blocked` and posts a visible comment
|
||||
- if that recovery wake also finishes and the issue is still stranded, Paperclip moves the issue to `blocked` and opens or updates an explicit recovery action when a bounded owner/action is known; the visible comment is evidence, not the recovery path by itself
|
||||
|
||||
This is a dispatch recovery, not a continuation recovery.
|
||||
|
||||
@@ -283,7 +318,7 @@ Example:
|
||||
Recovery rule:
|
||||
|
||||
- Paperclip queues one automatic continuation wake
|
||||
- if that continuation wake also finishes and the issue is still stranded, Paperclip moves the issue to `blocked` and posts a visible comment
|
||||
- if that continuation wake also finishes and the issue is still stranded, Paperclip moves the issue to `blocked` and opens or updates an explicit recovery action when a bounded owner/action is known; the visible comment is evidence, not the recovery path by itself
|
||||
|
||||
This is an active-work continuity recovery.
|
||||
|
||||
@@ -296,7 +331,7 @@ On startup and on the periodic recovery loop, Paperclip now does four things in
|
||||
1. reap orphaned `running` runs
|
||||
2. resume persisted `queued` runs
|
||||
3. reconcile stranded assigned work
|
||||
4. scan silent active runs and create or update explicit watchdog review issues
|
||||
4. scan silent active runs and create or update explicit watchdog recovery actions
|
||||
|
||||
The stranded-work pass closes the gap where issue state survives a crash but the wake/run path does not. The silent-run scan covers the separate case where a live process exists but has stopped producing observable output.
|
||||
|
||||
@@ -309,11 +344,11 @@ The recovery service owns this contract:
|
||||
- classify active-run output silence as `ok`, `suspicious`, `critical`, `snoozed`, or `not_applicable`
|
||||
- collect bounded evidence from run logs, recent run events, child issues, and blockers
|
||||
- preserve redaction and truncation before evidence is written to issue descriptions
|
||||
- create at most one open `stale_active_run_evaluation` issue per run
|
||||
- create at most one open watchdog recovery action per run; issue-backed implementations use `stale_active_run_evaluation` issues
|
||||
- honor active snooze decisions before creating more review work
|
||||
- build the `outputSilence` summary shown by live-run and active-run API responses
|
||||
|
||||
Suspicious silence creates a medium-priority review issue for the selected recovery owner. Critical silence raises that review issue to high priority and blocks the source issue on the explicit evaluation task without cancelling the active process.
|
||||
Suspicious silence creates a medium-priority watchdog recovery action for the selected recovery owner. Critical silence raises that recovery action to high priority and, when issue-backed evaluation is needed for correctness, blocks the source issue on the explicit evaluation task without cancelling the active process.
|
||||
|
||||
Watchdog decisions are explicit operator/recovery-owner decisions:
|
||||
|
||||
@@ -323,7 +358,7 @@ Watchdog decisions are explicit operator/recovery-owner decisions:
|
||||
|
||||
Operators should prefer `snooze` for known time-bounded quiet periods. `continue` is only a short acknowledgement of the current evidence; if the run remains silent after the re-arm window, the periodic watchdog scan can create or update review work again.
|
||||
|
||||
The board can record watchdog decisions. The assigned owner of the watchdog evaluation issue can also record them. Other agents cannot.
|
||||
The board can record watchdog decisions. The assigned owner of an issue-backed watchdog evaluation can also record them. Other agents cannot.
|
||||
|
||||
## 11. Auto-Recover vs Explicit Recovery vs Human Escalation
|
||||
|
||||
@@ -341,9 +376,9 @@ Examples:
|
||||
|
||||
Auto-recovery preserves the existing owner. It does not choose a replacement agent.
|
||||
|
||||
### Explicit Recovery Issue
|
||||
### Explicit Recovery Action
|
||||
|
||||
Paperclip creates an explicit recovery issue when the system can identify a problem but cannot safely complete the work itself.
|
||||
Paperclip opens an explicit recovery action when the system can identify a problem but cannot safely complete the work itself.
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -351,9 +386,11 @@ Examples:
|
||||
- a dependency graph has an invalid/uninvokable owner, unassigned blocker, or invalid review participant
|
||||
- an active run is silent past the watchdog threshold
|
||||
|
||||
The source issue remains visible and blocked on the recovery issue when blocking is necessary for correctness. The recovery owner must restore a live path, resolve the source issue manually, or record the reason it is a false positive.
|
||||
The recovery action stays source-scoped by default. The source issue should show the recovery owner, cause, evidence, next action, and wake or monitor policy in its own thread/detail surface.
|
||||
|
||||
Instance-level issue-graph liveness auto-recovery is disabled by default. When enabled, its lookback window means "dependency paths updated within the last N hours"; older findings remain advisory and are counted as outside the configured lookback instead of creating recovery issues automatically. This is an operator noise control, not the older staleness delay for determining whether a chain is old enough to surface.
|
||||
Create an issue-backed recovery action only when a separate issue is the right execution object. In that fallback form, the source issue remains visible and is blocked on the recovery issue when blocking is necessary for correctness. The recovery owner must restore a live path, resolve the source issue manually, delegate real follow-up work, or record the reason the signal is a false positive.
|
||||
|
||||
Instance-level issue-graph liveness auto-recovery is disabled by default. When enabled, its lookback window means "dependency paths updated within the last N hours"; older findings remain advisory and are counted as outside the configured lookback instead of creating recovery actions automatically. This is an operator noise control, not the older staleness delay for determining whether a chain is old enough to surface.
|
||||
|
||||
### Human Escalation
|
||||
|
||||
@@ -381,7 +418,7 @@ The recovery model is intentionally conservative:
|
||||
|
||||
- preserve ownership
|
||||
- retry once when the control plane lost execution continuity
|
||||
- create explicit recovery work when the system can identify a bounded recovery owner/action
|
||||
- open an explicit recovery action when the system can identify a bounded recovery owner/action
|
||||
- escalate visibly when the system cannot safely keep going
|
||||
|
||||
## 13. Practical Interpretation
|
||||
|
||||
86
doc/plans/2026-04-26-plugin-secret-ref-company-scope.md
Normal file
@@ -0,0 +1,86 @@
|
||||
# Plugin Secret Refs: Company Scope Reintroduction Plan
|
||||
|
||||
Date: 2026-04-26
|
||||
Status: follow-up after fail-closed mitigation
|
||||
Related issue: PAP-2394
|
||||
|
||||
## Current state
|
||||
|
||||
`PAP-2394` now fails closed:
|
||||
|
||||
- `POST /api/plugins/:pluginId/config` rejects any config containing plugin secret refs.
|
||||
- `ctx.secrets.resolve()` is disabled for plugin workers.
|
||||
|
||||
This removes the release-blocking cross-company exposure path, but it also disables plugin secret-ref support until the runtime carries company scope end to end.
|
||||
|
||||
## Vulnerability summary
|
||||
|
||||
The original design mixed an instance-global config store with company-scoped secret bindings:
|
||||
|
||||
- [server/src/routes/plugins.ts](/Users/dotta/paperclip/.paperclip/worktrees/PAP-2339-secrets-make-a-plan/server/src/routes/plugins.ts:1898) saved one global plugin config row, then wrote bindings into `company_secret_bindings` grouped by each referenced secret's owning company.
|
||||
- [packages/db/src/schema/plugin_config.ts](/Users/dotta/paperclip/.paperclip/worktrees/PAP-2339-secrets-make-a-plan/packages/db/src/schema/plugin_config.ts:15) stored one config row per plugin, with no company dimension.
|
||||
- [packages/db/src/schema/company_secret_bindings.ts](/Users/dotta/paperclip/.paperclip/worktrees/PAP-2339-secrets-make-a-plan/packages/db/src/schema/company_secret_bindings.ts:5) already modeled bindings as company-scoped.
|
||||
- [server/src/services/plugin-secrets-handler.ts](/Users/dotta/paperclip/.paperclip/worktrees/PAP-2339-secrets-make-a-plan/server/src/services/plugin-secrets-handler.ts:212) resolved by `pluginId` + secret UUID, with no active company context from the bridge call.
|
||||
- [packages/plugins/sdk/src/worker-rpc-host.ts](/Users/dotta/paperclip/.paperclip/worktrees/PAP-2339-secrets-make-a-plan/packages/plugins/sdk/src/worker-rpc-host.ts:384) exposed `ctx.config.get()` and `ctx.secrets.resolve()` without a company parameter.
|
||||
|
||||
This violated Least Privilege, Complete Mediation, and Secure Defaults.
|
||||
|
||||
## Recommended end state
|
||||
|
||||
Re-enable plugin secret refs only after both of these are true:
|
||||
|
||||
1. Plugin config reads/writes are company-scoped.
|
||||
2. Runtime secret resolution carries explicit company context and enforces it at resolution time.
|
||||
|
||||
## Implementation plan
|
||||
|
||||
### 1. Make plugin config company-scoped
|
||||
|
||||
- Add `company_id` to `plugin_config`, with a unique index on `(plugin_id, company_id)`.
|
||||
- Update registry helpers to require `companyId` for `getConfig`, `upsertConfig`, `patchConfig`, and `deleteConfig`.
|
||||
- Update plugin config routes to require `companyId` and call `assertCompanyAccess(req, companyId)`.
|
||||
- Keep instance-global plugin lifecycle state separate from company-scoped plugin config.
|
||||
|
||||
### 2. Propagate company context through the worker runtime
|
||||
|
||||
- Extend the SDK so `ctx.config.get()` and `ctx.secrets.resolve()` can receive or derive `companyId`.
|
||||
- Introduce worker request context storage for handlers that already run with company scope:
|
||||
- `getData`
|
||||
- `performAction`
|
||||
- scoped API routes
|
||||
- tool executions
|
||||
- environment driver calls
|
||||
- Fail closed when plugin code tries to read company-scoped config or secrets outside an active company context.
|
||||
|
||||
### 3. Rebind secrets by `(companyId, pluginId, configPath)`
|
||||
|
||||
- On config save, validate every referenced secret belongs to the authorized company.
|
||||
- Store bindings only for that company.
|
||||
- Resolve secrets only by the current company-scoped binding, never by bare plugin ID plus UUID.
|
||||
- Treat stale bindings as invalid and remove them on config replacement.
|
||||
|
||||
### 4. Prevent cross-company config disclosure
|
||||
|
||||
- When returning config to the UI, only materialize the selected company's secret refs.
|
||||
- Never expose another company's secret UUIDs through the global plugin config surface.
|
||||
|
||||
## Required regression coverage
|
||||
|
||||
- Company A board user cannot save plugin config that references a Company B secret.
|
||||
- Company A plugin execution cannot resolve a Company B secret even if the same plugin is configured for Company B.
|
||||
- Company-scoped config reads only return the selected company's secret bindings.
|
||||
- Config replacement removes stale bindings for the same `(companyId, pluginId)` target.
|
||||
- Runtime calls without company context fail closed.
|
||||
|
||||
## Migration notes
|
||||
|
||||
- Existing `plugin_config` rows need a migration strategy before re-enable.
|
||||
- Safest default: do not auto-assume a company for historical secret refs.
|
||||
- Prefer one of:
|
||||
- explicit admin migration per company, or
|
||||
- import existing rows as non-secret config only and require re-entry of secret refs.
|
||||
|
||||
## Release posture
|
||||
|
||||
- Keep plugin secret refs disabled until all steps above land.
|
||||
- Do not restore the feature behind a soft warning; the insecure path must remain unavailable by default.
|
||||
BIN
doc/pr/5429/env-editor-with-secrets.png
Normal file
|
After Width: | Height: | Size: 62 KiB |
BIN
doc/pr/5429/secret-binding-picker.png
Normal file
|
After Width: | Height: | Size: 65 KiB |
BIN
doc/pr/5429/secrets-inventory.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
133
docs/api/secrets-remote-import.md
Normal file
@@ -0,0 +1,133 @@
|
||||
---
|
||||
title: Secrets Remote Import
|
||||
summary: AWS Secrets Manager metadata-only remote import API
|
||||
---
|
||||
|
||||
Remote import lets the board link existing AWS Secrets Manager entries as
|
||||
Paperclip `external_reference` secrets without copying plaintext into
|
||||
Paperclip.
|
||||
|
||||
Both routes are board-only and company-scoped. The selected provider vault must
|
||||
belong to the company, use `aws_secrets_manager`, and have a selectable status
|
||||
(`ready` or `warning`). Disabled, coming-soon, or cross-company vaults are
|
||||
rejected.
|
||||
|
||||
Remote import is an inventory and metadata workflow. Preview calls AWS
|
||||
`ListSecrets` only and import stores a Paperclip external reference plus
|
||||
fingerprint/version metadata. Neither route calls `GetSecretValue` or
|
||||
`BatchGetSecretValue`, requests `SecretString`, requires KMS decrypt, logs raw
|
||||
remote metadata, or copies secret plaintext into Paperclip.
|
||||
|
||||
## Preview Remote AWS Secrets
|
||||
|
||||
```
|
||||
POST /api/companies/{companyId}/secrets/remote-import/preview
|
||||
{
|
||||
"providerConfigId": "<aws-vault-uuid>",
|
||||
"query": "stripe",
|
||||
"nextToken": "optional-provider-page-token",
|
||||
"pageSize": 50
|
||||
}
|
||||
```
|
||||
|
||||
`query` is optional and is sent to AWS as an inventory filter. Treat it as
|
||||
non-secret metadata because AWS may record list request parameters in
|
||||
CloudTrail. `nextToken` is an opaque AWS cursor; pass it back unchanged.
|
||||
`pageSize` is capped at 100.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"providerConfigId": "<aws-vault-uuid>",
|
||||
"provider": "aws_secrets_manager",
|
||||
"nextToken": null,
|
||||
"candidates": [
|
||||
{
|
||||
"externalRef": "arn:aws:secretsmanager:us-east-1:123456789012:secret:prod/stripe",
|
||||
"remoteName": "prod/stripe",
|
||||
"name": "prod/stripe",
|
||||
"key": "prod-stripe",
|
||||
"providerVersionRef": null,
|
||||
"providerMetadata": {
|
||||
"lastChangedDate": "2026-05-06T00:00:00.000Z",
|
||||
"hasDescription": true
|
||||
},
|
||||
"status": "ready",
|
||||
"importable": true,
|
||||
"conflicts": []
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Candidate `status` values:
|
||||
|
||||
- `ready`: no existing exact external reference and no name/key collision.
|
||||
- `duplicate`: an existing secret already has the exact provider `externalRef`.
|
||||
- `conflict`: the suggested Paperclip `name` or `key` is already in use.
|
||||
|
||||
Conflict `type` values are `exact_reference`, `name`, `key`, and
|
||||
`provider_guardrail`. AWS refs under Paperclip's own managed namespace are
|
||||
blocked as external references so one company cannot import another company's
|
||||
Paperclip-managed AWS secret through a broad runtime role.
|
||||
|
||||
## Import Remote AWS Secret References
|
||||
|
||||
```
|
||||
POST /api/companies/{companyId}/secrets/remote-import
|
||||
{
|
||||
"providerConfigId": "<aws-vault-uuid>",
|
||||
"secrets": [
|
||||
{
|
||||
"externalRef": "arn:aws:secretsmanager:us-east-1:123456789012:secret:prod/stripe",
|
||||
"name": "Stripe production key",
|
||||
"key": "stripe-production-key",
|
||||
"description": "Stripe key used by production checkout",
|
||||
"providerVersionRef": null,
|
||||
"providerMetadata": {
|
||||
"lastChangedDate": "2026-05-06T00:00:00.000Z",
|
||||
"hasDescription": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The import response is row-level. Ready rows become active
|
||||
`external_reference` secrets with version metadata only. Exact-reference
|
||||
duplicates and name/key conflicts are skipped without failing the whole request.
|
||||
The `secrets` array accepts 1-100 rows, and the backend re-checks duplicates and
|
||||
conflicts at submit time.
|
||||
Each row may include an optional Paperclip `description` entered during review;
|
||||
blank descriptions are stored as `null`. AWS provider descriptions are not
|
||||
copied into this field.
|
||||
|
||||
```json
|
||||
{
|
||||
"providerConfigId": "<aws-vault-uuid>",
|
||||
"provider": "aws_secrets_manager",
|
||||
"importedCount": 1,
|
||||
"skippedCount": 1,
|
||||
"errorCount": 0,
|
||||
"results": [
|
||||
{
|
||||
"externalRef": "arn:aws:secretsmanager:us-east-1:123456789012:secret:prod/stripe",
|
||||
"name": "Stripe production key",
|
||||
"key": "stripe-production-key",
|
||||
"status": "imported",
|
||||
"reason": null,
|
||||
"secretId": "<paperclip-secret-id>",
|
||||
"conflicts": []
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Activity logs record aggregate counts and provider/vault ids only, not remote
|
||||
secret names, ARNs, tags, or values.
|
||||
|
||||
Imported references may still fail during a future runtime resolution under a binding if
|
||||
the Paperclip runtime role can list the AWS secret but lacks
|
||||
`secretsmanager:GetSecretValue` or required KMS decrypt permission for that
|
||||
specific secret.
|
||||
@@ -25,16 +25,357 @@ POST /api/companies/{companyId}/secrets
|
||||
|
||||
The value is encrypted at rest. Only the secret ID and metadata are returned.
|
||||
|
||||
## Update Secret
|
||||
To link a provider-owned secret without copying the value into Paperclip, create
|
||||
an external-reference secret:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "prod-stripe-key",
|
||||
"provider": "aws_secrets_manager",
|
||||
"managedMode": "external_reference",
|
||||
"externalRef": "arn:aws:secretsmanager:us-east-1:123456789012:secret:paperclip/prod/stripe",
|
||||
"providerVersionRef": "version-id-or-label"
|
||||
}
|
||||
```
|
||||
|
||||
Paperclip stores the provider reference and a non-sensitive fingerprint only.
|
||||
When the provider is configured, the value is resolved through the server
|
||||
runtime path, which enforces binding context and records access events.
|
||||
|
||||
## Provider Health
|
||||
|
||||
```
|
||||
PATCH /api/secrets/{secretId}
|
||||
GET /api/companies/{companyId}/secret-providers/health
|
||||
```
|
||||
|
||||
Returns provider setup diagnostics, warnings, and local backup guidance. Health
|
||||
responses must not include secret values or provider credentials.
|
||||
|
||||
For `aws_secrets_manager`, an unready health response names the missing
|
||||
non-secret provider environment variables, the AWS SDK default credential source
|
||||
expected by the server runtime, and the custody rule that AWS bootstrap
|
||||
credentials must not be stored in Paperclip `company_secrets`.
|
||||
|
||||
The equivalent CLI check is:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai secrets doctor --company-id {companyId}
|
||||
```
|
||||
|
||||
## Provider Vaults
|
||||
|
||||
Provider vaults are named, company-scoped configurations that route secret
|
||||
material to one of the supported provider backends. See the
|
||||
[secrets deploy guide](/deploy/secrets#provider-vaults) for the operator model
|
||||
and custody rules.
|
||||
|
||||
All routes below require board auth and company access. Mutating routes emit
|
||||
`secret_provider_config.*` activity-log entries. No route in this surface
|
||||
returns provider credential values; submitting credential-shaped fields in
|
||||
`config` is rejected at validation time.
|
||||
|
||||
### List Vaults
|
||||
|
||||
```
|
||||
GET /api/companies/{companyId}/secret-provider-configs
|
||||
```
|
||||
|
||||
Returns every vault for the company (including disabled rows for audit), each
|
||||
with id, provider, displayName, status, isDefault, non-sensitive `config`,
|
||||
latest health snapshot (`healthStatus`, `healthCheckedAt`, `healthMessage`,
|
||||
`healthDetails`), `disabledAt`, and audit columns.
|
||||
|
||||
### Create Vault
|
||||
|
||||
```
|
||||
POST /api/companies/{companyId}/secret-provider-configs
|
||||
{
|
||||
"provider": "aws_secrets_manager",
|
||||
"displayName": "Prod US-East",
|
||||
"isDefault": true,
|
||||
"config": {
|
||||
"region": "us-east-1",
|
||||
"namespace": "paperclip",
|
||||
"secretNamePrefix": "paperclip",
|
||||
"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/abcd-...",
|
||||
"environmentTag": "production"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Per-provider `config` shapes:
|
||||
|
||||
- `local_encrypted`: optional `backupReminderAcknowledged: boolean`.
|
||||
- `aws_secrets_manager`: required `region`; optional `namespace`,
|
||||
`secretNamePrefix`, `kmsKeyId`, `ownerTag`, `environmentTag`.
|
||||
- `gcp_secret_manager` (coming soon): optional `projectId`, `location`,
|
||||
`namespace`, `secretNamePrefix`.
|
||||
- `vault` (coming soon): optional origin-only HTTPS `address`, `namespace`,
|
||||
`mountPath`, `secretPathPrefix`. `address` values with embedded credentials,
|
||||
paths, query strings, or fragments are rejected.
|
||||
|
||||
`status` defaults to `ready` for `local_encrypted` and `aws_secrets_manager`,
|
||||
and to `coming_soon` for `gcp_secret_manager` and `vault`. Coming-soon and
|
||||
disabled vaults cannot be marked `isDefault`. Setting `isDefault: true` clears
|
||||
the previous default for the same provider in the same transaction.
|
||||
|
||||
### Get Vault
|
||||
|
||||
```
|
||||
GET /api/secret-provider-configs/{id}
|
||||
```
|
||||
|
||||
### Update Vault
|
||||
|
||||
```
|
||||
PATCH /api/secret-provider-configs/{id}
|
||||
{
|
||||
"displayName": "Prod US-East-2",
|
||||
"config": {
|
||||
"region": "us-east-2",
|
||||
"kmsKeyId": "arn:aws:kms:us-east-2:123456789012:key/abcd-..."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`config` is replaced wholesale on update — pass the full provider config
|
||||
payload, not a partial diff. Status transitions for `gcp_secret_manager` and
|
||||
`vault` are constrained to `coming_soon` and `disabled` until their runtime
|
||||
modules ship.
|
||||
|
||||
### Disable Vault
|
||||
|
||||
```
|
||||
DELETE /api/secret-provider-configs/{id}
|
||||
```
|
||||
|
||||
Soft-deletes the vault: status flips to `disabled`, `isDefault` clears, and
|
||||
`disabledAt` is stamped. Disabled vaults remain in `GET` results for audit
|
||||
purposes but are no longer offered in the secret create/rotate flow.
|
||||
|
||||
### Set Default
|
||||
|
||||
```
|
||||
POST /api/secret-provider-configs/{id}/default
|
||||
```
|
||||
|
||||
Marks the target vault as the default for its provider family and clears the
|
||||
previous default. Returns 422 when the target is `coming_soon` or `disabled`.
|
||||
|
||||
### Run Health Check
|
||||
|
||||
```
|
||||
POST /api/secret-provider-configs/{id}/health
|
||||
```
|
||||
|
||||
Runs a provider-specific health probe and persists the result on the vault.
|
||||
Response shape:
|
||||
|
||||
```json
|
||||
{
|
||||
"configId": "<uuid>",
|
||||
"provider": "aws_secrets_manager",
|
||||
"status": "ready" | "warning" | "error" | "coming_soon" | "disabled",
|
||||
"message": "Provider vault is ready to handle managed writes",
|
||||
"details": {
|
||||
"code": "provider_ready",
|
||||
"message": "...",
|
||||
"guidance": ["..."]
|
||||
},
|
||||
"checkedAt": "2026-05-06T14:00:00.000Z"
|
||||
}
|
||||
```
|
||||
|
||||
Health responses never include provider credentials or secret values. For AWS
|
||||
vaults, `details.guidance` may include missing non-secret env names and the
|
||||
expected AWS SDK credential source; coming-soon vaults always return
|
||||
`status: "coming_soon"` with `code: "runtime_locked"` and never call into
|
||||
provider modules.
|
||||
|
||||
### Selecting A Vault When Creating Or Rotating Secrets
|
||||
|
||||
`POST /api/companies/{companyId}/secrets` and
|
||||
`POST /api/secrets/{secretId}/rotate` both accept an optional
|
||||
`providerConfigId` field that pins the secret to a specific vault. When
|
||||
omitted (or null), the operation runs through the deployment-level provider
|
||||
configuration — the same path existing installs already use. The board UI
|
||||
preselects the company's default vault for the chosen provider before
|
||||
submitting, so callers should usually send an explicit `providerConfigId`.
|
||||
Coming-soon and disabled vaults are rejected with a 422; a vault that does not
|
||||
match the secret's provider is rejected the same way.
|
||||
|
||||
```json
|
||||
POST /api/companies/{companyId}/secrets
|
||||
{
|
||||
"name": "prod-stripe-key",
|
||||
"provider": "aws_secrets_manager",
|
||||
"providerConfigId": "<vault-uuid>",
|
||||
"managedMode": "external_reference",
|
||||
"externalRef": "arn:aws:secretsmanager:us-east-1:123456789012:secret:paperclip/prod/stripe"
|
||||
}
|
||||
```
|
||||
|
||||
### Response Redaction Rules
|
||||
|
||||
Every route in this surface enforces the same redaction contract:
|
||||
|
||||
- Secret values are never returned. The board UI never has a "reveal value"
|
||||
affordance; resolution happens server-side at runtime under a binding.
|
||||
- Provider credential values are never accepted, stored, returned, logged, or
|
||||
echoed in error messages. Submitting credential-shaped fields fails
|
||||
validation with a non-leaking error.
|
||||
- Activity log entries record vault id, provider, displayName, status, and
|
||||
isDefault transitions — never `config` payloads or health detail bodies.
|
||||
|
||||
## Remote Import From AWS Secrets Manager
|
||||
|
||||
Remote import links existing AWS Secrets Manager entries into Paperclip as
|
||||
`external_reference` secrets. Import stores provider reference metadata only; it
|
||||
does not copy the remote secret plaintext into Paperclip.
|
||||
|
||||
The routes are board-only and company-scoped. `providerConfigId` must point to
|
||||
a same-company AWS provider vault with status `ready` or `warning`. Disabled,
|
||||
coming-soon, non-AWS, and cross-company vaults are rejected. Imported secrets
|
||||
resolve later through the selected vault, so runtime reads still need
|
||||
`secretsmanager:GetSecretValue` and any required KMS decrypt permission on the
|
||||
selected external secret.
|
||||
|
||||
### Preview Remote Import Candidates
|
||||
|
||||
```
|
||||
POST /api/companies/{companyId}/secrets/remote-import/preview
|
||||
{
|
||||
"providerConfigId": "<aws-vault-uuid>",
|
||||
"query": "stripe",
|
||||
"nextToken": "opaque-provider-token",
|
||||
"pageSize": 50
|
||||
}
|
||||
```
|
||||
|
||||
`query` is optional and is passed to AWS Secrets Manager inventory filtering.
|
||||
Treat it as non-secret metadata because AWS may record list request parameters
|
||||
in CloudTrail. `nextToken` is an opaque AWS cursor; callers must pass it back
|
||||
unchanged and must not synthesize offsets. `pageSize` is optional, defaults to
|
||||
50 in the UI, and is capped at 100.
|
||||
|
||||
Preview uses AWS `ListSecrets` only. It must not call `GetSecretValue` or
|
||||
`BatchGetSecretValue`, must not request `SecretString`, and must not require KMS
|
||||
decrypt. The response contains sanitized metadata for display and conflict
|
||||
decisions:
|
||||
|
||||
```json
|
||||
{
|
||||
"providerConfigId": "<aws-vault-uuid>",
|
||||
"provider": "aws_secrets_manager",
|
||||
"nextToken": null,
|
||||
"candidates": [
|
||||
{
|
||||
"externalRef": "arn:aws:secretsmanager:us-east-1:123456789012:secret:prod/stripe",
|
||||
"remoteName": "prod/stripe",
|
||||
"name": "prod/stripe",
|
||||
"key": "prod-stripe",
|
||||
"providerVersionRef": null,
|
||||
"providerMetadata": {
|
||||
"createdDate": "2026-05-06T00:00:00.000Z",
|
||||
"lastChangedDate": "2026-05-06T00:00:00.000Z",
|
||||
"hasDescription": true,
|
||||
"hasKmsKey": true,
|
||||
"tagCount": 3
|
||||
},
|
||||
"status": "ready",
|
||||
"importable": true,
|
||||
"conflicts": []
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Candidate statuses:
|
||||
|
||||
- `ready`: the row can be selected for import.
|
||||
- `duplicate`: a Paperclip secret already links the same canonical provider
|
||||
reference for the same provider vault.
|
||||
- `conflict`: the row has a name/key collision or provider guardrail failure.
|
||||
|
||||
Conflict types are `exact_reference`, `name`, `key`, and
|
||||
`provider_guardrail`. AWS refs under Paperclip's own managed namespace are
|
||||
blocked as external references; use the Paperclip-managed secret flow for those
|
||||
resources instead.
|
||||
|
||||
### Import Selected Remote References
|
||||
|
||||
```
|
||||
POST /api/companies/{companyId}/secrets/remote-import
|
||||
{
|
||||
"providerConfigId": "<aws-vault-uuid>",
|
||||
"secrets": [
|
||||
{
|
||||
"externalRef": "arn:aws:secretsmanager:us-east-1:123456789012:secret:prod/stripe",
|
||||
"name": "Stripe production key",
|
||||
"key": "stripe-production-key",
|
||||
"description": "Stripe key used by production checkout",
|
||||
"providerVersionRef": null,
|
||||
"providerMetadata": {
|
||||
"createdDate": "2026-05-06T00:00:00.000Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The `secrets` array accepts 1-100 rows. Each row may override the suggested
|
||||
Paperclip `name`, `key`, an optional Paperclip `description`,
|
||||
`providerVersionRef`, and sanitized `providerMetadata`. Blank descriptions are
|
||||
stored as `null`; AWS provider descriptions are not copied into Paperclip
|
||||
descriptions. The backend re-checks duplicate refs and name/key conflicts at
|
||||
submit time; a stale preview does not bypass those checks.
|
||||
|
||||
The import response is row-level:
|
||||
|
||||
```json
|
||||
{
|
||||
"providerConfigId": "<aws-vault-uuid>",
|
||||
"provider": "aws_secrets_manager",
|
||||
"importedCount": 1,
|
||||
"skippedCount": 1,
|
||||
"errorCount": 0,
|
||||
"results": [
|
||||
{
|
||||
"externalRef": "arn:aws:secretsmanager:us-east-1:123456789012:secret:prod/stripe",
|
||||
"name": "Stripe production key",
|
||||
"key": "stripe-production-key",
|
||||
"status": "imported",
|
||||
"reason": null,
|
||||
"secretId": "<paperclip-secret-id>",
|
||||
"conflicts": []
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Row statuses:
|
||||
|
||||
- `imported`: Paperclip created an active `external_reference` secret and one
|
||||
metadata-only version row.
|
||||
- `skipped`: the row had an exact-reference duplicate or name/key conflict.
|
||||
- `error`: the provider rejected the reference or the row failed validation.
|
||||
|
||||
Activity logs for preview/import store aggregate counts, provider id, and vault
|
||||
id only. They must not store remote secret names, ARNs, descriptions, tags,
|
||||
plaintext values, provider credentials, or raw AWS error blobs.
|
||||
|
||||
## Rotate Secret
|
||||
|
||||
```
|
||||
POST /api/secrets/{secretId}/rotate
|
||||
{
|
||||
"value": "sk-ant-new-value..."
|
||||
}
|
||||
```
|
||||
|
||||
Creates a new version of the secret. Agents referencing `"version": "latest"` automatically get the new value on next heartbeat.
|
||||
Creates a new version of the secret. Agents referencing `"version": "latest"`
|
||||
automatically get the new value on next heartbeat. Pin to a specific version
|
||||
when a bad `latest` rollout would affect many agents at once.
|
||||
|
||||
## Using Secrets in Agent Config
|
||||
|
||||
@@ -52,4 +393,20 @@ Reference secrets in agent adapter config instead of inline values:
|
||||
}
|
||||
```
|
||||
|
||||
The server resolves and decrypts secret references at runtime, injecting the real value into the agent process environment.
|
||||
The server resolves and decrypts secret references at runtime, injecting the
|
||||
real value into the agent process environment. Paperclip's custody guarantees
|
||||
end at injection: the agent process can read, log, or forward the value, so
|
||||
treat any secret bound to an agent as exposed to that agent. See the custody
|
||||
boundaries note in the [secrets deploy guide](/deploy/secrets#custody-boundaries).
|
||||
|
||||
## Portability
|
||||
|
||||
Company export/import APIs represent agent and project environment requirements
|
||||
as declarations in the package manifest. Exports omit secret values, secret IDs,
|
||||
provider references, and encrypted provider material. Use:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai secrets declarations --company-id {companyId}
|
||||
```
|
||||
|
||||
to inspect the declarations that an export would emit before moving a package.
|
||||
|
||||
BIN
docs/assets/pr-5426/scheduled-retry-story-desktop.png
Normal file
|
After Width: | Height: | Size: 258 KiB |
BIN
docs/assets/pr-5426/scheduled-retry-story-mobile.png
Normal file
|
After Width: | Height: | Size: 321 KiB |
@@ -57,6 +57,16 @@ pnpm paperclipai context set --api-key-env-var-name PAPERCLIP_API_KEY
|
||||
export PAPERCLIP_API_KEY=...
|
||||
```
|
||||
|
||||
Secret operations are available under `paperclipai secrets`:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai secrets declarations --company-id <company-id> --kind secret
|
||||
pnpm paperclipai secrets create --company-id <company-id> --name anthropic-api-key --value-env ANTHROPIC_API_KEY
|
||||
pnpm paperclipai secrets link --company-id <company-id> --name prod-stripe-key --provider aws_secrets_manager --external-ref <provider-ref>
|
||||
pnpm paperclipai secrets doctor --company-id <company-id>
|
||||
pnpm paperclipai secrets migrate-inline-env --company-id <company-id> --apply
|
||||
```
|
||||
|
||||
Context is stored at `~/.paperclip/context.json`.
|
||||
|
||||
## Command Categories
|
||||
|
||||
@@ -67,7 +67,8 @@ Validates:
|
||||
|
||||
- Server configuration
|
||||
- Database connectivity
|
||||
- Secrets adapter configuration
|
||||
- Secrets adapter configuration, including AWS Secrets Manager non-secret env
|
||||
config when selected
|
||||
- Storage configuration
|
||||
- Missing key files
|
||||
|
||||
@@ -81,6 +82,13 @@ pnpm paperclipai configure --section secrets
|
||||
pnpm paperclipai configure --section storage
|
||||
```
|
||||
|
||||
`--section secrets` updates the deployment-level provider used as the fallback
|
||||
for secrets that do not target a specific company vault. Per-company provider
|
||||
vaults (named instances, default vault selection, multiple vaults per provider,
|
||||
coming-soon GCP/Vault) live in the board UI under
|
||||
`Company Settings → Secrets → Provider vaults` and the
|
||||
`/api/companies/{companyId}/secret-provider-configs` API.
|
||||
|
||||
## `paperclipai env`
|
||||
|
||||
Show resolved environment configuration:
|
||||
|
||||
@@ -5,6 +5,52 @@ summary: Master key, encryption, and strict mode
|
||||
|
||||
Paperclip encrypts secrets at rest using a local master key. Agent environment variables that contain sensitive values (API keys, tokens) are stored as encrypted secret references.
|
||||
|
||||
## Custody Boundaries
|
||||
|
||||
Paperclip protects secret values up to the moment they are handed to an agent
|
||||
or workload:
|
||||
|
||||
- Storage: values are encrypted at rest by the active provider. The local
|
||||
provider keeps them encrypted with a key that never leaves the host.
|
||||
- Transport: values are decrypted server-side and injected into the agent
|
||||
process environment, SSH command env, sandbox driver, or HTTP request
|
||||
immediately before the call. Paperclip does not return decrypted values to
|
||||
the board UI.
|
||||
- Audit: each resolution records a non-sensitive event (secret id, version,
|
||||
provider id, consumer, outcome) without the value or provider credentials.
|
||||
|
||||
Once a value reaches the consuming process, Paperclip can no longer guarantee
|
||||
secrecy. The agent (or sandbox, or remote host) can read the value, write it to
|
||||
its own logs or transcript, or pass it to downstream tools. Treat any secret
|
||||
you bind to an agent as exposed to that agent. Limit blast radius with bindings
|
||||
(only bind what each agent needs), short-lived provider credentials where the
|
||||
provider supports them, and rotation when an agent transcript or downstream
|
||||
system might have captured a value.
|
||||
|
||||
## Using Secrets In Runs
|
||||
|
||||
Creating a company secret does not automatically create an environment variable.
|
||||
You use a secret by binding it into an agent, project, environment, or plugin
|
||||
configuration field that supports secret references.
|
||||
|
||||
For agent and project environment variables:
|
||||
|
||||
1. Create or link the secret in `Company Settings > Secrets`.
|
||||
2. Open the agent's `Environment variables` field, or the project's `Env`
|
||||
field.
|
||||
3. Add the environment variable key the process expects, such as `GH_TOKEN` or
|
||||
`OPENAI_API_KEY`.
|
||||
4. Set the row source to `Secret`, select the stored secret, and choose either
|
||||
`latest` or a pinned version.
|
||||
|
||||
At runtime, Paperclip resolves the selected secret server-side and injects the
|
||||
resolved value under the env key from the binding row. The stored secret name
|
||||
can be human-readable; the binding key is what the agent process receives.
|
||||
|
||||
Project env applies to every issue run in that project. When a project env key
|
||||
matches an agent env key, the project value wins before Paperclip injects its
|
||||
own `PAPERCLIP_*` runtime variables.
|
||||
|
||||
## Default Provider: `local_encrypted`
|
||||
|
||||
Secrets are encrypted with a local master key stored at:
|
||||
@@ -14,6 +60,13 @@ Secrets are encrypted with a local master key stored at:
|
||||
```
|
||||
|
||||
This key is auto-created during onboarding. The key never leaves your machine.
|
||||
Paperclip enforces `0600` permissions on a best-effort basis when it creates or loads the
|
||||
key file. `paperclipai doctor` and the provider health API warn when the file is
|
||||
readable by group or other users.
|
||||
|
||||
Back up the key file together with database backups. A database backup without
|
||||
the key cannot decrypt local secrets, and a key backup without the database
|
||||
metadata is not enough to restore named secret versions.
|
||||
|
||||
## Configuration
|
||||
|
||||
@@ -35,6 +88,7 @@ Validate secrets config:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai doctor
|
||||
pnpm paperclipai secrets doctor --company-id <company-id>
|
||||
```
|
||||
|
||||
### Environment Overrides
|
||||
@@ -55,15 +109,279 @@ PAPERCLIP_SECRETS_STRICT_MODE=true
|
||||
|
||||
Recommended for any deployment beyond local trusted.
|
||||
|
||||
Authenticated deployments default strict mode on unless explicitly overridden by
|
||||
configuration or `PAPERCLIP_SECRETS_STRICT_MODE=false`.
|
||||
|
||||
## External References
|
||||
|
||||
Provider-owned secrets can be linked without copying values into Paperclip by
|
||||
using `managedMode: "external_reference"` plus a provider `externalRef`.
|
||||
Paperclip stores metadata and a non-sensitive fingerprint, never the value.
|
||||
Runtime resolution remains server-side and binding-enforced.
|
||||
|
||||
The built-in AWS, GCP, and Vault provider IDs currently accept external
|
||||
reference metadata, but runtime resolution requires provider configuration in the
|
||||
deployment. Their provider health check reports this as a warning until
|
||||
configured.
|
||||
|
||||
For hosted Paperclip Cloud on AWS, see the AWS Secrets Manager operational
|
||||
contract — required env vars, IAM/KMS scoping, naming and tag conventions, and
|
||||
backup/rotation/incident runbooks — in `doc/SECRETS-AWS-PROVIDER.md`.
|
||||
|
||||
## Provider Vaults
|
||||
|
||||
A *provider vault* is a named, company-scoped configuration that points secret
|
||||
material at one of the supported provider backends. Each company can configure
|
||||
multiple vaults, including more than one vault per provider family, and pick a
|
||||
default vault per family for new secret operations. Existing secrets created
|
||||
before any vault was configured continue to resolve through the deployment-level
|
||||
default provider — no migration is required.
|
||||
|
||||
### Where to configure
|
||||
|
||||
Open `Company Settings → Secrets` in the board UI and switch to the
|
||||
`Provider vaults` tab. From there you can:
|
||||
|
||||
- Create a vault for any supported provider family.
|
||||
- Edit the non-secret config of an existing vault.
|
||||
- Set one ready vault per provider family as the company default.
|
||||
- Disable a vault (a soft delete that keeps audit history).
|
||||
- Run a health check against a vault and read the latest result inline.
|
||||
|
||||
The same operations are exposed under
|
||||
`/api/companies/{companyId}/secret-provider-configs` for automation. See the
|
||||
[secrets API reference](/api/secrets#provider-vaults) for the full route table.
|
||||
|
||||
### Custody Of Provider Credentials
|
||||
|
||||
Provider vaults intentionally store only **non-sensitive** configuration:
|
||||
region, project id, namespace, prefix, KMS key id, mount path, address, and
|
||||
similar routing metadata. The API, UI, and activity log never accept, return,
|
||||
or display provider credential values. Submitting fields with names like
|
||||
`accessKeyId`, `secretAccessKey`, `token`, `password`, `serviceAccountJson`,
|
||||
`privateKey`, `keyFile`, `unsealKey`, or any common credential alias is rejected
|
||||
at validation time.
|
||||
|
||||
That keeps the bootstrap rule from the AWS provider applicable to every
|
||||
provider family: **provider credentials live in deployment infrastructure
|
||||
identity, not in Paperclip company secrets**. Allowed credential sources are
|
||||
workload identity attached to the Paperclip server (instance profile, IRSA, ECS
|
||||
task role), `AWS_PROFILE` / SSO / shared config for local runs, an orchestrator
|
||||
secret store that boots the server, or short-lived shell credentials for local
|
||||
development. Do not paste long-lived API keys into the vault config.
|
||||
|
||||
### Vault Status
|
||||
|
||||
Each vault carries a status that drives what the runtime can do with it:
|
||||
|
||||
| Status | Meaning |
|
||||
|---------------|-----------------------------------------------------------------------------------------------|
|
||||
| `ready` | Selectable for create/rotate/resolve. Eligible to be the default. |
|
||||
| `warning` | Saved config exists but health needs attention (for example missing AWS env). Still selectable. |
|
||||
| `coming_soon` | Visible and editable as draft metadata, but locked out of all runtime operations. |
|
||||
| `disabled` | Soft-deleted. Hidden from the secret create/rotate flow. |
|
||||
|
||||
`gcp_secret_manager` and `vault` are pinned to `coming_soon` until their
|
||||
runtime modules ship. The settings UI lets you save draft configuration for
|
||||
those providers (and surfaces them on the vault list), but secret create,
|
||||
rotate, and resolve calls that target a coming-soon vault fail with a clear
|
||||
runtime-locked error.
|
||||
|
||||
### Default Vault Behavior
|
||||
|
||||
A company can mark **one** ready (or warning) vault per provider family as the
|
||||
default. The secret create and rotate dialogs preselect the default vault for
|
||||
the chosen provider so operators don't have to remember which vault to pick.
|
||||
Coming-soon and disabled vaults cannot be marked default; attempting to do so
|
||||
returns a validation error. Setting a new default automatically clears the
|
||||
previous default for that provider.
|
||||
|
||||
If a secret is created without any `providerConfigId` (no vaults exist yet, or
|
||||
the operator clears the selector), runtime resolution falls back to the
|
||||
deployment-level provider configuration — the same path existing installs use.
|
||||
This keeps secrets created before any provider vault was configured working
|
||||
without migration. Picking the default in the UI is an explicit selection, not
|
||||
a runtime fallback: the create call still sends an explicit `providerConfigId`.
|
||||
|
||||
### Multiple Vaults Per Provider
|
||||
|
||||
Multiple vaults from the same provider family are first-class. Common patterns:
|
||||
|
||||
- Two AWS vaults pointing at different regions or KMS keys for environment
|
||||
separation.
|
||||
- A staging Vault address alongside a production address.
|
||||
- A dedicated GCP project for a single product line while the rest of the
|
||||
company uses another.
|
||||
|
||||
Each vault has its own display name, status, default flag, and health record.
|
||||
Operators choose the vault explicitly when creating or rotating a secret; the
|
||||
default vault is preselected to avoid accidental routing to the wrong account.
|
||||
|
||||
### Per-Vault Health Checks
|
||||
|
||||
`POST /api/secret-provider-configs/{id}/health` runs a provider-specific health
|
||||
probe and stores the result on the vault row. The settings UI exposes the same
|
||||
action and renders the result inline. Health responses include a status,
|
||||
operator-facing message, and structured guidance (such as missing env var
|
||||
names, expected credential sources, and backup reminders). They never include
|
||||
provider credentials or secret values. Coming-soon vaults always return a
|
||||
`runtime_locked` health code and never call into provider modules.
|
||||
|
||||
### Provider-Specific Notes
|
||||
|
||||
**Local encrypted vaults** wrap the existing `local_encrypted` provider. The
|
||||
master key path and rotation guidance described above still applies. A local
|
||||
vault config is mostly bookkeeping plus an explicit acknowledgement that the
|
||||
key file is backed up alongside the database.
|
||||
|
||||
**AWS Secrets Manager vaults** read the per-vault `region`, `namespace`,
|
||||
`secretNamePrefix`, `kmsKeyId`, `ownerTag`, and `environmentTag` to route
|
||||
managed writes and external-reference reads. The vault config supplements (and
|
||||
can override) the deployment-level `PAPERCLIP_SECRETS_AWS_*` env. Bootstrap
|
||||
credentials still come from the AWS SDK default credential chain — see
|
||||
`doc/SECRETS-AWS-PROVIDER.md` for the full IAM and KMS contract.
|
||||
|
||||
**GCP Secret Manager** and **HashiCorp Vault** vaults are coming soon. You can
|
||||
save draft `projectId`, `location`, `namespace`, `address`, and `mountPath`
|
||||
metadata so the company is ready to flip them on when the provider modules
|
||||
ship. Vault `address` values must be origin-only `http(s)://host[:port]` URLs;
|
||||
addresses with embedded credentials, paths, query strings, or fragments are
|
||||
rejected.
|
||||
|
||||
### Remote Import From AWS Vaults
|
||||
|
||||
AWS provider vaults can import existing AWS Secrets Manager entries as
|
||||
Paperclip `external_reference` secrets. This is a metadata-only link: Paperclip
|
||||
stores the AWS ARN/path, a fingerprint/version reference, and binding metadata.
|
||||
It does not read, copy, store, log, or display the remote plaintext secret
|
||||
value during preview or import.
|
||||
|
||||
Operator flow in the board UI:
|
||||
|
||||
1. Open `Company Settings -> Secrets`.
|
||||
2. Confirm at least one AWS provider vault is `ready` or `warning`.
|
||||
3. In the `Secrets` tab, choose `Import from vault`.
|
||||
4. Select an AWS vault, search the remote inventory, and load more pages as
|
||||
needed.
|
||||
5. Check the rows to import, review/edit the Paperclip name and key, then
|
||||
submit.
|
||||
6. Review the result summary for created, skipped, and failed rows.
|
||||
|
||||
The preview list is intentionally paged and search-first. AWS accounts can have
|
||||
large per-Region inventories, and `ListSecrets` returns opaque `NextToken`
|
||||
cursors. Do not expect Paperclip to crawl a whole account in the background;
|
||||
load pages deliberately and retry throttled requests with backoff.
|
||||
|
||||
Remote import exposes AWS secret metadata visible to the Paperclip runtime
|
||||
role, including names/ARNs and safe derived fields such as dates, whether a
|
||||
description or KMS key exists, and tag count. Treat names, ARNs, tags, and
|
||||
search text as operational metadata that may be sensitive. The API and activity
|
||||
log must not store raw descriptions, tags, plaintext values, provider
|
||||
credentials, or raw AWS error blobs.
|
||||
|
||||
Required AWS posture:
|
||||
|
||||
- Preview needs optional `secretsmanager:ListSecrets` permission on
|
||||
`Resource: "*"`. AWS does not support constraining `ListSecrets` to
|
||||
individual secret ARNs or tags as an IAM boundary.
|
||||
- Preview/import must not call `secretsmanager:GetSecretValue`,
|
||||
`secretsmanager:BatchGetSecretValue`, or KMS decrypt.
|
||||
- Runtime resolution of an imported reference still needs
|
||||
`secretsmanager:GetSecretValue` on the selected external ARN/path and KMS
|
||||
decrypt when that secret uses a customer-managed key.
|
||||
- Keep managed create/rotate/delete permissions scoped to the Paperclip
|
||||
deployment prefix. Do not broaden managed write/delete permissions just
|
||||
because import inventory is enabled.
|
||||
|
||||
Safe scoping comes from deployment posture rather than AWS list filtering:
|
||||
dedicated Paperclip runtime roles per environment/account, AWS vaults pointed at
|
||||
the intended account and Region, import-enabled roles only where inventory
|
||||
exposure is acceptable, and board-only access to the import routes. Tags and
|
||||
name filters are search aids, not a permission model.
|
||||
|
||||
If import preview fails:
|
||||
|
||||
- `AccessDenied` or `not authorized`: the runtime role is missing
|
||||
`secretsmanager:ListSecrets`; add the optional inventory statement only if
|
||||
remote import should be enabled for that vault.
|
||||
- Throttling: retry after a short delay and narrow the search before loading
|
||||
more pages.
|
||||
- Invalid cursor: refresh the preview; AWS `NextToken` values are opaque and
|
||||
can expire or become stale.
|
||||
- Runtime resolution failure after import: verify `GetSecretValue` and KMS
|
||||
decrypt scope for the selected external secret. Being visible in inventory is
|
||||
not proof that the runtime role can read the value.
|
||||
|
||||
### Backup And Restore
|
||||
|
||||
Each provider family has a different backup story:
|
||||
|
||||
- `local_encrypted`: back up the local master key file and the Paperclip
|
||||
database together. Either alone is not enough to restore the encrypted
|
||||
values, and the vault row only records the path and acknowledgement, not the
|
||||
key bytes.
|
||||
- `aws_secrets_manager`: back up Paperclip's database for vault metadata
|
||||
(vault id, region, prefix, KMS key id, default flag, bindings, version
|
||||
pointers). The actual secret values live in AWS Secrets Manager under the
|
||||
configured prefix; restore by pointing the same Paperclip company at the
|
||||
same AWS namespace and confirming the runtime role still has
|
||||
`GetSecretValue` plus KMS decrypt. The full restore checklist lives in
|
||||
`doc/SECRETS-AWS-PROVIDER.md`.
|
||||
- `gcp_secret_manager` and `vault`: while these are coming soon, only the
|
||||
draft vault config exists in Paperclip. Database backups capture it. There
|
||||
is nothing to restore on the provider side until runtime support lands.
|
||||
|
||||
### AWS Provider Bootstrap Boundary
|
||||
|
||||
The AWS Secrets Manager provider cannot bootstrap itself from Paperclip
|
||||
`company_secrets`. Its initial AWS access must be present before the server can
|
||||
create or resolve AWS-backed company secrets, regardless of whether you use the
|
||||
deployment-level default or a per-company vault.
|
||||
|
||||
For Paperclip Cloud, provision the server runtime IAM role/workload identity,
|
||||
KMS key, deployment prefix, and non-secret `PAPERCLIP_SECRETS_AWS_*` environment
|
||||
configuration before enabling AWS-backed secrets in the board UI. For
|
||||
self-hosted and local runs, use the AWS SDK default credential chain: instance
|
||||
profile, ECS task role, EKS IRSA/OIDC web identity, AWS SSO/shared config via
|
||||
`AWS_PROFILE`, or short-lived shell credentials for local development.
|
||||
|
||||
Do not store AWS root credentials or long-lived IAM user access keys in
|
||||
Paperclip secrets. Bootstrap material belongs in infrastructure IAM/workload
|
||||
identity, the process environment, an AWS profile, or the orchestrator secret
|
||||
store.
|
||||
|
||||
## Migrating Inline Secrets
|
||||
|
||||
If you have existing agents with inline API keys in their config, migrate them to encrypted secret refs:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai secrets migrate-inline-env --company-id <company-id>
|
||||
pnpm paperclipai secrets migrate-inline-env --company-id <company-id> --apply
|
||||
|
||||
# low-level script for direct database maintenance
|
||||
pnpm secrets:migrate-inline-env # dry run
|
||||
pnpm secrets:migrate-inline-env --apply # apply migration
|
||||
```
|
||||
|
||||
Use the CLI command for normal operations because it goes through the Paperclip
|
||||
API, creates or rotates secret records, and updates agent env bindings with
|
||||
audit logging.
|
||||
|
||||
## Portable Declarations
|
||||
|
||||
Company exports include only environment declarations. They do not include
|
||||
secret IDs, provider references, encrypted material, or plaintext values.
|
||||
|
||||
```sh
|
||||
pnpm paperclipai secrets declarations --company-id <company-id> --kind secret
|
||||
```
|
||||
|
||||
Before importing a package into another instance, use those declarations to
|
||||
create local values or link hosted provider references in the target deployment.
|
||||
For hosted providers such as AWS Secrets Manager, the hosted provider remains
|
||||
the value custodian; Paperclip stores metadata and provider version references,
|
||||
not provider credentials or plaintext secret values.
|
||||
|
||||
## Secret References in Agent Config
|
||||
|
||||
Agent environment variables use secret references:
|
||||
|
||||
BIN
docs/pr-screenshots/pr-5428/assigned-backlog-dark.png
Normal file
|
After Width: | Height: | Size: 118 KiB |
BIN
docs/pr-screenshots/pr-5428/assigned-backlog-light.png
Normal file
|
After Width: | Height: | Size: 118 KiB |
@@ -61,7 +61,7 @@ describe("command managed runtime", () => {
|
||||
if (
|
||||
input.stdin != null &&
|
||||
(input.command === "sh" || input.command === "bash") &&
|
||||
args[0] === "-lc" &&
|
||||
(args[0] === "-c" || args[0] === "-lc") &&
|
||||
typeof args[1] === "string"
|
||||
) {
|
||||
env.PAPERCLIP_TEST_STDIN = input.stdin;
|
||||
@@ -131,4 +131,90 @@ describe("command managed runtime", () => {
|
||||
.toMatchObject({ code: "ENOENT" });
|
||||
expect(calls.every((call) => call.stdin == null)).toBe(true);
|
||||
});
|
||||
|
||||
it("runs setup commands from a stable root cwd when staging into a nested remote workspace dir", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-command-runtime-nested-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const localWorkspaceDir = path.join(rootDir, "local-workspace");
|
||||
const remoteBaseDir = path.join(rootDir, "remote-base");
|
||||
const remoteWorkspaceDir = path.join(remoteBaseDir, ".paperclip-runtime", "runs", "test", "workspace");
|
||||
await mkdir(localWorkspaceDir, { recursive: true });
|
||||
await mkdir(remoteBaseDir, { recursive: true });
|
||||
await writeFile(path.join(localWorkspaceDir, "README.md"), "local workspace\n", "utf8");
|
||||
|
||||
const calls: Array<{
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
}> = [];
|
||||
const runner = {
|
||||
execute: async (input: {
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
}): Promise<RunProcessResult> => {
|
||||
calls.push({ ...input });
|
||||
const startedAt = new Date().toISOString();
|
||||
try {
|
||||
const result = await execFile(input.command === "sh" ? "/bin/sh" : input.command, input.args ?? [], {
|
||||
cwd: input.cwd,
|
||||
env: {
|
||||
...process.env,
|
||||
...input.env,
|
||||
},
|
||||
maxBuffer: 32 * 1024 * 1024,
|
||||
timeout: input.timeoutMs,
|
||||
});
|
||||
return {
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: result.stdout,
|
||||
stderr: result.stderr,
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
} catch (error) {
|
||||
const err = error as NodeJS.ErrnoException & {
|
||||
stdout?: string;
|
||||
stderr?: string;
|
||||
code?: string | number | null;
|
||||
signal?: NodeJS.Signals | null;
|
||||
killed?: boolean;
|
||||
};
|
||||
return {
|
||||
exitCode: typeof err.code === "number" ? err.code : null,
|
||||
signal: err.signal ?? null,
|
||||
timedOut: Boolean(err.killed && input.timeoutMs),
|
||||
stdout: err.stdout ?? "",
|
||||
stderr: err.stderr ?? "",
|
||||
pid: null,
|
||||
startedAt,
|
||||
};
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
await prepareCommandManagedRuntime({
|
||||
runner,
|
||||
spec: {
|
||||
remoteCwd: remoteBaseDir,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
adapterKey: "codex",
|
||||
workspaceLocalDir: localWorkspaceDir,
|
||||
workspaceRemoteDir: remoteWorkspaceDir,
|
||||
});
|
||||
|
||||
expect(calls.length).toBeGreaterThan(0);
|
||||
expect(calls.every((call) => call.cwd === "/")).toBe(true);
|
||||
await expect(readFile(path.join(remoteWorkspaceDir, "README.md"), "utf8")).resolves.toBe("local workspace\n");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -6,7 +6,7 @@ import {
|
||||
type SandboxManagedRuntimeClient,
|
||||
type SandboxRemoteExecutionSpec,
|
||||
} from "./sandbox-managed-runtime.js";
|
||||
import { preferredShellForSandbox } from "./sandbox-shell.js";
|
||||
import { preferredShellForSandbox, shellCommandArgs } from "./sandbox-shell.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
|
||||
export interface CommandManagedRuntimeRunner {
|
||||
@@ -57,7 +57,7 @@ function requireSuccessfulResult(result: RunProcessResult, action: string): void
|
||||
|
||||
export function createCommandManagedRuntimeClient(input: {
|
||||
runner: CommandManagedRuntimeRunner;
|
||||
remoteCwd: string;
|
||||
commandCwd: string;
|
||||
timeoutMs: number;
|
||||
shellCommand?: "bash" | "sh" | null;
|
||||
}): SandboxManagedRuntimeClient {
|
||||
@@ -65,8 +65,8 @@ export function createCommandManagedRuntimeClient(input: {
|
||||
const runShell = async (script: string, opts: { stdin?: string; timeoutMs?: number } = {}) => {
|
||||
const result = await input.runner.execute({
|
||||
command: shellCommand,
|
||||
args: ["-lc", script],
|
||||
cwd: input.remoteCwd,
|
||||
args: shellCommandArgs(script),
|
||||
cwd: input.commandCwd,
|
||||
stdin: opts.stdin,
|
||||
timeoutMs: opts.timeoutMs ?? input.timeoutMs,
|
||||
});
|
||||
@@ -116,8 +116,8 @@ export function createCommandManagedRuntimeClient(input: {
|
||||
remove: async (remotePath) => {
|
||||
const result = await input.runner.execute({
|
||||
command: shellCommand,
|
||||
args: ["-lc", `rm -rf ${shellQuote(remotePath)}`],
|
||||
cwd: input.remoteCwd,
|
||||
args: shellCommandArgs(`rm -rf ${shellQuote(remotePath)}`),
|
||||
cwd: input.commandCwd,
|
||||
timeoutMs: input.timeoutMs,
|
||||
});
|
||||
requireSuccessfulResult(result, `remove ${remotePath}`);
|
||||
@@ -125,8 +125,8 @@ export function createCommandManagedRuntimeClient(input: {
|
||||
run: async (command, options) => {
|
||||
const result = await input.runner.execute({
|
||||
command: shellCommand,
|
||||
args: ["-lc", command],
|
||||
cwd: input.remoteCwd,
|
||||
args: shellCommandArgs(command),
|
||||
cwd: input.commandCwd,
|
||||
timeoutMs: options.timeoutMs,
|
||||
});
|
||||
requireSuccessfulResult(result, command);
|
||||
@@ -149,6 +149,10 @@ export async function prepareCommandManagedRuntime(input: {
|
||||
}): Promise<PreparedSandboxManagedRuntime> {
|
||||
const timeoutMs = input.spec.timeoutMs && input.spec.timeoutMs > 0 ? input.spec.timeoutMs : 300_000;
|
||||
const workspaceRemoteDir = input.workspaceRemoteDir ?? input.spec.remoteCwd;
|
||||
// Managed-runtime sync/restore scripts use absolute paths throughout, so
|
||||
// run them from a stable cwd. The target workspace itself may be removed or
|
||||
// recreated during a run, which breaks shell startup if we chdir into it.
|
||||
const commandCwd = "/";
|
||||
const runtimeSpec: SandboxRemoteExecutionSpec = {
|
||||
transport: "sandbox",
|
||||
provider: input.spec.providerKey ?? "sandbox",
|
||||
@@ -159,7 +163,7 @@ export async function prepareCommandManagedRuntime(input: {
|
||||
};
|
||||
const client = createCommandManagedRuntimeClient({
|
||||
runner: input.runner,
|
||||
remoteCwd: workspaceRemoteDir,
|
||||
commandCwd,
|
||||
timeoutMs,
|
||||
shellCommand: input.spec.shellCommand,
|
||||
});
|
||||
@@ -175,8 +179,8 @@ export async function prepareCommandManagedRuntime(input: {
|
||||
if (detectCommand) {
|
||||
const probe = await input.runner.execute({
|
||||
command: shellCommand,
|
||||
args: ["-lc", `command -v ${shellQuote(detectCommand)} >/dev/null 2>&1`],
|
||||
cwd: workspaceRemoteDir,
|
||||
args: shellCommandArgs(`command -v ${shellQuote(detectCommand)} >/dev/null 2>&1`),
|
||||
cwd: commandCwd,
|
||||
timeoutMs,
|
||||
});
|
||||
if (!probe.timedOut && (probe.exitCode ?? 1) === 0) {
|
||||
@@ -194,8 +198,8 @@ export async function prepareCommandManagedRuntime(input: {
|
||||
}
|
||||
const result = await input.runner.execute({
|
||||
command: shellCommand,
|
||||
args: ["-lc", installCommand],
|
||||
cwd: workspaceRemoteDir,
|
||||
args: shellCommandArgs(installCommand),
|
||||
cwd: commandCwd,
|
||||
timeoutMs,
|
||||
});
|
||||
// A failed install is not always fatal: the CLI may already be on PATH
|
||||
|
||||
@@ -5,9 +5,12 @@ import path from "node:path";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
import {
|
||||
DEFAULT_REMOTE_SANDBOX_ADAPTER_TIMEOUT_SEC,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetToRemoteSpec,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
resolveAdapterExecutionTargetTimeoutSec,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
@@ -109,6 +112,89 @@ describe("sandbox adapter execution targets", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("applies the remote sandbox fallback when adapter timeoutSec is unset", () => {
|
||||
const sandboxTarget: AdapterSandboxExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
remoteCwd: "/workspace",
|
||||
runner: createLocalSandboxRunner(),
|
||||
};
|
||||
|
||||
expect(resolveAdapterExecutionTargetTimeoutSec(sandboxTarget, 0)).toBe(
|
||||
DEFAULT_REMOTE_SANDBOX_ADAPTER_TIMEOUT_SEC,
|
||||
);
|
||||
expect(resolveAdapterExecutionTargetTimeoutSec(sandboxTarget, 90)).toBe(90);
|
||||
expect(resolveAdapterExecutionTargetTimeoutSec({
|
||||
kind: "remote",
|
||||
transport: "ssh",
|
||||
remoteCwd: "/workspace",
|
||||
spec: {
|
||||
host: "127.0.0.1",
|
||||
port: 22,
|
||||
username: "fixture",
|
||||
remoteWorkspacePath: "/workspace",
|
||||
remoteCwd: "/workspace",
|
||||
privateKey: "KEY",
|
||||
knownHosts: "host key",
|
||||
strictHostKeyChecking: true,
|
||||
},
|
||||
}, 0)).toBe(0);
|
||||
});
|
||||
|
||||
it("uses the caller timeout override when installing a missing sandbox command", async () => {
|
||||
const runner = {
|
||||
execute: vi.fn()
|
||||
.mockResolvedValueOnce({
|
||||
exitCode: 1,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "/usr/bin/opencode\n",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
}),
|
||||
};
|
||||
const target: AdapterSandboxExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
remoteCwd: "/workspace",
|
||||
timeoutMs: 300_000,
|
||||
runner,
|
||||
};
|
||||
|
||||
await ensureAdapterExecutionTargetCommandResolvable(
|
||||
"opencode",
|
||||
target,
|
||||
"/local/workspace",
|
||||
{},
|
||||
{ installCommand: "npm install -g opencode", timeoutSec: 1800 },
|
||||
);
|
||||
|
||||
expect(runner.execute).toHaveBeenNthCalledWith(2, expect.objectContaining({
|
||||
command: "sh",
|
||||
args: ["-c", "npm install -g opencode"],
|
||||
timeoutMs: 1_800_000,
|
||||
}));
|
||||
});
|
||||
|
||||
it("runs shell commands through the same runner", async () => {
|
||||
const runner = {
|
||||
execute: vi.fn(async () => ({
|
||||
@@ -136,7 +222,7 @@ describe("sandbox adapter execution targets", () => {
|
||||
|
||||
expect(runner.execute).toHaveBeenCalledWith(expect.objectContaining({
|
||||
command: "sh",
|
||||
args: ["-lc", 'printf %s "$HOME"'],
|
||||
args: ["-c", 'printf %s "$HOME"'],
|
||||
cwd: "/workspace",
|
||||
timeoutMs: 7000,
|
||||
}));
|
||||
@@ -284,7 +370,7 @@ describe("sandbox adapter execution targets", () => {
|
||||
|
||||
expect(runner.execute).toHaveBeenCalledWith(expect.objectContaining({
|
||||
command: "bash",
|
||||
args: ["-lc", 'printf %s "$HOME"'],
|
||||
args: ["-c", 'printf %s "$HOME"'],
|
||||
cwd: "/workspace",
|
||||
timeoutMs: 7000,
|
||||
}));
|
||||
@@ -363,6 +449,60 @@ describe("sandbox adapter execution targets", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("uses the effective adapter timeout when starting the sandbox callback bridge", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-execution-target-bridge-timeout-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const remoteCwd = path.join(rootDir, "workspace");
|
||||
const runtimeRootDir = path.join(remoteCwd, ".paperclip-runtime", "codex");
|
||||
await mkdir(runtimeRootDir, { recursive: true });
|
||||
|
||||
const delegateRunner = createLocalSandboxRunner();
|
||||
const runner = {
|
||||
execute: vi.fn(async (input: Parameters<typeof delegateRunner.execute>[0]) => delegateRunner.execute(input)),
|
||||
};
|
||||
const apiServer = createServer((req, res) => {
|
||||
res.writeHead(200, { "content-type": "application/json" });
|
||||
res.end(JSON.stringify({ ok: true }));
|
||||
});
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
apiServer.once("error", reject);
|
||||
apiServer.listen(0, "127.0.0.1", () => resolve());
|
||||
});
|
||||
const address = apiServer.address();
|
||||
if (!address || typeof address === "string") {
|
||||
throw new Error("Expected the bridge timeout test API server to listen on a TCP port.");
|
||||
}
|
||||
|
||||
const target: AdapterSandboxExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
providerKey: "cloudflare",
|
||||
environmentId: "env-1",
|
||||
leaseId: "lease-1",
|
||||
remoteCwd,
|
||||
runner,
|
||||
timeoutMs: 30_000,
|
||||
};
|
||||
|
||||
const bridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId: "run-bridge-timeout",
|
||||
target,
|
||||
runtimeRootDir,
|
||||
adapterKey: "codex",
|
||||
timeoutSec: DEFAULT_REMOTE_SANDBOX_ADAPTER_TIMEOUT_SEC,
|
||||
hostApiToken: "real-run-jwt",
|
||||
hostApiUrl: `http://127.0.0.1:${address.port}`,
|
||||
});
|
||||
try {
|
||||
expect(bridge).not.toBeNull();
|
||||
expect(runner.execute).toHaveBeenCalled();
|
||||
expect(runner.execute.mock.calls.some(([input]) => input.timeoutMs === 1_800_000)).toBe(true);
|
||||
} finally {
|
||||
await bridge?.stop();
|
||||
await new Promise<void>((resolve) => apiServer.close(() => resolve()));
|
||||
}
|
||||
});
|
||||
|
||||
it("fails oversized host responses with a 502 before returning them to the sandbox client", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-execution-target-bridge-limit-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
@@ -45,7 +45,7 @@ describe("runAdapterExecutionTargetShellCommand", () => {
|
||||
},
|
||||
);
|
||||
|
||||
// runSshCommand owns profile sourcing and the outer `sh -lc` wrapper —
|
||||
// runSshCommand owns profile sourcing and the outer shell wrapper —
|
||||
// the caller passes the raw command string. Wrapping it here would
|
||||
// double-nest the login shell and re-source profiles after the explicit
|
||||
// env override, silently undoing identity-var preservation.
|
||||
@@ -317,7 +317,7 @@ describe("ensureAdapterExecutionTargetRuntimeCommandInstalled", () => {
|
||||
|
||||
expect(runner.execute).toHaveBeenCalledWith(expect.objectContaining({
|
||||
command: "sh",
|
||||
args: ["-lc", "npm install -g @google/gemini-cli"],
|
||||
args: ["-c", "npm install -g @google/gemini-cli"],
|
||||
cwd: "/remote/workspace",
|
||||
env: { PATH: "/usr/bin" },
|
||||
timeoutMs: 30_000,
|
||||
|
||||
@@ -27,7 +27,7 @@ import {
|
||||
type TerminalResultCleanupOptions,
|
||||
} from "./server-utils.js";
|
||||
import { sanitizeRemoteExecutionEnv } from "./remote-execution-env.js";
|
||||
import { preferredShellForSandbox } from "./sandbox-shell.js";
|
||||
import { preferredShellForSandbox, shellCommandArgs } from "./sandbox-shell.js";
|
||||
|
||||
export interface AdapterLocalExecutionTarget {
|
||||
kind: "local";
|
||||
@@ -67,6 +67,7 @@ export type AdapterManagedRuntimeAsset = RemoteManagedRuntimeAsset;
|
||||
|
||||
export interface PreparedAdapterExecutionTargetRuntime {
|
||||
target: AdapterExecutionTarget;
|
||||
workspaceRemoteDir: string | null;
|
||||
runtimeRootDir: string | null;
|
||||
assetDirs: Record<string, string>;
|
||||
restoreWorkspace(): Promise<void>;
|
||||
@@ -98,6 +99,8 @@ export interface AdapterExecutionTargetPaperclipBridgeHandle {
|
||||
|
||||
export { sanitizeRemoteExecutionEnv } from "./remote-execution-env.js";
|
||||
|
||||
export const DEFAULT_REMOTE_SANDBOX_ADAPTER_TIMEOUT_SEC = 1_800;
|
||||
|
||||
function parseObject(value: unknown): Record<string, unknown> {
|
||||
return value && typeof value === "object" && !Array.isArray(value)
|
||||
? (value as Record<string, unknown>)
|
||||
@@ -167,6 +170,33 @@ export function adapterExecutionTargetRemoteCwd(
|
||||
return target?.kind === "remote" ? target.remoteCwd : localCwd;
|
||||
}
|
||||
|
||||
export function overrideAdapterExecutionTargetRemoteCwd(
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
remoteCwd: string | null | undefined,
|
||||
): AdapterExecutionTarget | null | undefined {
|
||||
const nextRemoteCwd = remoteCwd?.trim();
|
||||
if (!target || target.kind !== "remote" || !nextRemoteCwd) {
|
||||
return target;
|
||||
}
|
||||
if (target.remoteCwd === nextRemoteCwd) {
|
||||
return target;
|
||||
}
|
||||
if (target.transport === "ssh") {
|
||||
return {
|
||||
...target,
|
||||
remoteCwd: nextRemoteCwd,
|
||||
spec: {
|
||||
...target.spec,
|
||||
remoteCwd: nextRemoteCwd,
|
||||
},
|
||||
};
|
||||
}
|
||||
return {
|
||||
...target,
|
||||
remoteCwd: nextRemoteCwd,
|
||||
};
|
||||
}
|
||||
|
||||
export function resolveAdapterExecutionTargetCwd(
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
configuredCwd: string | null | undefined,
|
||||
@@ -194,6 +224,26 @@ export function describeAdapterExecutionTarget(
|
||||
return `sandbox environment${target.providerKey ? ` (${target.providerKey})` : ""}`;
|
||||
}
|
||||
|
||||
export function resolveAdapterExecutionTargetTimeoutSec(
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
configuredTimeoutSec: number | null | undefined,
|
||||
): number {
|
||||
const normalizedConfiguredTimeoutSec =
|
||||
typeof configuredTimeoutSec === "number" && Number.isFinite(configuredTimeoutSec) && configuredTimeoutSec > 0
|
||||
? Math.floor(configuredTimeoutSec)
|
||||
: 0;
|
||||
if (normalizedConfiguredTimeoutSec > 0) return normalizedConfiguredTimeoutSec;
|
||||
// Local and SSH adapters preserve the historical "0 means no adapter
|
||||
// timeout" behavior. Sandbox-backed runs execute through provider RPCs
|
||||
// that usually apply their own shorter command defaults, so request an
|
||||
// explicit longer timeout for full adapter runs when the adapter leaves
|
||||
// timeoutSec unset.
|
||||
if (target?.kind === "remote" && target.transport === "sandbox") {
|
||||
return DEFAULT_REMOTE_SANDBOX_ADAPTER_TIMEOUT_SEC;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
function requireSandboxRunner(target: AdapterSandboxExecutionTarget): CommandManagedRuntimeRunner {
|
||||
if (target.runner) return target.runner;
|
||||
throw new Error(
|
||||
@@ -233,10 +283,15 @@ export async function ensureAdapterExecutionTargetCommandResolvable(
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
cwd: string,
|
||||
env: NodeJS.ProcessEnv,
|
||||
options: { installCommand?: string | null } = {},
|
||||
options: { installCommand?: string | null; timeoutSec?: number | null } = {},
|
||||
) {
|
||||
if (target?.kind === "remote" && target.transport === "sandbox") {
|
||||
await ensureSandboxCommandResolvable(command, target, options.installCommand?.trim() || null);
|
||||
await ensureSandboxCommandResolvable(
|
||||
command,
|
||||
target,
|
||||
options.installCommand?.trim() || null,
|
||||
options.timeoutSec,
|
||||
);
|
||||
return;
|
||||
}
|
||||
await ensureCommandResolvable(command, cwd, env, {
|
||||
@@ -267,6 +322,7 @@ async function ensureSandboxCommandResolvable(
|
||||
command: string,
|
||||
target: AdapterSandboxExecutionTarget,
|
||||
installCommand: string | null,
|
||||
timeoutSec?: number | null,
|
||||
): Promise<void> {
|
||||
// Probe whether the binary is resolvable inside the sandbox. We previously
|
||||
// short-circuited this for sandbox targets, which let the caller report a
|
||||
@@ -288,12 +344,16 @@ async function ensureSandboxCommandResolvable(
|
||||
let installFailureDetail: string | null = null;
|
||||
if (installCommand) {
|
||||
const runner = requireSandboxRunner(target);
|
||||
const installTimeoutMs =
|
||||
typeof timeoutSec === "number" && Number.isFinite(timeoutSec) && timeoutSec > 0
|
||||
? Math.floor(timeoutSec * 1000)
|
||||
: target.timeoutMs ?? 300_000;
|
||||
try {
|
||||
const installResult = await runner.execute({
|
||||
command: "sh",
|
||||
args: ["-lc", installCommand],
|
||||
args: shellCommandArgs(installCommand),
|
||||
cwd: target.remoteCwd,
|
||||
timeoutMs: target.timeoutMs ?? 300_000,
|
||||
timeoutMs: installTimeoutMs,
|
||||
});
|
||||
if (installResult.timedOut) {
|
||||
installFailureDetail = `install command timed out: ${installCommand}`;
|
||||
@@ -389,8 +449,8 @@ export async function runAdapterExecutionTargetShellCommand(
|
||||
if (target.transport === "ssh") {
|
||||
try {
|
||||
// Pass the raw command — `runSshCommand` owns profile sourcing and
|
||||
// the outer `sh -lc` wrapper. Wrapping again here would nest a second
|
||||
// `sh -lc` after the explicit `env KEY=VAL` overrides, re-sourcing
|
||||
// the outer shell wrapper. Wrapping again here would nest a second
|
||||
// shell after the explicit `env KEY=VAL` overrides, re-sourcing
|
||||
// login profiles AFTER the override and silently undoing any
|
||||
// identity var (NVM_DIR / PATH / etc.) that a profile re-exports.
|
||||
const result = await runSshCommand(target.spec, command, {
|
||||
@@ -449,7 +509,7 @@ export async function runAdapterExecutionTargetShellCommand(
|
||||
const shellCommand = preferredSandboxShell(target);
|
||||
return await requireSandboxRunner(target).execute({
|
||||
command: shellCommand,
|
||||
args: ["-lc", command],
|
||||
args: shellCommandArgs(command),
|
||||
cwd: target.remoteCwd,
|
||||
env,
|
||||
timeoutMs: (options.timeoutSec ?? 15) * 1000,
|
||||
@@ -858,9 +918,12 @@ export function readAdapterExecutionTarget(input: {
|
||||
}
|
||||
|
||||
export async function prepareAdapterExecutionTargetRuntime(input: {
|
||||
runId: string;
|
||||
target: AdapterExecutionTarget | null | undefined;
|
||||
adapterKey: string;
|
||||
workspaceLocalDir: string;
|
||||
timeoutSec?: number;
|
||||
workspaceRemoteDir?: string;
|
||||
workspaceExclude?: string[];
|
||||
preserveAbsentOnRestore?: string[];
|
||||
assets?: AdapterManagedRuntimeAsset[];
|
||||
@@ -872,6 +935,7 @@ export async function prepareAdapterExecutionTargetRuntime(input: {
|
||||
if (target.kind === "local") {
|
||||
return {
|
||||
target,
|
||||
workspaceRemoteDir: null,
|
||||
runtimeRootDir: null,
|
||||
assetDirs: {},
|
||||
restoreWorkspace: async () => {},
|
||||
@@ -881,12 +945,15 @@ export async function prepareAdapterExecutionTargetRuntime(input: {
|
||||
if (target.transport === "ssh") {
|
||||
const prepared = await prepareRemoteManagedRuntime({
|
||||
spec: target.spec,
|
||||
runId: input.runId,
|
||||
adapterKey: input.adapterKey,
|
||||
workspaceLocalDir: input.workspaceLocalDir,
|
||||
workspaceRemoteDir: input.workspaceRemoteDir,
|
||||
assets: input.assets,
|
||||
});
|
||||
return {
|
||||
target,
|
||||
workspaceRemoteDir: prepared.workspaceRemoteDir,
|
||||
runtimeRootDir: prepared.runtimeRootDir,
|
||||
assetDirs: prepared.assetDirs,
|
||||
restoreWorkspace: prepared.restoreWorkspace,
|
||||
@@ -900,10 +967,14 @@ export async function prepareAdapterExecutionTargetRuntime(input: {
|
||||
shellCommand: target.shellCommand,
|
||||
leaseId: target.leaseId,
|
||||
remoteCwd: target.remoteCwd,
|
||||
timeoutMs: target.timeoutMs,
|
||||
timeoutMs:
|
||||
input.timeoutSec && input.timeoutSec > 0
|
||||
? input.timeoutSec * 1000
|
||||
: target.timeoutMs,
|
||||
},
|
||||
adapterKey: input.adapterKey,
|
||||
workspaceLocalDir: input.workspaceLocalDir,
|
||||
workspaceRemoteDir: input.workspaceRemoteDir,
|
||||
workspaceExclude: input.workspaceExclude,
|
||||
preserveAbsentOnRestore: input.preserveAbsentOnRestore,
|
||||
assets: input.assets,
|
||||
@@ -912,6 +983,7 @@ export async function prepareAdapterExecutionTargetRuntime(input: {
|
||||
});
|
||||
return {
|
||||
target,
|
||||
workspaceRemoteDir: prepared.workspaceRemoteDir,
|
||||
runtimeRootDir: prepared.runtimeRootDir,
|
||||
assetDirs: prepared.assetDirs,
|
||||
restoreWorkspace: prepared.restoreWorkspace,
|
||||
@@ -981,6 +1053,7 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
target: AdapterExecutionTarget | null | undefined;
|
||||
runtimeRootDir: string | null | undefined;
|
||||
adapterKey: string;
|
||||
timeoutSec?: number | null;
|
||||
hostApiToken: string | null | undefined;
|
||||
hostApiUrl?: string | null;
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
@@ -1019,6 +1092,10 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
resolveDefaultPaperclipApiUrl();
|
||||
const shellCommand = adapterExecutionTargetShellCommand(target);
|
||||
const runner = adapterExecutionTargetCommandRunner(target);
|
||||
const bridgeTimeoutMs =
|
||||
typeof input.timeoutSec === "number" && Number.isFinite(input.timeoutSec) && input.timeoutSec > 0
|
||||
? Math.trunc(input.timeoutSec * 1000)
|
||||
: adapterExecutionTargetTimeoutMs(target);
|
||||
|
||||
await onLog(
|
||||
"stdout",
|
||||
@@ -1032,7 +1109,7 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
const client = createCommandManagedSandboxCallbackBridgeQueueClient({
|
||||
runner,
|
||||
remoteCwd: target.remoteCwd,
|
||||
timeoutMs: adapterExecutionTargetTimeoutMs(target),
|
||||
timeoutMs: bridgeTimeoutMs,
|
||||
shellCommand,
|
||||
});
|
||||
// PAPERCLIP_BRIDGE_DEBUG opts into verbose stdout logs of every bridge
|
||||
@@ -1087,7 +1164,7 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
queueDir,
|
||||
bridgeToken,
|
||||
bridgeAsset,
|
||||
timeoutMs: adapterExecutionTargetTimeoutMs(target),
|
||||
timeoutMs: bridgeTimeoutMs,
|
||||
maxBodyBytes,
|
||||
shellCommand,
|
||||
});
|
||||
|
||||
@@ -60,6 +60,7 @@ export {
|
||||
REDACTED_COMMAND_TEXT_VALUE,
|
||||
redactCommandText,
|
||||
} from "./command-redaction.js";
|
||||
export { buildSandboxNpmInstallCommand } from "./sandbox-install-command.js";
|
||||
export { inferOpenAiCompatibleBiller } from "./billing.js";
|
||||
// Keep the root adapter-utils entry browser-safe because the UI imports it.
|
||||
// The sandbox callback bridge stays available via its dedicated subpath export.
|
||||
|
||||
@@ -5,6 +5,7 @@ import {
|
||||
restoreWorkspaceFromSshExecution,
|
||||
syncDirectoryToSsh,
|
||||
} from "./ssh.js";
|
||||
import { captureDirectorySnapshot } from "./workspace-restore-merge.js";
|
||||
|
||||
export interface RemoteManagedRuntimeAsset {
|
||||
key: string;
|
||||
@@ -63,19 +64,31 @@ export function remoteExecutionSessionMatches(saved: unknown, current: SshRemote
|
||||
|
||||
export async function prepareRemoteManagedRuntime(input: {
|
||||
spec: SshRemoteExecutionSpec;
|
||||
runId: string;
|
||||
adapterKey: string;
|
||||
workspaceLocalDir: string;
|
||||
workspaceRemoteDir?: string;
|
||||
assets?: RemoteManagedRuntimeAsset[];
|
||||
}): Promise<PreparedRemoteManagedRuntime> {
|
||||
const workspaceRemoteDir = input.workspaceRemoteDir ?? input.spec.remoteCwd;
|
||||
const baseWorkspaceRemoteDir = input.workspaceRemoteDir ?? input.spec.remoteCwd;
|
||||
const workspaceRemoteDir = path.posix.join(
|
||||
baseWorkspaceRemoteDir,
|
||||
".paperclip-runtime",
|
||||
"runs",
|
||||
input.runId,
|
||||
"workspace",
|
||||
);
|
||||
const runtimeRootDir = path.posix.join(workspaceRemoteDir, ".paperclip-runtime", input.adapterKey);
|
||||
|
||||
await prepareWorkspaceForSshExecution({
|
||||
const preparedWorkspace = await prepareWorkspaceForSshExecution({
|
||||
spec: input.spec,
|
||||
localDir: input.workspaceLocalDir,
|
||||
remoteDir: workspaceRemoteDir,
|
||||
});
|
||||
const restoreExclude = preparedWorkspace.gitBacked ? [".git", ".paperclip-runtime"] : [".paperclip-runtime"];
|
||||
const baselineSnapshot = await captureDirectorySnapshot(input.workspaceLocalDir, {
|
||||
exclude: restoreExclude,
|
||||
});
|
||||
|
||||
const assetDirs: Record<string, string> = {};
|
||||
try {
|
||||
@@ -95,6 +108,8 @@ export async function prepareRemoteManagedRuntime(input: {
|
||||
spec: input.spec,
|
||||
localDir: input.workspaceLocalDir,
|
||||
remoteDir: workspaceRemoteDir,
|
||||
baselineSnapshot,
|
||||
restoreGitHistory: preparedWorkspace.gitBacked,
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
@@ -110,6 +125,8 @@ export async function prepareRemoteManagedRuntime(input: {
|
||||
spec: input.spec,
|
||||
localDir: input.workspaceLocalDir,
|
||||
remoteDir: workspaceRemoteDir,
|
||||
baselineSnapshot,
|
||||
restoreGitHistory: preparedWorkspace.gitBacked,
|
||||
});
|
||||
},
|
||||
};
|
||||
|
||||
@@ -3,7 +3,7 @@ import { mkdir, mkdtemp, readFile, readdir, rm, writeFile } from "node:fs/promis
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { promisify } from "node:util";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
import { prepareCommandManagedRuntime } from "./command-managed-runtime.js";
|
||||
import {
|
||||
@@ -46,7 +46,7 @@ describe("sandbox callback bridge", () => {
|
||||
if (
|
||||
input.stdin != null &&
|
||||
(input.command === "sh" || input.command === "bash") &&
|
||||
args[0] === "-lc" &&
|
||||
(args[0] === "-c" || args[0] === "-lc") &&
|
||||
typeof args[1] === "string"
|
||||
) {
|
||||
env.PAPERCLIP_TEST_STDIN = input.stdin;
|
||||
@@ -422,6 +422,53 @@ describe("sandbox callback bridge", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("handles SSH queue polling failures without emitting an unhandled rejection", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-ssh-failure-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const queueDir = path.posix.join(rootDir, "queue");
|
||||
const unhandled: unknown[] = [];
|
||||
const onUnhandledRejection = (reason: unknown) => {
|
||||
unhandled.push(reason);
|
||||
};
|
||||
process.on("unhandledRejection", onUnhandledRejection);
|
||||
|
||||
try {
|
||||
const worker = await startSandboxCallbackBridgeWorker({
|
||||
client: {
|
||||
makeDir: async () => {},
|
||||
listJsonFiles: async () => {
|
||||
throw new Error(
|
||||
"list /remote/.paperclip-runtime/gemini/paperclip-bridge/queue/requests failed with exit code 255: kex_exchange_identification: read: Connection reset by peer",
|
||||
);
|
||||
},
|
||||
readTextFile: async () => {
|
||||
throw new Error("unexpected readTextFile");
|
||||
},
|
||||
writeTextFile: async () => {
|
||||
throw new Error("unexpected writeTextFile");
|
||||
},
|
||||
rename: async () => {
|
||||
throw new Error("unexpected rename");
|
||||
},
|
||||
remove: async () => {},
|
||||
},
|
||||
queueDir,
|
||||
authorizeRequest: async () => null,
|
||||
handleRequest: async () => ({
|
||||
status: 200,
|
||||
body: "ok",
|
||||
}),
|
||||
});
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
await worker.stop();
|
||||
expect(unhandled).toEqual([]);
|
||||
} finally {
|
||||
process.off("unhandledRejection", onUnhandledRejection);
|
||||
}
|
||||
});
|
||||
|
||||
it("serializes remote response writes so stop does not recreate a late orphaned response", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-bridge-response-lock-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
@@ -461,7 +508,7 @@ describe("sandbox callback bridge", () => {
|
||||
authorizeRequest: async () => null,
|
||||
handleRequest: async (request) => {
|
||||
seenRequestIds.push(request.id);
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
await new Promise((resolve) => setTimeout(resolve, 250));
|
||||
return {
|
||||
status: 200,
|
||||
headers: { "content-type": "application/json" },
|
||||
@@ -504,7 +551,7 @@ describe("sandbox callback bridge", () => {
|
||||
error: "Bridge worker stopped before request could be handled.",
|
||||
});
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 150));
|
||||
await new Promise((resolve) => setTimeout(resolve, 300));
|
||||
|
||||
await expect(readdir(directories.responsesDir)).resolves.toEqual([]);
|
||||
await expect(
|
||||
@@ -905,4 +952,32 @@ describe("sandbox callback bridge", () => {
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
it("marks command-managed bridge operations with the bridge execution channel", async () => {
|
||||
const runner = {
|
||||
execute: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
})),
|
||||
};
|
||||
|
||||
const client = createCommandManagedSandboxCallbackBridgeQueueClient({
|
||||
runner,
|
||||
remoteCwd: "/workspace",
|
||||
timeoutMs: 30_000,
|
||||
});
|
||||
|
||||
await client.makeDir("/workspace/.paperclip-runtime/codex/paperclip-bridge/queue");
|
||||
|
||||
expect(runner.execute).toHaveBeenCalledWith(expect.objectContaining({
|
||||
env: {
|
||||
PAPERCLIP_SANDBOX_EXEC_CHANNEL: "bridge",
|
||||
},
|
||||
}));
|
||||
});
|
||||
});
|
||||
|
||||
@@ -4,7 +4,7 @@ import os from "node:os";
|
||||
import path from "node:path";
|
||||
|
||||
import type { CommandManagedRuntimeRunner } from "./command-managed-runtime.js";
|
||||
import { preferredShellForSandbox } from "./sandbox-shell.js";
|
||||
import { preferredShellForSandbox, shellCommandArgs } from "./sandbox-shell.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
|
||||
const DEFAULT_BRIDGE_TOKEN_BYTES = 24;
|
||||
@@ -15,6 +15,8 @@ const DEFAULT_BRIDGE_MAX_QUEUE_DEPTH = 64;
|
||||
const DEFAULT_BRIDGE_MAX_BODY_BYTES = 256 * 1024;
|
||||
const REMOTE_WRITE_BASE64_CHUNK_SIZE = 32 * 1024;
|
||||
const SANDBOX_CALLBACK_BRIDGE_ENTRYPOINT = "paperclip-bridge-server.mjs";
|
||||
const SANDBOX_EXEC_CHANNEL_ENV = "PAPERCLIP_SANDBOX_EXEC_CHANNEL";
|
||||
const SANDBOX_EXEC_CHANNEL_BRIDGE = "bridge";
|
||||
|
||||
export const DEFAULT_SANDBOX_CALLBACK_BRIDGE_MAX_BODY_BYTES = DEFAULT_BRIDGE_MAX_BODY_BYTES;
|
||||
|
||||
@@ -207,8 +209,11 @@ async function runShell(
|
||||
): Promise<RunProcessResult> {
|
||||
return await runner.execute({
|
||||
command: shellCommand,
|
||||
args: ["-lc", script],
|
||||
args: shellCommandArgs(script),
|
||||
cwd,
|
||||
env: {
|
||||
[SANDBOX_EXEC_CHANNEL_ENV]: SANDBOX_EXEC_CHANNEL_BRIDGE,
|
||||
},
|
||||
timeoutMs,
|
||||
stdin,
|
||||
});
|
||||
@@ -569,10 +574,11 @@ async function writeBridgeResponse(
|
||||
requestPath: string,
|
||||
responsePath: string,
|
||||
response: SandboxCallbackBridgeResponse,
|
||||
options: { requireRequestPath?: boolean } = {},
|
||||
) {
|
||||
const body = `${JSON.stringify(response)}\n`;
|
||||
if (client.writeResponseFile) {
|
||||
await client.writeResponseFile(responsePath, body, { requestPath });
|
||||
await client.writeResponseFile(responsePath, body, options.requireRequestPath === false ? {} : { requestPath });
|
||||
return;
|
||||
}
|
||||
const tempPath = `${responsePath}.tmp`;
|
||||
@@ -610,6 +616,8 @@ export async function startSandboxCallbackBridgeWorker(input: {
|
||||
});
|
||||
const authorizeRequest = input.authorizeRequest ??
|
||||
((request: SandboxCallbackBridgeRequest) => authorizeSandboxCallbackBridgeRequestWithRoutes(request));
|
||||
const buildWorkerFailureMessage = (error: unknown) =>
|
||||
`Sandbox callback bridge worker failed: ${error instanceof Error ? error.message : String(error)}`;
|
||||
|
||||
const processRequestFile = async (fileName: string) => {
|
||||
const requestPath = path.posix.join(directories.requestsDir, fileName);
|
||||
@@ -684,12 +692,15 @@ export async function startSandboxCallbackBridgeWorker(input: {
|
||||
try {
|
||||
const raw = await input.client.readTextFile(requestPath);
|
||||
const parsed = JSON.parse(raw) as Partial<SandboxCallbackBridgeRequest>;
|
||||
await input.client.remove(requestPath).catch(() => undefined);
|
||||
await writeBridgeResponse(input.client, requestPath, responsePath, {
|
||||
id: typeof parsed.id === "string" && parsed.id.length > 0 ? parsed.id : requestId,
|
||||
status: 503,
|
||||
headers: { "content-type": "application/json" },
|
||||
body: JSON.stringify({ error: message }),
|
||||
completedAt: new Date().toISOString(),
|
||||
}, {
|
||||
requireRequestPath: false,
|
||||
});
|
||||
} catch (error) {
|
||||
console.warn(
|
||||
@@ -725,6 +736,16 @@ export async function startSandboxCallbackBridgeWorker(input: {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
const message = buildWorkerFailureMessage(error);
|
||||
console.warn(`[paperclip] ${message}`);
|
||||
try {
|
||||
await failPendingRequests(message);
|
||||
} catch (failPendingError) {
|
||||
console.warn(
|
||||
`[paperclip] sandbox callback bridge failed to abort queued requests after worker failure: ${failPendingError instanceof Error ? failPendingError.message : String(failPendingError)}`,
|
||||
);
|
||||
}
|
||||
} finally {
|
||||
settled = true;
|
||||
if (settleResolve) {
|
||||
@@ -889,8 +910,7 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
const nodeCommand = input.nodeCommand?.trim() || "node";
|
||||
const startResult = await input.runner.execute({
|
||||
command: shellCommand,
|
||||
args: [
|
||||
"-lc",
|
||||
args: shellCommandArgs(
|
||||
[
|
||||
`mkdir -p ${shellQuote(directories.requestsDir)} ${shellQuote(directories.responsesDir)} ${shellQuote(directories.logsDir)}`,
|
||||
`rm -f ${shellQuote(directories.readyFile)} ${shellQuote(directories.pidFile)}`,
|
||||
@@ -901,8 +921,11 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
`printf '%s\\n' \"$pid\" > ${shellQuote(directories.pidFile)}`,
|
||||
"printf '{\"pid\":%s}\\n' \"$pid\"",
|
||||
].join("\n"),
|
||||
],
|
||||
),
|
||||
cwd: input.remoteCwd,
|
||||
env: {
|
||||
[SANDBOX_EXEC_CHANNEL_ENV]: SANDBOX_EXEC_CHANNEL_BRIDGE,
|
||||
},
|
||||
timeoutMs,
|
||||
});
|
||||
requireSuccessfulResult("start sandbox callback bridge", startResult);
|
||||
@@ -963,8 +986,7 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
stop: async () => {
|
||||
const stopResult = await input.runner.execute({
|
||||
command: shellCommand,
|
||||
args: [
|
||||
"-lc",
|
||||
args: shellCommandArgs(
|
||||
[
|
||||
`if [ -s ${shellQuote(directories.pidFile)} ]; then`,
|
||||
` pid="$(cat ${shellQuote(directories.pidFile)})"`,
|
||||
@@ -977,8 +999,11 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
"fi",
|
||||
`rm -f ${shellQuote(directories.pidFile)} ${shellQuote(directories.readyFile)}`,
|
||||
].join("\n"),
|
||||
],
|
||||
),
|
||||
cwd: input.remoteCwd,
|
||||
env: {
|
||||
[SANDBOX_EXEC_CHANNEL_ENV]: SANDBOX_EXEC_CHANNEL_BRIDGE,
|
||||
},
|
||||
timeoutMs,
|
||||
});
|
||||
if (stopResult.timedOut) {
|
||||
|
||||
22
packages/adapter-utils/src/sandbox-install-command.test.ts
Normal file
@@ -0,0 +1,22 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { buildSandboxNpmInstallCommand } from "./sandbox-install-command.js";
|
||||
|
||||
describe("buildSandboxNpmInstallCommand", () => {
|
||||
it("installs globally as root, via sudo when available, and under ~/.local otherwise", () => {
|
||||
const command = buildSandboxNpmInstallCommand("@google/gemini-cli");
|
||||
expect(command).toContain("if [ \"$(id -u)\" -eq 0 ]; then npm install -g '@google/gemini-cli';");
|
||||
expect(command).toContain("sudo -E npm install -g '@google/gemini-cli'");
|
||||
expect(command).toContain("npm install -g --prefix \"$HOME/.local\" '@google/gemini-cli'");
|
||||
});
|
||||
|
||||
it("bootstraps npm from a portable Node tarball when missing", () => {
|
||||
const command = buildSandboxNpmInstallCommand("@google/gemini-cli");
|
||||
expect(command).toContain("if ! command -v npm >/dev/null 2>&1; then");
|
||||
expect(command).toContain("https://nodejs.org/dist/");
|
||||
expect(command).toContain('export PATH="$HOME/.local/bin:$PATH"');
|
||||
});
|
||||
|
||||
it("shell-quotes package names", () => {
|
||||
expect(buildSandboxNpmInstallCommand("odd'pkg")).toContain("'odd'\"'\"'pkg'");
|
||||
});
|
||||
});
|
||||
46
packages/adapter-utils/src/sandbox-install-command.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
function shellSingleQuote(value: string): string {
|
||||
return `'${value.replaceAll("'", `'\"'\"'`)}'`;
|
||||
}
|
||||
|
||||
// Bootstrap a usable npm when the sandbox image ships without one (e.g. the
|
||||
// default exe.dev VM image has sshd + a normal user homedir but no Node
|
||||
// toolchain). We install a portable Node tarball into $HOME/.local rather
|
||||
// than using apt-get because the distro-packaged Node is often old enough to
|
||||
// reject modern JS syntax (regex /v flag, etc.) used by adapter CLIs like
|
||||
// @google/gemini-cli. The bootstrap also sets PAPERCLIP_NPM_BOOTSTRAPPED=1
|
||||
// so the install step knows to skip sudo — sudo would reset PATH via
|
||||
// secure_path and lose visibility of the freshly-installed npm in
|
||||
// $HOME/.local/bin.
|
||||
const ENSURE_NPM_PREAMBLE =
|
||||
"PAPERCLIP_NPM_BOOTSTRAPPED=; " +
|
||||
'if ! command -v npm >/dev/null 2>&1; then ' +
|
||||
'NODE_ARCH="$(uname -m)"; ' +
|
||||
'case "$NODE_ARCH" in ' +
|
||||
"x86_64) NODE_ARCH=x64 ;; " +
|
||||
"aarch64|arm64) NODE_ARCH=arm64 ;; " +
|
||||
"esac; " +
|
||||
'NODE_VERSION="v22.11.0"; ' +
|
||||
'NODE_TARBALL="node-${NODE_VERSION}-linux-${NODE_ARCH}.tar.xz"; ' +
|
||||
'mkdir -p "$HOME/.local"; ' +
|
||||
'curl -fsSL "https://nodejs.org/dist/${NODE_VERSION}/${NODE_TARBALL}" -o "/tmp/${NODE_TARBALL}" && ' +
|
||||
'tar -xJf "/tmp/${NODE_TARBALL}" -C "$HOME/.local" --strip-components=1 && ' +
|
||||
'rm -f "/tmp/${NODE_TARBALL}" && ' +
|
||||
'export PATH="$HOME/.local/bin:$PATH" && ' +
|
||||
"PAPERCLIP_NPM_BOOTSTRAPPED=1; " +
|
||||
"fi;";
|
||||
|
||||
export function buildSandboxNpmInstallCommand(packageName: string): string {
|
||||
const quotedPackageName = shellSingleQuote(packageName);
|
||||
return [
|
||||
ENSURE_NPM_PREAMBLE,
|
||||
'if [ -n "$PAPERCLIP_NPM_BOOTSTRAPPED" ]; then',
|
||||
`npm install -g ${quotedPackageName};`,
|
||||
'elif [ "$(id -u)" -eq 0 ]; then',
|
||||
`npm install -g ${quotedPackageName};`,
|
||||
'elif command -v sudo >/dev/null 2>&1 && sudo -n true >/dev/null 2>&1; then',
|
||||
`sudo -E npm install -g ${quotedPackageName};`,
|
||||
"else",
|
||||
`mkdir -p "$HOME/.local" && npm install -g --prefix "$HOME/.local" ${quotedPackageName};`,
|
||||
"fi",
|
||||
].join(" ");
|
||||
}
|
||||
@@ -84,7 +84,7 @@ describe("sandbox managed runtime", () => {
|
||||
await rm(remotePath, { recursive: true, force: true });
|
||||
},
|
||||
run: async (command) => {
|
||||
await execFile("sh", ["-lc", command], {
|
||||
await execFile("sh", ["-c", command], {
|
||||
maxBuffer: 32 * 1024 * 1024,
|
||||
});
|
||||
},
|
||||
@@ -126,7 +126,7 @@ describe("sandbox managed runtime", () => {
|
||||
|
||||
await expect(readFile(path.join(localWorkspaceDir, "README.md"), "utf8")).resolves.toBe("remote workspace\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, "remote-only.txt"), "utf8")).resolves.toBe("sync back\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, "local-stale.txt"), "utf8")).rejects.toMatchObject({ code: "ENOENT" });
|
||||
await expect(readFile(path.join(localWorkspaceDir, "local-stale.txt"), "utf8")).resolves.toBe("remove\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, ".claude", "settings.json"), "utf8")).resolves.toBe("{\"local\":true}\n");
|
||||
await expect(readFile(path.join(localWorkspaceDir, ".paperclip-runtime", "state.json"), "utf8")).resolves.toBe("{}\n");
|
||||
});
|
||||
|
||||
@@ -3,6 +3,7 @@ import { constants as fsConstants, promises as fs } from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { promisify } from "node:util";
|
||||
import { captureDirectorySnapshot, mergeDirectoryWithBaseline } from "./workspace-restore-merge.js";
|
||||
|
||||
const execFile = promisify(execFileCallback);
|
||||
|
||||
@@ -248,6 +249,9 @@ export async function prepareSandboxManagedRuntime(input: {
|
||||
}): Promise<PreparedSandboxManagedRuntime> {
|
||||
const workspaceRemoteDir = input.workspaceRemoteDir ?? input.spec.remoteCwd;
|
||||
const runtimeRootDir = path.posix.join(workspaceRemoteDir, ".paperclip-runtime", input.adapterKey);
|
||||
const baselineSnapshot = await captureDirectorySnapshot(input.workspaceLocalDir, {
|
||||
exclude: [...new Set([".paperclip-runtime", ...(input.preserveAbsentOnRestore ?? []), ...(input.workspaceExclude ?? [])])],
|
||||
});
|
||||
|
||||
await withTempDir("paperclip-sandbox-sync-", async (tempDir) => {
|
||||
const workspaceTarPath = path.join(tempDir, "workspace.tar");
|
||||
@@ -263,7 +267,7 @@ export async function prepareSandboxManagedRuntime(input: {
|
||||
const preservedNames = new Set([".paperclip-runtime", ...(input.preserveAbsentOnRestore ?? [])]);
|
||||
const findPreserveArgs = [...preservedNames].map((entry) => `! -name ${shellQuote(entry)}`).join(" ");
|
||||
await input.client.run(
|
||||
`sh -lc ${shellQuote(
|
||||
`sh -c ${shellQuote(
|
||||
`mkdir -p ${shellQuote(workspaceRemoteDir)} && ` +
|
||||
`find ${shellQuote(workspaceRemoteDir)} -mindepth 1 -maxdepth 1 ${findPreserveArgs} -exec rm -rf -- {} + && ` +
|
||||
`tar -xf ${shellQuote(remoteWorkspaceTar)} -C ${shellQuote(workspaceRemoteDir)} && ` +
|
||||
@@ -285,7 +289,7 @@ export async function prepareSandboxManagedRuntime(input: {
|
||||
const remoteAssetTar = path.posix.join(runtimeRootDir, `${asset.key}-upload.tar`);
|
||||
await input.client.writeFile(remoteAssetTar, toArrayBuffer(assetTarBytes));
|
||||
await input.client.run(
|
||||
`sh -lc ${shellQuote(
|
||||
`sh -c ${shellQuote(
|
||||
`rm -rf ${shellQuote(remoteAssetDir)} && ` +
|
||||
`mkdir -p ${shellQuote(remoteAssetDir)} && ` +
|
||||
`tar -xf ${shellQuote(remoteAssetTar)} -C ${shellQuote(remoteAssetDir)} && ` +
|
||||
@@ -310,7 +314,7 @@ export async function prepareSandboxManagedRuntime(input: {
|
||||
await withTempDir("paperclip-sandbox-restore-", async (tempDir) => {
|
||||
const remoteWorkspaceTar = path.posix.join(runtimeRootDir, "workspace-download.tar");
|
||||
await input.client.run(
|
||||
`sh -lc ${shellQuote(
|
||||
`sh -c ${shellQuote(
|
||||
`mkdir -p ${shellQuote(runtimeRootDir)} && ` +
|
||||
`tar -cf ${shellQuote(remoteWorkspaceTar)} -C ${shellQuote(workspaceRemoteDir)} ` +
|
||||
`${tarExcludeFlags(input.workspaceExclude)} .`,
|
||||
@@ -326,8 +330,10 @@ export async function prepareSandboxManagedRuntime(input: {
|
||||
archivePath: localArchivePath,
|
||||
localDir: extractedDir,
|
||||
});
|
||||
await mirrorDirectory(extractedDir, input.workspaceLocalDir, {
|
||||
preserveAbsent: [".paperclip-runtime", ...(input.preserveAbsentOnRestore ?? [])],
|
||||
await mergeDirectoryWithBaseline({
|
||||
baseline: baselineSnapshot,
|
||||
sourceDir: extractedDir,
|
||||
targetDir: input.workspaceLocalDir,
|
||||
});
|
||||
});
|
||||
},
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
export function preferredShellForSandbox(shellCommand: string | null | undefined): "bash" | "sh" {
|
||||
return shellCommand === "bash" ? "bash" : "sh";
|
||||
}
|
||||
|
||||
export function shellCommandArgs(script: string): string[] {
|
||||
return ["-c", script];
|
||||
}
|
||||
|
||||
@@ -9,11 +9,13 @@ import {
|
||||
buildInvocationEnvForLogs,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
materializePaperclipSkillCopy,
|
||||
refreshPaperclipWorkspaceEnvForExecution,
|
||||
renderPaperclipWakePrompt,
|
||||
runningProcesses,
|
||||
runChildProcess,
|
||||
sanitizeSshRemoteEnv,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
rewriteWorkspaceCwdEnvVarsForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
} from "./server-utils.js";
|
||||
|
||||
@@ -810,6 +812,119 @@ describe("shapePaperclipWorkspaceEnvForExecution", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("rewriteWorkspaceCwdEnvVarsForExecution", () => {
|
||||
it("rewrites custom *_WORKSPACE_CWD env vars for remote execution", () => {
|
||||
const env = rewriteWorkspaceCwdEnvVarsForExecution({
|
||||
workspaceCwd: "/host/workspace",
|
||||
executionCwd: "/remote/workspace",
|
||||
executionTargetIsRemote: true,
|
||||
env: {
|
||||
QA_PROJECT_WORKSPACE_CWD: "/host/workspace",
|
||||
RANDOM_WORKSPACE_CWD: "/host/workspace",
|
||||
OTHER_ENV: "/host/workspace",
|
||||
},
|
||||
});
|
||||
|
||||
expect(env).toEqual({
|
||||
QA_PROJECT_WORKSPACE_CWD: "/remote/workspace",
|
||||
RANDOM_WORKSPACE_CWD: "/remote/workspace",
|
||||
OTHER_ENV: "/host/workspace",
|
||||
});
|
||||
});
|
||||
|
||||
it("does not rewrite matching values for local execution", () => {
|
||||
const env = rewriteWorkspaceCwdEnvVarsForExecution({
|
||||
workspaceCwd: "/host/workspace",
|
||||
executionCwd: "/remote/workspace",
|
||||
executionTargetIsRemote: false,
|
||||
env: {
|
||||
QA_PROJECT_WORKSPACE_CWD: "/host/workspace",
|
||||
RANDOM_WORKSPACE_CWD_TOKEN: "/host/workspace",
|
||||
},
|
||||
});
|
||||
|
||||
expect(env).toEqual({
|
||||
QA_PROJECT_WORKSPACE_CWD: "/host/workspace",
|
||||
RANDOM_WORKSPACE_CWD_TOKEN: "/host/workspace",
|
||||
});
|
||||
});
|
||||
|
||||
it("only rewrites matching *_WORKSPACE_CWD string values", () => {
|
||||
const env = rewriteWorkspaceCwdEnvVarsForExecution({
|
||||
workspaceCwd: "/host/workspace",
|
||||
executionCwd: "/remote/workspace",
|
||||
executionTargetIsRemote: true,
|
||||
env: {
|
||||
MATCHING_WORKSPACE_CWD: "/host/workspace/.",
|
||||
DIFFERENT_WORKSPACE_CWD: "/host/other-workspace",
|
||||
BLANK_WORKSPACE_CWD: " ",
|
||||
NON_STRING_WORKSPACE_CWD: 42,
|
||||
},
|
||||
});
|
||||
|
||||
expect(env).toEqual({
|
||||
MATCHING_WORKSPACE_CWD: "/remote/workspace",
|
||||
DIFFERENT_WORKSPACE_CWD: "/host/other-workspace",
|
||||
BLANK_WORKSPACE_CWD: " ",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("refreshPaperclipWorkspaceEnvForExecution", () => {
|
||||
it("rewrites Paperclip workspace env to the prepared remote runtime cwd", () => {
|
||||
const env: Record<string, string> = {
|
||||
PAPERCLIP_WORKSPACE_CWD: "/remote/workspace",
|
||||
PAPERCLIP_WORKSPACE_WORKTREE_PATH: "/host/worktree",
|
||||
PAPERCLIP_WORKSPACES_JSON: JSON.stringify([
|
||||
{ workspaceId: "workspace-1", cwd: "/remote/workspace" },
|
||||
{ workspaceId: "workspace-2", cwd: "/tmp/other" },
|
||||
]),
|
||||
QA_PROJECT_WORKSPACE_CWD: "/remote/workspace",
|
||||
};
|
||||
|
||||
const shaped = refreshPaperclipWorkspaceEnvForExecution({
|
||||
env,
|
||||
envConfig: {
|
||||
QA_PROJECT_WORKSPACE_CWD: "/host/workspace",
|
||||
},
|
||||
workspaceCwd: "/host/workspace",
|
||||
workspaceWorktreePath: "/host/worktree",
|
||||
workspaceHints: [
|
||||
{ workspaceId: "workspace-1", cwd: "/host/workspace" },
|
||||
{ workspaceId: "workspace-2", cwd: "/tmp/other" },
|
||||
],
|
||||
executionTargetIsRemote: true,
|
||||
executionCwd: "/remote/workspace/.paperclip-runtime/runs/run-1/workspace",
|
||||
});
|
||||
|
||||
expect(shaped).toEqual({
|
||||
workspaceCwd: "/remote/workspace/.paperclip-runtime/runs/run-1/workspace",
|
||||
workspaceWorktreePath: null,
|
||||
workspaceHints: [
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace/.paperclip-runtime/runs/run-1/workspace",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
},
|
||||
],
|
||||
});
|
||||
expect(env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace/.paperclip-runtime/runs/run-1/workspace");
|
||||
expect(env.PAPERCLIP_WORKSPACE_WORKTREE_PATH).toBeUndefined();
|
||||
expect(env.QA_PROJECT_WORKSPACE_CWD).toBe("/remote/workspace/.paperclip-runtime/runs/run-1/workspace");
|
||||
expect(JSON.parse(env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace/.paperclip-runtime/runs/run-1/workspace",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
},
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("appendWithByteCap", () => {
|
||||
it("keeps valid UTF-8 when trimming through multibyte text", () => {
|
||||
const output = appendWithByteCap("prefix ", "hello — world", 7);
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { spawn, type ChildProcess } from "node:child_process";
|
||||
import { createHash, randomUUID } from "node:crypto";
|
||||
import { constants as fsConstants, promises as fs, type Dirent } from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { sanitizeRemoteExecutionEnv } from "./remote-execution-env.js";
|
||||
import { buildSshSpawnTarget, type SshRemoteExecutionSpec } from "./ssh.js";
|
||||
@@ -78,6 +79,8 @@ export const runningProcesses = new Map<string, RunningProcess>();
|
||||
export const MAX_CAPTURE_BYTES = 4 * 1024 * 1024;
|
||||
export const MAX_EXCERPT_BYTES = 32 * 1024;
|
||||
const TERMINAL_RESULT_SCAN_OVERLAP_CHARS = 64 * 1024;
|
||||
const DEFAULT_PAPERCLIP_INSTANCE_ID = "default";
|
||||
const PATH_SEGMENT_RE = /^[a-zA-Z0-9_-]+$/;
|
||||
const SENSITIVE_ENV_KEY = /(key|token|secret|password|passwd|authorization|cookie)/i;
|
||||
const REDACTED_LOG_VALUE = "***REDACTED***";
|
||||
const PAPERCLIP_SKILL_ROOT_RELATIVE_CANDIDATES = [
|
||||
@@ -88,6 +91,25 @@ const MATERIALIZED_SKILL_SENTINEL = ".paperclip-materialized-skill.json";
|
||||
const MATERIALIZED_SKILL_LOCK_OWNER = "owner.json";
|
||||
const MATERIALIZED_SKILL_LOCK_STALE_MS = 30_000;
|
||||
|
||||
function expandHomePrefix(value: string): string {
|
||||
if (value === "~") return os.homedir();
|
||||
if (value.startsWith("~/")) return path.resolve(os.homedir(), value.slice(2));
|
||||
return value;
|
||||
}
|
||||
|
||||
export function resolvePaperclipInstanceRootForAdapter(input: {
|
||||
homeDir?: string;
|
||||
instanceId?: string;
|
||||
env?: NodeJS.ProcessEnv;
|
||||
} = {}): string {
|
||||
const env = input.env ?? process.env;
|
||||
const homeRaw = input.homeDir?.trim() || env.PAPERCLIP_HOME?.trim();
|
||||
const homeDir = path.resolve(homeRaw ? expandHomePrefix(homeRaw) : path.resolve(os.homedir(), ".paperclip"));
|
||||
const instanceId = input.instanceId?.trim() || env.PAPERCLIP_INSTANCE_ID?.trim() || DEFAULT_PAPERCLIP_INSTANCE_ID;
|
||||
if (!PATH_SEGMENT_RE.test(instanceId)) throw new Error(`Invalid PAPERCLIP_INSTANCE_ID '${instanceId}'.`);
|
||||
return path.resolve(homeDir, "instances", instanceId);
|
||||
}
|
||||
|
||||
export const DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE = [
|
||||
"You are agent {{agent.id}} ({{agent.name}}). Continue your Paperclip work.",
|
||||
"",
|
||||
@@ -999,6 +1021,104 @@ export function shapePaperclipWorkspaceEnvForExecution(input: {
|
||||
};
|
||||
}
|
||||
|
||||
export function rewriteWorkspaceCwdEnvVarsForExecution(input: {
|
||||
env: Record<string, unknown>;
|
||||
workspaceCwd?: string | null;
|
||||
executionCwd?: string | null;
|
||||
executionTargetIsRemote?: boolean;
|
||||
}): Record<string, string> {
|
||||
const nextEnv = Object.fromEntries(
|
||||
Object.entries(input.env)
|
||||
.filter((entry): entry is [string, string] => typeof entry[1] === "string"),
|
||||
) as Record<string, string>;
|
||||
const localWorkspaceCwd = typeof input.workspaceCwd === "string" && input.workspaceCwd.trim().length > 0
|
||||
? path.resolve(input.workspaceCwd)
|
||||
: null;
|
||||
// executionCwd is a remote path on the target host; we deliberately do not
|
||||
// run `path.resolve` against it because that applies host-Node semantics
|
||||
// (current working directory, host path separator) to a path that lives on
|
||||
// the remote shell. Callers always pass absolute remote paths, so we
|
||||
// forward the trimmed value verbatim.
|
||||
const remoteWorkspaceCwd = typeof input.executionCwd === "string" && input.executionCwd.trim().length > 0
|
||||
? input.executionCwd.trim()
|
||||
: null;
|
||||
|
||||
if (!input.executionTargetIsRemote || !localWorkspaceCwd || !remoteWorkspaceCwd) {
|
||||
return nextEnv;
|
||||
}
|
||||
|
||||
for (const [key, value] of Object.entries(nextEnv)) {
|
||||
if (!key.endsWith("_WORKSPACE_CWD")) continue;
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) continue;
|
||||
if (path.resolve(trimmed) !== localWorkspaceCwd) continue;
|
||||
nextEnv[key] = remoteWorkspaceCwd;
|
||||
}
|
||||
|
||||
return nextEnv;
|
||||
}
|
||||
|
||||
export function refreshPaperclipWorkspaceEnvForExecution(input: {
|
||||
env: Record<string, string>;
|
||||
envConfig?: Record<string, unknown>;
|
||||
workspaceCwd?: string | null;
|
||||
workspaceSource?: string | null;
|
||||
workspaceStrategy?: string | null;
|
||||
workspaceId?: string | null;
|
||||
workspaceRepoUrl?: string | null;
|
||||
workspaceRepoRef?: string | null;
|
||||
workspaceBranch?: string | null;
|
||||
workspaceWorktreePath?: string | null;
|
||||
workspaceHints?: Array<Record<string, unknown>>;
|
||||
agentHome?: string | null;
|
||||
executionTargetIsRemote?: boolean;
|
||||
executionCwd?: string | null;
|
||||
}): {
|
||||
workspaceCwd: string | null;
|
||||
workspaceWorktreePath: string | null;
|
||||
workspaceHints: Array<Record<string, unknown>>;
|
||||
} {
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: input.workspaceCwd,
|
||||
workspaceWorktreePath: input.workspaceWorktreePath,
|
||||
workspaceHints: input.workspaceHints,
|
||||
executionTargetIsRemote: input.executionTargetIsRemote,
|
||||
executionCwd: input.executionCwd,
|
||||
});
|
||||
|
||||
delete input.env.PAPERCLIP_WORKSPACE_CWD;
|
||||
delete input.env.PAPERCLIP_WORKSPACE_WORKTREE_PATH;
|
||||
delete input.env.PAPERCLIP_WORKSPACES_JSON;
|
||||
|
||||
applyPaperclipWorkspaceEnv(input.env, {
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
workspaceSource: input.workspaceSource,
|
||||
workspaceStrategy: input.workspaceStrategy,
|
||||
workspaceId: input.workspaceId,
|
||||
workspaceRepoUrl: input.workspaceRepoUrl,
|
||||
workspaceRepoRef: input.workspaceRepoRef,
|
||||
workspaceBranch: input.workspaceBranch,
|
||||
workspaceWorktreePath: shapedWorkspaceEnv.workspaceWorktreePath,
|
||||
agentHome: input.agentHome,
|
||||
});
|
||||
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
input.env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
|
||||
const shapedEnvConfig = rewriteWorkspaceCwdEnvVarsForExecution({
|
||||
env: input.envConfig ?? {},
|
||||
workspaceCwd: input.workspaceCwd,
|
||||
executionCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
executionTargetIsRemote: input.executionTargetIsRemote,
|
||||
});
|
||||
for (const [key, value] of Object.entries(shapedEnvConfig)) {
|
||||
input.env[key] = value;
|
||||
}
|
||||
|
||||
return shapedWorkspaceEnv;
|
||||
}
|
||||
|
||||
export function sanitizeInheritedPaperclipEnv(baseEnv: NodeJS.ProcessEnv): NodeJS.ProcessEnv {
|
||||
const env: NodeJS.ProcessEnv = { ...baseEnv };
|
||||
for (const key of Object.keys(env)) {
|
||||
|
||||
@@ -40,6 +40,7 @@ export const LEGACY_SESSIONED_ADAPTER_TYPES = new Set([
|
||||
"acpx_local",
|
||||
"claude_local",
|
||||
"codex_local",
|
||||
"cursor_cloud",
|
||||
"cursor",
|
||||
"gemini_local",
|
||||
"hermes_local",
|
||||
@@ -63,6 +64,11 @@ export const ADAPTER_SESSION_MANAGEMENT: Record<string, AdapterSessionManagement
|
||||
nativeContextManagement: "confirmed",
|
||||
defaultSessionCompaction: ADAPTER_MANAGED_SESSION_POLICY,
|
||||
},
|
||||
cursor_cloud: {
|
||||
supportsSessionResume: true,
|
||||
nativeContextManagement: "unknown",
|
||||
defaultSessionCompaction: DEFAULT_SESSION_COMPACTION_POLICY,
|
||||
},
|
||||
cursor: {
|
||||
supportsSessionResume: true,
|
||||
nativeContextManagement: "unknown",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { execFile } from "node:child_process";
|
||||
import { mkdir, mkdtemp, rm, symlink, writeFile } from "node:fs/promises";
|
||||
import { mkdir, mkdtemp, readFile, rm, symlink, writeFile } from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
@@ -15,6 +15,10 @@ import {
|
||||
startSshEnvLabFixture,
|
||||
stopSshEnvLabFixture,
|
||||
} from "./ssh.js";
|
||||
import { prepareRemoteManagedRuntime } from "./remote-managed-runtime.js";
|
||||
|
||||
const SSH_FIXTURE_TEST_TIMEOUT_MS = 30_000;
|
||||
let sshEnvLabUnsupportedReason: string | null = null;
|
||||
|
||||
async function git(cwd: string, args: string[]): Promise<string> {
|
||||
return await new Promise((resolve, reject) => {
|
||||
@@ -28,6 +32,28 @@ async function git(cwd: string, args: string[]): Promise<string> {
|
||||
});
|
||||
}
|
||||
|
||||
async function startSshEnvLabFixtureOrSkip(statePath: string, label: string) {
|
||||
if (sshEnvLabUnsupportedReason) {
|
||||
console.warn(`Skipping ${label}: ${sshEnvLabUnsupportedReason}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
const support = await getSshEnvLabSupport();
|
||||
if (!support.supported) {
|
||||
sshEnvLabUnsupportedReason = support.reason ?? "unsupported environment";
|
||||
console.warn(`Skipping ${label}: ${sshEnvLabUnsupportedReason}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
return await startSshEnvLabFixture({ statePath });
|
||||
} catch (error) {
|
||||
sshEnvLabUnsupportedReason = error instanceof Error ? error.message : String(error);
|
||||
console.warn(`Skipping ${label}: ${sshEnvLabUnsupportedReason}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
describe("ssh env-lab fixture", () => {
|
||||
const cleanupDirs: string[] = [];
|
||||
|
||||
@@ -40,24 +66,17 @@ describe("ssh env-lab fixture", () => {
|
||||
});
|
||||
|
||||
it("starts an isolated sshd fixture and executes commands through it", async () => {
|
||||
const support = await getSshEnvLabSupport();
|
||||
if (!support.supported) {
|
||||
console.warn(
|
||||
`Skipping SSH env-lab fixture test: ${support.reason ?? "unsupported environment"}`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
|
||||
const started = await startSshEnvLabFixture({ statePath });
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "SSH env-lab fixture test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const quotedWorkspace = JSON.stringify(started.workspaceDir);
|
||||
const result = await runSshCommand(
|
||||
config,
|
||||
`sh -lc 'cd ${quotedWorkspace} && pwd'`,
|
||||
`cd ${quotedWorkspace} && pwd`,
|
||||
);
|
||||
|
||||
expect(result.stdout.trim()).toBe(started.workspaceDir);
|
||||
@@ -68,28 +87,21 @@ describe("ssh env-lab fixture", () => {
|
||||
|
||||
const stopped = await readSshEnvLabFixtureStatus(statePath);
|
||||
expect(stopped.running).toBe(false);
|
||||
});
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("forwards stdin to remote SSH commands", async () => {
|
||||
const support = await getSshEnvLabSupport();
|
||||
if (!support.supported) {
|
||||
console.warn(
|
||||
`Skipping SSH stdin forwarding test: ${support.reason ?? "unsupported environment"}`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
|
||||
const started = await startSshEnvLabFixture({ statePath });
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "SSH stdin forwarding test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const remotePath = path.posix.join(started.workspaceDir, "stdin-forwarded.txt");
|
||||
|
||||
await runSshCommand(
|
||||
config,
|
||||
`sh -lc 'cat > ${JSON.stringify(remotePath)}'`,
|
||||
`cat > ${JSON.stringify(remotePath)}`,
|
||||
{
|
||||
stdin: "hello over ssh stdin\n",
|
||||
timeoutMs: 30_000,
|
||||
@@ -99,27 +111,20 @@ describe("ssh env-lab fixture", () => {
|
||||
|
||||
const result = await runSshCommand(
|
||||
config,
|
||||
`sh -lc 'cat ${JSON.stringify(remotePath)}'`,
|
||||
`cat ${JSON.stringify(remotePath)}`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
|
||||
expect(result.stdout).toBe("hello over ssh stdin\n");
|
||||
});
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("does not treat an unrelated reused pid as the running fixture", async () => {
|
||||
const support = await getSshEnvLabSupport();
|
||||
if (!support.supported) {
|
||||
console.warn(
|
||||
`Skipping SSH env-lab fixture test: ${support.reason ?? "unsupported environment"}`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
|
||||
const started = await startSshEnvLabFixture({ statePath });
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "SSH env-lab fixture test");
|
||||
if (!started) return;
|
||||
await stopSshEnvLabFixture(statePath);
|
||||
await mkdir(path.dirname(statePath), { recursive: true });
|
||||
|
||||
@@ -132,11 +137,12 @@ describe("ssh env-lab fixture", () => {
|
||||
const staleStatus = await readSshEnvLabFixtureStatus(statePath);
|
||||
expect(staleStatus.running).toBe(false);
|
||||
|
||||
const restarted = await startSshEnvLabFixture({ statePath });
|
||||
const restarted = await startSshEnvLabFixtureOrSkip(statePath, "SSH env-lab fixture restart test");
|
||||
if (!restarted) return;
|
||||
expect(restarted.pid).not.toBe(process.pid);
|
||||
|
||||
await stopSshEnvLabFixture(statePath);
|
||||
});
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("rejects invalid environment variable keys when constructing SSH spawn targets", async () => {
|
||||
await expect(
|
||||
@@ -161,14 +167,6 @@ describe("ssh env-lab fixture", () => {
|
||||
});
|
||||
|
||||
it("syncs a local directory into the remote fixture workspace", async () => {
|
||||
const support = await getSshEnvLabSupport();
|
||||
if (!support.supported) {
|
||||
console.warn(
|
||||
`Skipping SSH env-lab fixture test: ${support.reason ?? "unsupported environment"}`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
@@ -178,7 +176,8 @@ describe("ssh env-lab fixture", () => {
|
||||
await writeFile(path.join(localDir, "message.txt"), "hello from paperclip\n", "utf8");
|
||||
await writeFile(path.join(localDir, "._message.txt"), "should never sync\n", "utf8");
|
||||
|
||||
const started = await startSshEnvLabFixture({ statePath });
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "SSH env-lab fixture test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const remoteDir = path.posix.join(started.workspaceDir, "overlay");
|
||||
|
||||
@@ -193,22 +192,14 @@ describe("ssh env-lab fixture", () => {
|
||||
|
||||
const result = await runSshCommand(
|
||||
config,
|
||||
`sh -lc 'cat ${JSON.stringify(path.posix.join(remoteDir, "message.txt"))} && if [ -e ${JSON.stringify(path.posix.join(remoteDir, "._message.txt"))} ]; then echo appledouble-present; fi'`,
|
||||
`cat ${JSON.stringify(path.posix.join(remoteDir, "message.txt"))} && if [ -e ${JSON.stringify(path.posix.join(remoteDir, "._message.txt"))} ]; then echo appledouble-present; fi`,
|
||||
);
|
||||
|
||||
expect(result.stdout).toContain("hello from paperclip");
|
||||
expect(result.stdout).not.toContain("appledouble-present");
|
||||
});
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("can dereference local symlinks while syncing to the remote fixture", async () => {
|
||||
const support = await getSshEnvLabSupport();
|
||||
if (!support.supported) {
|
||||
console.warn(
|
||||
`Skipping SSH symlink sync test: ${support.reason ?? "unsupported environment"}`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
@@ -220,7 +211,8 @@ describe("ssh env-lab fixture", () => {
|
||||
await writeFile(path.join(sourceDir, "auth.json"), "{\"token\":\"secret\"}\n", "utf8");
|
||||
await symlink(path.join(sourceDir, "auth.json"), path.join(localDir, "auth.json"));
|
||||
|
||||
const started = await startSshEnvLabFixture({ statePath });
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "SSH symlink sync test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const remoteDir = path.posix.join(started.workspaceDir, "overlay-follow-links");
|
||||
|
||||
@@ -236,29 +228,22 @@ describe("ssh env-lab fixture", () => {
|
||||
|
||||
const result = await runSshCommand(
|
||||
config,
|
||||
`sh -lc 'if [ -L ${JSON.stringify(path.posix.join(remoteDir, "auth.json"))} ]; then echo symlink; else echo regular; fi && cat ${JSON.stringify(path.posix.join(remoteDir, "auth.json"))}'`,
|
||||
`if [ -L ${JSON.stringify(path.posix.join(remoteDir, "auth.json"))} ]; then echo symlink; else echo regular; fi && cat ${JSON.stringify(path.posix.join(remoteDir, "auth.json"))}`,
|
||||
);
|
||||
|
||||
expect(result.stdout).toContain("regular");
|
||||
expect(result.stdout).toContain("{\"token\":\"secret\"}");
|
||||
});
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("round-trips a git workspace through the SSH fixture", async () => {
|
||||
const support = await getSshEnvLabSupport();
|
||||
if (!support.supported) {
|
||||
console.warn(
|
||||
`Skipping SSH workspace round-trip test: ${support.reason ?? "unsupported environment"}`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
const localRepo = path.join(rootDir, "local-workspace");
|
||||
|
||||
await mkdir(localRepo, { recursive: true });
|
||||
await git(localRepo, ["init", "-b", "main"]);
|
||||
await git(localRepo, ["init"]);
|
||||
await git(localRepo, ["checkout", "-b", "main"]);
|
||||
await git(localRepo, ["config", "user.name", "Paperclip Test"]);
|
||||
await git(localRepo, ["config", "user.email", "test@paperclip.dev"]);
|
||||
await writeFile(path.join(localRepo, "tracked.txt"), "base\n", "utf8");
|
||||
@@ -269,7 +254,8 @@ describe("ssh env-lab fixture", () => {
|
||||
await writeFile(path.join(localRepo, "tracked.txt"), "dirty local\n", "utf8");
|
||||
await writeFile(path.join(localRepo, "untracked.txt"), "from local\n", "utf8");
|
||||
|
||||
const started = await startSshEnvLabFixture({ statePath });
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "SSH workspace round-trip test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const spec = {
|
||||
...config,
|
||||
@@ -284,7 +270,7 @@ describe("ssh env-lab fixture", () => {
|
||||
|
||||
const remoteStatus = await runSshCommand(
|
||||
config,
|
||||
`sh -lc 'cd ${JSON.stringify(started.workspaceDir)} && git status --short'`,
|
||||
`cd ${JSON.stringify(started.workspaceDir)} && git status --short`,
|
||||
);
|
||||
expect(remoteStatus.stdout).toContain("M tracked.txt");
|
||||
expect(remoteStatus.stdout).toContain("?? untracked.txt");
|
||||
@@ -292,7 +278,7 @@ describe("ssh env-lab fixture", () => {
|
||||
|
||||
await runSshCommand(
|
||||
config,
|
||||
`sh -lc 'cd ${JSON.stringify(started.workspaceDir)} && git config user.name "Paperclip SSH" && git config user.email "ssh@paperclip.dev" && git add tracked.txt untracked.txt && git commit -m "remote update" >/dev/null && printf "remote dirty\\n" > tracked.txt && printf "remote extra\\n" > remote-only.txt'`,
|
||||
`cd ${JSON.stringify(started.workspaceDir)} && git config user.name "Paperclip SSH" && git config user.email "ssh@paperclip.dev" && git add tracked.txt untracked.txt && git commit -m "remote update" >/dev/null && printf "remote dirty\\n" > tracked.txt && printf "remote extra\\n" > remote-only.txt`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
|
||||
@@ -307,5 +293,222 @@ describe("ssh env-lab fixture", () => {
|
||||
expect(await git(localRepo, ["log", "-1", "--pretty=%s"])).toBe("remote update");
|
||||
expect(await git(localRepo, ["status", "--short"])).toContain("M tracked.txt");
|
||||
expect(await git(localRepo, ["status", "--short"])).not.toContain("._tracked.txt");
|
||||
});
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("preserves both concurrent SSH restores in a shared git workspace", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
const localRepo = path.join(rootDir, "local-workspace");
|
||||
|
||||
await mkdir(localRepo, { recursive: true });
|
||||
await git(localRepo, ["init"]);
|
||||
await git(localRepo, ["checkout", "-b", "main"]);
|
||||
await git(localRepo, ["config", "user.name", "Paperclip Test"]);
|
||||
await git(localRepo, ["config", "user.email", "test@paperclip.dev"]);
|
||||
await writeFile(path.join(localRepo, "tracked.txt"), "base\n", "utf8");
|
||||
await git(localRepo, ["add", "tracked.txt"]);
|
||||
await git(localRepo, ["commit", "-m", "initial"]);
|
||||
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "concurrent SSH restore test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const spec = {
|
||||
...config,
|
||||
remoteCwd: started.workspaceDir,
|
||||
} as const;
|
||||
|
||||
const preparedA = await prepareRemoteManagedRuntime({
|
||||
spec,
|
||||
runId: "run-a",
|
||||
adapterKey: "test-adapter",
|
||||
workspaceLocalDir: localRepo,
|
||||
});
|
||||
const preparedB = await prepareRemoteManagedRuntime({
|
||||
spec,
|
||||
runId: "run-b",
|
||||
adapterKey: "test-adapter",
|
||||
workspaceLocalDir: localRepo,
|
||||
});
|
||||
|
||||
expect(preparedA.workspaceRemoteDir).not.toBe(preparedB.workspaceRemoteDir);
|
||||
|
||||
await runSshCommand(
|
||||
config,
|
||||
`printf "from run a\\n" > ${JSON.stringify(path.posix.join(preparedA.workspaceRemoteDir, "run-a.txt"))}`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
await runSshCommand(
|
||||
config,
|
||||
`printf "from run b\\n" > ${JSON.stringify(path.posix.join(preparedB.workspaceRemoteDir, "run-b.txt"))}`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
|
||||
await Promise.all([
|
||||
preparedA.restoreWorkspace(),
|
||||
preparedB.restoreWorkspace(),
|
||||
]);
|
||||
|
||||
await expect(readFile(path.join(localRepo, "run-a.txt"), "utf8")).resolves.toBe("from run a\n");
|
||||
await expect(readFile(path.join(localRepo, "run-b.txt"), "utf8")).resolves.toBe("from run b\n");
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("preserves nested per-run files across sequential SSH restores with stale baselines", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
const localRepo = path.join(rootDir, "local-workspace");
|
||||
|
||||
await mkdir(localRepo, { recursive: true });
|
||||
await git(localRepo, ["init"]);
|
||||
await git(localRepo, ["checkout", "-b", "main"]);
|
||||
await git(localRepo, ["config", "user.name", "Paperclip Test"]);
|
||||
await git(localRepo, ["config", "user.email", "test@paperclip.dev"]);
|
||||
await writeFile(path.join(localRepo, "tracked.txt"), "base\n", "utf8");
|
||||
await git(localRepo, ["add", "tracked.txt"]);
|
||||
await git(localRepo, ["commit", "-m", "initial"]);
|
||||
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "sequential nested SSH restore test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const spec = {
|
||||
...config,
|
||||
remoteCwd: started.workspaceDir,
|
||||
} as const;
|
||||
|
||||
const preparedA = await prepareRemoteManagedRuntime({
|
||||
spec,
|
||||
runId: "run-a",
|
||||
adapterKey: "test-adapter",
|
||||
workspaceLocalDir: localRepo,
|
||||
});
|
||||
const preparedB = await prepareRemoteManagedRuntime({
|
||||
spec,
|
||||
runId: "run-b",
|
||||
adapterKey: "test-adapter",
|
||||
workspaceLocalDir: localRepo,
|
||||
});
|
||||
|
||||
await runSshCommand(
|
||||
config,
|
||||
`mkdir -p ${JSON.stringify(path.posix.join(preparedA.workspaceRemoteDir, "manual-qa/environment-matrix/ssh"))} && printf "from run a\\n" > ${JSON.stringify(path.posix.join(preparedA.workspaceRemoteDir, "manual-qa/environment-matrix/ssh/claude_local.md"))}`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
await runSshCommand(
|
||||
config,
|
||||
`mkdir -p ${JSON.stringify(path.posix.join(preparedB.workspaceRemoteDir, "manual-qa/environment-matrix/ssh"))} && printf "from run b\\n" > ${JSON.stringify(path.posix.join(preparedB.workspaceRemoteDir, "manual-qa/environment-matrix/ssh/codex_local.md"))}`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
|
||||
await preparedA.restoreWorkspace();
|
||||
await preparedB.restoreWorkspace();
|
||||
|
||||
await expect(readFile(path.join(localRepo, "manual-qa/environment-matrix/ssh/claude_local.md"), "utf8")).resolves
|
||||
.toBe("from run a\n");
|
||||
await expect(readFile(path.join(localRepo, "manual-qa/environment-matrix/ssh/codex_local.md"), "utf8")).resolves
|
||||
.toBe("from run b\n");
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("round-trips remote git commits through the managed runtime restore path", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
const localRepo = path.join(rootDir, "local-workspace");
|
||||
|
||||
await mkdir(localRepo, { recursive: true });
|
||||
await git(localRepo, ["init"]);
|
||||
await git(localRepo, ["checkout", "-b", "main"]);
|
||||
await git(localRepo, ["config", "user.name", "Paperclip Test"]);
|
||||
await git(localRepo, ["config", "user.email", "test@paperclip.dev"]);
|
||||
await writeFile(path.join(localRepo, "tracked.txt"), "base\n", "utf8");
|
||||
await git(localRepo, ["add", "tracked.txt"]);
|
||||
await git(localRepo, ["commit", "-m", "initial"]);
|
||||
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "managed-runtime SSH git round-trip test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const spec = {
|
||||
...config,
|
||||
remoteCwd: started.workspaceDir,
|
||||
} as const;
|
||||
|
||||
const prepared = await prepareRemoteManagedRuntime({
|
||||
spec,
|
||||
runId: "run-commit",
|
||||
adapterKey: "test-adapter",
|
||||
workspaceLocalDir: localRepo,
|
||||
});
|
||||
|
||||
await runSshCommand(
|
||||
config,
|
||||
`cd ${JSON.stringify(prepared.workspaceRemoteDir)} && git config user.name "Paperclip SSH" && git config user.email "ssh@paperclip.dev" && printf "committed\\n" > tracked.txt && git add tracked.txt && git commit -m "remote update" >/dev/null && printf "dirty remote\\n" > tracked.txt`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
|
||||
await prepared.restoreWorkspace();
|
||||
|
||||
expect(await git(localRepo, ["log", "-1", "--pretty=%s"])).toBe("remote update");
|
||||
await expect(readFile(path.join(localRepo, "tracked.txt"), "utf8")).resolves.toBe("dirty remote\n");
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
|
||||
it("merges concurrent remote commits through the managed runtime restore path", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-fixture-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const statePath = path.join(rootDir, "state.json");
|
||||
const localRepo = path.join(rootDir, "local-workspace");
|
||||
|
||||
await mkdir(localRepo, { recursive: true });
|
||||
await git(localRepo, ["init"]);
|
||||
await git(localRepo, ["checkout", "-b", "main"]);
|
||||
await git(localRepo, ["config", "user.name", "Paperclip Test"]);
|
||||
await git(localRepo, ["config", "user.email", "test@paperclip.dev"]);
|
||||
await writeFile(path.join(localRepo, "tracked.txt"), "base\n", "utf8");
|
||||
await git(localRepo, ["add", "tracked.txt"]);
|
||||
await git(localRepo, ["commit", "-m", "initial"]);
|
||||
|
||||
const started = await startSshEnvLabFixtureOrSkip(statePath, "concurrent managed-runtime SSH git merge test");
|
||||
if (!started) return;
|
||||
const config = await buildSshEnvLabFixtureConfig(started);
|
||||
const spec = {
|
||||
...config,
|
||||
remoteCwd: started.workspaceDir,
|
||||
} as const;
|
||||
|
||||
const preparedA = await prepareRemoteManagedRuntime({
|
||||
spec,
|
||||
runId: "run-commit-a",
|
||||
adapterKey: "test-adapter",
|
||||
workspaceLocalDir: localRepo,
|
||||
});
|
||||
const preparedB = await prepareRemoteManagedRuntime({
|
||||
spec,
|
||||
runId: "run-commit-b",
|
||||
adapterKey: "test-adapter",
|
||||
workspaceLocalDir: localRepo,
|
||||
});
|
||||
|
||||
await runSshCommand(
|
||||
config,
|
||||
`cd ${JSON.stringify(preparedA.workspaceRemoteDir)} && git config user.name "Paperclip SSH" && git config user.email "ssh@paperclip.dev" && printf "from run a\\n" > run-a.txt && git add run-a.txt && git commit -m "remote update a" >/dev/null`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
await runSshCommand(
|
||||
config,
|
||||
`cd ${JSON.stringify(preparedB.workspaceRemoteDir)} && git config user.name "Paperclip SSH" && git config user.email "ssh@paperclip.dev" && printf "from run b\\n" > run-b.txt && git add run-b.txt && git commit -m "remote update b" >/dev/null`,
|
||||
{ timeoutMs: 30_000, maxBuffer: 256 * 1024 },
|
||||
);
|
||||
|
||||
await Promise.all([
|
||||
preparedA.restoreWorkspace(),
|
||||
preparedB.restoreWorkspace(),
|
||||
]);
|
||||
|
||||
await expect(readFile(path.join(localRepo, "run-a.txt"), "utf8")).resolves.toBe("from run a\n");
|
||||
await expect(readFile(path.join(localRepo, "run-b.txt"), "utf8")).resolves.toBe("from run b\n");
|
||||
expect(await git(localRepo, ["log", "-1", "--pretty=%s"])).toContain("Paperclip SSH sync merge");
|
||||
|
||||
const recentSubjects = await git(localRepo, ["log", "--pretty=%s", "-3"]);
|
||||
expect(recentSubjects).toContain("remote update a");
|
||||
expect(recentSubjects).toContain("remote update b");
|
||||
}, SSH_FIXTURE_TEST_TIMEOUT_MS);
|
||||
});
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import { randomUUID } from "node:crypto";
|
||||
import { execFile, spawn } from "node:child_process";
|
||||
import { constants as fsConstants, createReadStream, createWriteStream, promises as fs } from "node:fs";
|
||||
import net from "node:net";
|
||||
@@ -5,6 +6,8 @@ import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { CommandManagedRuntimeRunner } from "./command-managed-runtime.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
import type { DirectorySnapshot } from "./workspace-restore-merge.js";
|
||||
import { mergeDirectoryWithBaseline } from "./workspace-restore-merge.js";
|
||||
|
||||
export interface SshConnectionConfig {
|
||||
host: string;
|
||||
@@ -51,13 +54,11 @@ export function createSshCommandManagedRuntimeRunner(input: {
|
||||
? envEntries.map(([key, value]) => `export ${key}=${shellQuote(value)};`).join(" ") + " "
|
||||
: "";
|
||||
const commandScript = command === "sh" || command === "bash"
|
||||
? args[0] === "-lc" && typeof args[1] === "string"
|
||||
? (args[0] === "-c" || args[0] === "-lc") && typeof args[1] === "string"
|
||||
? `${exportPrefix}${args[1]}`
|
||||
: `${envPrefix}exec ${[shellQuote(command), ...args.map((arg) => shellQuote(arg))].join(" ")}`
|
||||
: `${envPrefix}exec ${[shellQuote(command), ...args.map((arg) => shellQuote(arg))].join(" ")}`;
|
||||
const remoteCommand = `${command === "bash" ? "bash" : "sh"} -lc ${
|
||||
shellQuote(`cd ${shellQuote(cwd)} && ${commandScript}`)
|
||||
}`;
|
||||
const remoteCommand = `cd ${shellQuote(cwd)} && ${commandScript}`;
|
||||
|
||||
try {
|
||||
const result = await runSshCommand(input.spec, remoteCommand, {
|
||||
@@ -330,7 +331,7 @@ async function commandExists(command: string): Promise<boolean> {
|
||||
|
||||
async function resolveCommandPath(command: string): Promise<string | null> {
|
||||
try {
|
||||
const result = await execFileText("sh", ["-lc", `command -v ${shellQuote(command)}`], {
|
||||
const result = await execFileText("sh", ["-c", `command -v ${shellQuote(command)}`], {
|
||||
timeout: 5_000,
|
||||
maxBuffer: 8 * 1024,
|
||||
});
|
||||
@@ -418,7 +419,7 @@ async function runSshScript(
|
||||
): Promise<SshCommandResult> {
|
||||
return await runSshCommand(
|
||||
config,
|
||||
`sh -lc ${shellQuote(script)}`,
|
||||
script,
|
||||
options,
|
||||
);
|
||||
}
|
||||
@@ -499,7 +500,7 @@ async function streamLocalFileToSsh(input: {
|
||||
"-p",
|
||||
String(input.spec.port),
|
||||
`${input.spec.username}@${input.spec.host}`,
|
||||
`sh -lc ${shellQuote(input.remoteScript)}`,
|
||||
`sh -c ${shellQuote(input.remoteScript)}`,
|
||||
];
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
@@ -548,7 +549,7 @@ async function streamSshToLocalFile(input: {
|
||||
"-p",
|
||||
String(input.spec.port),
|
||||
`${input.spec.username}@${input.spec.host}`,
|
||||
`sh -lc ${shellQuote(input.remoteScript)}`,
|
||||
`sh -c ${shellQuote(input.remoteScript)}`,
|
||||
];
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
@@ -596,7 +597,9 @@ async function importGitWorkspaceToSsh(input: {
|
||||
}): Promise<void> {
|
||||
const bundleDir = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-bundle-"));
|
||||
const bundlePath = path.join(bundleDir, "workspace.bundle");
|
||||
const tempRef = "refs/paperclip/ssh-sync/import";
|
||||
// Per-import unique ref so concurrent imports against the same local repo
|
||||
// can't race on `update-ref` between this run's update and bundle create.
|
||||
const tempRef = `refs/paperclip/ssh-sync/import/${randomUUID()}`;
|
||||
|
||||
try {
|
||||
await runLocalGit(input.localDir, ["update-ref", tempRef, input.snapshot.headCommit], {
|
||||
@@ -621,6 +624,8 @@ async function importGitWorkspaceToSsh(input: {
|
||||
: `git -C ${shellQuote(input.remoteDir)} -c advice.detachedHead=false checkout --force --detach ${shellQuote(input.snapshot.headCommit)} >/dev/null`,
|
||||
`git -C ${shellQuote(input.remoteDir)} reset --hard ${shellQuote(input.snapshot.headCommit)} >/dev/null`,
|
||||
`git -C ${shellQuote(input.remoteDir)} clean -fdx -e .paperclip-runtime >/dev/null`,
|
||||
// Drop the per-import ref on the remote side too so it can't accumulate.
|
||||
`git -C ${shellQuote(input.remoteDir)} update-ref -d ${shellQuote(tempRef)} >/dev/null 2>&1 || true`,
|
||||
].join("\n");
|
||||
|
||||
await streamLocalFileToSsh({
|
||||
@@ -641,10 +646,12 @@ async function exportGitWorkspaceFromSsh(input: {
|
||||
spec: SshRemoteExecutionSpec;
|
||||
remoteDir: string;
|
||||
localDir: string;
|
||||
}): Promise<void> {
|
||||
importedRef?: string;
|
||||
resetLocalWorkspace?: boolean;
|
||||
}): Promise<string> {
|
||||
const bundleDir = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-bundle-"));
|
||||
const bundlePath = path.join(bundleDir, "workspace.bundle");
|
||||
const importedRef = "refs/paperclip/ssh-sync/imported";
|
||||
const importedRef = input.importedRef ?? `refs/paperclip/ssh-sync/imported/${randomUUID()}`;
|
||||
|
||||
try {
|
||||
const exportScript = [
|
||||
@@ -668,19 +675,97 @@ async function exportGitWorkspaceFromSsh(input: {
|
||||
timeout: 60_000,
|
||||
maxBuffer: 1024 * 1024,
|
||||
});
|
||||
await runLocalGit(input.localDir, ["reset", "--hard", importedRef], {
|
||||
timeout: 60_000,
|
||||
maxBuffer: 1024 * 1024,
|
||||
});
|
||||
} finally {
|
||||
await runLocalGit(input.localDir, ["update-ref", "-d", importedRef], {
|
||||
if (input.resetLocalWorkspace !== false) {
|
||||
await runLocalGit(input.localDir, ["reset", "--hard", importedRef], {
|
||||
timeout: 60_000,
|
||||
maxBuffer: 1024 * 1024,
|
||||
});
|
||||
}
|
||||
const importedHead = await runLocalGit(input.localDir, ["rev-parse", importedRef], {
|
||||
timeout: 10_000,
|
||||
maxBuffer: 16 * 1024,
|
||||
}).catch(() => undefined);
|
||||
});
|
||||
return importedHead.stdout.trim();
|
||||
} finally {
|
||||
if (input.resetLocalWorkspace !== false) {
|
||||
await runLocalGit(input.localDir, ["update-ref", "-d", importedRef], {
|
||||
timeout: 10_000,
|
||||
maxBuffer: 16 * 1024,
|
||||
}).catch(() => undefined);
|
||||
}
|
||||
await fs.rm(bundleDir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
}
|
||||
|
||||
async function integrateImportedGitHead(input: {
|
||||
localDir: string;
|
||||
importedHead: string;
|
||||
}): Promise<void> {
|
||||
const snapshot = await readLocalGitWorkspaceSnapshot(input.localDir);
|
||||
if (!snapshot) return;
|
||||
|
||||
const currentHead = snapshot.headCommit;
|
||||
if (!currentHead || currentHead === input.importedHead) return;
|
||||
|
||||
const headRef = snapshot.branchName ? `refs/heads/${snapshot.branchName}` : "HEAD";
|
||||
const mergeBase = await runLocalGit(input.localDir, ["merge-base", currentHead, input.importedHead], {
|
||||
timeout: 10_000,
|
||||
maxBuffer: 16 * 1024,
|
||||
}).catch(() => null);
|
||||
const mergeBaseHead = mergeBase?.stdout.trim() ?? "";
|
||||
|
||||
if (mergeBaseHead === input.importedHead) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (mergeBaseHead === currentHead) {
|
||||
await runLocalGit(input.localDir, ["update-ref", headRef, input.importedHead, currentHead], {
|
||||
timeout: 10_000,
|
||||
maxBuffer: 16 * 1024,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
let mergedTree;
|
||||
try {
|
||||
mergedTree = await runLocalGit(input.localDir, ["merge-tree", "--write-tree", currentHead, input.importedHead], {
|
||||
timeout: 60_000,
|
||||
maxBuffer: 256 * 1024,
|
||||
});
|
||||
} catch (error) {
|
||||
const reason = error instanceof Error ? error.message : String(error);
|
||||
throw new Error(
|
||||
`Failed to merge concurrent SSH git histories for ${currentHead.slice(0, 12)} and ${input.importedHead.slice(0, 12)}: ${reason}`,
|
||||
);
|
||||
}
|
||||
const mergedTreeId = mergedTree.stdout.trim().split("\n")[0]?.trim() ?? "";
|
||||
if (!mergedTreeId) {
|
||||
throw new Error("Failed to compute a merged git tree for SSH workspace restore.");
|
||||
}
|
||||
|
||||
const mergeCommit = await runLocalGit(
|
||||
input.localDir,
|
||||
[
|
||||
"commit-tree",
|
||||
mergedTreeId,
|
||||
"-p",
|
||||
currentHead,
|
||||
"-p",
|
||||
input.importedHead,
|
||||
"-m",
|
||||
`Paperclip SSH sync merge ${input.importedHead.slice(0, 12)}`,
|
||||
],
|
||||
{
|
||||
timeout: 60_000,
|
||||
maxBuffer: 64 * 1024,
|
||||
},
|
||||
);
|
||||
await runLocalGit(input.localDir, ["update-ref", headRef, mergeCommit.stdout.trim(), currentHead], {
|
||||
timeout: 10_000,
|
||||
maxBuffer: 16 * 1024,
|
||||
});
|
||||
}
|
||||
|
||||
async function clearRemoteDirectory(input: {
|
||||
spec: SshConnectionConfig;
|
||||
remoteDir: string;
|
||||
@@ -802,6 +887,13 @@ async function isSshEnvLabFixtureProcess(state: Pick<SshEnvLabFixtureState, "pid
|
||||
}
|
||||
|
||||
export async function getSshEnvLabSupport(): Promise<SshEnvLabSupport> {
|
||||
if (process.platform === "darwin" && process.env.PAPERCLIP_ENABLE_DARWIN_SSH_ENV_LAB !== "1") {
|
||||
return {
|
||||
supported: false,
|
||||
reason: "SSH env-lab fixture is disabled on macOS; set PAPERCLIP_ENABLE_DARWIN_SSH_ENV_LAB=1 to opt in.",
|
||||
};
|
||||
}
|
||||
|
||||
for (const command of ["ssh", "sshd", "ssh-keygen"]) {
|
||||
if (!(await commandExists(command))) {
|
||||
return {
|
||||
@@ -866,7 +958,7 @@ export async function runSshCommand(
|
||||
"-p",
|
||||
String(config.port),
|
||||
`${config.username}@${config.host}`,
|
||||
`sh -lc ${shellQuote(remoteScript)}`,
|
||||
`sh -c ${shellQuote(remoteScript)}`,
|
||||
);
|
||||
|
||||
return options.stdin != null
|
||||
@@ -921,7 +1013,7 @@ export async function buildSshSpawnTarget(input: {
|
||||
"-p",
|
||||
String(input.spec.port),
|
||||
`${input.spec.username}@${input.spec.host}`,
|
||||
`sh -lc ${shellQuote(remoteScript)}`,
|
||||
`sh -c ${shellQuote(remoteScript)}`,
|
||||
);
|
||||
|
||||
return {
|
||||
@@ -944,7 +1036,7 @@ export async function syncDirectoryToSsh(input: {
|
||||
"-p",
|
||||
String(input.spec.port),
|
||||
`${input.spec.username}@${input.spec.host}`,
|
||||
`sh -lc ${shellQuote(`mkdir -p ${shellQuote(input.remoteDir)} && tar -xf - -C ${shellQuote(input.remoteDir)}`)}`,
|
||||
`sh -c ${shellQuote(`mkdir -p ${shellQuote(input.remoteDir)} && tar -xf - -C ${shellQuote(input.remoteDir)}`)}`,
|
||||
];
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
@@ -1040,7 +1132,7 @@ export async function syncDirectoryFromSsh(input: {
|
||||
"-p",
|
||||
String(input.spec.port),
|
||||
`${input.spec.username}@${input.spec.host}`,
|
||||
`sh -lc ${shellQuote(remoteTarScript)}`,
|
||||
`sh -c ${shellQuote(remoteTarScript)}`,
|
||||
];
|
||||
|
||||
try {
|
||||
@@ -1117,7 +1209,7 @@ export async function prepareWorkspaceForSshExecution(input: {
|
||||
spec: SshRemoteExecutionSpec;
|
||||
localDir: string;
|
||||
remoteDir?: string;
|
||||
}): Promise<void> {
|
||||
}): Promise<{ gitBacked: boolean }> {
|
||||
const remoteDir = input.remoteDir ?? input.spec.remoteCwd;
|
||||
const gitSnapshot = await readLocalGitWorkspaceSnapshot(input.localDir);
|
||||
|
||||
@@ -1139,7 +1231,7 @@ export async function prepareWorkspaceForSshExecution(input: {
|
||||
remoteDir,
|
||||
deletedPaths: gitSnapshot.deletedPaths,
|
||||
});
|
||||
return;
|
||||
return { gitBacked: true };
|
||||
}
|
||||
|
||||
await clearRemoteDirectory({
|
||||
@@ -1153,14 +1245,64 @@ export async function prepareWorkspaceForSshExecution(input: {
|
||||
remoteDir,
|
||||
exclude: [".paperclip-runtime"],
|
||||
});
|
||||
return { gitBacked: false };
|
||||
}
|
||||
|
||||
export async function restoreWorkspaceFromSshExecution(input: {
|
||||
spec: SshRemoteExecutionSpec;
|
||||
localDir: string;
|
||||
remoteDir?: string;
|
||||
baselineSnapshot?: DirectorySnapshot;
|
||||
restoreGitHistory?: boolean;
|
||||
}): Promise<void> {
|
||||
const remoteDir = input.remoteDir ?? input.spec.remoteCwd;
|
||||
if (input.baselineSnapshot) {
|
||||
const stagingDir = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-ssh-sync-back-"));
|
||||
const importedRef = input.restoreGitHistory
|
||||
? `refs/paperclip/ssh-sync/imported/${randomUUID()}`
|
||||
: null;
|
||||
try {
|
||||
const importedHead = input.restoreGitHistory
|
||||
? await exportGitWorkspaceFromSsh({
|
||||
spec: input.spec,
|
||||
remoteDir,
|
||||
localDir: input.localDir,
|
||||
importedRef: importedRef ?? undefined,
|
||||
resetLocalWorkspace: false,
|
||||
})
|
||||
: null;
|
||||
await syncDirectoryFromSsh({
|
||||
spec: input.spec,
|
||||
remoteDir,
|
||||
localDir: stagingDir,
|
||||
exclude: input.baselineSnapshot.exclude,
|
||||
});
|
||||
await mergeDirectoryWithBaseline({
|
||||
baseline: input.baselineSnapshot,
|
||||
sourceDir: stagingDir,
|
||||
targetDir: input.localDir,
|
||||
// Git history advances via integrateImportedGitHead; the working tree
|
||||
// still comes from the remote file snapshot so dirty remote edits win.
|
||||
beforeApply: importedHead
|
||||
? async () => {
|
||||
await integrateImportedGitHead({
|
||||
localDir: input.localDir,
|
||||
importedHead,
|
||||
});
|
||||
}
|
||||
: undefined,
|
||||
});
|
||||
} finally {
|
||||
if (importedRef) {
|
||||
await runLocalGit(input.localDir, ["update-ref", "-d", importedRef], {
|
||||
timeout: 10_000,
|
||||
maxBuffer: 16 * 1024,
|
||||
}).catch(() => undefined);
|
||||
}
|
||||
await fs.rm(stagingDir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
return;
|
||||
}
|
||||
const gitSnapshot = await readLocalGitWorkspaceSnapshot(input.localDir);
|
||||
|
||||
if (gitSnapshot) {
|
||||
@@ -1192,7 +1334,7 @@ export async function ensureSshWorkspaceReady(
|
||||
): Promise<{ remoteCwd: string }> {
|
||||
const result = await runSshCommand(
|
||||
config,
|
||||
`sh -lc ${shellQuote(`mkdir -p ${shellQuote(config.remoteWorkspacePath)} && cd ${shellQuote(config.remoteWorkspacePath)} && pwd`)}`,
|
||||
`mkdir -p ${shellQuote(config.remoteWorkspacePath)} && cd ${shellQuote(config.remoteWorkspacePath)} && pwd`,
|
||||
);
|
||||
return {
|
||||
remoteCwd: result.stdout.trim(),
|
||||
|
||||
61
packages/adapter-utils/src/workspace-restore-merge.test.ts
Normal file
@@ -0,0 +1,61 @@
|
||||
import { mkdir, mkdtemp, readFile, rm, writeFile } from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { captureDirectorySnapshot, mergeDirectoryWithBaseline } from "./workspace-restore-merge.js";
|
||||
|
||||
describe("workspace restore merge", () => {
|
||||
const cleanupDirs: string[] = [];
|
||||
|
||||
afterEach(async () => {
|
||||
while (cleanupDirs.length > 0) {
|
||||
const dir = cleanupDirs.pop();
|
||||
if (!dir) continue;
|
||||
await rm(dir, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
});
|
||||
|
||||
it("preserves sibling files when sequential stale-baseline restores create the same nested directory tree", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-restore-merge-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
|
||||
const targetDir = path.join(rootDir, "target");
|
||||
const sourceADir = path.join(rootDir, "source-a");
|
||||
const sourceBDir = path.join(rootDir, "source-b");
|
||||
await mkdir(targetDir, { recursive: true });
|
||||
await mkdir(path.join(sourceADir, "manual-qa", "environment-matrix", "ssh"), { recursive: true });
|
||||
await mkdir(path.join(sourceBDir, "manual-qa", "environment-matrix", "ssh"), { recursive: true });
|
||||
|
||||
const baseline = await captureDirectorySnapshot(targetDir, { exclude: [] });
|
||||
|
||||
await writeFile(
|
||||
path.join(sourceADir, "manual-qa", "environment-matrix", "ssh", "claude_local.md"),
|
||||
"ssh claude\n",
|
||||
"utf8",
|
||||
);
|
||||
await writeFile(
|
||||
path.join(sourceBDir, "manual-qa", "environment-matrix", "ssh", "codex_local.md"),
|
||||
"ssh codex\n",
|
||||
"utf8",
|
||||
);
|
||||
|
||||
await mergeDirectoryWithBaseline({
|
||||
baseline,
|
||||
sourceDir: sourceADir,
|
||||
targetDir,
|
||||
});
|
||||
await mergeDirectoryWithBaseline({
|
||||
baseline,
|
||||
sourceDir: sourceBDir,
|
||||
targetDir,
|
||||
});
|
||||
|
||||
await expect(
|
||||
readFile(path.join(targetDir, "manual-qa", "environment-matrix", "ssh", "claude_local.md"), "utf8"),
|
||||
).resolves.toBe("ssh claude\n");
|
||||
await expect(
|
||||
readFile(path.join(targetDir, "manual-qa", "environment-matrix", "ssh", "codex_local.md"), "utf8"),
|
||||
).resolves.toBe("ssh codex\n");
|
||||
});
|
||||
});
|
||||
257
packages/adapter-utils/src/workspace-restore-merge.ts
Normal file
@@ -0,0 +1,257 @@
|
||||
import { createHash } from "node:crypto";
|
||||
import { createReadStream } from "node:fs";
|
||||
import { constants as fsConstants, promises as fs } from "node:fs";
|
||||
import path from "node:path";
|
||||
|
||||
type SnapshotEntry =
|
||||
| { kind: "dir" }
|
||||
| { kind: "file"; mode: number; hash: string }
|
||||
| { kind: "symlink"; target: string };
|
||||
|
||||
export interface DirectorySnapshot {
|
||||
exclude: string[];
|
||||
entries: Map<string, SnapshotEntry>;
|
||||
}
|
||||
|
||||
function isRelativePathOrDescendant(relative: string, candidate: string): boolean {
|
||||
return relative === candidate || relative.startsWith(`${candidate}/`);
|
||||
}
|
||||
|
||||
function shouldExclude(relative: string, exclude: readonly string[]): boolean {
|
||||
return exclude.some((candidate) => isRelativePathOrDescendant(relative, candidate));
|
||||
}
|
||||
|
||||
async function hashFile(filePath: string): Promise<string> {
|
||||
return await new Promise((resolve, reject) => {
|
||||
const hash = createHash("sha256");
|
||||
const stream = createReadStream(filePath);
|
||||
stream.on("data", (chunk) => hash.update(chunk));
|
||||
stream.on("error", reject);
|
||||
stream.on("end", () => resolve(hash.digest("hex")));
|
||||
});
|
||||
}
|
||||
|
||||
async function walkDirectory(
|
||||
root: string,
|
||||
exclude: readonly string[],
|
||||
relative = "",
|
||||
out: Map<string, SnapshotEntry> = new Map(),
|
||||
): Promise<Map<string, SnapshotEntry>> {
|
||||
const current = relative ? path.join(root, relative) : root;
|
||||
const entries = await fs.readdir(current, { withFileTypes: true }).catch(() => []);
|
||||
entries.sort((left, right) => left.name.localeCompare(right.name));
|
||||
|
||||
for (const entry of entries) {
|
||||
const nextRelative = relative ? path.posix.join(relative, entry.name) : entry.name;
|
||||
if (shouldExclude(nextRelative, exclude)) continue;
|
||||
|
||||
const fullPath = path.join(root, nextRelative);
|
||||
const stats = await fs.lstat(fullPath);
|
||||
if (stats.isDirectory()) {
|
||||
out.set(nextRelative, { kind: "dir" });
|
||||
await walkDirectory(root, exclude, nextRelative, out);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (stats.isSymbolicLink()) {
|
||||
out.set(nextRelative, {
|
||||
kind: "symlink",
|
||||
target: await fs.readlink(fullPath),
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
out.set(nextRelative, {
|
||||
kind: "file",
|
||||
mode: stats.mode,
|
||||
hash: await hashFile(fullPath),
|
||||
});
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
async function readSnapshotEntry(root: string, relative: string): Promise<SnapshotEntry | null> {
|
||||
const fullPath = path.join(root, relative);
|
||||
let stats;
|
||||
try {
|
||||
stats = await fs.lstat(fullPath);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (stats.isDirectory()) return { kind: "dir" };
|
||||
if (stats.isSymbolicLink()) {
|
||||
return {
|
||||
kind: "symlink",
|
||||
target: await fs.readlink(fullPath),
|
||||
};
|
||||
}
|
||||
return {
|
||||
kind: "file",
|
||||
mode: stats.mode,
|
||||
hash: await hashFile(fullPath),
|
||||
};
|
||||
}
|
||||
|
||||
function entriesMatch(left: SnapshotEntry | null | undefined, right: SnapshotEntry | null | undefined): boolean {
|
||||
if (!left || !right) return false;
|
||||
if (left.kind !== right.kind) return false;
|
||||
if (left.kind === "dir") return true;
|
||||
if (left.kind === "symlink" && right.kind === "symlink") {
|
||||
return left.target === right.target;
|
||||
}
|
||||
if (left.kind === "file" && right.kind === "file") {
|
||||
return left.mode === right.mode && left.hash === right.hash;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
async function isHolderAlive(lockDir: string): Promise<boolean> {
|
||||
try {
|
||||
const raw = await fs.readFile(path.join(lockDir, "owner.json"), "utf8");
|
||||
const owner = JSON.parse(raw) as { pid?: unknown };
|
||||
const pid = typeof owner.pid === "number" && Number.isFinite(owner.pid) && owner.pid > 0 ? owner.pid : null;
|
||||
if (pid === null) {
|
||||
// Owner record is unparseable / missing pid — treat as stale.
|
||||
return false;
|
||||
}
|
||||
try {
|
||||
process.kill(pid, 0);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
} catch {
|
||||
// owner.json missing or unreadable — treat as stale.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function acquireDirectoryMergeLock(lockDir: string): Promise<() => Promise<void>> {
|
||||
const deadline = Date.now() + 30_000;
|
||||
while (true) {
|
||||
try {
|
||||
await fs.mkdir(lockDir);
|
||||
await fs.writeFile(
|
||||
path.join(lockDir, "owner.json"),
|
||||
`${JSON.stringify({ pid: process.pid, createdAt: new Date().toISOString() })}\n`,
|
||||
"utf8",
|
||||
);
|
||||
return async () => {
|
||||
await fs.rm(lockDir, { recursive: true, force: true }).catch(() => undefined);
|
||||
};
|
||||
} catch (error) {
|
||||
const code = error && typeof error === "object" ? (error as { code?: unknown }).code : null;
|
||||
if (code !== "EEXIST") throw error;
|
||||
// Stale-lock detection: if the owner PID is dead (SIGKILL / OOM / crash),
|
||||
// the lockDir would otherwise persist forever and stall restores. Mirror
|
||||
// the materializePaperclipSkillCopy lock pattern — remove and retry.
|
||||
if (!(await isHolderAlive(lockDir))) {
|
||||
await fs.rm(lockDir, { recursive: true, force: true }).catch(() => undefined);
|
||||
continue;
|
||||
}
|
||||
if (Date.now() >= deadline) {
|
||||
throw new Error(`Timed out waiting for workspace restore lock at ${lockDir}`);
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export async function withDirectoryMergeLock<T>(
|
||||
targetDir: string,
|
||||
fn: () => Promise<T>,
|
||||
): Promise<T> {
|
||||
const releaseLock = await acquireDirectoryMergeLock(`${targetDir}.paperclip-restore.lock`);
|
||||
try {
|
||||
return await fn();
|
||||
} finally {
|
||||
await releaseLock();
|
||||
}
|
||||
}
|
||||
|
||||
async function copySnapshotEntry(sourceDir: string, targetDir: string, relative: string, entry: SnapshotEntry): Promise<void> {
|
||||
const sourcePath = path.join(sourceDir, relative);
|
||||
const targetPath = path.join(targetDir, relative);
|
||||
|
||||
if (entry.kind === "dir") {
|
||||
const existing = await fs.lstat(targetPath).catch(() => null);
|
||||
if (existing?.isDirectory()) {
|
||||
return;
|
||||
}
|
||||
if (existing) {
|
||||
await fs.rm(targetPath, { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
await fs.mkdir(targetPath, { recursive: true });
|
||||
return;
|
||||
}
|
||||
|
||||
await fs.mkdir(path.dirname(targetPath), { recursive: true });
|
||||
await fs.rm(targetPath, { recursive: true, force: true }).catch(() => undefined);
|
||||
if (entry.kind === "symlink") {
|
||||
await fs.symlink(entry.target, targetPath);
|
||||
return;
|
||||
}
|
||||
|
||||
await fs.copyFile(sourcePath, targetPath, fsConstants.COPYFILE_FICLONE).catch(async () => {
|
||||
await fs.copyFile(sourcePath, targetPath);
|
||||
});
|
||||
await fs.chmod(targetPath, entry.mode);
|
||||
}
|
||||
|
||||
export async function captureDirectorySnapshot(
|
||||
rootDir: string,
|
||||
options: { exclude?: string[] } = {},
|
||||
): Promise<DirectorySnapshot> {
|
||||
const exclude = [...new Set(options.exclude ?? [])];
|
||||
return {
|
||||
exclude,
|
||||
entries: await walkDirectory(rootDir, exclude),
|
||||
};
|
||||
}
|
||||
|
||||
export async function mergeDirectoryWithBaseline(input: {
|
||||
baseline: DirectorySnapshot;
|
||||
sourceDir: string;
|
||||
targetDir: string;
|
||||
beforeApply?: () => Promise<void>;
|
||||
}): Promise<void> {
|
||||
const source = await captureDirectorySnapshot(input.sourceDir, { exclude: input.baseline.exclude });
|
||||
await withDirectoryMergeLock(input.targetDir, async () => {
|
||||
await input.beforeApply?.();
|
||||
const current = await captureDirectorySnapshot(input.targetDir, { exclude: input.baseline.exclude });
|
||||
const deletedLeafEntries = [...input.baseline.entries.entries()]
|
||||
.filter(([relative, entry]) => entry.kind !== "dir" && !source.entries.has(relative))
|
||||
.sort(([left], [right]) => right.length - left.length);
|
||||
|
||||
for (const [relative, baselineEntry] of deletedLeafEntries) {
|
||||
if (!entriesMatch(current.entries.get(relative), baselineEntry)) continue;
|
||||
await fs.rm(path.join(input.targetDir, relative), { recursive: true, force: true }).catch(() => undefined);
|
||||
}
|
||||
|
||||
const deletedDirs = [...input.baseline.entries.entries()]
|
||||
.filter(([relative, entry]) => entry.kind === "dir" && !source.entries.has(relative))
|
||||
.sort(([left], [right]) => right.length - left.length);
|
||||
|
||||
for (const [relative] of deletedDirs) {
|
||||
await fs.rmdir(path.join(input.targetDir, relative)).catch(() => undefined);
|
||||
}
|
||||
|
||||
const changedSourceEntries = [...source.entries.entries()]
|
||||
.filter(([relative, entry]) => !entriesMatch(input.baseline.entries.get(relative), entry))
|
||||
.sort(([left], [right]) => left.localeCompare(right));
|
||||
|
||||
for (const [relative, entry] of changedSourceEntries) {
|
||||
await copySnapshotEntry(input.sourceDir, input.targetDir, relative, entry);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export async function directoryEntryMatchesBaseline(
|
||||
rootDir: string,
|
||||
relative: string,
|
||||
baselineEntry: SnapshotEntry,
|
||||
): Promise<boolean> {
|
||||
return entriesMatch(await readSnapshotEntry(rootDir, relative), baselineEntry);
|
||||
}
|
||||
@@ -53,7 +53,6 @@
|
||||
"dependencies": {
|
||||
"@agentclientprotocol/claude-agent-acp": "^0.31.4",
|
||||
"@paperclipai/adapter-utils": "workspace:*",
|
||||
"@paperclipai/shared": "workspace:*",
|
||||
"@zed-industries/codex-acp": "^0.12.0",
|
||||
"acpx": "^0.6.1",
|
||||
"picocolors": "^1.1.1"
|
||||
|
||||
@@ -4,7 +4,6 @@ import path from "node:path";
|
||||
import { createHash, randomUUID } from "node:crypto";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import type { AdapterExecutionContext, AdapterExecutionResult } from "@paperclipai/adapter-utils";
|
||||
import { resolvePaperclipSpaceRoot } from "@paperclipai/shared/space-paths";
|
||||
import { readAdapterExecutionTarget, adapterExecutionTargetSessionIdentity } from "@paperclipai/adapter-utils/execution-target";
|
||||
import {
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
@@ -22,7 +21,9 @@ import {
|
||||
readPaperclipIssueWorkModeFromContext,
|
||||
renderPaperclipWakePrompt,
|
||||
renderTemplate,
|
||||
resolvePaperclipInstanceRootForAdapter,
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
rewriteWorkspaceCwdEnvVarsForExecution,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
type PaperclipSkillEntry,
|
||||
@@ -115,10 +116,9 @@ function shortHash(value: unknown): string {
|
||||
function defaultPaperclipInstanceDir(): string {
|
||||
const home = process.env.PAPERCLIP_HOME?.trim() || path.join(os.homedir(), ".paperclip");
|
||||
const instanceId = process.env.PAPERCLIP_INSTANCE_ID?.trim() || "default";
|
||||
return resolvePaperclipSpaceRoot({
|
||||
return resolvePaperclipInstanceRootForAdapter({
|
||||
homeDir: home,
|
||||
instanceId,
|
||||
spaceId: process.env.PAPERCLIP_SPACE_ID?.trim() || undefined,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -654,10 +654,11 @@ async function buildRuntime(input: {
|
||||
remoteExecutionIdentity && typeof remoteExecutionIdentity.remoteCwd === "string"
|
||||
? remoteExecutionIdentity.remoteCwd
|
||||
: cwd;
|
||||
const executionTargetIsRemote = remoteExecutionIdentity !== null;
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceWorktreePath,
|
||||
executionTargetIsRemote: remoteExecutionIdentity !== null,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
@@ -712,7 +713,13 @@ async function buildRuntime(input: {
|
||||
workspaceWorktreePath: shapedWorkspaceEnv.workspaceWorktreePath,
|
||||
agentHome,
|
||||
});
|
||||
for (const [key, value] of Object.entries(envConfig)) {
|
||||
const shapedEnvConfig = rewriteWorkspaceCwdEnvVarsForExecution({
|
||||
env: envConfig,
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
executionCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
executionTargetIsRemote,
|
||||
});
|
||||
for (const [key, value] of Object.entries(shapedEnvConfig)) {
|
||||
if (typeof value === "string") env[key] = value;
|
||||
}
|
||||
if (!hasExplicitApiKey && authToken) env.PAPERCLIP_API_KEY = authToken;
|
||||
|
||||
@@ -54,7 +54,6 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@paperclipai/adapter-utils": "workspace:*",
|
||||
"@paperclipai/shared": "workspace:*",
|
||||
"picocolors": "^1.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -3,7 +3,7 @@ import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { AdapterExecutionContext } from "@paperclipai/adapter-utils";
|
||||
import { resolvePaperclipSpaceRoot } from "@paperclipai/shared/space-paths";
|
||||
import { resolvePaperclipInstanceRootForAdapter } from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
const SEEDED_SHARED_FILES = [
|
||||
".credentials.json",
|
||||
@@ -92,14 +92,14 @@ export function resolveManagedClaudeConfigSeedDir(
|
||||
env: NodeJS.ProcessEnv,
|
||||
companyId?: string,
|
||||
): string {
|
||||
const spaceRoot = resolvePaperclipSpaceRoot({
|
||||
const instanceRoot = resolvePaperclipInstanceRootForAdapter({
|
||||
homeDir: nonEmpty(env.PAPERCLIP_HOME) ?? undefined,
|
||||
instanceId: nonEmpty(env.PAPERCLIP_INSTANCE_ID) ?? undefined,
|
||||
spaceId: nonEmpty(env.PAPERCLIP_SPACE_ID) ?? undefined,
|
||||
env,
|
||||
});
|
||||
return companyId
|
||||
? path.resolve(spaceRoot, "companies", companyId, "claude-config-seed")
|
||||
: path.resolve(spaceRoot, "claude-config-seed");
|
||||
? path.resolve(instanceRoot, "companies", companyId, "claude-config-seed")
|
||||
: path.resolve(instanceRoot, "claude-config-seed");
|
||||
}
|
||||
|
||||
export async function prepareClaudeConfigSeed(
|
||||
|
||||
@@ -27,7 +27,7 @@ const {
|
||||
})),
|
||||
ensureCommandResolvable: vi.fn(async () => undefined),
|
||||
resolveCommandForLogs: vi.fn(async () => "ssh://fixture@127.0.0.1:2222/remote/workspace :: claude"),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => undefined),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => ({ gitBacked: false })),
|
||||
restoreWorkspaceFromSshExecution: vi.fn(async () => undefined),
|
||||
syncDirectoryToSsh: vi.fn(async () => undefined),
|
||||
startAdapterExecutionTargetPaperclipBridge: vi.fn(async () => ({
|
||||
@@ -94,6 +94,7 @@ describe("claude remote execution", () => {
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const alternateWorkspaceDir = path.join(rootDir, "workspace-other");
|
||||
const instructionsPath = path.join(rootDir, "instructions.md");
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-1/workspace";
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(alternateWorkspaceDir, { recursive: true });
|
||||
await writeFile(instructionsPath, "Use the remote workspace.\n", "utf8");
|
||||
@@ -116,6 +117,11 @@ describe("claude remote execution", () => {
|
||||
config: {
|
||||
command: "claude",
|
||||
instructionsFilePath: instructionsPath,
|
||||
env: {
|
||||
QA_PROJECT_WORKSPACE_CWD: workspaceDir,
|
||||
RANDOM_WORKSPACE_CWD: workspaceDir,
|
||||
OTHER_ENV: workspaceDir,
|
||||
},
|
||||
},
|
||||
context: {
|
||||
paperclipWorkspace: {
|
||||
@@ -161,11 +167,11 @@ describe("claude remote execution", () => {
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledTimes(1);
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledWith(expect.objectContaining({
|
||||
localDir: workspaceDir,
|
||||
remoteDir: "/remote/workspace",
|
||||
remoteDir: managedRemoteWorkspace,
|
||||
}));
|
||||
expect(syncDirectoryToSsh).toHaveBeenCalledTimes(1);
|
||||
expect(syncDirectoryToSsh).toHaveBeenCalledWith(expect.objectContaining({
|
||||
remoteDir: "/remote/workspace/.paperclip-runtime/claude/skills",
|
||||
remoteDir: `${managedRemoteWorkspace}/.paperclip-runtime/claude/skills`,
|
||||
followSymlinks: true,
|
||||
}));
|
||||
expect(runChildProcess).toHaveBeenCalledTimes(1);
|
||||
@@ -173,15 +179,17 @@ describe("claude remote execution", () => {
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(call?.[2]).toContain("--append-system-prompt-file");
|
||||
expect(call?.[2]).toContain("/remote/workspace/.paperclip-runtime/claude/skills/agent-instructions.md");
|
||||
expect(call?.[2]).toContain(
|
||||
`${managedRemoteWorkspace}/.paperclip-runtime/claude/skills/agent-instructions.md`,
|
||||
);
|
||||
expect(call?.[2]).toContain("--add-dir");
|
||||
expect(call?.[2]).toContain("/remote/workspace/.paperclip-runtime/claude/skills");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(call?.[2]).toContain(`${managedRemoteWorkspace}/.paperclip-runtime/claude/skills`);
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe(managedRemoteWorkspace);
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_WORKTREE_PATH).toBeUndefined();
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
@@ -193,12 +201,15 @@ describe("claude remote execution", () => {
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(call?.[3].env.QA_PROJECT_WORKSPACE_CWD).toBe(managedRemoteWorkspace);
|
||||
expect(call?.[3].env.RANDOM_WORKSPACE_CWD).toBe(managedRemoteWorkspace);
|
||||
expect(call?.[3].env.OTHER_ENV).toBe(workspaceDir);
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe(managedRemoteWorkspace);
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledWith(expect.objectContaining({
|
||||
localDir: workspaceDir,
|
||||
remoteDir: "/remote/workspace",
|
||||
remoteDir: managedRemoteWorkspace,
|
||||
}));
|
||||
});
|
||||
|
||||
@@ -259,6 +270,7 @@ describe("claude remote execution", () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-claude-remote-resume-match-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-ssh-resume/workspace";
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
|
||||
await execute({
|
||||
@@ -274,13 +286,13 @@ describe("claude remote execution", () => {
|
||||
sessionId: "session-123",
|
||||
sessionParams: {
|
||||
sessionId: "session-123",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
remoteExecution: {
|
||||
transport: "ssh",
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
remoteCwd: managedRemoteWorkspace,
|
||||
},
|
||||
},
|
||||
sessionDisplayId: "session-123",
|
||||
|
||||
@@ -6,6 +6,7 @@ import type { RunProcessResult } from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
overrideAdapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
@@ -15,6 +16,7 @@ import {
|
||||
ensureAdapterExecutionTargetRuntimeCommandInstalled,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetTimeoutSec,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
@@ -35,8 +37,10 @@ import {
|
||||
buildInvocationEnvForLogs,
|
||||
ensureAbsoluteDirectory,
|
||||
ensurePathInEnv,
|
||||
refreshPaperclipWorkspaceEnvForExecution,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
rewriteWorkspaceCwdEnvVarsForExecution,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
@@ -55,6 +59,7 @@ import { prepareClaudeConfigSeed } from "./claude-config.js";
|
||||
import { resolveClaudeDesiredSkillNames } from "./skills.js";
|
||||
import { isBedrockModelId } from "./models.js";
|
||||
import { prepareClaudePromptBundle } from "./prompt-cache.js";
|
||||
import { buildClaudeExecutionPermissionArgs } from "./permissions.js";
|
||||
import { SANDBOX_INSTALL_COMMAND } from "../index.js";
|
||||
|
||||
const __moduleDir = path.dirname(fileURLToPath(import.meta.url));
|
||||
@@ -152,7 +157,7 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
|
||||
const cwd = effectiveWorkspaceCwd || configuredCwd || process.cwd();
|
||||
const executionTargetIsRemote = adapterExecutionTargetIsRemote(executionTarget);
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
let effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceWorktreePath,
|
||||
@@ -241,7 +246,13 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
if (runtimePrimaryUrl) {
|
||||
env.PAPERCLIP_RUNTIME_PRIMARY_URL = runtimePrimaryUrl;
|
||||
}
|
||||
for (const [key, value] of Object.entries(envConfig)) {
|
||||
const shapedEnvConfig = rewriteWorkspaceCwdEnvVarsForExecution({
|
||||
env: envConfig,
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
executionCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
executionTargetIsRemote,
|
||||
});
|
||||
for (const [key, value] of Object.entries(shapedEnvConfig)) {
|
||||
if (typeof value === "string") env[key] = value;
|
||||
}
|
||||
|
||||
@@ -254,7 +265,10 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
);
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const timeoutSec = resolveAdapterExecutionTargetTimeoutSec(
|
||||
executionTarget,
|
||||
asNumber(config.timeoutSec, 0),
|
||||
);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId,
|
||||
@@ -267,7 +281,10 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
graceSec,
|
||||
onLog,
|
||||
});
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv, { installCommand: SANDBOX_INSTALL_COMMAND });
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv, {
|
||||
installCommand: SANDBOX_INSTALL_COMMAND,
|
||||
timeoutSec,
|
||||
});
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
const loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv,
|
||||
@@ -340,6 +357,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
legacyRemoteExecution: ctx.executionTransport?.remoteExecution,
|
||||
});
|
||||
const executionTargetIsRemote = adapterExecutionTargetIsRemote(executionTarget);
|
||||
const executionTargetIsSandbox = executionTarget?.kind === "remote" && executionTarget.transport === "sandbox";
|
||||
|
||||
const promptTemplate = asString(
|
||||
config.promptTemplate,
|
||||
@@ -351,6 +369,21 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const maxTurns = asNumber(config.maxTurnsPerRun, 0);
|
||||
const dangerouslySkipPermissions = asBoolean(config.dangerouslySkipPermissions, true);
|
||||
const configEnv = parseObject(config.env);
|
||||
const workspaceContext = parseObject(context.paperclipWorkspace);
|
||||
const workspaceCwd = asString(workspaceContext.cwd, "");
|
||||
const workspaceSource = asString(workspaceContext.source, "");
|
||||
const workspaceStrategy = asString(workspaceContext.strategy, "");
|
||||
const workspaceBranch = asString(workspaceContext.branchName, "") || null;
|
||||
const workspaceWorktreePath = asString(workspaceContext.worktreePath, "") || null;
|
||||
const agentHome = asString(workspaceContext.agentHome, "") || null;
|
||||
const workspaceHints = Array.isArray(context.paperclipWorkspaces)
|
||||
? context.paperclipWorkspaces.filter(
|
||||
(value): value is Record<string, unknown> => typeof value === "object" && value !== null,
|
||||
)
|
||||
: [];
|
||||
const configuredCwd = asString(config.cwd, "");
|
||||
const useConfiguredInsteadOfAgentHome = workspaceSource === "agent_home" && configuredCwd.length > 0;
|
||||
const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
|
||||
const hasExplicitClaudeConfigDir =
|
||||
typeof configEnv.CLAUDE_CONFIG_DIR === "string" && configEnv.CLAUDE_CONFIG_DIR.trim().length > 0;
|
||||
const instructionsFilePath = asString(config.instructionsFilePath, "").trim();
|
||||
@@ -379,7 +412,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
extraArgs,
|
||||
} = runtimeConfig;
|
||||
let loggedEnv = initialLoggedEnv;
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
let effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const terminalResultCleanupGraceMs = Math.max(
|
||||
0,
|
||||
asNumber(config.terminalResultCleanupGraceMs, 5_000),
|
||||
@@ -433,8 +466,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
`[paperclip] Syncing workspace and Claude runtime assets to ${describeAdapterExecutionTarget(executionTarget)}.\n`,
|
||||
);
|
||||
return await prepareAdapterExecutionTargetRuntime({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
adapterKey: "claude",
|
||||
timeoutSec,
|
||||
workspaceLocalDir: cwd,
|
||||
installCommand: SANDBOX_INSTALL_COMMAND,
|
||||
detectCommand: command,
|
||||
@@ -455,6 +490,26 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
});
|
||||
})()
|
||||
: null;
|
||||
if (preparedExecutionTargetRuntime?.workspaceRemoteDir) {
|
||||
effectiveExecutionCwd = preparedExecutionTargetRuntime.workspaceRemoteDir;
|
||||
}
|
||||
const runtimeExecutionTarget = overrideAdapterExecutionTargetRemoteCwd(executionTarget, effectiveExecutionCwd);
|
||||
refreshPaperclipWorkspaceEnvForExecution({
|
||||
env,
|
||||
envConfig: configEnv,
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceStrategy,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceBranch,
|
||||
workspaceWorktreePath,
|
||||
workspaceHints,
|
||||
agentHome,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
const restoreRemoteWorkspace = preparedExecutionTargetRuntime
|
||||
? () => preparedExecutionTargetRuntime.restoreWorkspace()
|
||||
: null;
|
||||
@@ -502,12 +557,13 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
);
|
||||
}
|
||||
let paperclipBridge: Awaited<ReturnType<typeof startAdapterExecutionTargetPaperclipBridge>> = null;
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(runtimeExecutionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
target: runtimeExecutionTarget,
|
||||
runtimeRootDir: preparedExecutionTargetRuntime?.runtimeRootDir,
|
||||
adapterKey: "claude",
|
||||
timeoutSec,
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
@@ -536,7 +592,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
runtimeSessionId.length > 0 &&
|
||||
hasMatchingPromptBundle &&
|
||||
(runtimeSessionCwd.length === 0 || path.resolve(runtimeSessionCwd) === path.resolve(effectiveExecutionCwd)) &&
|
||||
adapterExecutionTargetSessionMatches(runtimeRemoteExecution, executionTarget);
|
||||
adapterExecutionTargetSessionMatches(runtimeRemoteExecution, runtimeExecutionTarget);
|
||||
const sessionId = canResumeSession ? runtimeSessionId : null;
|
||||
if (
|
||||
executionTargetIsRemote &&
|
||||
@@ -609,7 +665,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
) => {
|
||||
const args = ["--print", "-", "--output-format", "stream-json", "--verbose"];
|
||||
if (resumeSessionId) args.push("--resume", resumeSessionId);
|
||||
if (dangerouslySkipPermissions) args.push("--dangerously-skip-permissions");
|
||||
args.push(...buildClaudeExecutionPermissionArgs({
|
||||
dangerouslySkipPermissions,
|
||||
targetIsSandbox: executionTargetIsSandbox,
|
||||
}));
|
||||
if (chrome) args.push("--chrome");
|
||||
// For Bedrock: only pass --model when the ID is a Bedrock-native identifier
|
||||
// (e.g. "us.anthropic.*" or ARN). Anthropic-style IDs like "claude-opus-4-6" are invalid
|
||||
@@ -653,6 +712,11 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (!resumeSessionId) {
|
||||
commandNotes.push(`Using stable Claude prompt bundle ${promptBundle.bundleKey}.`);
|
||||
}
|
||||
if (dangerouslySkipPermissions && executionTargetIsSandbox) {
|
||||
commandNotes.push(
|
||||
"Using a broad --allowedTools whitelist for sandbox execution because Claude rejects --dangerously-skip-permissions under root/sudo.",
|
||||
);
|
||||
}
|
||||
if (attemptInstructionsFilePath && !resumeSessionId) {
|
||||
commandNotes.push(
|
||||
`Injected agent instructions via --append-system-prompt-file ${instructionsFilePath} (with path directive appended)`,
|
||||
@@ -672,7 +736,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
});
|
||||
}
|
||||
|
||||
const proc = await runAdapterExecutionTargetProcess(runId, executionTarget, command, args, {
|
||||
const proc = await runAdapterExecutionTargetProcess(runId, runtimeExecutionTarget, command, args, {
|
||||
cwd,
|
||||
env,
|
||||
stdin: prompt,
|
||||
@@ -793,7 +857,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
promptBundleKey: promptBundle.bundleKey,
|
||||
...(executionTargetIsRemote
|
||||
? {
|
||||
remoteExecution: adapterExecutionTargetSessionIdentity(executionTarget),
|
||||
remoteExecution: adapterExecutionTargetSessionIdentity(runtimeExecutionTarget),
|
||||
}
|
||||
: {}),
|
||||
...(workspaceId ? { workspaceId } : {}),
|
||||
|
||||
43
packages/adapters/claude-local/src/server/permissions.ts
Normal file
@@ -0,0 +1,43 @@
|
||||
// Explicit allowlist of Claude Code tools we permit when running inside a
|
||||
// sandbox. We use this instead of `--dangerously-skip-permissions` for sandbox
|
||||
// targets because the permission-approval prompts can't be answered by a
|
||||
// human inside a non-interactive sandbox, but blanket-allowing every tool
|
||||
// would defeat the point of having a separate sandbox code path.
|
||||
//
|
||||
// Maintenance: this list must be reviewed when Claude Code releases a new
|
||||
// tool. The canonical list of built-in tools is documented at
|
||||
// https://docs.claude.com/en/docs/claude-code/built-in-tools — when a tool
|
||||
// is added there, decide whether it should be allowed in sandbox runs and
|
||||
// either add it here or document the deliberate exclusion. Omitting a tool
|
||||
// silently disables it inside sandboxes, which can look like the tool is
|
||||
// "broken" rather than intentionally gated.
|
||||
const SANDBOX_ALLOWED_TOOLS =
|
||||
"Task AskUserQuestion Bash(*) CronCreate CronDelete CronList Edit " +
|
||||
"EnterPlanMode EnterWorktree ExitPlanMode ExitWorktree Glob Grep Monitor " +
|
||||
"NotebookEdit PushNotification Read RemoteTrigger ScheduleWakeup Skill " +
|
||||
"TaskOutput TaskStop TodoWrite ToolSearch WebFetch WebSearch Write";
|
||||
|
||||
export function buildClaudeProbePermissionArgs(input: {
|
||||
dangerouslySkipPermissions: boolean;
|
||||
targetIsSandbox: boolean;
|
||||
}): string[] {
|
||||
if (!input.dangerouslySkipPermissions) return [];
|
||||
// For sandbox targets, mirror the execution path: pass `--allowedTools`
|
||||
// with the curated allowlist instead of dropping the flag entirely. The
|
||||
// hello probe is a one-shot prompt that should never trigger a tool, but
|
||||
// if a future probe prompt does, we don't want Claude CLI to stall on an
|
||||
// interactive permission prompt that no human can answer.
|
||||
if (input.targetIsSandbox) return ["--allowedTools", SANDBOX_ALLOWED_TOOLS];
|
||||
return ["--dangerously-skip-permissions"];
|
||||
}
|
||||
|
||||
export function buildClaudeExecutionPermissionArgs(input: {
|
||||
dangerouslySkipPermissions: boolean;
|
||||
targetIsSandbox: boolean;
|
||||
}): string[] {
|
||||
if (!input.dangerouslySkipPermissions) return [];
|
||||
if (input.targetIsSandbox) {
|
||||
return ["--allowedTools", SANDBOX_ALLOWED_TOOLS];
|
||||
}
|
||||
return ["--dangerously-skip-permissions"];
|
||||
}
|
||||
@@ -3,8 +3,11 @@ import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { createHash, type Hash } from "node:crypto";
|
||||
import type { AdapterExecutionContext } from "@paperclipai/adapter-utils";
|
||||
import { ensurePaperclipSkillSymlink, type PaperclipSkillEntry } from "@paperclipai/adapter-utils/server-utils";
|
||||
import { resolvePaperclipSpaceRoot } from "@paperclipai/shared/space-paths";
|
||||
import {
|
||||
ensurePaperclipSkillSymlink,
|
||||
resolvePaperclipInstanceRootForAdapter,
|
||||
type PaperclipSkillEntry,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
type SkillEntry = PaperclipSkillEntry;
|
||||
|
||||
@@ -23,13 +26,13 @@ function resolveManagedClaudePromptCacheRoot(
|
||||
env: NodeJS.ProcessEnv,
|
||||
companyId: string,
|
||||
): string {
|
||||
const spaceRoot = resolvePaperclipSpaceRoot({
|
||||
const instanceRoot = resolvePaperclipInstanceRootForAdapter({
|
||||
homeDir: nonEmpty(env.PAPERCLIP_HOME) ?? undefined,
|
||||
instanceId: nonEmpty(env.PAPERCLIP_INSTANCE_ID) ?? undefined,
|
||||
spaceId: nonEmpty(env.PAPERCLIP_SPACE_ID) ?? undefined,
|
||||
env,
|
||||
});
|
||||
return path.resolve(
|
||||
spaceRoot,
|
||||
instanceRoot,
|
||||
"companies",
|
||||
companyId,
|
||||
"claude-prompt-cache",
|
||||
|
||||
@@ -22,6 +22,7 @@ import {
|
||||
import path from "node:path";
|
||||
import { detectClaudeLoginRequired, parseClaudeStreamJson } from "./parse.js";
|
||||
import { isBedrockModelId } from "./models.js";
|
||||
import { buildClaudeProbePermissionArgs } from "./permissions.js";
|
||||
import { SANDBOX_INSTALL_COMMAND } from "../index.js";
|
||||
|
||||
function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentTestResult["status"] {
|
||||
@@ -64,6 +65,7 @@ export async function testEnvironment(
|
||||
const command = asString(config.command, "claude");
|
||||
const target = ctx.executionTarget ?? null;
|
||||
const targetIsRemote = target?.kind === "remote";
|
||||
const targetIsSandbox = target?.kind === "remote" && target.transport === "sandbox";
|
||||
const cwd = resolveAdapterExecutionTargetCwd(target, asString(config.cwd, ""), process.cwd());
|
||||
const targetLabel = targetIsRemote
|
||||
? ctx.environmentName ?? describeAdapterExecutionTarget(target)
|
||||
@@ -200,7 +202,7 @@ export async function testEnvironment(
|
||||
})();
|
||||
|
||||
const args = ["--print", "-", "--output-format", "stream-json", "--verbose"];
|
||||
if (dangerouslySkipPermissions) args.push("--dangerously-skip-permissions");
|
||||
args.push(...buildClaudeProbePermissionArgs({ dangerouslySkipPermissions, targetIsSandbox }));
|
||||
if (chrome) args.push("--chrome");
|
||||
// For Bedrock: only pass --model when the ID is a Bedrock-native identifier.
|
||||
if (model && (!hasBedrock || isBedrockModelId(model))) {
|
||||
|
||||
@@ -53,7 +53,6 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@paperclipai/adapter-utils": "workspace:*",
|
||||
"@paperclipai/shared": "workspace:*",
|
||||
"picocolors": "^1.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -67,4 +67,22 @@ describe("buildCodexExecArgs", () => {
|
||||
"-",
|
||||
]);
|
||||
});
|
||||
|
||||
it("adds --skip-git-repo-check when requested", () => {
|
||||
const result = buildCodexExecArgs(
|
||||
{
|
||||
model: "gpt-5.3-codex",
|
||||
},
|
||||
{ skipGitRepoCheck: true },
|
||||
);
|
||||
|
||||
expect(result.args).toEqual([
|
||||
"exec",
|
||||
"--json",
|
||||
"--skip-git-repo-check",
|
||||
"--model",
|
||||
"gpt-5.3-codex",
|
||||
"-",
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -30,7 +30,10 @@ function formatFastModeSupportedModels(): string {
|
||||
|
||||
export function buildCodexExecArgs(
|
||||
config: unknown,
|
||||
options: { resumeSessionId?: string | null } = {},
|
||||
options: {
|
||||
resumeSessionId?: string | null;
|
||||
skipGitRepoCheck?: boolean;
|
||||
} = {},
|
||||
): BuildCodexExecArgsResult {
|
||||
const record = asRecord(config);
|
||||
const model = asString(record.model, "").trim();
|
||||
@@ -48,6 +51,7 @@ export function buildCodexExecArgs(
|
||||
const extraArgs = readExtraArgs(record);
|
||||
|
||||
const args = ["exec", "--json"];
|
||||
if (options.skipGitRepoCheck) args.push("--skip-git-repo-check");
|
||||
if (search) args.unshift("--search");
|
||||
if (bypass) args.push("--dangerously-bypass-approvals-and-sandbox");
|
||||
if (model) args.push("--model", model);
|
||||
|
||||
@@ -2,7 +2,7 @@ import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { AdapterExecutionContext } from "@paperclipai/adapter-utils";
|
||||
import { resolvePaperclipSpaceRoot } from "@paperclipai/shared/space-paths";
|
||||
import { resolvePaperclipInstanceRootForAdapter } from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
const TRUTHY_ENV_RE = /^(1|true|yes|on)$/i;
|
||||
const COPIED_SHARED_FILES = ["config.json", "config.toml", "instructions.md"] as const;
|
||||
@@ -31,14 +31,14 @@ export function resolveManagedCodexHomeDir(
|
||||
env: NodeJS.ProcessEnv,
|
||||
companyId?: string,
|
||||
): string {
|
||||
const spaceRoot = resolvePaperclipSpaceRoot({
|
||||
const instanceRoot = resolvePaperclipInstanceRootForAdapter({
|
||||
homeDir: nonEmpty(env.PAPERCLIP_HOME) ?? undefined,
|
||||
instanceId: nonEmpty(env.PAPERCLIP_INSTANCE_ID) ?? undefined,
|
||||
spaceId: nonEmpty(env.PAPERCLIP_SPACE_ID) ?? undefined,
|
||||
env,
|
||||
});
|
||||
return companyId
|
||||
? path.resolve(spaceRoot, "companies", companyId, "codex-home")
|
||||
: path.resolve(spaceRoot, "codex-home");
|
||||
? path.resolve(instanceRoot, "companies", companyId, "codex-home")
|
||||
: path.resolve(instanceRoot, "codex-home");
|
||||
}
|
||||
|
||||
async function ensureParentDir(target: string): Promise<void> {
|
||||
|
||||
@@ -23,7 +23,7 @@ const {
|
||||
})),
|
||||
ensureCommandResolvable: vi.fn(async () => undefined),
|
||||
resolveCommandForLogs: vi.fn(async () => "/usr/bin/codex"),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => undefined),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => ({ gitBacked: false })),
|
||||
restoreWorkspaceFromSshExecution: vi.fn(async () => undefined),
|
||||
syncDirectoryToSsh: vi.fn(async () => undefined),
|
||||
startAdapterExecutionTargetPaperclipBridge: vi.fn(async () => ({
|
||||
@@ -89,6 +89,7 @@ describe("codex remote execution", () => {
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const codexHomeDir = path.join(rootDir, "codex-home");
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-1/workspace";
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(codexHomeDir, { recursive: true });
|
||||
await writeFile(path.join(rootDir, "instructions.md"), "Use the remote workspace.\n", "utf8");
|
||||
@@ -161,12 +162,12 @@ describe("codex remote execution", () => {
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledTimes(1);
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledWith(expect.objectContaining({
|
||||
localDir: workspaceDir,
|
||||
remoteDir: "/remote/workspace",
|
||||
remoteDir: managedRemoteWorkspace,
|
||||
}));
|
||||
expect(syncDirectoryToSsh).toHaveBeenCalledTimes(1);
|
||||
expect(syncDirectoryToSsh).toHaveBeenCalledWith(expect.objectContaining({
|
||||
localDir: codexHomeDir,
|
||||
remoteDir: "/remote/workspace/.paperclip-runtime/codex/home",
|
||||
remoteDir: `${managedRemoteWorkspace}/.paperclip-runtime/codex/home`,
|
||||
followSymlinks: true,
|
||||
}));
|
||||
|
||||
@@ -174,13 +175,14 @@ describe("codex remote execution", () => {
|
||||
const call = runChildProcess.mock.calls[0] as unknown as
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(call?.[3].env.CODEX_HOME).toBe("/remote/workspace/.paperclip-runtime/codex/home");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(call?.[2]).not.toContain("--skip-git-repo-check");
|
||||
expect(call?.[3].env.CODEX_HOME).toBe(`${managedRemoteWorkspace}/.paperclip-runtime/codex/home`);
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe(managedRemoteWorkspace);
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_WORKTREE_PATH).toBeUndefined();
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
@@ -192,12 +194,12 @@ describe("codex remote execution", () => {
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe(managedRemoteWorkspace);
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledWith(expect.objectContaining({
|
||||
localDir: workspaceDir,
|
||||
remoteDir: "/remote/workspace",
|
||||
remoteDir: managedRemoteWorkspace,
|
||||
}));
|
||||
});
|
||||
|
||||
@@ -269,6 +271,7 @@ describe("codex remote execution", () => {
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const codexHomeDir = path.join(rootDir, "codex-home");
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-ssh-resume/workspace";
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(codexHomeDir, { recursive: true });
|
||||
await writeFile(path.join(codexHomeDir, "auth.json"), "{}", "utf8");
|
||||
@@ -286,13 +289,13 @@ describe("codex remote execution", () => {
|
||||
sessionId: "session-123",
|
||||
sessionParams: {
|
||||
sessionId: "session-123",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
remoteExecution: {
|
||||
transport: "ssh",
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
remoteCwd: managedRemoteWorkspace,
|
||||
},
|
||||
},
|
||||
sessionDisplayId: "session-123",
|
||||
@@ -341,6 +344,7 @@ describe("codex remote execution", () => {
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const codexHomeDir = path.join(rootDir, "codex-home");
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-target/workspace";
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(codexHomeDir, { recursive: true });
|
||||
await writeFile(path.join(codexHomeDir, "auth.json"), "{}", "utf8");
|
||||
@@ -358,13 +362,13 @@ describe("codex remote execution", () => {
|
||||
sessionId: "session-123",
|
||||
sessionParams: {
|
||||
sessionId: "session-123",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
remoteExecution: {
|
||||
transport: "ssh",
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
remoteCwd: managedRemoteWorkspace,
|
||||
},
|
||||
},
|
||||
sessionDisplayId: "session-123",
|
||||
@@ -412,7 +416,7 @@ describe("codex remote execution", () => {
|
||||
"session-123",
|
||||
"-",
|
||||
]);
|
||||
expect(call?.[3].env.CODEX_HOME).toBe("/remote/workspace/.paperclip-runtime/codex/home");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(call?.[3].env.CODEX_HOME).toBe(`${managedRemoteWorkspace}/.paperclip-runtime/codex/home`);
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe(managedRemoteWorkspace);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -5,6 +5,7 @@ import { inferOpenAiCompatibleBiller, type AdapterExecutionContext, type Adapter
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
overrideAdapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
@@ -13,6 +14,7 @@ import {
|
||||
ensureAdapterExecutionTargetRuntimeCommandInstalled,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetTimeoutSec,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
@@ -21,18 +23,17 @@ import {
|
||||
asString,
|
||||
asNumber,
|
||||
parseObject,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
buildInvocationEnvForLogs,
|
||||
ensureAbsoluteDirectory,
|
||||
ensurePaperclipSkillSymlink,
|
||||
ensurePathInEnv,
|
||||
refreshPaperclipWorkspaceEnvForExecution,
|
||||
readPaperclipRuntimeSkillEntries,
|
||||
readPaperclipIssueWorkModeFromContext,
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
joinPromptSections,
|
||||
@@ -358,14 +359,12 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
desiredSkillNames,
|
||||
},
|
||||
);
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceWorktreePath,
|
||||
workspaceHints,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
const timeoutSec = resolveAdapterExecutionTargetTimeoutSec(
|
||||
executionTarget,
|
||||
asNumber(config.timeoutSec, 0),
|
||||
);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
let effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const preparedExecutionTargetRuntime = executionTargetIsRemote
|
||||
? await (async () => {
|
||||
await onLog(
|
||||
@@ -373,8 +372,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
`[paperclip] Syncing workspace and CODEX_HOME to ${describeAdapterExecutionTarget(executionTarget)}.\n`,
|
||||
);
|
||||
return await prepareAdapterExecutionTargetRuntime({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
adapterKey: "codex",
|
||||
timeoutSec,
|
||||
workspaceLocalDir: cwd,
|
||||
installCommand: SANDBOX_INSTALL_COMMAND,
|
||||
detectCommand: command,
|
||||
@@ -388,6 +389,12 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
});
|
||||
})()
|
||||
: null;
|
||||
if (preparedExecutionTargetRuntime?.workspaceRemoteDir) {
|
||||
effectiveExecutionCwd = preparedExecutionTargetRuntime.workspaceRemoteDir;
|
||||
}
|
||||
const runtimeExecutionTarget = overrideAdapterExecutionTargetRemoteCwd(executionTarget, effectiveExecutionCwd);
|
||||
const executionTargetIsSandbox =
|
||||
runtimeExecutionTarget?.kind === "remote" && runtimeExecutionTarget.transport === "sandbox";
|
||||
const restoreRemoteWorkspace = preparedExecutionTargetRuntime
|
||||
? () => preparedExecutionTargetRuntime.restoreWorkspace()
|
||||
: null;
|
||||
@@ -449,20 +456,22 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (wakePayloadJson) {
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
refreshPaperclipWorkspaceEnvForExecution({
|
||||
env,
|
||||
envConfig,
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceStrategy,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceBranch,
|
||||
workspaceWorktreePath: shapedWorkspaceEnv.workspaceWorktreePath,
|
||||
workspaceWorktreePath,
|
||||
workspaceHints,
|
||||
agentHome,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
if (runtimeServiceIntents.length > 0) {
|
||||
env.PAPERCLIP_RUNTIME_SERVICE_INTENTS_JSON = JSON.stringify(runtimeServiceIntents);
|
||||
}
|
||||
@@ -472,19 +481,17 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (runtimePrimaryUrl) {
|
||||
env.PAPERCLIP_RUNTIME_PRIMARY_URL = runtimePrimaryUrl;
|
||||
}
|
||||
for (const [k, v] of Object.entries(envConfig)) {
|
||||
if (typeof v === "string") env[k] = v;
|
||||
}
|
||||
env.CODEX_HOME = remoteCodexHome ?? effectiveCodexHome;
|
||||
if (!hasExplicitApiKey && authToken) {
|
||||
env.PAPERCLIP_API_KEY = authToken;
|
||||
}
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(runtimeExecutionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
target: runtimeExecutionTarget,
|
||||
runtimeRootDir: preparedExecutionTargetRuntime?.runtimeRootDir,
|
||||
adapterKey: "codex",
|
||||
timeoutSec,
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
@@ -510,8 +517,8 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
detectCommand: ctx.runtimeCommandSpec?.detectCommand,
|
||||
cwd,
|
||||
env: runtimeEnv,
|
||||
timeoutSec: asNumber(config.timeoutSec, 0),
|
||||
graceSec: asNumber(config.graceSec, 20),
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
onLog,
|
||||
});
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
@@ -522,9 +529,6 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
resolvedCommand,
|
||||
});
|
||||
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
|
||||
const runtimeSessionParams = parseObject(runtime.sessionParams);
|
||||
const runtimeSessionId = asString(runtimeSessionParams.sessionId, runtime.sessionId ?? "");
|
||||
const runtimeSessionCwd = asString(runtimeSessionParams.cwd, "");
|
||||
@@ -532,7 +536,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const canResumeSession =
|
||||
runtimeSessionId.length > 0 &&
|
||||
(runtimeSessionCwd.length === 0 || path.resolve(runtimeSessionCwd) === path.resolve(effectiveExecutionCwd)) &&
|
||||
adapterExecutionTargetSessionMatches(runtimeRemoteExecution, executionTarget);
|
||||
adapterExecutionTargetSessionMatches(runtimeRemoteExecution, runtimeExecutionTarget);
|
||||
const codexTransientFallbackMode = readCodexTransientFallbackMode(context);
|
||||
const forceSaferInvocation = fallbackModeUsesSaferInvocation(codexTransientFallbackMode);
|
||||
const forceFreshSession = fallbackModeUsesFreshSession(codexTransientFallbackMode);
|
||||
@@ -649,6 +653,11 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}
|
||||
return notes;
|
||||
})();
|
||||
if (executionTargetIsSandbox) {
|
||||
commandNotes.push(
|
||||
"Added --skip-git-repo-check for sandbox execution because Codex requires an explicit trust bypass in headless remote workspaces.",
|
||||
);
|
||||
}
|
||||
const renderedPrompt = shouldUseResumeDeltaPrompt ? "" : renderTemplate(promptTemplate, templateData);
|
||||
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
|
||||
const prompt = joinPromptSections([
|
||||
@@ -671,7 +680,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const runAttempt = async (resumeSessionId: string | null) => {
|
||||
const execArgs = buildCodexExecArgs(
|
||||
forceSaferInvocation ? { ...config, fastMode: false } : config,
|
||||
{ resumeSessionId },
|
||||
{
|
||||
resumeSessionId,
|
||||
skipGitRepoCheck: executionTargetIsSandbox,
|
||||
},
|
||||
);
|
||||
const args = execArgs.args;
|
||||
const commandNotesWithFastMode =
|
||||
@@ -695,7 +707,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
});
|
||||
}
|
||||
|
||||
const proc = await runAdapterExecutionTargetProcess(runId, executionTarget, command, args, {
|
||||
const proc = await runAdapterExecutionTargetProcess(runId, runtimeExecutionTarget, command, args, {
|
||||
cwd,
|
||||
env,
|
||||
stdin: prompt,
|
||||
@@ -748,7 +760,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
cwd: effectiveExecutionCwd,
|
||||
...(executionTargetIsRemote
|
||||
? {
|
||||
remoteExecution: adapterExecutionTargetSessionIdentity(executionTarget),
|
||||
remoteExecution: adapterExecutionTargetSessionIdentity(runtimeExecutionTarget),
|
||||
}
|
||||
: {}),
|
||||
...(workspaceId ? { workspaceId } : {}),
|
||||
|
||||
194
packages/adapters/codex-local/src/server/test.remote.test.ts
Normal file
@@ -0,0 +1,194 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import type { AdapterExecutionTarget } from "@paperclipai/adapter-utils/execution-target";
|
||||
|
||||
const {
|
||||
ensureAdapterExecutionTargetDirectory,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
maybeRunSandboxInstallCommand,
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
prepareManagedCodexHome,
|
||||
restoreWorkspace,
|
||||
} = vi.hoisted(() => {
|
||||
const restoreWorkspace = vi.fn(async () => {});
|
||||
return {
|
||||
ensureAdapterExecutionTargetDirectory: vi.fn(async () => {}),
|
||||
ensureAdapterExecutionTargetCommandResolvable: vi.fn(async () => {}),
|
||||
maybeRunSandboxInstallCommand: vi.fn(async () => null),
|
||||
runAdapterExecutionTargetProcess: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: [
|
||||
"{\"type\":\"thread.started\",\"thread_id\":\"thread-1\"}",
|
||||
"{\"type\":\"item.completed\",\"item\":{\"type\":\"agent_message\",\"text\":\"hello\"}}",
|
||||
"{\"type\":\"turn.completed\",\"usage\":{\"input_tokens\":1,\"cached_input_tokens\":0,\"output_tokens\":1}}",
|
||||
].join("\n"),
|
||||
stderr: "",
|
||||
pid: 123,
|
||||
startedAt: new Date().toISOString(),
|
||||
})),
|
||||
describeAdapterExecutionTarget: vi.fn(() => "QA SSH"),
|
||||
resolveAdapterExecutionTargetCwd: vi.fn((target, configuredCwd, fallbackCwd) => {
|
||||
if (typeof configuredCwd === "string" && configuredCwd.trim().length > 0) return configuredCwd;
|
||||
if (target && typeof target === "object" && "remoteCwd" in target && typeof target.remoteCwd === "string") {
|
||||
return target.remoteCwd;
|
||||
}
|
||||
return fallbackCwd;
|
||||
}),
|
||||
prepareAdapterExecutionTargetRuntime: vi.fn(async () => ({
|
||||
target: null,
|
||||
workspaceRemoteDir: "/remote/workspace/.paperclip-runtime/runs/test/workspace",
|
||||
runtimeRootDir: "/remote/workspace/.paperclip-runtime/runs/test/workspace/.paperclip-runtime/codex",
|
||||
assetDirs: {
|
||||
home: "/remote/workspace/.paperclip-runtime/runs/test/workspace/.paperclip-runtime/codex/home",
|
||||
},
|
||||
restoreWorkspace,
|
||||
})),
|
||||
prepareManagedCodexHome: vi.fn(async () => "/tmp/paperclip-managed-codex-home"),
|
||||
restoreWorkspace,
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/execution-target", async () => {
|
||||
const actual = await vi.importActual<typeof import("@paperclipai/adapter-utils/execution-target")>(
|
||||
"@paperclipai/adapter-utils/execution-target",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
ensureAdapterExecutionTargetDirectory,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
maybeRunSandboxInstallCommand,
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("./codex-home.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("./codex-home.js")>("./codex-home.js");
|
||||
return {
|
||||
...actual,
|
||||
prepareManagedCodexHome,
|
||||
};
|
||||
});
|
||||
|
||||
import { testEnvironment } from "./test.js";
|
||||
|
||||
describe("codex remote environment diagnostics", () => {
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks();
|
||||
delete process.env.OPENAI_API_KEY;
|
||||
});
|
||||
|
||||
it("stages managed CODEX_HOME in an isolated runtime dir and keeps the probe cwd on the original remote workspace", async () => {
|
||||
const remoteTarget: AdapterExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "ssh",
|
||||
remoteCwd: "/remote/workspace",
|
||||
spec: {
|
||||
host: "127.0.0.1",
|
||||
port: 22,
|
||||
username: "agent",
|
||||
privateKey: "PRIVATE KEY",
|
||||
knownHosts: "KNOWN HOSTS",
|
||||
remoteCwd: "/remote/workspace",
|
||||
remoteWorkspacePath: "/remote/workspace",
|
||||
strictHostKeyChecking: false,
|
||||
},
|
||||
};
|
||||
|
||||
const result = await testEnvironment({
|
||||
companyId: "company-1",
|
||||
adapterType: "codex_local",
|
||||
config: {
|
||||
command: "codex",
|
||||
},
|
||||
executionTarget: remoteTarget,
|
||||
environmentName: "QA SSH",
|
||||
});
|
||||
|
||||
expect(result.status).toBe("pass");
|
||||
expect(result.checks.some((check) => check.code === "codex_hello_probe_passed")).toBe(true);
|
||||
expect(prepareManagedCodexHome).toHaveBeenCalledTimes(1);
|
||||
expect(prepareAdapterExecutionTargetRuntime).toHaveBeenCalledTimes(1);
|
||||
const runtimeCalls = prepareAdapterExecutionTargetRuntime.mock.calls as unknown as Array<[
|
||||
{
|
||||
workspaceLocalDir: string;
|
||||
target?: { remoteCwd?: string };
|
||||
workspaceRemoteDir?: string;
|
||||
},
|
||||
]>;
|
||||
const runtimeInput = runtimeCalls[0]?.[0];
|
||||
expect(runtimeInput?.workspaceLocalDir).toContain(`${os.tmpdir()}/paperclip-codex-envtest-`);
|
||||
expect(runtimeInput?.workspaceLocalDir).not.toBe("/remote/workspace");
|
||||
expect(await fs.stat(runtimeInput!.workspaceLocalDir).catch(() => null)).toBeNull();
|
||||
expect(runtimeInput?.target?.remoteCwd).toBe("/remote/workspace");
|
||||
// `workspaceRemoteDir` is the base path passed to the runtime; the
|
||||
// helper's per-run subdirectory is appended internally inside
|
||||
// `prepareRemoteManagedRuntime`. Pre-building a per-run prefix here
|
||||
// would double-nest the run id in the final path.
|
||||
expect(runtimeInput?.workspaceRemoteDir).toBe("/remote/workspace");
|
||||
expect(runAdapterExecutionTargetProcess).toHaveBeenCalledTimes(1);
|
||||
const probeCall = runAdapterExecutionTargetProcess.mock.calls[0] as unknown as
|
||||
| [string, { kind: string; remoteCwd: string }, string, string[], { cwd: string; env: Record<string, string> }]
|
||||
| undefined;
|
||||
expect(probeCall?.[1]).toMatchObject({
|
||||
kind: "remote",
|
||||
remoteCwd: "/remote/workspace",
|
||||
});
|
||||
expect(probeCall?.[4]).toMatchObject({
|
||||
cwd: "/remote/workspace",
|
||||
env: expect.objectContaining({
|
||||
CODEX_HOME: "/remote/workspace/.paperclip-runtime/runs/test/workspace/.paperclip-runtime/codex/home",
|
||||
}),
|
||||
});
|
||||
expect(restoreWorkspace).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("avoids /tmp CODEX_HOME for remote API-key hello probes", async () => {
|
||||
const remoteTarget: AdapterExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
providerKey: "cloudflare",
|
||||
remoteCwd: "/remote/workspace",
|
||||
runner: {
|
||||
execute: async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
}),
|
||||
},
|
||||
};
|
||||
|
||||
const result = await testEnvironment({
|
||||
companyId: "company-1",
|
||||
adapterType: "codex_local",
|
||||
config: {
|
||||
command: "codex",
|
||||
env: {
|
||||
OPENAI_API_KEY: "sk-test",
|
||||
},
|
||||
},
|
||||
executionTarget: remoteTarget,
|
||||
environmentName: "QA Cloudflare",
|
||||
});
|
||||
|
||||
expect(result.status).toBe("pass");
|
||||
const probeCall = runAdapterExecutionTargetProcess.mock.calls[0] as unknown as
|
||||
| [string, AdapterExecutionTarget, string, string[], { cwd: string; env: Record<string, string> }]
|
||||
| undefined;
|
||||
expect(probeCall?.[4].env.CODEX_HOME).toContain("/remote/workspace/.paperclip-runtime/codex/probe-home-codex-envtest-");
|
||||
expect(probeCall?.[4].env.CODEX_HOME?.startsWith("/tmp/")).toBe(false);
|
||||
expect(probeCall?.[3]).toContain("--skip-git-repo-check");
|
||||
});
|
||||
});
|
||||
@@ -15,13 +15,16 @@ import {
|
||||
runAdapterExecutionTargetProcess,
|
||||
describeAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
} from "@paperclipai/adapter-utils/execution-target";
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import os from "node:os";
|
||||
import { parseCodexJsonl } from "./parse.js";
|
||||
import { SANDBOX_INSTALL_COMMAND } from "../index.js";
|
||||
import { codexHomeDir, readCodexAuthInfo } from "./quota.js";
|
||||
import { buildCodexExecArgs } from "./codex-args.js";
|
||||
import { prepareManagedCodexHome } from "./codex-home.js";
|
||||
|
||||
function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentTestResult["status"] {
|
||||
if (checks.some((check) => check.level === "error")) return "fail";
|
||||
@@ -58,6 +61,99 @@ function summarizeProbeDetail(stdout: string, stderr: string, parsedError: strin
|
||||
const CODEX_AUTH_REQUIRED_RE =
|
||||
/(?:not\s+logged\s+in|login\s+required|authentication\s+required|unauthorized|invalid(?:\s+or\s+missing)?\s+api(?:[_\s-]?key)?|openai[_\s-]?api[_\s-]?key|api[_\s-]?key.*required|please\s+run\s+`?codex\s+login`?)/i;
|
||||
|
||||
async function prepareCodexHelloProbe(input: {
|
||||
runId: string;
|
||||
companyId: string;
|
||||
target: AdapterEnvironmentTestContext["executionTarget"] | null;
|
||||
targetIsRemote: boolean;
|
||||
cwd: string;
|
||||
command: string;
|
||||
args: string[];
|
||||
env: Record<string, string>;
|
||||
probeApiKey: string | null;
|
||||
}): Promise<{
|
||||
command: string;
|
||||
args: string[];
|
||||
env: Record<string, string>;
|
||||
cleanup: () => Promise<void>;
|
||||
}> {
|
||||
let preparedRuntime: Awaited<ReturnType<typeof prepareAdapterExecutionTargetRuntime>> | null = null;
|
||||
let preparedRuntimeWorkspaceLocalDir: string | null = null;
|
||||
|
||||
const cleanup = async () => {
|
||||
await preparedRuntime?.restoreWorkspace().catch(() => {});
|
||||
if (preparedRuntimeWorkspaceLocalDir) {
|
||||
await fs.rm(preparedRuntimeWorkspaceLocalDir, { recursive: true, force: true }).catch(() => {});
|
||||
}
|
||||
};
|
||||
|
||||
if (input.targetIsRemote && !input.probeApiKey) {
|
||||
const managedHome = await prepareManagedCodexHome(process.env, async () => {}, input.companyId, {
|
||||
apiKey: null,
|
||||
});
|
||||
preparedRuntimeWorkspaceLocalDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), `paperclip-codex-envtest-${input.runId}-`),
|
||||
);
|
||||
preparedRuntime = await prepareAdapterExecutionTargetRuntime({
|
||||
runId: input.runId,
|
||||
target: input.target,
|
||||
adapterKey: "codex",
|
||||
workspaceLocalDir: preparedRuntimeWorkspaceLocalDir,
|
||||
// Pass `input.cwd` as the base (not a pre-built per-run subdir).
|
||||
// `prepareRemoteManagedRuntime` itself appends
|
||||
// `.paperclip-runtime/runs/<runId>/workspace` to whatever it gets, so
|
||||
// pre-building a per-run path here would double-nest the run ID.
|
||||
workspaceRemoteDir: input.cwd,
|
||||
installCommand: SANDBOX_INSTALL_COMMAND,
|
||||
detectCommand: input.command,
|
||||
assets: [
|
||||
{
|
||||
key: "home",
|
||||
localDir: managedHome,
|
||||
followSymlinks: true,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
return {
|
||||
command: input.command,
|
||||
args: input.args,
|
||||
env: preparedRuntime.assetDirs.home
|
||||
? { ...input.env, CODEX_HOME: preparedRuntime.assetDirs.home }
|
||||
: { ...input.env },
|
||||
cleanup,
|
||||
};
|
||||
}
|
||||
|
||||
if (input.probeApiKey) {
|
||||
const probeHome = input.targetIsRemote
|
||||
? path.posix.join(input.cwd, ".paperclip-runtime", "codex", `probe-home-${input.runId}`)
|
||||
: path.join(os.tmpdir(), `paperclip-codex-probe-${input.runId}`);
|
||||
return {
|
||||
command: "sh",
|
||||
args: [
|
||||
"-c",
|
||||
'set -e; mkdir -p "$CODEX_HOME"; umask 077; printf "%s" "$_PAPERCLIP_CODEX_AUTH_JSON" > "$CODEX_HOME/auth.json"; unset _PAPERCLIP_CODEX_AUTH_JSON; trap \'rm -rf "$CODEX_HOME"\' EXIT INT TERM; "$0" "$@"',
|
||||
input.command,
|
||||
...input.args,
|
||||
],
|
||||
env: {
|
||||
...input.env,
|
||||
CODEX_HOME: probeHome,
|
||||
_PAPERCLIP_CODEX_AUTH_JSON: JSON.stringify({ OPENAI_API_KEY: input.probeApiKey }),
|
||||
},
|
||||
cleanup,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
command: input.command,
|
||||
args: input.args,
|
||||
env: { ...input.env },
|
||||
cleanup,
|
||||
};
|
||||
}
|
||||
|
||||
export async function testEnvironment(
|
||||
ctx: AdapterEnvironmentTestContext,
|
||||
): Promise<AdapterEnvironmentTestResult> {
|
||||
@@ -66,6 +162,7 @@ export async function testEnvironment(
|
||||
const command = asString(config.command, "codex");
|
||||
const target = ctx.executionTarget ?? null;
|
||||
const targetIsRemote = target?.kind === "remote";
|
||||
const targetIsSandbox = target?.kind === "remote" && target.transport === "sandbox";
|
||||
const cwd = resolveAdapterExecutionTargetCwd(target, asString(config.cwd, ""), process.cwd());
|
||||
const targetLabel = targetIsRemote
|
||||
? ctx.environmentName ?? describeAdapterExecutionTarget(target)
|
||||
@@ -175,7 +272,10 @@ export async function testEnvironment(
|
||||
hint: "Use the `codex` CLI command to run the automatic login and installation probe.",
|
||||
});
|
||||
} else {
|
||||
const execArgs = buildCodexExecArgs({ ...config, fastMode: false });
|
||||
const execArgs = buildCodexExecArgs(
|
||||
{ ...config, fastMode: false },
|
||||
{ skipGitRepoCheck: targetIsSandbox },
|
||||
);
|
||||
const args = execArgs.args;
|
||||
if (execArgs.fastModeIgnoredReason) {
|
||||
checks.push({
|
||||
@@ -185,6 +285,14 @@ export async function testEnvironment(
|
||||
hint: "Switch the agent model to GPT-5.4 or enter a manual model ID to enable Codex Fast mode.",
|
||||
});
|
||||
}
|
||||
if (targetIsSandbox) {
|
||||
checks.push({
|
||||
code: "codex_git_repo_check_skipped",
|
||||
level: "info",
|
||||
message: "Added --skip-git-repo-check for sandbox hello probes.",
|
||||
hint: "Codex requires an explicit trust bypass in headless remote sandbox workspaces.",
|
||||
});
|
||||
}
|
||||
|
||||
// Codex CLI (>= 0.122) ignores the OPENAI_API_KEY env var and only reads
|
||||
// credentials from $CODEX_HOME/auth.json. When we have a key available,
|
||||
@@ -196,86 +304,80 @@ export async function testEnvironment(
|
||||
: isNonEmpty(hostOpenAiKey)
|
||||
? hostOpenAiKey
|
||||
: null;
|
||||
let probeCommand = command;
|
||||
let probeArgs = args;
|
||||
const probeEnv: Record<string, string> = { ...env };
|
||||
if (probeApiKey) {
|
||||
const probeHome = targetIsRemote
|
||||
? `/tmp/paperclip-codex-probe-${runId}`
|
||||
: path.join(os.tmpdir(), `paperclip-codex-probe-${runId}`);
|
||||
probeEnv.CODEX_HOME = probeHome;
|
||||
probeEnv._PAPERCLIP_CODEX_AUTH_JSON = JSON.stringify({ OPENAI_API_KEY: probeApiKey });
|
||||
probeCommand = "sh";
|
||||
// Trap on EXIT removes the probe home (with the API-key auth.json) on
|
||||
// any exit path; we drop `exec` so the wrapper shell stays alive long
|
||||
// enough for the trap to fire after the child returns.
|
||||
probeArgs = [
|
||||
"-c",
|
||||
'set -e; mkdir -p "$CODEX_HOME"; umask 077; printf "%s" "$_PAPERCLIP_CODEX_AUTH_JSON" > "$CODEX_HOME/auth.json"; unset _PAPERCLIP_CODEX_AUTH_JSON; trap \'rm -rf "$CODEX_HOME"\' EXIT INT TERM; "$0" "$@"',
|
||||
command,
|
||||
...args,
|
||||
];
|
||||
}
|
||||
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
const preparedProbe = await prepareCodexHelloProbe({
|
||||
runId,
|
||||
companyId: ctx.companyId,
|
||||
target,
|
||||
probeCommand,
|
||||
probeArgs,
|
||||
{
|
||||
cwd,
|
||||
env: probeEnv,
|
||||
timeoutSec: 45,
|
||||
graceSec: 5,
|
||||
stdin: "Respond with hello.",
|
||||
onLog: async () => {},
|
||||
},
|
||||
);
|
||||
const parsed = parseCodexJsonl(probe.stdout);
|
||||
const detail = summarizeProbeDetail(probe.stdout, probe.stderr, parsed.errorMessage);
|
||||
const authEvidence = `${parsed.errorMessage ?? ""}\n${probe.stdout}\n${probe.stderr}`.trim();
|
||||
targetIsRemote,
|
||||
cwd,
|
||||
command,
|
||||
args,
|
||||
env,
|
||||
probeApiKey,
|
||||
});
|
||||
try {
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
runId,
|
||||
target,
|
||||
preparedProbe.command,
|
||||
preparedProbe.args,
|
||||
{
|
||||
cwd,
|
||||
env: preparedProbe.env,
|
||||
timeoutSec: 45,
|
||||
graceSec: 5,
|
||||
stdin: "Respond with hello.",
|
||||
onLog: async () => {},
|
||||
},
|
||||
);
|
||||
const parsed = parseCodexJsonl(probe.stdout);
|
||||
const detail = summarizeProbeDetail(probe.stdout, probe.stderr, parsed.errorMessage);
|
||||
const authEvidence = `${parsed.errorMessage ?? ""}\n${probe.stdout}\n${probe.stderr}`.trim();
|
||||
|
||||
if (probe.timedOut) {
|
||||
checks.push({
|
||||
code: "codex_hello_probe_timed_out",
|
||||
level: "warn",
|
||||
message: "Codex hello probe timed out.",
|
||||
hint: "Retry the probe. If this persists, verify Codex can run `Respond with hello` from this directory manually.",
|
||||
});
|
||||
} else if ((probe.exitCode ?? 1) === 0) {
|
||||
const summary = parsed.summary.trim();
|
||||
const hasHello = /\bhello\b/i.test(summary);
|
||||
checks.push({
|
||||
code: hasHello ? "codex_hello_probe_passed" : "codex_hello_probe_unexpected_output",
|
||||
level: hasHello ? "info" : "warn",
|
||||
message: hasHello
|
||||
? "Codex hello probe succeeded."
|
||||
: "Codex probe ran but did not return `hello` as expected.",
|
||||
...(summary ? { detail: summary.replace(/\s+/g, " ").trim().slice(0, 240) } : {}),
|
||||
...(hasHello
|
||||
? {}
|
||||
: {
|
||||
hint: "Try the probe manually (`codex exec --json -` then prompt: Respond with hello) to inspect full output.",
|
||||
}),
|
||||
});
|
||||
} else if (CODEX_AUTH_REQUIRED_RE.test(authEvidence)) {
|
||||
checks.push({
|
||||
code: "codex_hello_probe_auth_required",
|
||||
level: "warn",
|
||||
message: "Codex CLI is installed, but authentication is not ready.",
|
||||
...(detail ? { detail } : {}),
|
||||
hint: probeApiKey
|
||||
? "OPENAI_API_KEY was provided but Codex still rejected the request. Verify the key is valid for the OpenAI Responses API (e.g. `curl -H \"Authorization: Bearer $OPENAI_API_KEY\" https://api.openai.com/v1/models`), or run `codex login` and seed `~/.codex/auth.json`."
|
||||
: "Codex CLI does not read OPENAI_API_KEY from the environment; set OPENAI_API_KEY in this adapter's config (so Paperclip writes it to `$CODEX_HOME/auth.json`) or run `codex login` on the host first.",
|
||||
});
|
||||
} else {
|
||||
checks.push({
|
||||
code: "codex_hello_probe_failed",
|
||||
level: "error",
|
||||
message: "Codex hello probe failed.",
|
||||
...(detail ? { detail } : {}),
|
||||
hint: "Run `codex exec --json -` manually in this working directory and prompt `Respond with hello` to debug.",
|
||||
});
|
||||
if (probe.timedOut) {
|
||||
checks.push({
|
||||
code: "codex_hello_probe_timed_out",
|
||||
level: "warn",
|
||||
message: "Codex hello probe timed out.",
|
||||
hint: "Retry the probe. If this persists, verify Codex can run `Respond with hello` from this directory manually.",
|
||||
});
|
||||
} else if ((probe.exitCode ?? 1) === 0) {
|
||||
const summary = parsed.summary.trim();
|
||||
const hasHello = /\bhello\b/i.test(summary);
|
||||
checks.push({
|
||||
code: hasHello ? "codex_hello_probe_passed" : "codex_hello_probe_unexpected_output",
|
||||
level: hasHello ? "info" : "warn",
|
||||
message: hasHello
|
||||
? "Codex hello probe succeeded."
|
||||
: "Codex probe ran but did not return `hello` as expected.",
|
||||
...(summary ? { detail: summary.replace(/\s+/g, " ").trim().slice(0, 240) } : {}),
|
||||
...(hasHello
|
||||
? {}
|
||||
: {
|
||||
hint: "Try the probe manually (`codex exec --json -` then prompt: Respond with hello) to inspect full output.",
|
||||
}),
|
||||
});
|
||||
} else if (CODEX_AUTH_REQUIRED_RE.test(authEvidence)) {
|
||||
checks.push({
|
||||
code: "codex_hello_probe_auth_required",
|
||||
level: "warn",
|
||||
message: "Codex CLI is installed, but authentication is not ready.",
|
||||
...(detail ? { detail } : {}),
|
||||
hint: probeApiKey
|
||||
? "OPENAI_API_KEY was provided but Codex still rejected the request. Verify the key is valid for the OpenAI Responses API (e.g. `curl -H \"Authorization: Bearer $OPENAI_API_KEY\" https://api.openai.com/v1/models`), or run `codex login` and seed `~/.codex/auth.json`."
|
||||
: "Codex CLI does not read OPENAI_API_KEY from the environment; set OPENAI_API_KEY in this adapter's config (so Paperclip writes it to `$CODEX_HOME/auth.json`) or run `codex login` on the host first.",
|
||||
});
|
||||
} else {
|
||||
checks.push({
|
||||
code: "codex_hello_probe_failed",
|
||||
level: "error",
|
||||
message: "Codex hello probe failed.",
|
||||
...(detail ? { detail } : {}),
|
||||
hint: "Run `codex exec --json -` manually in this working directory and prompt `Respond with hello` to debug.",
|
||||
});
|
||||
}
|
||||
} finally {
|
||||
await preparedProbe.cleanup();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
61
packages/adapters/cursor-cloud/package.json
Normal file
@@ -0,0 +1,61 @@
|
||||
{
|
||||
"name": "@paperclipai/adapter-cursor-cloud",
|
||||
"version": "0.3.1",
|
||||
"license": "MIT",
|
||||
"homepage": "https://github.com/paperclipai/paperclip",
|
||||
"bugs": {
|
||||
"url": "https://github.com/paperclipai/paperclip/issues"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/paperclipai/paperclip",
|
||||
"directory": "packages/adapters/cursor-cloud"
|
||||
},
|
||||
"type": "module",
|
||||
"exports": {
|
||||
".": "./src/index.ts",
|
||||
"./server": "./src/server/index.ts",
|
||||
"./ui": "./src/ui/index.ts",
|
||||
"./cli": "./src/cli/index.ts"
|
||||
},
|
||||
"publishConfig": {
|
||||
"access": "public",
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.js"
|
||||
},
|
||||
"./server": {
|
||||
"types": "./dist/server/index.d.ts",
|
||||
"import": "./dist/server/index.js"
|
||||
},
|
||||
"./ui": {
|
||||
"types": "./dist/ui/index.d.ts",
|
||||
"import": "./dist/ui/index.js"
|
||||
},
|
||||
"./cli": {
|
||||
"types": "./dist/cli/index.d.ts",
|
||||
"import": "./dist/cli/index.js"
|
||||
}
|
||||
},
|
||||
"main": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
},
|
||||
"files": [
|
||||
"dist"
|
||||
],
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rm -rf dist",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"@cursor/sdk": "^1.0.12",
|
||||
"@paperclipai/adapter-utils": "workspace:*",
|
||||
"picocolors": "^1.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.6.0",
|
||||
"typescript": "^5.7.3"
|
||||
}
|
||||
}
|
||||
42
packages/adapters/cursor-cloud/src/cli/format-event.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
import pc from "picocolors";
|
||||
import { parseCursorCloudStdoutLine } from "../ui/parse-stdout.js";
|
||||
|
||||
export function printCursorCloudEvent(raw: string, _debug: boolean): void {
|
||||
const entries = parseCursorCloudStdoutLine(raw, new Date().toISOString());
|
||||
for (const entry of entries) {
|
||||
switch (entry.kind) {
|
||||
case "assistant":
|
||||
console.log(pc.green(`assistant: ${entry.text}`));
|
||||
break;
|
||||
case "thinking":
|
||||
console.log(pc.gray(`thinking: ${entry.text}`));
|
||||
break;
|
||||
case "user":
|
||||
console.log(pc.gray(`user: ${entry.text}`));
|
||||
break;
|
||||
case "tool_call":
|
||||
console.log(pc.yellow(`tool_call: ${entry.name}`));
|
||||
break;
|
||||
case "tool_result":
|
||||
console.log((entry.isError ? pc.red : pc.cyan)(entry.content || "tool result"));
|
||||
break;
|
||||
case "result":
|
||||
console.log((entry.isError ? pc.red : pc.blue)(`result: ${entry.subtype}${entry.text ? ` - ${entry.text}` : ""}`));
|
||||
break;
|
||||
case "stderr":
|
||||
console.error(pc.red(entry.text));
|
||||
break;
|
||||
case "system":
|
||||
console.log(pc.blue(entry.text));
|
||||
break;
|
||||
case "init":
|
||||
console.log(pc.blue(`Cursor Cloud init (${entry.sessionId})`));
|
||||
break;
|
||||
case "stdout":
|
||||
console.log(entry.text);
|
||||
break;
|
||||
default:
|
||||
console.log("text" in entry ? entry.text : JSON.stringify(entry));
|
||||
}
|
||||
}
|
||||
}
|
||||
1
packages/adapters/cursor-cloud/src/cli/index.ts
Normal file
@@ -0,0 +1 @@
|
||||
export { printCursorCloudEvent } from "./format-event.js";
|
||||
34
packages/adapters/cursor-cloud/src/index.ts
Normal file
@@ -0,0 +1,34 @@
|
||||
/** Adapter discriminator Paperclip uses to route work to this adapter. */
export const type = "cursor_cloud";

/** Human-readable adapter name (presumably shown in configuration UIs — confirm against consumers). */
export const label = "Cursor Cloud";

/**
 * Markdown reference for configuring a cursor_cloud agent.
 *
 * NOTE(review): keep the field list in sync with the config parsing in
 * `src/server/execute.ts` (repoUrl, runtimeEnvType, promptTemplate, etc.).
 * The literal below is runtime data — do not reword it casually.
 */
export const agentConfigurationDoc = `# cursor_cloud agent configuration

Adapter: cursor_cloud

Use when:
- You want Paperclip to run Cursor Cloud Agents through the official Cursor SDK
- You want durable remote Cursor agent sessions across Paperclip heartbeats
- You want Paperclip to keep task state while Cursor handles remote code execution

Core fields:
- repoUrl (string, required): Git repository URL Cursor should open
- repoStartingRef (string, optional): starting ref for the repo
- repoPullRequestUrl (string, optional): PR URL to attach the agent to
- runtimeEnvType (string, optional): cloud | pool | machine
- runtimeEnvName (string, optional): named cloud/pool/machine target
- workOnCurrentBranch (boolean, optional): continue work on current branch
- autoCreatePR (boolean, optional): let Cursor auto-create a PR
- skipReviewerRequest (boolean, optional): suppress reviewer request on auto-created PRs
- instructionsFilePath (string, optional): agent instructions file prepended to the prompt
- promptTemplate (string, optional): heartbeat prompt template
- bootstrapPromptTemplate (string, optional): first-run-only bootstrap prompt template
- model (string, optional): Cursor model id; omit to use the account default
- env.CURSOR_API_KEY (string, required): Cursor API key
- env.* (optional): additional env vars injected into the cloud agent shell

Notes:
- Paperclip reuses the durable Cursor agent across heartbeats when the repo/runtime identity still matches.
- Each Paperclip heartbeat maps to a Cursor run on that durable agent.
- Paperclip injects PAPERCLIP_* runtime env vars into the cloud agent shell through Cursor SDK cloud envVars.
- Paperclip remains the source of truth for issue/task state; Cursor provides the remote execution surface.
`;
|
||||
348
packages/adapters/cursor-cloud/src/server/execute.test.ts
Normal file
@@ -0,0 +1,348 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { AdapterExecutionContext } from "@paperclipai/adapter-utils";
|
||||
import { execute } from "./execute.js";
|
||||
|
||||
/** Options for building a fake Cursor SDK run in tests. */
type MockRunOptions = {
  id?: string;
  agentId?: string;
  status?: string;
  // Value resolved by the mock run's `wait()`; also seeds result/model/durationMs/git.
  waitResult?: Record<string, unknown>;
  // Messages yielded by the mock run's `stream()` before an optional throw.
  streamMessages?: unknown[];
  streamError?: Error | null;
};

/** Options for building a fake Cursor SDK agent handle in tests. */
type MockAgentOptions = {
  agentId?: string;
  // Run returned by the mock agent's `send()`.
  sendRun?: ReturnType<typeof createMockRun>;
};

// vi.hoisted so these spies exist before the vi.mock factory below runs
// (vi.mock calls are hoisted above imports by vitest).
const { createMock, resumeMock, getRunMock } = vi.hoisted(() => ({
  createMock: vi.fn(),
  resumeMock: vi.fn(),
  getRunMock: vi.fn(),
}));

// Replace the real Cursor SDK entry points with the hoisted spies.
vi.mock("@cursor/sdk", () => ({
  Agent: {
    create: createMock,
    resume: resumeMock,
    getRun: getRunMock,
  },
}));
|
||||
|
||||
function createMockRun(options: MockRunOptions = {}) {
|
||||
const runId = options.id ?? "run-123";
|
||||
const agentId = options.agentId ?? "agent-123";
|
||||
const status = options.status ?? "finished";
|
||||
const waitResult = options.waitResult ?? {
|
||||
id: runId,
|
||||
status,
|
||||
result: "Done\nWith detail",
|
||||
model: { id: "gpt-5.4" },
|
||||
durationMs: 1234,
|
||||
};
|
||||
const streamMessages = options.streamMessages ?? [];
|
||||
const streamError = options.streamError ?? null;
|
||||
|
||||
return {
|
||||
id: runId,
|
||||
agentId,
|
||||
status,
|
||||
result: typeof waitResult.result === "string" ? waitResult.result : null,
|
||||
model: waitResult.model ?? null,
|
||||
durationMs: waitResult.durationMs ?? null,
|
||||
git: waitResult.git ?? null,
|
||||
supports(capability: string) {
|
||||
return capability === "stream" || capability === "wait";
|
||||
},
|
||||
async *stream() {
|
||||
for (const message of streamMessages) yield message;
|
||||
if (streamError) throw streamError;
|
||||
},
|
||||
async wait() {
|
||||
return waitResult;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createMockSdkAgent(options: MockAgentOptions = {}) {
|
||||
const sendRun = options.sendRun ?? createMockRun();
|
||||
return {
|
||||
agentId: options.agentId ?? sendRun.agentId,
|
||||
send: vi.fn(async () => sendRun),
|
||||
[Symbol.asyncDispose]: vi.fn(async () => {}),
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Build an AdapterExecutionContext for tests, plus `logs`/`meta` arrays that
 * capture everything the adapter emits through onLog/onMeta.
 *
 * NOTE(review): `overrides` is applied twice — each piece seeds the defaults
 * above, and the whole object is spread again over `base` at the end. Keep
 * that order; later spreads win.
 */
function createContext(
  overrides: Partial<AdapterExecutionContext> = {},
): AdapterExecutionContext & {
  logs: Array<{ stream: "stdout" | "stderr"; chunk: string }>;
  meta: Record<string, unknown>[];
} {
  const logs: Array<{ stream: "stdout" | "stderr"; chunk: string }> = [];
  const meta: Record<string, unknown>[] = [];
  // Default Paperclip agent identity used by most tests.
  const agent = overrides.agent ?? {
    id: "agent-1",
    companyId: "company-1",
    name: "Cursor Cloud Agent",
    adapterType: "cursor_cloud",
    adapterConfig: {},
  };
  // No saved session by default — forces the "fresh agent" path.
  const runtime = overrides.runtime ?? {
    sessionId: null,
    sessionParams: null,
    sessionDisplayId: null,
    taskKey: null,
  };
  // Default adapter config with a Cursor key plus one extra env var the
  // tests use to assert env forwarding.
  const config = overrides.config ?? {
    env: {
      CURSOR_API_KEY: "cursor-secret",
      EXTRA_FLAG: "1",
    },
    repoUrl: "https://github.com/paperclipai/paperclip.git",
    repoStartingRef: "main",
    runtimeEnvType: "cloud",
    promptTemplate: "Do the work for {{agent.name}}",
    model: "gpt-5.4",
  };
  const context = overrides.context ?? {
    taskId: "issue-1",
    issueId: "issue-1",
    wakeReason: "issue_commented",
  };

  const base: AdapterExecutionContext = {
    runId: "run-heartbeat-1",
    agent,
    runtime,
    config,
    context,
    authToken: "paperclip-run-jwt",
    // Capture adapter output so tests can assert on emitted chunks.
    onLog: async (stream, chunk) => {
      logs.push({ stream, chunk });
    },
    onMeta: async (entry) => {
      meta.push(entry as unknown as Record<string, unknown>);
    },
  };

  return {
    ...base,
    ...overrides,
    logs,
    meta,
  };
}
|
||||
|
||||
// Behavioral suite for the cursor_cloud execute() entry point, driven entirely
// through the mocked @cursor/sdk surface (createMock / resumeMock / getRunMock).
describe("cursor_cloud execute", () => {
  // Each test wires its own SDK behavior, so reset the hoisted spies between tests.
  beforeEach(() => {
    createMock.mockReset();
    resumeMock.mockReset();
    getRunMock.mockReset();
  });

  // Fresh session path: no saved runtime state → Agent.create is used, the
  // Paperclip env is injected into cloud envVars, and CURSOR_API_KEY is stripped.
  it("creates a fresh Cursor agent and injects Paperclip env without CURSOR_API_KEY", async () => {
    const run = createMockRun({
      agentId: "agent-fresh",
      streamMessages: [
        {
          type: "assistant",
          message: {
            content: [{ type: "text", text: "Working" }],
          },
        },
      ],
    });
    const sdkAgent = createMockSdkAgent({ agentId: "agent-fresh", sendRun: run });
    createMock.mockResolvedValue(sdkAgent);
    const ctx = createContext();

    const result = await execute(ctx);

    expect(createMock).toHaveBeenCalledTimes(1);
    expect(resumeMock).not.toHaveBeenCalled();
    expect(getRunMock).not.toHaveBeenCalled();
    // Agent.create received the config-derived identity, model and repo target.
    expect(createMock.mock.calls[0]?.[0]).toMatchObject({
      apiKey: "cursor-secret",
      name: "Paperclip Cursor Cloud Agent",
      model: { id: "gpt-5.4" },
      cloud: {
        env: { type: "cloud" },
        repos: [{ url: "https://github.com/paperclipai/paperclip.git", startingRef: "main" }],
      },
    });
    // Paperclip wake metadata flows into the remote shell env…
    expect(createMock.mock.calls[0]?.[0]?.cloud?.envVars).toMatchObject({
      EXTRA_FLAG: "1",
      PAPERCLIP_RUN_ID: "run-heartbeat-1",
      PAPERCLIP_TASK_ID: "issue-1",
      PAPERCLIP_WAKE_REASON: "issue_commented",
      PAPERCLIP_API_KEY: "paperclip-run-jwt",
    });
    // …but the Cursor credential itself must never be forwarded.
    expect(createMock.mock.calls[0]?.[0]?.cloud?.envVars).not.toHaveProperty("CURSOR_API_KEY");

    // Result maps the finished run onto a successful Paperclip execution,
    // persisting the session identity for the next heartbeat.
    expect(result).toMatchObject({
      exitCode: 0,
      errorMessage: null,
      sessionId: "agent-fresh",
      model: "gpt-5.4",
      summary: "Done",
      sessionParams: {
        cursorAgentId: "agent-fresh",
        latestRunId: "run-123",
        runtime: "cloud",
        envType: "cloud",
        repos: [{ url: "https://github.com/paperclipai/paperclip.git", startingRef: "main" }],
      },
    });
    // The structured event stream (init/message/result) is mirrored to onLog.
    expect(ctx.logs.map((entry) => entry.chunk)).toEqual(
      expect.arrayContaining([
        expect.stringContaining('"type":"cursor_cloud.init"'),
        expect.stringContaining('"type":"cursor_cloud.message"'),
        expect.stringContaining('"type":"cursor_cloud.result"'),
      ]),
    );
  });

  // Saved-session path: prior run already finished → Agent.resume, not create.
  it("resumes a matching saved session when no active run can be reattached", async () => {
    getRunMock.mockResolvedValue(createMockRun({ status: "finished" }));
    const resumedRun = createMockRun({ id: "run-resumed", agentId: "agent-resumed" });
    const sdkAgent = createMockSdkAgent({ agentId: "agent-resumed", sendRun: resumedRun });
    resumeMock.mockResolvedValue(sdkAgent);
    const ctx = createContext({
      runtime: {
        sessionId: null,
        sessionDisplayId: "agent-previous",
        taskKey: null,
        sessionParams: {
          cursorAgentId: "agent-previous",
          latestRunId: "run-previous",
          runtime: "cloud",
          envType: "cloud",
          repos: [{ url: "https://github.com/paperclipai/paperclip.git", startingRef: "main" }],
        },
      },
    });

    const result = await execute(ctx);

    // Reattach probe used the saved run/agent identity and credential.
    expect(getRunMock).toHaveBeenCalledWith("run-previous", {
      runtime: "cloud",
      agentId: "agent-previous",
      apiKey: "cursor-secret",
    });
    expect(resumeMock).toHaveBeenCalledTimes(1);
    expect(createMock).not.toHaveBeenCalled();
    expect(sdkAgent.send).toHaveBeenCalledTimes(1);
    expect(result.sessionId).toBe("agent-resumed");
  });

  // Reattach path: the saved run is still running → drain it first, then send
  // the heartbeat prompt as a follow-up run on the same durable agent.
  it("reattaches to an active run, drains it, then sends the heartbeat as a follow-up", async () => {
    const attachedRun = createMockRun({
      id: "run-attached",
      agentId: "agent-attached",
      status: "running",
      waitResult: {
        id: "run-attached",
        status: "finished",
        result: "Prior result",
        model: { id: "gpt-5.4" },
      },
      streamMessages: [
        {
          type: "status",
          status: "running",
          message: "Still working",
        },
      ],
    });
    getRunMock.mockResolvedValue(attachedRun);
    const followUpRun = createMockRun({
      id: "run-followup",
      agentId: "agent-attached",
      waitResult: {
        id: "run-followup",
        status: "finished",
        result: "Follow-up result",
        model: { id: "gpt-5.4" },
      },
    });
    const sdkAgent = createMockSdkAgent({ agentId: "agent-attached", sendRun: followUpRun });
    resumeMock.mockResolvedValue(sdkAgent);
    const ctx = createContext({
      runtime: {
        sessionId: null,
        sessionDisplayId: "agent-attached",
        taskKey: null,
        sessionParams: {
          cursorAgentId: "agent-attached",
          latestRunId: "run-attached",
          runtime: "cloud",
          envType: "cloud",
          repos: [{ url: "https://github.com/paperclipai/paperclip.git", startingRef: "main" }],
        },
      },
    });

    const result = await execute(ctx);

    expect(getRunMock).toHaveBeenCalledTimes(1);
    expect(createMock).not.toHaveBeenCalled();
    expect(resumeMock).toHaveBeenCalledTimes(1);
    expect(sdkAgent.send).toHaveBeenCalledTimes(1);
    // The overall result reflects the follow-up run, not the drained prior run.
    expect(result).toMatchObject({
      exitCode: 0,
      sessionId: "agent-attached",
      summary: "Follow-up result",
      resultJson: {
        cursorRunId: "run-followup",
      },
    });
    const logChunks = ctx.logs.map((entry) => entry.chunk);
    // Both the drained run and the follow-up run must be narrated in the logs.
    expect(logChunks).toEqual(
      expect.arrayContaining([
        expect.stringContaining("Reattached to existing Cursor run run-attached."),
        expect.stringContaining("Prior Cursor run run-attached finished"),
        expect.stringContaining("Started Cursor run run-followup."),
        expect.stringContaining('"runId":"run-attached"'),
        expect.stringContaining('"runId":"run-followup"'),
      ]),
    );
    // Invocation meta records that the saved session was reusable.
    expect(ctx.meta[0]?.context).toMatchObject({
      cursorCloud: {
        canReuseSession: true,
        repoUrl: "https://github.com/paperclipai/paperclip.git",
      },
    });
  });

  // Failure mapping: any non-"finished" Cursor status becomes a failing
  // Paperclip run with a descriptive error message.
  it("maps non-finished Cursor results to failing Paperclip runs", async () => {
    const cancelledRun = createMockRun({
      id: "run-cancelled",
      agentId: "agent-cancelled",
      status: "cancelled",
      waitResult: {
        id: "run-cancelled",
        status: "cancelled",
        result: "",
        model: { id: "gpt-5.4" },
      },
    });
    const sdkAgent = createMockSdkAgent({ agentId: "agent-cancelled", sendRun: cancelledRun });
    createMock.mockResolvedValue(sdkAgent);
    const ctx = createContext();

    const result = await execute(ctx);

    expect(result).toMatchObject({
      exitCode: 1,
      errorMessage: "Cursor run cancelled",
      sessionId: "agent-cancelled",
      resultJson: {
        status: "cancelled",
        cursorAgentId: "agent-cancelled",
        cursorRunId: "run-cancelled",
      },
    });
  });
});
|
||||
607
packages/adapters/cursor-cloud/src/server/execute.ts
Normal file
@@ -0,0 +1,607 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import {
|
||||
Agent,
|
||||
type AgentOptions,
|
||||
type ModelSelection,
|
||||
type Run,
|
||||
type RunResult,
|
||||
type SDKAgent,
|
||||
type SDKMessage,
|
||||
} from "@cursor/sdk";
|
||||
import type { AdapterExecutionContext, AdapterExecutionResult, AdapterInvocationMeta } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
asBoolean,
|
||||
asString,
|
||||
buildPaperclipEnv,
|
||||
joinPromptSections,
|
||||
parseObject,
|
||||
readPaperclipIssueWorkModeFromContext,
|
||||
renderPaperclipWakePrompt,
|
||||
renderTemplate,
|
||||
stringifyPaperclipWakePayload,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
/**
 * Durable Cursor Cloud session identity persisted in Paperclip's
 * runtime.sessionParams between heartbeats. `sessionMatches` compares this
 * against the current config to decide whether the agent can be reused.
 */
type CursorCloudSession = {
  // Cursor-side agent id (Agent.resume target).
  cursorAgentId: string;
  // Most recent Cursor run id; used by getAttachedRun to probe for a live run.
  latestRunId?: string;
  runtime: "cloud";
  envType?: "cloud" | "pool" | "machine";
  envName?: string;
  repos: Array<{ url: string; startingRef?: string; prUrl?: string }>;
};

/**
 * Structured JSONL events this adapter writes to stdout (one per line via
 * `eventLine`). Consumed by the UI/CLI stdout parsers.
 */
type CursorCloudEvent =
  | { type: "cursor_cloud.init"; sessionId: string; agentId: string; runId?: string; model?: string }
  | { type: "cursor_cloud.status"; status: string; message?: string }
  | { type: "cursor_cloud.message"; message: SDKMessage }
  | {
      type: "cursor_cloud.result";
      status: string;
      result?: string;
      model?: string;
      durationMs?: number;
      git?: unknown;
      error?: string;
    };
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function asStringEnvMap(value: unknown): Record<string, string> {
|
||||
const parsed = parseObject(value);
|
||||
const env: Record<string, string> = {};
|
||||
for (const [key, entry] of Object.entries(parsed)) {
|
||||
if (typeof entry === "string") {
|
||||
env[key] = entry;
|
||||
} else if (typeof entry === "object" && entry !== null && !Array.isArray(entry)) {
|
||||
const rec = entry as Record<string, unknown>;
|
||||
if (rec.type === "plain" && typeof rec.value === "string") env[key] = rec.value;
|
||||
}
|
||||
}
|
||||
return env;
|
||||
}
|
||||
|
||||
function normalizeEnvType(raw: string): "cloud" | "pool" | "machine" {
|
||||
const value = raw.trim().toLowerCase();
|
||||
if (value === "pool" || value === "machine") return value;
|
||||
return "cloud";
|
||||
}
|
||||
|
||||
function trimNullable(value: unknown): string | null {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
function firstNonEmptyLine(text: string): string {
|
||||
return (
|
||||
text
|
||||
.split(/\r?\n/)
|
||||
.map((line) => line.trim())
|
||||
.find(Boolean) ?? ""
|
||||
);
|
||||
}
|
||||
|
||||
function toModelSelection(rawModel: string): ModelSelection | undefined {
|
||||
const model = rawModel.trim();
|
||||
return model ? { id: model } : undefined;
|
||||
}
|
||||
|
||||
function toSummary(result: RunResult): string | null {
|
||||
const direct = trimNullable(result.result);
|
||||
if (direct) return firstNonEmptyLine(direct);
|
||||
return null;
|
||||
}
|
||||
|
||||
function formatRunError(err: unknown): string {
|
||||
if (err instanceof Error && err.message.trim().length > 0) return err.message.trim();
|
||||
return String(err);
|
||||
}
|
||||
|
||||
/**
 * Assemble the env-var map injected into the Cursor cloud agent shell.
 *
 * Layering (later wins): adapter-config env → buildPaperclipEnv(agent) →
 * PAPERCLIP_RUN_ID → wake-specific vars → workspace vars. CURSOR_API_KEY is
 * stripped at the end so the Cursor credential never reaches the remote shell.
 *
 * @param ctx       Current adapter execution context (run, agent, wake context).
 * @param configEnv Already-parsed string env map from adapterConfig.
 * @returns Plain string map suitable for Cursor SDK cloud envVars.
 */
function buildWakeEnv(ctx: AdapterExecutionContext, configEnv: Record<string, string>): Record<string, string> {
  const { runId, agent, context, authToken } = ctx;
  const env: Record<string, string> = {
    ...configEnv,
    ...buildPaperclipEnv(agent),
    PAPERCLIP_RUN_ID: runId,
  };

  // Wake metadata: taskId falls back to issueId; comment id has two legacy keys.
  const wakeTaskId = trimNullable(context.taskId) ?? trimNullable(context.issueId);
  const wakeReason = trimNullable(context.wakeReason);
  const wakeCommentId = trimNullable(context.wakeCommentId) ?? trimNullable(context.commentId);
  const approvalId = trimNullable(context.approvalId);
  const approvalStatus = trimNullable(context.approvalStatus);
  const linkedIssueIds = Array.isArray(context.issueIds)
    ? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
    : [];
  const wakePayloadJson = stringifyPaperclipWakePayload(context.paperclipWake);
  const issueWorkMode = readPaperclipIssueWorkModeFromContext(context);

  // Only set keys that actually have values — absent vars stay absent remotely.
  if (wakeTaskId) env.PAPERCLIP_TASK_ID = wakeTaskId;
  if (wakeReason) env.PAPERCLIP_WAKE_REASON = wakeReason;
  if (wakeCommentId) env.PAPERCLIP_WAKE_COMMENT_ID = wakeCommentId;
  if (approvalId) env.PAPERCLIP_APPROVAL_ID = approvalId;
  if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
  if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
  if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
  if (issueWorkMode) env.PAPERCLIP_ISSUE_WORK_MODE = issueWorkMode;
  // Config-provided PAPERCLIP_API_KEY wins; otherwise fall back to the run JWT.
  if (!trimNullable(env.PAPERCLIP_API_KEY) && authToken) {
    env.PAPERCLIP_API_KEY = authToken;
  }

  // Forward workspace identity (cwd, repo, branch, worktree) when present.
  const workspace = parseObject(context.paperclipWorkspace);
  const workspaceMappings: Array<[string, unknown]> = [
    ["PAPERCLIP_WORKSPACE_CWD", workspace.cwd],
    ["PAPERCLIP_WORKSPACE_SOURCE", workspace.source],
    ["PAPERCLIP_WORKSPACE_ID", workspace.workspaceId],
    ["PAPERCLIP_WORKSPACE_REPO_URL", workspace.repoUrl],
    ["PAPERCLIP_WORKSPACE_REPO_REF", workspace.repoRef],
    ["PAPERCLIP_WORKSPACE_BRANCH", workspace.branch],
    ["PAPERCLIP_WORKSPACE_WORKTREE_PATH", workspace.worktreePath],
    ["AGENT_HOME", workspace.agentHome],
  ];
  for (const [key, value] of workspaceMappings) {
    const normalized = trimNullable(value);
    if (normalized) env[key] = normalized;
  }

  // Never ship the Cursor credential into the remote shell.
  delete env.CURSOR_API_KEY;
  return env;
}
|
||||
|
||||
/**
 * Load the optional agent-instructions file and turn it into a prompt prefix.
 *
 * Best-effort: a missing/unreadable file logs a warning to stderr and returns
 * an empty prefix with an explanatory note, never throws.
 *
 * @param config Adapter config (reads `instructionsFilePath`).
 * @param onLog  Log sink used for the unreadable-file warning.
 * @returns prefix text, human-readable notes for invocation meta, and prefix length.
 */
async function buildInstructionsPrefix(
  config: Record<string, unknown>,
  onLog: AdapterExecutionContext["onLog"],
): Promise<{ prefix: string; notes: string[]; chars: number }> {
  const instructionsFilePath = asString(config.instructionsFilePath, "").trim();
  if (!instructionsFilePath) {
    return { prefix: "", notes: [], chars: 0 };
  }

  try {
    const contents = await fs.readFile(instructionsFilePath, "utf8");
    // Trailing slash so the directive reads as a directory path.
    const instructionsDir = `${path.dirname(instructionsFilePath)}/`;
    const prefix = `${contents.trim()}\n\nThe above agent instructions were loaded from ${instructionsFilePath}. Resolve any relative file references from ${instructionsDir}.\n`;
    return {
      prefix,
      chars: prefix.length,
      notes: [
        `Loaded agent instructions from ${instructionsFilePath}`,
        `Prepended instructions + path directive to prompt (relative references from ${instructionsDir}).`,
      ],
    };
  } catch (err) {
    // Read failure is non-fatal: warn and continue without instructions.
    const reason = err instanceof Error ? err.message : String(err);
    await onLog(
      "stderr",
      `[paperclip] Warning: could not read agent instructions file "${instructionsFilePath}": ${reason}\n`,
    );
    return {
      prefix: "",
      chars: 0,
      notes: [
        `Configured instructionsFilePath ${instructionsFilePath}, but file could not be read; continuing without injected instructions.`,
      ],
    };
  }
}
|
||||
|
||||
function renderPaperclipEnvNote(env: Record<string, string>): string {
|
||||
const keys = Object.keys(env)
|
||||
.filter((key) => key.startsWith("PAPERCLIP_"))
|
||||
.sort();
|
||||
if (keys.length === 0) return "";
|
||||
return [
|
||||
"Paperclip runtime note:",
|
||||
`The following PAPERCLIP_* environment variables are available in the cloud agent shell: ${keys.join(", ")}`,
|
||||
"Use them directly instead of assuming they are absent.",
|
||||
].join("\n");
|
||||
}
|
||||
|
||||
function readSession(params: Record<string, unknown> | null): CursorCloudSession | null {
|
||||
if (!params) return null;
|
||||
const record = asRecord(params);
|
||||
if (!record) return null;
|
||||
const cursorAgentId =
|
||||
trimNullable(record.cursorAgentId) ??
|
||||
trimNullable(record.agentId) ??
|
||||
trimNullable(record.sessionId);
|
||||
if (!cursorAgentId) return null;
|
||||
const latestRunId = trimNullable(record.latestRunId) ?? trimNullable(record.runId) ?? undefined;
|
||||
const envType = trimNullable(record.envType);
|
||||
const envName = trimNullable(record.envName);
|
||||
const reposValue = Array.isArray(record.repos) ? record.repos : [];
|
||||
const repos = reposValue
|
||||
.map((entry) => asRecord(entry))
|
||||
.filter((entry): entry is Record<string, unknown> => Boolean(entry))
|
||||
.map((entry) => ({
|
||||
url: asString(entry.url, "").trim(),
|
||||
startingRef: trimNullable(entry.startingRef) ?? undefined,
|
||||
prUrl: trimNullable(entry.prUrl) ?? undefined,
|
||||
}))
|
||||
.filter((entry) => entry.url.length > 0);
|
||||
return {
|
||||
cursorAgentId,
|
||||
...(latestRunId ? { latestRunId } : {}),
|
||||
runtime: "cloud",
|
||||
...(envType ? { envType: normalizeEnvType(envType) } : {}),
|
||||
...(envName ? { envName } : {}),
|
||||
repos,
|
||||
};
|
||||
}
|
||||
|
||||
function sessionMatches(
|
||||
session: CursorCloudSession | null,
|
||||
envType: "cloud" | "pool" | "machine",
|
||||
envName: string | null,
|
||||
repos: Array<{ url: string; startingRef?: string; prUrl?: string }>,
|
||||
): boolean {
|
||||
if (!session) return false;
|
||||
if ((session.envType ?? "cloud") !== envType) return false;
|
||||
if ((session.envName ?? null) !== envName) return false;
|
||||
if (session.repos.length !== repos.length) return false;
|
||||
return session.repos.every((repo, index) => {
|
||||
const next = repos[index];
|
||||
return repo.url === next.url
|
||||
&& (repo.startingRef ?? null) === (next.startingRef ?? null)
|
||||
&& (repo.prUrl ?? null) === (next.prUrl ?? null);
|
||||
});
|
||||
}
|
||||
|
||||
function buildAgentOptions(input: {
|
||||
apiKey: string;
|
||||
name: string;
|
||||
model?: ModelSelection;
|
||||
envType: "cloud" | "pool" | "machine";
|
||||
envName: string | null;
|
||||
repos: Array<{ url: string; startingRef?: string; prUrl?: string }>;
|
||||
workOnCurrentBranch: boolean;
|
||||
autoCreatePR: boolean;
|
||||
skipReviewerRequest: boolean;
|
||||
envVars: Record<string, string>;
|
||||
}): AgentOptions {
|
||||
return {
|
||||
apiKey: input.apiKey,
|
||||
name: input.name,
|
||||
...(input.model ? { model: input.model } : {}),
|
||||
cloud: {
|
||||
env: {
|
||||
type: input.envType,
|
||||
...(input.envName ? { name: input.envName } : {}),
|
||||
},
|
||||
repos: input.repos,
|
||||
workOnCurrentBranch: input.workOnCurrentBranch,
|
||||
autoCreatePR: input.autoCreatePR,
|
||||
skipReviewerRequest: input.skipReviewerRequest,
|
||||
envVars: input.envVars,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function eventLine(event: CursorCloudEvent): string {
|
||||
return `${JSON.stringify(event)}\n`;
|
||||
}
|
||||
|
||||
async function emitMessage(onLog: AdapterExecutionContext["onLog"], message: SDKMessage) {
|
||||
await onLog("stdout", eventLine({ type: "cursor_cloud.message", message }));
|
||||
}
|
||||
|
||||
async function emitStatus(onLog: AdapterExecutionContext["onLog"], status: string, message?: string) {
|
||||
await onLog("stdout", eventLine({ type: "cursor_cloud.status", status, ...(message ? { message } : {}) }));
|
||||
}
|
||||
|
||||
async function streamRun(run: Run, onLog: AdapterExecutionContext["onLog"]) {
|
||||
if (!run.supports("stream")) return;
|
||||
for await (const message of run.stream()) {
|
||||
await emitMessage(onLog, message);
|
||||
}
|
||||
}
|
||||
|
||||
async function getAttachedRun(input: {
|
||||
apiKey: string;
|
||||
session: CursorCloudSession | null;
|
||||
}): Promise<Run | null> {
|
||||
const latestRunId = input.session?.latestRunId;
|
||||
const cursorAgentId = input.session?.cursorAgentId;
|
||||
if (!latestRunId || !cursorAgentId) return null;
|
||||
try {
|
||||
const run = await Agent.getRun(latestRunId, {
|
||||
runtime: "cloud",
|
||||
agentId: cursorAgentId,
|
||||
apiKey: input.apiKey,
|
||||
});
|
||||
return run.status === "running" ? run : null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExecutionResult> {
|
||||
const { runId, agent, runtime, config, context, onLog, onMeta } = ctx;
|
||||
const envConfig = asStringEnvMap(config.env);
|
||||
const apiKey = asString(envConfig.CURSOR_API_KEY, "").trim();
|
||||
if (!apiKey) {
|
||||
return {
|
||||
exitCode: 1,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
errorMessage: "CURSOR_API_KEY is required for cursor_cloud.",
|
||||
provider: "cursor",
|
||||
biller: "cursor",
|
||||
billingType: "api",
|
||||
clearSession: false,
|
||||
};
|
||||
}
|
||||
|
||||
const workspace = parseObject(context.paperclipWorkspace);
|
||||
const repoUrl =
|
||||
asString(config.repoUrl, "").trim() ||
|
||||
asString(workspace.repoUrl, "").trim();
|
||||
if (!repoUrl) {
|
||||
return {
|
||||
exitCode: 1,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
errorMessage: "cursor_cloud requires repoUrl in adapterConfig or workspace context.",
|
||||
provider: "cursor",
|
||||
biller: "cursor",
|
||||
billingType: "api",
|
||||
clearSession: false,
|
||||
};
|
||||
}
|
||||
|
||||
const repoStartingRef =
|
||||
trimNullable(config.repoStartingRef) ??
|
||||
trimNullable(workspace.repoRef) ??
|
||||
undefined;
|
||||
const repoPullRequestUrl = trimNullable(config.repoPullRequestUrl) ?? undefined;
|
||||
const envType = normalizeEnvType(asString(config.runtimeEnvType, "cloud"));
|
||||
const envName = trimNullable(config.runtimeEnvName);
|
||||
const workOnCurrentBranch = asBoolean(config.workOnCurrentBranch, false);
|
||||
const autoCreatePR = asBoolean(config.autoCreatePR, false);
|
||||
const skipReviewerRequest = asBoolean(config.skipReviewerRequest, false);
|
||||
const model = toModelSelection(asString(config.model, ""));
|
||||
const repos = [{
|
||||
url: repoUrl,
|
||||
...(repoStartingRef ? { startingRef: repoStartingRef } : {}),
|
||||
...(repoPullRequestUrl ? { prUrl: repoPullRequestUrl } : {}),
|
||||
}];
|
||||
const remoteEnv = buildWakeEnv(ctx, envConfig);
|
||||
const session = readSession(runtime.sessionParams) ?? (runtime.sessionId
|
||||
? {
|
||||
cursorAgentId: runtime.sessionId,
|
||||
runtime: "cloud" as const,
|
||||
repos,
|
||||
}
|
||||
: null);
|
||||
const canReuseSession = sessionMatches(session, envType, envName, repos);
|
||||
const promptTemplate = asString(config.promptTemplate, DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE);
|
||||
const bootstrapPromptTemplate = asString(config.bootstrapPromptTemplate, "");
|
||||
const templateData = {
|
||||
agentId: agent.id,
|
||||
companyId: agent.companyId,
|
||||
runId,
|
||||
company: { id: agent.companyId },
|
||||
agent,
|
||||
run: { id: runId, source: "on_demand" },
|
||||
context,
|
||||
};
|
||||
const instructions = await buildInstructionsPrefix(config, onLog);
|
||||
const wakePrompt = renderPaperclipWakePrompt(context.paperclipWake, { resumedSession: canReuseSession });
|
||||
const renderedBootstrapPrompt =
|
||||
!canReuseSession && bootstrapPromptTemplate.trim().length > 0
|
||||
? renderTemplate(bootstrapPromptTemplate, templateData).trim()
|
||||
: "";
|
||||
const renderedPrompt =
|
||||
canReuseSession && wakePrompt.length > 0
|
||||
? ""
|
||||
: renderTemplate(promptTemplate, templateData).trim();
|
||||
const paperclipEnvNote = renderPaperclipEnvNote(remoteEnv);
|
||||
const prompt = joinPromptSections([
|
||||
instructions.prefix,
|
||||
renderedBootstrapPrompt,
|
||||
wakePrompt,
|
||||
paperclipEnvNote,
|
||||
renderedPrompt,
|
||||
]);
|
||||
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
|
||||
const finalPrompt = joinPromptSections([prompt, sessionHandoffNote]);
|
||||
|
||||
const agentOptions = buildAgentOptions({
|
||||
apiKey,
|
||||
name: `Paperclip ${agent.name}`,
|
||||
model,
|
||||
envType,
|
||||
envName,
|
||||
repos,
|
||||
workOnCurrentBranch,
|
||||
autoCreatePR,
|
||||
skipReviewerRequest,
|
||||
envVars: remoteEnv,
|
||||
});
|
||||
|
||||
const commandNotes = [
|
||||
...instructions.notes,
|
||||
canReuseSession
|
||||
? `Reusing Cursor cloud agent session ${session?.cursorAgentId ?? "unknown"}`
|
||||
: "Creating a new Cursor cloud agent session",
|
||||
`Repository: ${repoUrl}${repoStartingRef ? ` @ ${repoStartingRef}` : ""}`,
|
||||
`Runtime target: ${envType}${envName ? ` (${envName})` : ""}`,
|
||||
];
|
||||
|
||||
if (onMeta) {
|
||||
const meta: AdapterInvocationMeta = {
|
||||
adapterType: "cursor_cloud",
|
||||
command: "@cursor/sdk",
|
||||
commandNotes,
|
||||
prompt: finalPrompt,
|
||||
promptMetrics: {
|
||||
promptChars: finalPrompt.length,
|
||||
instructionsChars: instructions.chars,
|
||||
bootstrapPromptChars: renderedBootstrapPrompt.length,
|
||||
wakePromptChars: wakePrompt.length,
|
||||
heartbeatPromptChars: renderedPrompt.length,
|
||||
},
|
||||
context: {
|
||||
cursorCloud: {
|
||||
envType,
|
||||
envName,
|
||||
repoUrl,
|
||||
repoStartingRef,
|
||||
repoPullRequestUrl,
|
||||
canReuseSession,
|
||||
},
|
||||
},
|
||||
};
|
||||
await onMeta(meta);
|
||||
}
|
||||
|
||||
let sdkAgent: SDKAgent | null = null;
|
||||
let run: Run | null = null;
|
||||
let streamError: string | null = null;
|
||||
try {
|
||||
const attachedRun = canReuseSession
|
||||
? await getAttachedRun({ apiKey, session })
|
||||
: null;
|
||||
|
||||
if (attachedRun) {
|
||||
await emitStatus(onLog, "running", `Reattached to existing Cursor run ${attachedRun.id}.`);
|
||||
await onLog("stdout", eventLine({
|
||||
type: "cursor_cloud.init",
|
||||
sessionId: attachedRun.agentId,
|
||||
agentId: attachedRun.agentId,
|
||||
runId: attachedRun.id,
|
||||
...(model?.id ? { model: model.id } : {}),
|
||||
}));
|
||||
const priorStreamPromise = streamRun(attachedRun, onLog).catch((err) => {
|
||||
streamError = formatRunError(err);
|
||||
});
|
||||
if (attachedRun.supports("wait")) await attachedRun.wait();
|
||||
await priorStreamPromise;
|
||||
streamError = null;
|
||||
await emitStatus(
|
||||
onLog,
|
||||
"running",
|
||||
`Prior Cursor run ${attachedRun.id} finished; sending heartbeat follow-up so this wake's context is not dropped.`,
|
||||
);
|
||||
}
|
||||
|
||||
sdkAgent = canReuseSession && session
|
||||
? await Agent.resume(session.cursorAgentId, agentOptions)
|
||||
: await Agent.create(agentOptions);
|
||||
run = await sdkAgent.send(finalPrompt, {
|
||||
...(model ? { model } : {}),
|
||||
});
|
||||
await onLog("stdout", eventLine({
|
||||
type: "cursor_cloud.init",
|
||||
sessionId: sdkAgent.agentId,
|
||||
agentId: sdkAgent.agentId,
|
||||
runId: run.id,
|
||||
...(model?.id ? { model: model.id } : {}),
|
||||
}));
|
||||
await emitStatus(onLog, "running", `Started Cursor run ${run.id}.`);
|
||||
|
||||
const streamPromise = streamRun(run, onLog).catch((err) => {
|
||||
streamError = formatRunError(err);
|
||||
});
|
||||
const result = run.supports("wait")
|
||||
? await run.wait()
|
||||
: {
|
||||
id: run.id,
|
||||
status: run.status === "running" ? "error" : run.status,
|
||||
result: run.result,
|
||||
model: run.model,
|
||||
durationMs: run.durationMs,
|
||||
git: run.git,
|
||||
};
|
||||
await streamPromise;
|
||||
|
||||
const modelId = result.model?.id ?? model?.id ?? null;
|
||||
await onLog("stdout", eventLine({
|
||||
type: "cursor_cloud.result",
|
||||
status: result.status,
|
||||
...(result.result ? { result: result.result } : {}),
|
||||
...(modelId ? { model: modelId } : {}),
|
||||
...(typeof result.durationMs === "number" ? { durationMs: result.durationMs } : {}),
|
||||
...(result.git ? { git: result.git } : {}),
|
||||
...(streamError ? { error: streamError } : {}),
|
||||
}));
|
||||
|
||||
const nextSession: CursorCloudSession = {
|
||||
cursorAgentId: run.agentId,
|
||||
latestRunId: result.id,
|
||||
runtime: "cloud",
|
||||
envType,
|
||||
...(envName ? { envName } : {}),
|
||||
repos,
|
||||
};
|
||||
const isError = result.status !== "finished";
|
||||
return {
|
||||
exitCode: isError ? 1 : 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
errorMessage: isError ? (trimNullable(result.result) ?? streamError ?? `Cursor run ${result.status}`) : null,
|
||||
sessionId: run.agentId,
|
||||
sessionDisplayId: run.agentId,
|
||||
sessionParams: nextSession,
|
||||
provider: "cursor",
|
||||
biller: "cursor",
|
||||
billingType: "api",
|
||||
model: modelId,
|
||||
costUsd: null,
|
||||
summary: toSummary(result),
|
||||
resultJson: {
|
||||
status: result.status,
|
||||
cursorAgentId: run.agentId,
|
||||
cursorRunId: result.id,
|
||||
envType,
|
||||
envName,
|
||||
repos,
|
||||
...(result.result ? { result: result.result } : {}),
|
||||
...(result.git ? { git: result.git } : {}),
|
||||
...(typeof result.durationMs === "number" ? { durationMs: result.durationMs } : {}),
|
||||
...(streamError ? { streamError } : {}),
|
||||
},
|
||||
clearSession: false,
|
||||
};
|
||||
} catch (err) {
|
||||
const reason = formatRunError(err);
|
||||
if (run) {
|
||||
await onLog("stdout", eventLine({
|
||||
type: "cursor_cloud.result",
|
||||
status: "error",
|
||||
error: reason,
|
||||
}));
|
||||
}
|
||||
return {
|
||||
exitCode: 1,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
errorMessage: reason,
|
||||
sessionId: session?.cursorAgentId ?? null,
|
||||
sessionDisplayId: session?.cursorAgentId ?? null,
|
||||
sessionParams: session,
|
||||
provider: "cursor",
|
||||
biller: "cursor",
|
||||
billingType: "api",
|
||||
costUsd: null,
|
||||
clearSession: false,
|
||||
resultJson: {
|
||||
status: "error",
|
||||
...(run ? { cursorRunId: run.id } : {}),
|
||||
...(session?.cursorAgentId ? { cursorAgentId: session.cursorAgentId } : {}),
|
||||
error: reason,
|
||||
},
|
||||
};
|
||||
} finally {
|
||||
if (sdkAgent) {
|
||||
try {
|
||||
await sdkAgent[Symbol.asyncDispose]();
|
||||
} catch {
|
||||
// Best effort only.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
70
packages/adapters/cursor-cloud/src/server/index.ts
Normal file
@@ -0,0 +1,70 @@
|
||||
export { execute } from "./execute.js";
|
||||
export { testEnvironment } from "./test.js";
|
||||
export { sessionCodec } from "./session.js";
|
||||
|
||||
import type { AdapterConfigSchema } from "@paperclipai/adapter-utils";
|
||||
|
||||
export function getConfigSchema(): AdapterConfigSchema {
|
||||
return {
|
||||
fields: [
|
||||
{
|
||||
key: "repoUrl",
|
||||
label: "Repository URL",
|
||||
type: "text",
|
||||
required: true,
|
||||
hint: "Git repository URL Cursor should open for this agent.",
|
||||
},
|
||||
{
|
||||
key: "repoStartingRef",
|
||||
label: "Starting ref",
|
||||
type: "text",
|
||||
hint: "Optional branch, tag, or SHA Cursor should start from.",
|
||||
},
|
||||
{
|
||||
key: "repoPullRequestUrl",
|
||||
label: "Pull request URL",
|
||||
type: "text",
|
||||
hint: "Optional PR URL when attaching the agent to an existing review branch.",
|
||||
},
|
||||
{
|
||||
key: "runtimeEnvType",
|
||||
label: "Cursor runtime",
|
||||
type: "select",
|
||||
default: "cloud",
|
||||
options: [
|
||||
{ value: "cloud", label: "Cursor hosted" },
|
||||
{ value: "pool", label: "Self-hosted pool" },
|
||||
{ value: "machine", label: "Named machine" },
|
||||
],
|
||||
hint: "Choose where Cursor should execute the remote agent.",
|
||||
},
|
||||
{
|
||||
key: "runtimeEnvName",
|
||||
label: "Runtime name",
|
||||
type: "text",
|
||||
hint: "Optional pool or machine name when targeting a non-default runtime.",
|
||||
},
|
||||
{
|
||||
key: "workOnCurrentBranch",
|
||||
label: "Work on current branch",
|
||||
type: "toggle",
|
||||
default: false,
|
||||
hint: "Tell Cursor to continue on the current branch instead of making a new one.",
|
||||
},
|
||||
{
|
||||
key: "autoCreatePR",
|
||||
label: "Auto-create PR",
|
||||
type: "toggle",
|
||||
default: false,
|
||||
hint: "Allow Cursor to automatically create a pull request for the work.",
|
||||
},
|
||||
{
|
||||
key: "skipReviewerRequest",
|
||||
label: "Skip reviewer request",
|
||||
type: "toggle",
|
||||
default: false,
|
||||
hint: "Suppress reviewer requests on auto-created pull requests.",
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
31
packages/adapters/cursor-cloud/src/server/session.test.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { sessionCodec } from "./session.js";
|
||||
|
||||
// Unit tests for the cursor-cloud session codec (vitest).
describe("cursorCloud sessionCodec", () => {
  // Legacy payloads stored the agent/run ids under `agentId`/`runId`;
  // deserialize must map them to the canonical `cursorAgentId`/`latestRunId`
  // keys and default the runtime to "cloud" when absent.
  it("normalizes legacy and current session identifiers", () => {
    expect(
      sessionCodec.deserialize({
        agentId: "agent-123",
        runId: "run-456",
        envType: "pool",
        envName: "trusted",
        repos: [{ url: "https://github.com/paperclipai/paperclip.git", startingRef: "main" }],
      }),
    ).toEqual({
      cursorAgentId: "agent-123",
      latestRunId: "run-456",
      runtime: "cloud",
      envType: "pool",
      envName: "trusted",
      repos: [{ url: "https://github.com/paperclipai/paperclip.git", startingRef: "main" }],
    });
  });

  // A payload with no recoverable agent identifier is unusable and must be
  // dropped (null); the display id is the normalized cursorAgentId.
  it("drops invalid session payloads and exposes the display id", () => {
    expect(sessionCodec.deserialize({ latestRunId: "run-1" })).toBeNull();
    expect(sessionCodec.getDisplayId?.({
      cursorAgentId: "agent-789",
      latestRunId: "run-101",
    })).toBe("agent-789");
  });
});
|
||||
61
packages/adapters/cursor-cloud/src/server/session.ts
Normal file
@@ -0,0 +1,61 @@
|
||||
import type { AdapterSessionCodec } from "@paperclipai/adapter-utils";
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function readString(value: unknown): string | null {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
function readRepos(value: unknown): Array<{ url: string; startingRef?: string; prUrl?: string }> {
|
||||
if (!Array.isArray(value)) return [];
|
||||
const repos: Array<{ url: string; startingRef?: string; prUrl?: string }> = [];
|
||||
for (const entry of value) {
|
||||
const repo = asRecord(entry);
|
||||
if (!repo) continue;
|
||||
const url = readString(repo.url);
|
||||
if (!url) continue;
|
||||
const startingRef = readString(repo.startingRef);
|
||||
const prUrl = readString(repo.prUrl);
|
||||
repos.push({
|
||||
url,
|
||||
...(startingRef ? { startingRef } : {}),
|
||||
...(prUrl ? { prUrl } : {}),
|
||||
});
|
||||
}
|
||||
return repos;
|
||||
}
|
||||
|
||||
function normalize(raw: unknown): Record<string, unknown> | null {
|
||||
const record = asRecord(raw);
|
||||
if (!record) return null;
|
||||
const cursorAgentId =
|
||||
readString(record.cursorAgentId) ??
|
||||
readString(record.agentId) ??
|
||||
readString(record.sessionId);
|
||||
if (!cursorAgentId) return null;
|
||||
const latestRunId = readString(record.latestRunId) ?? readString(record.runId);
|
||||
const runtime = readString(record.runtime) ?? "cloud";
|
||||
const envType = readString(record.envType);
|
||||
const envName = readString(record.envName);
|
||||
const repos = readRepos(record.repos);
|
||||
return {
|
||||
cursorAgentId,
|
||||
...(latestRunId ? { latestRunId } : {}),
|
||||
runtime,
|
||||
...(envType ? { envType } : {}),
|
||||
...(envName ? { envName } : {}),
|
||||
...(repos.length > 0 ? { repos } : {}),
|
||||
};
|
||||
}
|
||||
|
||||
export const sessionCodec: AdapterSessionCodec = {
|
||||
deserialize: normalize,
|
||||
serialize: normalize,
|
||||
getDisplayId(params) {
|
||||
const normalized = normalize(params);
|
||||
return normalized ? String(normalized.cursorAgentId) : null;
|
||||
},
|
||||
};
|
||||
118
packages/adapters/cursor-cloud/src/server/test.ts
Normal file
@@ -0,0 +1,118 @@
|
||||
import { Cursor } from "@cursor/sdk";
|
||||
import type {
|
||||
AdapterEnvironmentCheck,
|
||||
AdapterEnvironmentTestContext,
|
||||
AdapterEnvironmentTestResult,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
import { asString, parseObject } from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentTestResult["status"] {
|
||||
if (checks.some((check) => check.level === "error")) return "fail";
|
||||
if (checks.some((check) => check.level === "warn")) return "warn";
|
||||
return "pass";
|
||||
}
|
||||
|
||||
function asStringEnvMap(value: unknown): Record<string, string> {
|
||||
const parsed = parseObject(value);
|
||||
const env: Record<string, string> = {};
|
||||
for (const [key, entry] of Object.entries(parsed)) {
|
||||
if (typeof entry === "string") {
|
||||
env[key] = entry;
|
||||
} else if (typeof entry === "object" && entry !== null && !Array.isArray(entry)) {
|
||||
const rec = entry as Record<string, unknown>;
|
||||
if (rec.type === "plain" && typeof rec.value === "string") env[key] = rec.value;
|
||||
}
|
||||
}
|
||||
return env;
|
||||
}
|
||||
|
||||
function looksLikeRepoUrl(value: string): boolean {
|
||||
return /^(https?:\/\/|git@)/i.test(value.trim());
|
||||
}
|
||||
|
||||
/**
 * Pre-flight environment test for the cursor_cloud adapter.
 *
 * Validates the static config (API key present, repoUrl present and
 * URL-shaped) and, when an API key is available, probes the Cursor API
 * for auth validity and — if a model is configured — model availability.
 * Checks are appended in a fixed order so the UI output is stable.
 *
 * @param ctx - adapter test context carrying the raw adapter config.
 * @returns a result whose overall status is the worst level among checks
 *   (error → fail, warn → warn, otherwise pass).
 */
export async function testEnvironment(
  ctx: AdapterEnvironmentTestContext,
): Promise<AdapterEnvironmentTestResult> {
  const checks: AdapterEnvironmentCheck[] = [];
  const config = parseObject(ctx.config);
  // Only plain-text env values are visible here; secret refs are skipped
  // by asStringEnvMap, so an unresolved secret key reads as "missing".
  const env = asStringEnvMap(config.env);
  const apiKey = asString(env.CURSOR_API_KEY, "").trim();
  const repoUrl = asString(config.repoUrl, "").trim();
  const model = asString(config.model, "").trim();

  // --- Static config checks (no network) ---
  if (!apiKey) {
    checks.push({
      code: "cursor_cloud_api_key_missing",
      level: "error",
      message: "CURSOR_API_KEY is required.",
      hint: "Add CURSOR_API_KEY under environment variables for this adapter.",
    });
  }

  if (!repoUrl) {
    checks.push({
      code: "cursor_cloud_repo_missing",
      level: "error",
      message: "repoUrl is required.",
      hint: "Set the repository URL Cursor should open for this agent.",
    });
  } else if (!looksLikeRepoUrl(repoUrl)) {
    checks.push({
      code: "cursor_cloud_repo_invalid",
      level: "error",
      message: "repoUrl must be an http(s) or git SSH repository URL.",
      detail: repoUrl,
    });
  } else {
    checks.push({
      code: "cursor_cloud_repo_present",
      level: "info",
      message: `Repository configured: ${repoUrl}`,
    });
  }

  // --- Live auth probe (only when a key is configured) ---
  if (apiKey) {
    try {
      const me = await Cursor.me({ apiKey });
      checks.push({
        code: "cursor_cloud_auth_ok",
        level: "info",
        message: "Cursor API key is valid.",
        detail: me.userEmail ? `Authenticated as ${me.userEmail}.` : `API key: ${me.apiKeyName}`,
      });
    } catch (err) {
      checks.push({
        code: "cursor_cloud_auth_failed",
        level: "error",
        message: err instanceof Error ? err.message : "Failed to validate Cursor API key.",
      });
    }
  }

  // --- Model availability probe (best effort: failures only warn) ---
  if (apiKey && model) {
    try {
      const models = await Cursor.models.list({ apiKey });
      const match = models.find((entry) => entry.id === model);
      checks.push({
        code: match ? "cursor_cloud_model_ok" : "cursor_cloud_model_unknown",
        level: match ? "info" : "warn",
        message: match
          ? `Model "${model}" is available to the authenticated Cursor account.`
          : `Model "${model}" was not found in the authenticated Cursor model list.`,
      });
    } catch (err) {
      checks.push({
        code: "cursor_cloud_model_probe_failed",
        level: "warn",
        message: err instanceof Error ? err.message : "Failed to validate model availability.",
      });
    }
  }

  return {
    adapterType: ctx.adapterType,
    status: summarizeStatus(checks),
    checks,
    testedAt: new Date().toISOString(),
  };
}
|
||||
85
packages/adapters/cursor-cloud/src/ui/build-config.test.ts
Normal file
@@ -0,0 +1,85 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { CreateConfigValues } from "@paperclipai/adapter-utils";
|
||||
import { buildCursorCloudConfig } from "./build-config.js";
|
||||
|
||||
/**
 * Build a complete CreateConfigValues fixture with neutral defaults,
 * letting each test override only the fields it exercises.
 */
function makeValues(overrides: Partial<CreateConfigValues> = {}): CreateConfigValues {
  return {
    adapterType: "cursor_cloud",
    cwd: "",
    instructionsFilePath: "",
    promptTemplate: "",
    model: "",
    thinkingEffort: "",
    chrome: false,
    dangerouslySkipPermissions: false,
    search: false,
    fastMode: false,
    dangerouslyBypassSandbox: false,
    command: "",
    args: "",
    extraArgs: "",
    envVars: "",
    envBindings: {},
    url: "",
    bootstrapPrompt: "",
    payloadTemplateJson: "",
    workspaceStrategyType: "project_primary",
    workspaceBaseRef: "",
    workspaceBranchTemplate: "",
    worktreeParentDir: "",
    runtimeServicesJson: "",
    maxTurnsPerRun: 1000,
    heartbeatEnabled: false,
    intervalSec: 300,
    adapterSchemaValues: {},
    // Test-specific overrides win over every default above.
    ...overrides,
  };
}
|
||||
|
||||
// Unit tests for the cursor-cloud config builder (vitest).
describe("buildCursorCloudConfig", () => {
  // Schema-driven values must be spread into the config and the
  // top-level form fields (instructions, prompt, bootstrap, model)
  // must be persisted under their config keys.
  it("persists schema values and top-level prompt fields", () => {
    const config = buildCursorCloudConfig(
      makeValues({
        instructionsFilePath: ".cursor/AGENTS.md",
        promptTemplate: "hello {{agent.name}}",
        bootstrapPrompt: "bootstrap",
        model: "gpt-5.4",
        adapterSchemaValues: {
          repoUrl: "https://github.com/paperclipai/paperclip.git",
          runtimeEnvType: "pool",
          runtimeEnvName: "trusted-workers",
          autoCreatePR: true,
        },
      }),
    );

    expect(config).toMatchObject({
      instructionsFilePath: ".cursor/AGENTS.md",
      promptTemplate: "hello {{agent.name}}",
      bootstrapPromptTemplate: "bootstrap",
      model: "gpt-5.4",
      repoUrl: "https://github.com/paperclipai/paperclip.git",
      runtimeEnvType: "pool",
      runtimeEnvName: "trusted-workers",
      autoCreatePR: true,
    });
  });

  // Structured bindings take precedence per key; legacy KEY=value text
  // only fills keys the bindings don't set, and malformed keys are dropped.
  it("merges structured env bindings over legacy envVars text", () => {
    const config = buildCursorCloudConfig(
      makeValues({
        envVars: ["CURSOR_API_KEY=legacy-key", "PLAIN=value", "INVALID KEY=nope"].join("\n"),
        envBindings: {
          CURSOR_API_KEY: { type: "secret_ref", secretId: "secret-1", version: "latest" },
          STRUCTURED_ONLY: "from-binding",
        },
      }),
    );

    expect(config.env).toEqual({
      CURSOR_API_KEY: { type: "secret_ref", secretId: "secret-1", version: "latest" },
      PLAIN: { type: "plain", value: "value" },
      STRUCTURED_ONLY: { type: "plain", value: "from-binding" },
    });
  });
});
|
||||
67
packages/adapters/cursor-cloud/src/ui/build-config.ts
Normal file
@@ -0,0 +1,67 @@
|
||||
import type { CreateConfigValues } from "@paperclipai/adapter-utils";
|
||||
|
||||
function parseEnvVars(text: string): Record<string, string> {
|
||||
const env: Record<string, string> = {};
|
||||
for (const line of text.split(/\r?\n/)) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith("#")) continue;
|
||||
const eq = trimmed.indexOf("=");
|
||||
if (eq <= 0) continue;
|
||||
const key = trimmed.slice(0, eq).trim();
|
||||
const value = trimmed.slice(eq + 1);
|
||||
if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(key)) continue;
|
||||
env[key] = value;
|
||||
}
|
||||
return env;
|
||||
}
|
||||
|
||||
function parseEnvBindings(bindings: unknown): Record<string, unknown> {
|
||||
if (typeof bindings !== "object" || bindings === null || Array.isArray(bindings)) return {};
|
||||
const env: Record<string, unknown> = {};
|
||||
for (const [key, raw] of Object.entries(bindings)) {
|
||||
if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(key)) continue;
|
||||
if (typeof raw === "string") {
|
||||
env[key] = { type: "plain", value: raw };
|
||||
continue;
|
||||
}
|
||||
if (typeof raw !== "object" || raw === null || Array.isArray(raw)) continue;
|
||||
const rec = raw as Record<string, unknown>;
|
||||
if (rec.type === "plain" && typeof rec.value === "string") {
|
||||
env[key] = { type: "plain", value: rec.value };
|
||||
continue;
|
||||
}
|
||||
if (rec.type === "secret_ref" && typeof rec.secretId === "string") {
|
||||
env[key] = {
|
||||
type: "secret_ref",
|
||||
secretId: rec.secretId,
|
||||
...(typeof rec.version === "number" || rec.version === "latest"
|
||||
? { version: rec.version }
|
||||
: {}),
|
||||
};
|
||||
}
|
||||
}
|
||||
return env;
|
||||
}
|
||||
|
||||
export function buildCursorCloudConfig(values: CreateConfigValues): Record<string, unknown> {
|
||||
const config: Record<string, unknown> = {
|
||||
...(values.adapterSchemaValues ?? {}),
|
||||
};
|
||||
if (values.instructionsFilePath) config.instructionsFilePath = values.instructionsFilePath;
|
||||
if (values.promptTemplate) config.promptTemplate = values.promptTemplate;
|
||||
if (values.bootstrapPrompt) config.bootstrapPromptTemplate = values.bootstrapPrompt;
|
||||
if (values.model?.trim()) config.model = values.model.trim();
|
||||
|
||||
const env = parseEnvBindings(values.envBindings);
|
||||
const legacy = parseEnvVars(values.envVars);
|
||||
for (const [key, value] of Object.entries(legacy)) {
|
||||
if (!Object.prototype.hasOwnProperty.call(env, key)) {
|
||||
env[key] = { type: "plain", value };
|
||||
}
|
||||
}
|
||||
if (Object.keys(env).length > 0) {
|
||||
config.env = env;
|
||||
}
|
||||
|
||||
return config;
|
||||
}
|
||||
2
packages/adapters/cursor-cloud/src/ui/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export { buildCursorCloudConfig } from "./build-config.js";
|
||||
export { parseCursorCloudStdoutLine } from "./parse-stdout.js";
|
||||
143
packages/adapters/cursor-cloud/src/ui/parse-stdout.test.ts
Normal file
@@ -0,0 +1,143 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { parseCursorCloudStdoutLine } from "./parse-stdout.js";
|
||||
|
||||
// Fixed timestamp shared by every expectation below.
const ts = "2026-05-10T05:10:00.000Z";

// Unit tests for the stdout-line → transcript-entry parser (vitest).
describe("parseCursorCloudStdoutLine", () => {
  // Envelope-level events: session init and adapter status lines.
  it("parses init and status events", () => {
    expect(
      parseCursorCloudStdoutLine(
        JSON.stringify({ type: "cursor_cloud.init", sessionId: "agent-123", model: "gpt-5.4" }),
        ts,
      ),
    ).toEqual([{ kind: "init", ts, sessionId: "agent-123", model: "gpt-5.4" }]);

    expect(
      parseCursorCloudStdoutLine(
        JSON.stringify({ type: "cursor_cloud.status", status: "running", message: "Reattached" }),
        ts,
      ),
    ).toEqual([{ kind: "system", ts, text: "running: Reattached" }]);
  });

  // SDK messages: assistant content parts fan out into one entry per part,
  // and tool_call messages split by status (running → tool_call,
  // completed → tool_result).
  it("parses assistant text and tool lifecycle SDK messages", () => {
    const assistantLine = JSON.stringify({
      type: "cursor_cloud.message",
      message: {
        type: "assistant",
        message: {
          content: [
            { type: "text", text: "Working on it." },
            { type: "tool_use", id: "tool-1", name: "read_file", input: { path: "README.md" } },
          ],
        },
      },
    });
    expect(parseCursorCloudStdoutLine(assistantLine, ts)).toEqual([
      { kind: "assistant", ts, text: "Working on it." },
      { kind: "tool_call", ts, name: "read_file", toolUseId: "tool-1", input: { path: "README.md" } },
    ]);

    const toolStartLine = JSON.stringify({
      type: "cursor_cloud.message",
      message: {
        type: "tool_call",
        id: "call-1",
        name: "bash",
        status: "running",
        args: { command: "pwd" },
      },
    });
    expect(parseCursorCloudStdoutLine(toolStartLine, ts)).toEqual([
      { kind: "tool_call", ts, name: "bash", toolUseId: "call-1", input: { command: "pwd" } },
    ]);

    const toolEndLine = JSON.stringify({
      type: "cursor_cloud.message",
      message: {
        type: "tool_call",
        id: "call-1",
        name: "bash",
        status: "completed",
        result: { stdout: "/repo" },
      },
    });
    expect(parseCursorCloudStdoutLine(toolEndLine, ts)).toEqual([
      {
        kind: "tool_result",
        ts,
        toolUseId: "call-1",
        toolName: "bash",
        // Non-string results are pretty-printed JSON.
        content: JSON.stringify({ stdout: "/repo" }, null, 2),
        isError: false,
      },
    ]);
  });

  // Standalone tool_result messages carry the call id under `call_id`;
  // `is_error: true` or an "error" status marks the entry as an error.
  it("parses standalone tool_result SDK messages", () => {
    const line = JSON.stringify({
      type: "cursor_cloud.message",
      message: {
        type: "tool_result",
        call_id: "call-9",
        name: "read_file",
        result: { contents: "file body" },
      },
    });
    expect(parseCursorCloudStdoutLine(line, ts)).toEqual([
      {
        kind: "tool_result",
        ts,
        toolUseId: "call-9",
        toolName: "read_file",
        content: JSON.stringify({ contents: "file body" }, null, 2),
        isError: false,
      },
    ]);

    const errorLine = JSON.stringify({
      type: "cursor_cloud.message",
      message: {
        type: "tool_result",
        call_id: "call-10",
        name: "bash",
        is_error: true,
        content: "exit 1",
      },
    });
    expect(parseCursorCloudStdoutLine(errorLine, ts)).toEqual([
      {
        kind: "tool_result",
        ts,
        toolUseId: "call-10",
        toolName: "bash",
        content: "exit 1",
        isError: true,
      },
    ]);
  });

  // Terminal result events map to a "result" entry (token/cost fields are
  // zeroed — Cursor cloud does not report them); non-JSON lines pass
  // through unchanged as raw stdout.
  it("parses result events and preserves unknown lines as stdout", () => {
    expect(
      parseCursorCloudStdoutLine(
        JSON.stringify({ type: "cursor_cloud.result", status: "finished", result: "Done", model: "gpt-5.4" }),
        ts,
      ),
    ).toEqual([
      {
        kind: "result",
        ts,
        text: "Done",
        inputTokens: 0,
        outputTokens: 0,
        cachedTokens: 0,
        costUsd: 0,
        subtype: "finished",
        isError: false,
        errors: [],
      },
    ]);

    expect(parseCursorCloudStdoutLine("plain text", ts)).toEqual([{ kind: "stdout", ts, text: "plain text" }]);
  });
});
|
||||
186
packages/adapters/cursor-cloud/src/ui/parse-stdout.ts
Normal file
@@ -0,0 +1,186 @@
|
||||
import type { TranscriptEntry } from "@paperclipai/adapter-utils";
|
||||
|
||||
function safeJsonParse(text: string): unknown {
|
||||
try {
|
||||
return JSON.parse(text);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function asString(value: unknown, fallback = ""): string {
|
||||
return typeof value === "string" ? value : fallback;
|
||||
}
|
||||
|
||||
function stringifyUnknown(value: unknown): string {
|
||||
if (typeof value === "string") return value;
|
||||
if (value === null || value === undefined) return "";
|
||||
try {
|
||||
return JSON.stringify(value, null, 2);
|
||||
} catch {
|
||||
return String(value);
|
||||
}
|
||||
}
|
||||
|
||||
function parseAssistantMessage(message: Record<string, unknown>, ts: string): TranscriptEntry[] {
|
||||
const content = Array.isArray(message.content) ? message.content : [];
|
||||
const entries: TranscriptEntry[] = [];
|
||||
for (const partRaw of content) {
|
||||
const part = asRecord(partRaw);
|
||||
if (!part) continue;
|
||||
const type = asString(part.type).trim();
|
||||
if (type === "text") {
|
||||
const text = asString(part.text).trim();
|
||||
if (text) entries.push({ kind: "assistant", ts, text });
|
||||
continue;
|
||||
}
|
||||
if (type === "tool_use") {
|
||||
entries.push({
|
||||
kind: "tool_call",
|
||||
ts,
|
||||
name: asString(part.name, "tool"),
|
||||
toolUseId: asString(part.id) || undefined,
|
||||
input: part.input ?? {},
|
||||
});
|
||||
}
|
||||
}
|
||||
return entries;
|
||||
}
|
||||
|
||||
function parseSdkMessage(messageRaw: unknown, ts: string): TranscriptEntry[] {
|
||||
const message = asRecord(messageRaw);
|
||||
if (!message) return [];
|
||||
const type = asString(message.type);
|
||||
|
||||
if (type === "assistant") {
|
||||
const body = asRecord(message.message);
|
||||
return body ? parseAssistantMessage(body, ts) : [];
|
||||
}
|
||||
|
||||
if (type === "user") {
|
||||
const body = asRecord(message.message);
|
||||
const content = Array.isArray(body?.content) ? body.content : [];
|
||||
const text = content
|
||||
.map((entry) => asRecord(entry))
|
||||
.filter((entry): entry is Record<string, unknown> => Boolean(entry))
|
||||
.map((entry) => asString(entry.text).trim())
|
||||
.filter(Boolean)
|
||||
.join("\n");
|
||||
return text ? [{ kind: "user", ts, text }] : [];
|
||||
}
|
||||
|
||||
if (type === "thinking") {
|
||||
const text = asString(message.text).trim();
|
||||
return text ? [{ kind: "thinking", ts, text }] : [];
|
||||
}
|
||||
|
||||
if (type === "tool_call") {
|
||||
const toolUseId = asString(message.call_id, asString(message.id, "tool_call"));
|
||||
const status = asString(message.status).toLowerCase();
|
||||
if (status === "running") {
|
||||
return [{
|
||||
kind: "tool_call",
|
||||
ts,
|
||||
name: asString(message.name, "tool"),
|
||||
toolUseId,
|
||||
input: message.args ?? {},
|
||||
}];
|
||||
}
|
||||
if (status === "completed" || status === "error") {
|
||||
return [{
|
||||
kind: "tool_result",
|
||||
ts,
|
||||
toolUseId,
|
||||
toolName: asString(message.name, "tool"),
|
||||
content: stringifyUnknown(message.result ?? message.args ?? {}),
|
||||
isError: status === "error",
|
||||
}];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
if (type === "tool_result") {
|
||||
const toolUseId = asString(message.call_id, asString(message.id, "tool_result"));
|
||||
const isError =
|
||||
message.is_error === true ||
|
||||
asString(message.status).toLowerCase() === "error";
|
||||
return [{
|
||||
kind: "tool_result",
|
||||
ts,
|
||||
toolUseId,
|
||||
toolName: asString(message.name, "tool"),
|
||||
content: stringifyUnknown(message.result ?? message.content ?? message.output ?? {}),
|
||||
isError,
|
||||
}];
|
||||
}
|
||||
|
||||
if (type === "status") {
|
||||
const status = asString(message.status);
|
||||
const statusMessage = asString(message.message);
|
||||
return [{
|
||||
kind: "system",
|
||||
ts,
|
||||
text: `status: ${status}${statusMessage ? ` - ${statusMessage}` : ""}`,
|
||||
}];
|
||||
}
|
||||
|
||||
if (type === "task") {
|
||||
const text = asString(message.text).trim();
|
||||
return text ? [{ kind: "system", ts, text }] : [];
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
export function parseCursorCloudStdoutLine(line: string, ts: string): TranscriptEntry[] {
|
||||
const parsed = asRecord(safeJsonParse(line));
|
||||
if (!parsed) {
|
||||
return [{ kind: "stdout", ts, text: line }];
|
||||
}
|
||||
|
||||
const type = asString(parsed.type);
|
||||
if (type === "cursor_cloud.init") {
|
||||
const sessionId = asString(parsed.sessionId, asString(parsed.agentId));
|
||||
return [{
|
||||
kind: "init",
|
||||
ts,
|
||||
model: asString(parsed.model, "cursor_cloud"),
|
||||
sessionId,
|
||||
}];
|
||||
}
|
||||
|
||||
if (type === "cursor_cloud.status") {
|
||||
return [{
|
||||
kind: "system",
|
||||
ts,
|
||||
text: `${asString(parsed.status, "status")}${parsed.message ? `: ${asString(parsed.message)}` : ""}`,
|
||||
}];
|
||||
}
|
||||
|
||||
if (type === "cursor_cloud.message") {
|
||||
return parseSdkMessage(parsed.message, ts);
|
||||
}
|
||||
|
||||
if (type === "cursor_cloud.result") {
|
||||
const status = asString(parsed.status, "error");
|
||||
return [{
|
||||
kind: "result",
|
||||
ts,
|
||||
text: asString(parsed.result),
|
||||
inputTokens: 0,
|
||||
outputTokens: 0,
|
||||
cachedTokens: 0,
|
||||
costUsd: 0,
|
||||
subtype: status,
|
||||
isError: status !== "finished",
|
||||
errors: parsed.error ? [asString(parsed.error)] : [],
|
||||
}];
|
||||
}
|
||||
|
||||
return [{ kind: "stdout", ts, text: line }];
|
||||
}
|
||||
9
packages/adapters/cursor-cloud/tsconfig.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"extends": "../../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src",
|
||||
"types": ["node"]
|
||||
},
|
||||
"include": ["src"]
|
||||
}
|
||||
@@ -104,5 +104,5 @@ Notes:
|
||||
- Sessions are resumed with --resume when stored session cwd matches current cwd.
|
||||
- Paperclip auto-injects local skills into "~/.cursor/skills" when missing, so Cursor can discover "$paperclip" and related skills on local runs.
|
||||
- Paperclip auto-adds --yolo unless one of --trust/--yolo/-f is already present in extraArgs.
|
||||
- Remote sandbox runs prepend "~/.local/bin" to PATH and prefer "~/.local/bin/cursor-agent" when the default Cursor entrypoint is requested, so standard E2B-style installs do not need hardcoded absolute command paths.
|
||||
- Remote sandbox runs prepend "~/.cursor/bin" and "~/.local/bin" to PATH and prefer the installed absolute entrypoint from one of those directories when the default Cursor command is requested, so installer-managed sandbox leases do not need hardcoded command paths.
|
||||
`;
|
||||
|
||||
@@ -28,7 +28,7 @@ const {
|
||||
})),
|
||||
ensureCommandResolvable: vi.fn(async () => undefined),
|
||||
resolveCommandForLogs: vi.fn(async () => "ssh://fixture@127.0.0.1:2222/remote/workspace :: agent"),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => undefined),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => ({ gitBacked: false })),
|
||||
restoreWorkspaceFromSshExecution: vi.fn(async () => undefined),
|
||||
runSshCommand: vi.fn(async () => ({
|
||||
stdout: "/home/agent",
|
||||
@@ -103,6 +103,7 @@ describe("cursor remote execution", () => {
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(alternateWorkspaceDir, { recursive: true });
|
||||
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-1/workspace";
|
||||
const result = await execute({
|
||||
runId: "run-1",
|
||||
agent: {
|
||||
@@ -158,19 +159,19 @@ describe("cursor remote execution", () => {
|
||||
|
||||
expect(result.sessionParams).toMatchObject({
|
||||
sessionId: "cursor-session-1",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
remoteExecution: {
|
||||
transport: "ssh",
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
remoteCwd: managedRemoteWorkspace,
|
||||
},
|
||||
});
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledTimes(1);
|
||||
expect(syncDirectoryToSsh).toHaveBeenCalledTimes(1);
|
||||
expect(syncDirectoryToSsh).toHaveBeenCalledWith(expect.objectContaining({
|
||||
remoteDir: "/remote/workspace/.paperclip-runtime/cursor/skills",
|
||||
remoteDir: `${managedRemoteWorkspace}/.paperclip-runtime/cursor/skills`,
|
||||
followSymlinks: true,
|
||||
}));
|
||||
expect(runSshCommand).toHaveBeenCalledWith(
|
||||
@@ -182,12 +183,12 @@ describe("cursor remote execution", () => {
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(call?.[2]).toContain("--workspace");
|
||||
expect(call?.[2]).toContain("/remote/workspace");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(call?.[2]).toContain(managedRemoteWorkspace);
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe(managedRemoteWorkspace);
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
@@ -199,7 +200,7 @@ describe("cursor remote execution", () => {
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe(managedRemoteWorkspace);
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
@@ -210,6 +211,7 @@ describe("cursor remote execution", () => {
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-ssh-resume/workspace";
|
||||
await execute({
|
||||
runId: "run-ssh-resume",
|
||||
agent: {
|
||||
@@ -223,13 +225,13 @@ describe("cursor remote execution", () => {
|
||||
sessionId: "session-123",
|
||||
sessionParams: {
|
||||
sessionId: "session-123",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
remoteExecution: {
|
||||
transport: "ssh",
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
remoteCwd: managedRemoteWorkspace,
|
||||
},
|
||||
},
|
||||
sessionDisplayId: "session-123",
|
||||
|
||||
352
packages/adapters/cursor-local/src/server/execute.test.ts
Normal file
@@ -0,0 +1,352 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { AdapterExecutionTarget } from "@paperclipai/adapter-utils/execution-target";
|
||||
import { runChildProcess } from "@paperclipai/adapter-utils/server-utils";
|
||||
import { SANDBOX_INSTALL_COMMAND } from "../index.js";
|
||||
import { execute } from "./execute.js";
|
||||
|
||||
type PrepareCursorSandboxCommandInput = {
|
||||
runId: string;
|
||||
target: AdapterExecutionTarget | null | undefined;
|
||||
command: string;
|
||||
cwd: string;
|
||||
env: Record<string, string>;
|
||||
remoteSystemHomeDirHint?: string | null;
|
||||
timeoutSec: number;
|
||||
graceSec: number;
|
||||
};
|
||||
|
||||
type PrepareCursorSandboxCommandResult = {
|
||||
command: string;
|
||||
env: Record<string, string>;
|
||||
remoteSystemHomeDir: string | null;
|
||||
addedPathEntry: string | null;
|
||||
preferredCommandPath: string | null;
|
||||
};
|
||||
|
||||
const {
|
||||
setPrepareCursorSandboxCommand,
|
||||
} = vi.hoisted(() => {
|
||||
const setPrepareCursorSandboxCommand = vi.fn<
|
||||
(input: PrepareCursorSandboxCommandInput) => Promise<PrepareCursorSandboxCommandResult>
|
||||
>();
|
||||
return { setPrepareCursorSandboxCommand };
|
||||
});
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/execution-target", async () => {
|
||||
const actual = await vi.importActual<typeof import("@paperclipai/adapter-utils/execution-target")>(
|
||||
"@paperclipai/adapter-utils/execution-target",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
startAdapterExecutionTargetPaperclipBridge: async () => null,
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("./remote-command.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("./remote-command.js")>("./remote-command.js");
|
||||
return {
|
||||
...actual,
|
||||
prepareCursorSandboxCommand: async (input: Parameters<typeof actual.prepareCursorSandboxCommand>[0]) => {
|
||||
return setPrepareCursorSandboxCommand(input);
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
function buildFakeAgentScript(captureDir: string): string {
|
||||
return `#!/bin/sh
|
||||
cat > ${JSON.stringify(path.join(captureDir, "prompt.txt"))}
|
||||
printf '%s' "$0" > ${JSON.stringify(path.join(captureDir, "command.txt"))}
|
||||
printf '%s' "$PATH" > ${JSON.stringify(path.join(captureDir, "path.txt"))}
|
||||
printf '%s\\n' '{"type":"system","subtype":"init","session_id":"cursor-session-fresh-1","model":"auto"}'
|
||||
printf '%s\\n' '{"type":"assistant","message":{"content":[{"type":"output_text","text":"hello"}]}}'
|
||||
printf '%s\\n' '{"type":"result","subtype":"success","session_id":"cursor-session-fresh-1","result":"ok"}'
|
||||
`;
|
||||
}
|
||||
|
||||
function buildInstallSimulationCommand(commandPath: string, captureDir: string): string {
|
||||
return [
|
||||
`mkdir -p ${JSON.stringify(path.dirname(commandPath))}`,
|
||||
`mkdir -p ${JSON.stringify(captureDir)}`,
|
||||
`cat > ${JSON.stringify(commandPath)} <<'EOF'`,
|
||||
buildFakeAgentScript(captureDir),
|
||||
"EOF",
|
||||
`chmod +x ${JSON.stringify(commandPath)}`,
|
||||
].join("\n");
|
||||
}
|
||||
|
||||
function createFreshLeaseSandboxRunner(options: {
|
||||
homeDir: string;
|
||||
installCommandPath: string;
|
||||
captureDir: string;
|
||||
}) {
|
||||
let counter = 0;
|
||||
const installCommands: string[] = [];
|
||||
const systemPath = [
|
||||
"/usr/local/bin",
|
||||
"/opt/homebrew/bin",
|
||||
"/usr/local/sbin",
|
||||
"/usr/bin",
|
||||
"/bin",
|
||||
"/usr/sbin",
|
||||
"/sbin",
|
||||
].join(path.delimiter);
|
||||
|
||||
return {
|
||||
installCommands,
|
||||
execute: async (input: {
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
onSpawn?: (meta: { pid: number; startedAt: string }) => Promise<void>;
|
||||
}) => {
|
||||
counter += 1;
|
||||
const args = [...(input.args ?? [])];
|
||||
if (args[1] === SANDBOX_INSTALL_COMMAND) {
|
||||
installCommands.push(args[1]);
|
||||
args[1] = buildInstallSimulationCommand(options.installCommandPath, options.captureDir);
|
||||
}
|
||||
|
||||
const inheritedPath = input.env?.PATH ?? systemPath;
|
||||
const pathWithLocalBin = `${path.join(options.homeDir, ".local", "bin")}${path.delimiter}${inheritedPath}`;
|
||||
const env = {
|
||||
...(input.env ?? {}),
|
||||
HOME: input.env?.HOME ?? options.homeDir,
|
||||
PATH: pathWithLocalBin,
|
||||
};
|
||||
|
||||
return await runChildProcess(`cursor-fresh-lease-${counter}`, input.command, args, {
|
||||
cwd: input.cwd ?? process.cwd(),
|
||||
env,
|
||||
stdin: input.stdin,
|
||||
timeoutSec: Math.max(1, Math.ceil((input.timeoutMs ?? 30_000) / 1000)),
|
||||
graceSec: 5,
|
||||
onLog: input.onLog ?? (async () => {}),
|
||||
onSpawn: input.onSpawn
|
||||
? async (meta) => input.onSpawn?.({ pid: meta.pid, startedAt: meta.startedAt })
|
||||
: undefined,
|
||||
});
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
describe("cursor execute", () => {
|
||||
it("installs the default agent command on a fresh sandbox lease before execution", async () => {
|
||||
setPrepareCursorSandboxCommand.mockReset();
|
||||
setPrepareCursorSandboxCommand.mockImplementation(async (input) => {
|
||||
const actual = await vi.importActual<typeof import("./remote-command.js")>("./remote-command.js");
|
||||
return actual.prepareCursorSandboxCommand(input);
|
||||
});
|
||||
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-cursor-fresh-lease-"));
|
||||
const homeDir = path.join(root, "home");
|
||||
const workspace = path.join(root, "workspace");
|
||||
const remoteWorkspace = path.join(root, "remote-workspace");
|
||||
const captureDir = path.join(root, "capture");
|
||||
const agentPath = path.join(homeDir, ".local", "bin", "agent");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
await fs.mkdir(remoteWorkspace, { recursive: true });
|
||||
|
||||
const runner = createFreshLeaseSandboxRunner({
|
||||
homeDir,
|
||||
installCommandPath: agentPath,
|
||||
captureDir,
|
||||
});
|
||||
|
||||
const previousHome = process.env.HOME;
|
||||
process.env.HOME = homeDir;
|
||||
|
||||
try {
|
||||
const result = await execute({
|
||||
runId: "run-fresh-lease-1",
|
||||
agent: {
|
||||
id: "agent-1",
|
||||
companyId: "company-1",
|
||||
name: "Cursor Coder",
|
||||
adapterType: "cursor",
|
||||
adapterConfig: {},
|
||||
},
|
||||
runtime: {
|
||||
sessionId: null,
|
||||
sessionParams: null,
|
||||
sessionDisplayId: null,
|
||||
taskKey: null,
|
||||
},
|
||||
executionTarget: {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
remoteCwd: remoteWorkspace,
|
||||
runner,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
config: {
|
||||
command: "agent",
|
||||
cwd: workspace,
|
||||
promptTemplate: "Follow the paperclip heartbeat.",
|
||||
},
|
||||
context: {},
|
||||
authToken: "run-jwt-token",
|
||||
onLog: async () => {},
|
||||
});
|
||||
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.errorMessage).toBeNull();
|
||||
expect(runner.installCommands).toEqual([SANDBOX_INSTALL_COMMAND]);
|
||||
|
||||
const command = await fs.readFile(path.join(captureDir, "command.txt"), "utf8");
|
||||
const runtimePath = await fs.readFile(path.join(captureDir, "path.txt"), "utf8");
|
||||
const prompt = await fs.readFile(path.join(captureDir, "prompt.txt"), "utf8");
|
||||
expect(command).toBe(agentPath);
|
||||
expect(runtimePath.split(path.delimiter)).toContain(path.join(homeDir, ".local", "bin"));
|
||||
expect(prompt).toContain("Follow the paperclip heartbeat.");
|
||||
} finally {
|
||||
if (previousHome === undefined) delete process.env.HOME;
|
||||
else process.env.HOME = previousHome;
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("reruns sandbox command resolution after managed runtime setup and keeps the original sandbox home", async () => {
|
||||
setPrepareCursorSandboxCommand.mockReset();
|
||||
const prepareInputs: PrepareCursorSandboxCommandInput[] = [];
|
||||
let finalPreparedCommand: string | null = null;
|
||||
|
||||
const rootDir = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-cursor-fresh-lease-managed-"));
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const remoteWorkspace = path.join(rootDir, "remote-workspace");
|
||||
const systemHomeDir = path.join(rootDir, "system-home");
|
||||
const managedCaptureDir = path.join(rootDir, "managed-capture");
|
||||
await fs.mkdir(managedCaptureDir, { recursive: true });
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
await fs.mkdir(remoteWorkspace, { recursive: true });
|
||||
const preferredAgentScript = `#!/bin/sh
|
||||
printf '%s\\n' '{"type":"system","subtype":"init","session_id":"cursor-session-fresh-1","model":"auto"}'
|
||||
printf '%s\\n' '{"type":"assistant","message":{"content":[{"type":"output_text","text":"hello"}]}}'
|
||||
printf '%s\\n' '{"type":"result","subtype":"success","session_id":"cursor-session-fresh-1","result":"ok"}'
|
||||
`;
|
||||
|
||||
setPrepareCursorSandboxCommand.mockImplementation(async (input) => {
|
||||
const call = prepareInputs.length;
|
||||
prepareInputs.push(input);
|
||||
if (call === 0) {
|
||||
return {
|
||||
command: input.command,
|
||||
env: input.env,
|
||||
remoteSystemHomeDir: systemHomeDir,
|
||||
addedPathEntry: null,
|
||||
preferredCommandPath: null,
|
||||
};
|
||||
}
|
||||
|
||||
expect(input.remoteSystemHomeDirHint).toBe(systemHomeDir);
|
||||
const preferredCommandPath = path.join(systemHomeDir, ".local", "bin", input.command);
|
||||
finalPreparedCommand = preferredCommandPath;
|
||||
const runtimeEnv = {
|
||||
...input.env,
|
||||
PATH: `${path.join(systemHomeDir, ".local", "bin")}${path.delimiter}${input.env.PATH}`,
|
||||
};
|
||||
await fs.mkdir(path.dirname(preferredCommandPath), { recursive: true });
|
||||
await fs.writeFile(preferredCommandPath, preferredAgentScript);
|
||||
await fs.chmod(preferredCommandPath, 0o755);
|
||||
await fs.writeFile(path.join(managedCaptureDir, "agent-output.log"), preferredCommandPath);
|
||||
|
||||
return {
|
||||
command: preferredCommandPath,
|
||||
env: runtimeEnv,
|
||||
remoteSystemHomeDir: systemHomeDir,
|
||||
addedPathEntry: path.join(systemHomeDir, ".local", "bin"),
|
||||
preferredCommandPath,
|
||||
};
|
||||
});
|
||||
|
||||
const runnerState = {
|
||||
commands: [] as string[],
|
||||
};
|
||||
const runner = {
|
||||
execute: async (input: { command: string; args?: string[]; env?: Record<string, string> }) => {
|
||||
runnerState.commands.push(input.command);
|
||||
if (input.command === "sh") {
|
||||
return {
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
pid: 555,
|
||||
startedAt: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
return runChildProcess(`cursor-fresh-lease-${runnerState.commands.length}`, input.command, input.args ?? [], {
|
||||
cwd: remoteWorkspace,
|
||||
env: input.env ?? {},
|
||||
timeoutSec: 30,
|
||||
graceSec: 5,
|
||||
onLog: async () => {},
|
||||
onSpawn: async () => {},
|
||||
});
|
||||
},
|
||||
};
|
||||
|
||||
const runMeta: Array<{ command?: string; [key: string]: unknown }> = [];
|
||||
const previousHome = process.env.HOME;
|
||||
process.env.HOME = systemHomeDir;
|
||||
|
||||
try {
|
||||
const command = "agent";
|
||||
const result = await execute({
|
||||
runId: "run-fresh-lease-managed",
|
||||
agent: {
|
||||
id: "agent-1",
|
||||
companyId: "company-1",
|
||||
name: "Cursor Coder",
|
||||
adapterType: "cursor",
|
||||
adapterConfig: {},
|
||||
},
|
||||
runtime: {
|
||||
sessionId: null,
|
||||
sessionParams: null,
|
||||
sessionDisplayId: null,
|
||||
taskKey: null,
|
||||
},
|
||||
executionTarget: {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
remoteCwd: remoteWorkspace,
|
||||
providerKey: "fixture",
|
||||
runner: runner,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
config: {
|
||||
command,
|
||||
cwd: workspaceDir,
|
||||
promptTemplate: "Run against runtime-managed command.",
|
||||
},
|
||||
context: {},
|
||||
authToken: "run-jwt-token",
|
||||
onLog: async () => {},
|
||||
onMeta: async (meta) => {
|
||||
runMeta.push(meta as unknown as { command?: string; [key: string]: unknown });
|
||||
},
|
||||
});
|
||||
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(prepareInputs).toHaveLength(2);
|
||||
expect(finalPreparedCommand).not.toBeNull();
|
||||
expect(finalPreparedCommand).toMatch(/\.local\/(bin|sbin)\/agent$/);
|
||||
const resolvedCommand = runMeta.find(Boolean)?.command as string | undefined;
|
||||
expect(resolvedCommand).toMatch(/\.local\/bin\/agent$/);
|
||||
expect(resolvedCommand).toContain(path.join(systemHomeDir, ".local", "bin", command));
|
||||
} finally {
|
||||
if (previousHome === undefined) delete process.env.HOME;
|
||||
else process.env.HOME = previousHome;
|
||||
await fs.rm(rootDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -6,6 +6,7 @@ import { inferOpenAiCompatibleBiller, type AdapterExecutionContext, type Adapter
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
overrideAdapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
@@ -16,6 +17,7 @@ import {
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
readAdapterExecutionTargetHomeDir,
|
||||
resolveAdapterExecutionTargetTimeoutSec,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
@@ -26,19 +28,18 @@ import {
|
||||
asNumber,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
buildInvocationEnvForLogs,
|
||||
ensureAbsoluteDirectory,
|
||||
ensurePaperclipSkillSymlink,
|
||||
ensurePathInEnv,
|
||||
refreshPaperclipWorkspaceEnvForExecution,
|
||||
readPaperclipRuntimeSkillEntries,
|
||||
readPaperclipIssueWorkModeFromContext,
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
removeMaintainerOnlySkillSymlinks,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
joinPromptSections,
|
||||
@@ -224,13 +225,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const useConfiguredInsteadOfAgentHome = workspaceSource === "agent_home" && configuredCwd.length > 0;
|
||||
const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
|
||||
const cwd = effectiveWorkspaceCwd || configuredCwd || process.cwd();
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceHints,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
let effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
const cursorSkillEntries = await readPaperclipRuntimeSkillEntries(config, __moduleDir);
|
||||
const desiredCursorSkillNames = resolvePaperclipDesiredSkillNames(config, cursorSkillEntries);
|
||||
@@ -294,24 +289,26 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (wakePayloadJson) {
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
refreshPaperclipWorkspaceEnvForExecution({
|
||||
env,
|
||||
envConfig,
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceHints,
|
||||
agentHome,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
for (const [k, v] of Object.entries(envConfig)) {
|
||||
if (typeof v === "string") env[k] = v;
|
||||
}
|
||||
if (!hasExplicitApiKey && authToken) {
|
||||
env.PAPERCLIP_API_KEY = authToken;
|
||||
}
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const timeoutSec = resolveAdapterExecutionTargetTimeoutSec(
|
||||
executionTarget,
|
||||
asNumber(config.timeoutSec, 0),
|
||||
);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId,
|
||||
@@ -324,10 +321,11 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
graceSec,
|
||||
onLog,
|
||||
});
|
||||
// Probe the sandbox before the managed-home override so we discover
|
||||
// cursor-agent from the real system HOME (e.g. ~/.local/bin/cursor-agent).
|
||||
// The managed HOME set later is for runtime isolation, not for finding the CLI.
|
||||
const sandboxCommand = await prepareCursorSandboxCommand({
|
||||
// Probe the sandbox before the managed-home override so we discover the
|
||||
// installer-managed agent symlinks from the real system HOME (for example
|
||||
// ~/.local/bin/agent). The managed HOME set later is for runtime isolation,
|
||||
// not for finding the CLI.
|
||||
const initialSandboxCommand = await prepareCursorSandboxCommand({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
command,
|
||||
@@ -336,22 +334,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
});
|
||||
command = sandboxCommand.command;
|
||||
env = sandboxCommand.env;
|
||||
const effectiveEnv = Object.fromEntries(
|
||||
Object.entries({ ...process.env, ...env }).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
);
|
||||
const billingType = resolveCursorBillingType(effectiveEnv);
|
||||
const runtimeEnv = ensurePathInEnv(effectiveEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv, { installCommand: SANDBOX_INSTALL_COMMAND });
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
let loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv,
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
});
|
||||
const sandboxSystemHomeDir = initialSandboxCommand.remoteSystemHomeDir;
|
||||
command = initialSandboxCommand.command;
|
||||
env = initialSandboxCommand.env;
|
||||
|
||||
const extraArgs = (() => {
|
||||
const fromExtraArgs = asStringArray(config.extraArgs);
|
||||
@@ -372,8 +357,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
`[paperclip] Syncing workspace and Cursor runtime assets to ${describeAdapterExecutionTarget(executionTarget)}.\n`,
|
||||
);
|
||||
const preparedExecutionTargetRuntime = await prepareAdapterExecutionTargetRuntime({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
adapterKey: "cursor",
|
||||
timeoutSec,
|
||||
workspaceLocalDir: cwd,
|
||||
installCommand: SANDBOX_INSTALL_COMMAND,
|
||||
detectCommand: command,
|
||||
@@ -384,6 +371,20 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}],
|
||||
});
|
||||
restoreRemoteWorkspace = () => preparedExecutionTargetRuntime.restoreWorkspace();
|
||||
effectiveExecutionCwd = preparedExecutionTargetRuntime.workspaceRemoteDir ?? effectiveExecutionCwd;
|
||||
refreshPaperclipWorkspaceEnvForExecution({
|
||||
env,
|
||||
envConfig,
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceHints,
|
||||
agentHome,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
remoteRuntimeRootDir = preparedExecutionTargetRuntime.runtimeRootDir;
|
||||
const managedHome = adapterExecutionTargetUsesManagedHome(executionTarget);
|
||||
if (managedHome && preparedExecutionTargetRuntime.runtimeRootDir) {
|
||||
@@ -415,12 +416,47 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
const finalSandboxCommand = executionTarget?.kind === "remote" && executionTarget.transport === "sandbox"
|
||||
? await prepareCursorSandboxCommand({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
command,
|
||||
cwd,
|
||||
env,
|
||||
remoteSystemHomeDirHint: sandboxSystemHomeDir,
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
})
|
||||
: null;
|
||||
if (finalSandboxCommand) {
|
||||
command = finalSandboxCommand.command;
|
||||
env = finalSandboxCommand.env;
|
||||
}
|
||||
const runtimeExecutionTarget = overrideAdapterExecutionTargetRemoteCwd(executionTarget, effectiveExecutionCwd);
|
||||
const effectiveEnv = Object.fromEntries(
|
||||
Object.entries({ ...process.env, ...env }).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
);
|
||||
const billingType = resolveCursorBillingType(effectiveEnv);
|
||||
const runtimeEnv = ensurePathInEnv(effectiveEnv);
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv, {
|
||||
installCommand: SANDBOX_INSTALL_COMMAND,
|
||||
timeoutSec,
|
||||
});
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
let loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv,
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
});
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(runtimeExecutionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: runtimeExecutionTarget,
|
||||
runtimeRootDir: remoteRuntimeRootDir,
|
||||
adapterKey: "cursor",
|
||||
timeoutSec,
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
@@ -441,7 +477,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const canResumeSession =
|
||||
runtimeSessionId.length > 0 &&
|
||||
(runtimeSessionCwd.length === 0 || path.resolve(runtimeSessionCwd) === path.resolve(effectiveExecutionCwd)) &&
|
||||
adapterExecutionTargetSessionMatches(runtimeRemoteExecution, executionTarget);
|
||||
adapterExecutionTargetSessionMatches(runtimeRemoteExecution, runtimeExecutionTarget);
|
||||
const sessionId = canResumeSession ? runtimeSessionId : null;
|
||||
if (executionTargetIsRemote && runtimeSessionId && !canResumeSession) {
|
||||
await onLog(
|
||||
@@ -481,11 +517,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
notes.push("Auto-added --yolo to bypass interactive prompts.");
|
||||
}
|
||||
notes.push("Prompt is piped to Cursor via stdin.");
|
||||
if (sandboxCommand.addedPathEntry) {
|
||||
const sandboxCommand = finalSandboxCommand ?? initialSandboxCommand;
|
||||
if (sandboxCommand?.addedPathEntry) {
|
||||
notes.push(`Remote sandbox runs prepend ${sandboxCommand.addedPathEntry} to PATH.`);
|
||||
}
|
||||
if (sandboxCommand.preferredCommandPath) {
|
||||
notes.push(`Remote sandbox runs prefer ${sandboxCommand.preferredCommandPath} when using the default Cursor entrypoint.`);
|
||||
if (sandboxCommand?.preferredCommandPath) {
|
||||
notes.push(
|
||||
`Remote sandbox runs prefer ${sandboxCommand.preferredCommandPath} when using the default Cursor entrypoint.`,
|
||||
);
|
||||
}
|
||||
if (!instructionsFilePath) return notes;
|
||||
if (instructionsPrefix.length > 0) {
|
||||
@@ -588,7 +627,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}
|
||||
};
|
||||
|
||||
const proc = await runAdapterExecutionTargetProcess(runId, executionTarget, command, args, {
|
||||
const proc = await runAdapterExecutionTargetProcess(runId, runtimeExecutionTarget, command, args, {
|
||||
cwd,
|
||||
env,
|
||||
timeoutSec,
|
||||
@@ -646,7 +685,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
...(workspaceRepoRef ? { repoRef: workspaceRepoRef } : {}),
|
||||
...(executionTargetIsRemote
|
||||
? {
|
||||
remoteExecution: adapterExecutionTargetSessionIdentity(executionTarget),
|
||||
remoteExecution: adapterExecutionTargetSessionIdentity(runtimeExecutionTarget),
|
||||
}
|
||||
: {}),
|
||||
} as Record<string, unknown>)
|
||||
|
||||
137
packages/adapters/cursor-local/src/server/remote-command.test.ts
Normal file
@@ -0,0 +1,137 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { runChildProcess } from "@paperclipai/adapter-utils/server-utils";
|
||||
import { prepareCursorSandboxCommand } from "./remote-command.js";
|
||||
|
||||
function createLocalSandboxRunner() {
|
||||
let counter = 0;
|
||||
return {
|
||||
execute: async (input: {
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
onSpawn?: (meta: { pid: number; startedAt: string }) => Promise<void>;
|
||||
}) => {
|
||||
counter += 1;
|
||||
return await runChildProcess(`cursor-remote-command-${counter}`, input.command, input.args ?? [], {
|
||||
cwd: input.cwd ?? process.cwd(),
|
||||
env: input.env ?? {},
|
||||
stdin: input.stdin,
|
||||
timeoutSec: Math.max(1, Math.ceil((input.timeoutMs ?? 30_000) / 1000)),
|
||||
graceSec: 5,
|
||||
onLog: input.onLog ?? (async () => {}),
|
||||
onSpawn: input.onSpawn
|
||||
? async (meta) => input.onSpawn?.({ pid: meta.pid, startedAt: meta.startedAt })
|
||||
: undefined,
|
||||
});
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function writeFakeAgent(commandPath: string): Promise<void> {
|
||||
const script = `#!/bin/sh
|
||||
printf '%s\\n' ok
|
||||
`;
|
||||
await fs.mkdir(path.dirname(commandPath), { recursive: true });
|
||||
await fs.writeFile(commandPath, script, "utf8");
|
||||
await fs.chmod(commandPath, 0o755);
|
||||
}
|
||||
|
||||
describe("prepareCursorSandboxCommand", () => {
|
||||
it("prefers the Cursor installer bin directory when the default agent entrypoint is installed there", async () => {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-cursor-remote-command-cursor-bin-"));
|
||||
const systemHomeDir = path.join(root, "system-home");
|
||||
const managedHomeDir = path.join(root, "managed-home");
|
||||
const remoteWorkspace = path.join(root, "workspace");
|
||||
const cursorAgentPath = path.join(systemHomeDir, ".cursor", "bin", "agent");
|
||||
await fs.mkdir(remoteWorkspace, { recursive: true });
|
||||
await writeFakeAgent(cursorAgentPath);
|
||||
|
||||
try {
|
||||
const result = await prepareCursorSandboxCommand({
|
||||
runId: "run-remote-command-cursor-bin",
|
||||
target: {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
shellCommand: "bash",
|
||||
remoteCwd: remoteWorkspace,
|
||||
runner: createLocalSandboxRunner(),
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
command: "agent",
|
||||
cwd: remoteWorkspace,
|
||||
env: {
|
||||
HOME: managedHomeDir,
|
||||
PATH: "/usr/bin:/bin",
|
||||
},
|
||||
remoteSystemHomeDirHint: systemHomeDir,
|
||||
timeoutSec: 30,
|
||||
graceSec: 5,
|
||||
});
|
||||
|
||||
expect(result.command).toBe(cursorAgentPath);
|
||||
expect(result.preferredCommandPath).toBe(cursorAgentPath);
|
||||
expect(result.remoteSystemHomeDir).toBe(systemHomeDir);
|
||||
expect(result.addedPathEntry).toBe(path.join(systemHomeDir, ".local", "bin"));
|
||||
expect(result.env.PATH?.split(":").slice(0, 2)).toEqual([
|
||||
path.join(systemHomeDir, ".local", "bin"),
|
||||
path.join(systemHomeDir, ".cursor", "bin"),
|
||||
]);
|
||||
expect(result.env.PATH).not.toContain(path.join(managedHomeDir, ".cursor", "bin"));
|
||||
expect(result.env.PATH).not.toContain(path.join(managedHomeDir, ".local", "bin"));
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("keeps probing the original sandbox home after managed HOME overrides", async () => {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-cursor-remote-command-"));
|
||||
const systemHomeDir = path.join(root, "system-home");
|
||||
const managedHomeDir = path.join(root, "managed-home");
|
||||
const remoteWorkspace = path.join(root, "workspace");
|
||||
const systemAgentPath = path.join(systemHomeDir, ".local", "bin", "agent");
|
||||
await fs.mkdir(remoteWorkspace, { recursive: true });
|
||||
await writeFakeAgent(systemAgentPath);
|
||||
|
||||
try {
|
||||
const result = await prepareCursorSandboxCommand({
|
||||
runId: "run-remote-command-1",
|
||||
target: {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
shellCommand: "bash",
|
||||
remoteCwd: remoteWorkspace,
|
||||
runner: createLocalSandboxRunner(),
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
command: "agent",
|
||||
cwd: remoteWorkspace,
|
||||
env: {
|
||||
HOME: managedHomeDir,
|
||||
PATH: "/usr/bin:/bin",
|
||||
},
|
||||
remoteSystemHomeDirHint: systemHomeDir,
|
||||
timeoutSec: 30,
|
||||
graceSec: 5,
|
||||
});
|
||||
|
||||
expect(result.command).toBe(systemAgentPath);
|
||||
expect(result.preferredCommandPath).toBe(systemAgentPath);
|
||||
expect(result.remoteSystemHomeDir).toBe(systemHomeDir);
|
||||
expect(result.addedPathEntry).toBe(path.join(systemHomeDir, ".local", "bin"));
|
||||
expect(result.env.PATH?.split(":").slice(0, 2)).toEqual([
|
||||
path.join(systemHomeDir, ".local", "bin"),
|
||||
path.join(systemHomeDir, ".cursor", "bin"),
|
||||
]);
|
||||
expect(result.env.PATH).not.toContain(path.join(managedHomeDir, ".local", "bin"));
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -6,6 +6,14 @@ import {
|
||||
import { ensurePathInEnv } from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
const DEFAULT_CURSOR_COMMAND_BASENAMES = new Set(["agent", "cursor-agent"]);
|
||||
// `.local/bin` first because the official Cursor Agent installer drops the
|
||||
// binary there; `.cursor/bin` is a secondary location used by some older
|
||||
// installs. The order also defines the prepended `PATH` order surfaced to the
|
||||
// adapter.
|
||||
const CURSOR_SANDBOX_BIN_DIRS = [
|
||||
path.posix.join(".local", "bin"),
|
||||
path.posix.join(".cursor", "bin"),
|
||||
];
|
||||
|
||||
function commandBasename(command: string): string {
|
||||
return command.trim().split(/[\\/]/).pop()?.toLowerCase() ?? "";
|
||||
@@ -22,6 +30,32 @@ function prependPosixPathEntry(pathValue: string, entry: string): string {
|
||||
return cleaned.length > 0 ? `${entry}:${cleaned}` : entry;
|
||||
}
|
||||
|
||||
function prependPosixPathEntries(pathValue: string, entries: string[]): string {
|
||||
return entries.reduceRight((value, entry) => prependPosixPathEntry(value, entry), pathValue);
|
||||
}
|
||||
|
||||
function preferredSandboxCommandBasenames(command: string): string[] {
|
||||
const basename = commandBasename(command);
|
||||
if (!DEFAULT_CURSOR_COMMAND_BASENAMES.has(basename)) return [];
|
||||
return basename === "cursor-agent"
|
||||
? ["cursor-agent", "agent"]
|
||||
: ["agent", "cursor-agent"];
|
||||
}
|
||||
|
||||
function candidateSandboxCommandPaths(homeDir: string, basenames: string[]): string[] {
|
||||
// Iterate dirs first, then basenames within each dir, so directory
|
||||
// preference (CURSOR_SANDBOX_BIN_DIRS order) wins over basename
|
||||
// preference. Both basenames inside `.local/bin` are checked before
|
||||
// falling through to `.cursor/bin`.
|
||||
return CURSOR_SANDBOX_BIN_DIRS.flatMap((relativeDir) =>
|
||||
basenames.map((basename) => path.posix.join(homeDir, relativeDir, basename))
|
||||
);
|
||||
}
|
||||
|
||||
function candidateSandboxPathEntries(homeDir: string): string[] {
|
||||
return CURSOR_SANDBOX_BIN_DIRS.map((relativeDir) => path.posix.join(homeDir, relativeDir));
|
||||
}
|
||||
|
||||
type SandboxCursorRuntimeInfo = {
|
||||
remoteSystemHomeDir: string | null;
|
||||
preferredCommandPath: string | null;
|
||||
@@ -40,20 +74,60 @@ async function readSandboxCursorRuntimeInfo(input: {
|
||||
command: string;
|
||||
cwd: string;
|
||||
env: Record<string, string>;
|
||||
remoteSystemHomeDirHint?: string | null;
|
||||
timeoutSec: number;
|
||||
graceSec: number;
|
||||
}): Promise<SandboxCursorRuntimeInfo> {
|
||||
const shouldCheckPreferredCommand = isDefaultCursorCommand(input.command) && !hasPathSeparator(input.command);
|
||||
const preferredBasenames =
|
||||
!hasPathSeparator(input.command)
|
||||
? preferredSandboxCommandBasenames(input.command)
|
||||
: [];
|
||||
const hintedRemoteSystemHomeDir = input.remoteSystemHomeDirHint?.trim() || null;
|
||||
const homeMarker = "__PAPERCLIP_CURSOR_HOME__:";
|
||||
const preferredMarker = "__PAPERCLIP_CURSOR_AGENT__:";
|
||||
try {
|
||||
// When the caller has already resolved the remote `$HOME`, probe absolute
|
||||
// paths so the shell doesn't depend on its own environment to interpret
|
||||
// `$HOME`. Without a hint we still probe `$HOME/...` literally — this is
|
||||
// how the sandbox finds a user-prefixed install before falling back to a
|
||||
// PATH lookup. Skipping the `$HOME` probes here was the regression behind
|
||||
// server tests `cursor-local-adapter-environment.test.ts` and
|
||||
// `cursor-local-execute.test.ts` failing on a host whose own `agent`
|
||||
// command resolves via PATH.
|
||||
const fixedCandidatePaths =
|
||||
preferredBasenames.length > 0
|
||||
? hintedRemoteSystemHomeDir
|
||||
? candidateSandboxCommandPaths(hintedRemoteSystemHomeDir, preferredBasenames)
|
||||
: preferredBasenames.flatMap((basename) =>
|
||||
CURSOR_SANDBOX_BIN_DIRS.map((relativeDir) =>
|
||||
`$HOME/${relativeDir}/${basename}`,
|
||||
),
|
||||
)
|
||||
: [];
|
||||
const preferredProbeBranches = [
|
||||
...fixedCandidatePaths.map(
|
||||
(fixedPath) =>
|
||||
`[ -x ${JSON.stringify(fixedPath)} ] && printf ${JSON.stringify(`${preferredMarker}%s\\n`)} ${JSON.stringify(fixedPath)}`,
|
||||
),
|
||||
...preferredBasenames.map(
|
||||
(basename) =>
|
||||
`resolved="$(command -v ${JSON.stringify(basename)} 2>/dev/null)" && [ -n "$resolved" ] && printf ${JSON.stringify(`${preferredMarker}%s\\n`)} "$resolved"`,
|
||||
),
|
||||
];
|
||||
const result = await runAdapterExecutionTargetShellCommand(
|
||||
input.runId,
|
||||
input.target,
|
||||
[
|
||||
`printf ${JSON.stringify(`${homeMarker}%s\\n`)} "$HOME"`,
|
||||
shouldCheckPreferredCommand
|
||||
? `if [ -x "$HOME/.local/bin/cursor-agent" ]; then printf ${JSON.stringify(`${preferredMarker}%s\\n`)} "$HOME/.local/bin/cursor-agent"; fi`
|
||||
hintedRemoteSystemHomeDir
|
||||
? `printf ${JSON.stringify(`${homeMarker}%s\\n`)} ${JSON.stringify(hintedRemoteSystemHomeDir)}`
|
||||
: `printf ${JSON.stringify(`${homeMarker}%s\\n`)} "$HOME"`,
|
||||
preferredProbeBranches.length > 0
|
||||
? preferredProbeBranches
|
||||
.map((probeBranch, index) => {
|
||||
const branchKeyword = index === 0 ? "if" : "elif";
|
||||
return `${branchKeyword} ${probeBranch}; then :`;
|
||||
})
|
||||
.join("; ") + "; fi; :"
|
||||
: "",
|
||||
].filter(Boolean).join("; "),
|
||||
{
|
||||
@@ -100,6 +174,7 @@ export async function prepareCursorSandboxCommand(input: {
|
||||
command: string;
|
||||
cwd: string;
|
||||
env: Record<string, string>;
|
||||
remoteSystemHomeDirHint?: string | null;
|
||||
timeoutSec: number;
|
||||
graceSec: number;
|
||||
}): Promise<PreparedCursorSandboxCommand> {
|
||||
@@ -119,10 +194,12 @@ export async function prepareCursorSandboxCommand(input: {
|
||||
command: input.command,
|
||||
cwd: input.cwd,
|
||||
env: input.env,
|
||||
remoteSystemHomeDirHint: input.remoteSystemHomeDirHint,
|
||||
timeoutSec: input.timeoutSec,
|
||||
graceSec: input.graceSec,
|
||||
});
|
||||
const remoteSystemHomeDir = runtimeInfo.remoteSystemHomeDir;
|
||||
const remoteSystemHomeDir =
|
||||
runtimeInfo.remoteSystemHomeDir ?? input.remoteSystemHomeDirHint?.trim() ?? null;
|
||||
|
||||
if (!remoteSystemHomeDir) {
|
||||
return {
|
||||
@@ -134,18 +211,19 @@ export async function prepareCursorSandboxCommand(input: {
|
||||
};
|
||||
}
|
||||
|
||||
const remoteLocalBinDir = path.posix.join(remoteSystemHomeDir, ".local", "bin");
|
||||
const sandboxPathEntries = candidateSandboxPathEntries(remoteSystemHomeDir);
|
||||
const runtimeEnv = ensurePathInEnv(input.env);
|
||||
const currentPath = runtimeEnv.PATH ?? runtimeEnv.Path ?? "";
|
||||
const nextPath = prependPosixPathEntry(currentPath, remoteLocalBinDir);
|
||||
const nextPath = prependPosixPathEntries(currentPath, sandboxPathEntries);
|
||||
const env = nextPath === currentPath ? input.env : { ...input.env, PATH: nextPath };
|
||||
const addedPathEntry = nextPath === currentPath ? null : sandboxPathEntries[0];
|
||||
|
||||
if (!runtimeInfo.preferredCommandPath) {
|
||||
return {
|
||||
command: input.command,
|
||||
env,
|
||||
remoteSystemHomeDir,
|
||||
addedPathEntry: nextPath === currentPath ? null : remoteLocalBinDir,
|
||||
addedPathEntry,
|
||||
preferredCommandPath: null,
|
||||
};
|
||||
}
|
||||
@@ -154,7 +232,7 @@ export async function prepareCursorSandboxCommand(input: {
|
||||
command: runtimeInfo.preferredCommandPath,
|
||||
env,
|
||||
remoteSystemHomeDir,
|
||||
addedPathEntry: nextPath === currentPath ? null : remoteLocalBinDir,
|
||||
addedPathEntry,
|
||||
preferredCommandPath: runtimeInfo.preferredCommandPath,
|
||||
};
|
||||
}
|
||||
|
||||
132
packages/adapters/cursor-local/src/server/test.test.ts
Normal file
@@ -0,0 +1,132 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { runChildProcess } from "@paperclipai/adapter-utils/server-utils";
|
||||
import { SANDBOX_INSTALL_COMMAND } from "../index.js";
|
||||
import { testEnvironment } from "./test.js";
|
||||
|
||||
function buildFakeAgentScript(): string {
|
||||
return `#!/bin/sh
|
||||
if [ "$1" = "--version" ]; then
|
||||
printf '%s\\n' 'Cursor Agent 1.2.3'
|
||||
exit 0
|
||||
fi
|
||||
printf '%s\\n' '{"type":"system","subtype":"init","session_id":"cursor-session-envtest-1","model":"auto"}'
|
||||
printf '%s\\n' '{"type":"assistant","message":{"content":[{"type":"output_text","text":"hello"}]}}'
|
||||
printf '%s\\n' '{"type":"result","subtype":"success","session_id":"cursor-session-envtest-1","result":"ok"}'
|
||||
`;
|
||||
}
|
||||
|
||||
function buildInstallSimulationCommand(commandPath: string): string {
|
||||
return [
|
||||
`mkdir -p ${JSON.stringify(path.dirname(commandPath))}`,
|
||||
`cat > ${JSON.stringify(commandPath)} <<'EOF'`,
|
||||
buildFakeAgentScript(),
|
||||
"EOF",
|
||||
`chmod +x ${JSON.stringify(commandPath)}`,
|
||||
].join("\n");
|
||||
}
|
||||
|
||||
function createSandboxRunner(options: { homeDir: string; installCommandPath: string }) {
|
||||
let counter = 0;
|
||||
const installCommands: string[] = [];
|
||||
const systemPath = "/usr/bin:/bin";
|
||||
return {
|
||||
installCommands,
|
||||
execute: async (input: {
|
||||
command: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
stdin?: string;
|
||||
timeoutMs?: number;
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
onSpawn?: (meta: { pid: number; startedAt: string }) => Promise<void>;
|
||||
}) => {
|
||||
counter += 1;
|
||||
const args = [...(input.args ?? [])];
|
||||
if (args[1] === SANDBOX_INSTALL_COMMAND) {
|
||||
installCommands.push(args[1]);
|
||||
args[1] = buildInstallSimulationCommand(options.installCommandPath);
|
||||
}
|
||||
return await runChildProcess(`cursor-envtest-runner-${counter}`, input.command, args, {
|
||||
cwd: input.cwd ?? process.cwd(),
|
||||
env: {
|
||||
...(input.env ?? {}),
|
||||
HOME: input.env?.HOME ?? options.homeDir,
|
||||
PATH: input.env?.PATH ?? systemPath,
|
||||
},
|
||||
stdin: input.stdin,
|
||||
timeoutSec: Math.max(1, Math.ceil((input.timeoutMs ?? 30_000) / 1000)),
|
||||
graceSec: 5,
|
||||
onLog: input.onLog ?? (async () => {}),
|
||||
onSpawn: input.onSpawn
|
||||
? async (meta) => input.onSpawn?.({ pid: meta.pid, startedAt: meta.startedAt })
|
||||
: undefined,
|
||||
});
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
describe("cursor testEnvironment", () => {
|
||||
it("re-resolves the installed agent under ~/.cursor/bin and verifies --version before the hello probe", async () => {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-cursor-envtest-"));
|
||||
const homeDir = path.join(root, "home");
|
||||
const workspace = path.join(root, "workspace");
|
||||
const remoteWorkspace = path.join(root, "remote-workspace");
|
||||
const agentPath = path.join(homeDir, ".cursor", "bin", "agent");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
await fs.mkdir(remoteWorkspace, { recursive: true });
|
||||
|
||||
const runner = createSandboxRunner({
|
||||
homeDir,
|
||||
installCommandPath: agentPath,
|
||||
});
|
||||
|
||||
try {
|
||||
const result = await testEnvironment({
|
||||
companyId: "company-1",
|
||||
adapterType: "cursor",
|
||||
config: {
|
||||
command: "agent",
|
||||
cwd: workspace,
|
||||
env: {
|
||||
PATH: "/usr/bin:/bin",
|
||||
},
|
||||
},
|
||||
executionTarget: {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
shellCommand: "bash",
|
||||
remoteCwd: remoteWorkspace,
|
||||
runner,
|
||||
timeoutMs: 30_000,
|
||||
},
|
||||
});
|
||||
|
||||
expect(result.status).toBe("pass");
|
||||
expect(runner.installCommands).toEqual([SANDBOX_INSTALL_COMMAND]);
|
||||
expect(result.checks).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
code: "cursor_command_resolvable",
|
||||
level: "info",
|
||||
message: `Command is executable: ${agentPath}`,
|
||||
}),
|
||||
expect.objectContaining({
|
||||
code: "cursor_version_probe_passed",
|
||||
level: "info",
|
||||
detail: "Cursor Agent 1.2.3",
|
||||
}),
|
||||
expect.objectContaining({
|
||||
code: "cursor_hello_probe_passed",
|
||||
level: "info",
|
||||
}),
|
||||
]),
|
||||
);
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -148,7 +148,6 @@ export async function testEnvironment(
|
||||
});
|
||||
command = sandboxCommand.command;
|
||||
env = sandboxCommand.env;
|
||||
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
|
||||
const installCheck = await maybeRunSandboxInstallCommand({
|
||||
runId,
|
||||
target,
|
||||
@@ -158,6 +157,19 @@ export async function testEnvironment(
|
||||
env,
|
||||
});
|
||||
if (installCheck) checks.push(installCheck);
|
||||
const finalSandboxCommand = await prepareCursorSandboxCommand({
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
cwd,
|
||||
env,
|
||||
remoteSystemHomeDirHint: sandboxCommand.remoteSystemHomeDir,
|
||||
timeoutSec: 45,
|
||||
graceSec: 5,
|
||||
});
|
||||
command = finalSandboxCommand.command;
|
||||
env = finalSandboxCommand.env;
|
||||
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
|
||||
try {
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, target, cwd, runtimeEnv);
|
||||
checks.push({
|
||||
@@ -218,6 +230,58 @@ export async function testEnvironment(
|
||||
hint: "Use `agent` or `cursor-agent` to run the automatic installation and auth probe.",
|
||||
});
|
||||
} else {
|
||||
const versionProbe = await runAdapterExecutionTargetProcess(
|
||||
runId,
|
||||
target,
|
||||
command,
|
||||
["--version"],
|
||||
{
|
||||
cwd,
|
||||
env,
|
||||
timeoutSec: 45,
|
||||
graceSec: 5,
|
||||
onLog: async () => {},
|
||||
},
|
||||
);
|
||||
const versionDetail = summarizeProbeDetail(versionProbe.stdout, versionProbe.stderr, null);
|
||||
if (versionProbe.timedOut) {
|
||||
checks.push({
|
||||
code: "cursor_version_probe_timed_out",
|
||||
level: "error",
|
||||
message: "Cursor version probe timed out.",
|
||||
hint: "Run `agent --version` manually in this working directory to confirm the installed CLI is reachable non-interactively.",
|
||||
});
|
||||
} else if ((versionProbe.exitCode ?? 1) === 0) {
|
||||
checks.push({
|
||||
code: "cursor_version_probe_passed",
|
||||
level: "info",
|
||||
message: "Cursor version probe succeeded.",
|
||||
...(versionDetail ? { detail: versionDetail } : {}),
|
||||
});
|
||||
} else {
|
||||
checks.push({
|
||||
code: "cursor_version_probe_failed",
|
||||
level: "error",
|
||||
message: "Cursor version probe failed.",
|
||||
...(versionDetail ? { detail: versionDetail } : {}),
|
||||
hint: "Run `agent --version` manually in this working directory to confirm the installed CLI is reachable non-interactively.",
|
||||
});
|
||||
}
|
||||
|
||||
const canRunHelloProbe = checks.every(
|
||||
(check) =>
|
||||
check.code !== "cursor_version_probe_failed" &&
|
||||
check.code !== "cursor_version_probe_timed_out",
|
||||
);
|
||||
if (!canRunHelloProbe) {
|
||||
return {
|
||||
adapterType: ctx.adapterType,
|
||||
status: summarizeStatus(checks),
|
||||
checks,
|
||||
testedAt: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
const model = asString(config.model, DEFAULT_CURSOR_LOCAL_MODEL).trim();
|
||||
const extraArgs = (() => {
|
||||
const fromExtraArgs = asStringArray(config.extraArgs);
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
import type { AdapterModelProfileDefinition } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
buildSandboxNpmInstallCommand,
|
||||
type AdapterModelProfileDefinition,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
|
||||
export const type = "gemini_local";
|
||||
export const label = "Gemini CLI (local)";
|
||||
|
||||
export const SANDBOX_INSTALL_COMMAND = "npm install -g @google/gemini-cli";
|
||||
export const SANDBOX_INSTALL_COMMAND = buildSandboxNpmInstallCommand("@google/gemini-cli");
|
||||
|
||||
export const DEFAULT_GEMINI_LOCAL_MODEL = "auto";
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ const {
|
||||
})),
|
||||
ensureCommandResolvable: vi.fn(async () => undefined),
|
||||
resolveCommandForLogs: vi.fn(async () => "ssh://fixture@127.0.0.1:2222/remote/workspace :: gemini"),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => undefined),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => ({ gitBacked: false })),
|
||||
restoreWorkspaceFromSshExecution: vi.fn(async () => undefined),
|
||||
runSshCommand: vi.fn(async () => ({
|
||||
stdout: "/home/agent",
|
||||
@@ -105,6 +105,7 @@ describe("gemini remote execution", () => {
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const alternateWorkspaceDir = path.join(rootDir, "workspace-other");
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-1/workspace";
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(alternateWorkspaceDir, { recursive: true });
|
||||
|
||||
@@ -163,19 +164,19 @@ describe("gemini remote execution", () => {
|
||||
|
||||
expect(result.sessionParams).toMatchObject({
|
||||
sessionId: "gemini-session-1",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
remoteExecution: {
|
||||
transport: "ssh",
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
remoteCwd: managedRemoteWorkspace,
|
||||
},
|
||||
});
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledTimes(1);
|
||||
expect(syncDirectoryToSsh).toHaveBeenCalledTimes(1);
|
||||
expect(syncDirectoryToSsh).toHaveBeenCalledWith(expect.objectContaining({
|
||||
remoteDir: "/remote/workspace/.paperclip-runtime/gemini/skills",
|
||||
remoteDir: `${managedRemoteWorkspace}/.paperclip-runtime/gemini/skills`,
|
||||
followSymlinks: true,
|
||||
}));
|
||||
expect(runSshCommand).toHaveBeenCalledWith(
|
||||
@@ -186,11 +187,11 @@ describe("gemini remote execution", () => {
|
||||
const call = runChildProcess.mock.calls[0] as unknown as
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe(managedRemoteWorkspace);
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
@@ -202,7 +203,8 @@ describe("gemini remote execution", () => {
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(call?.[3].env.GEMINI_CLI_TRUST_WORKSPACE).toBe("true");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe(managedRemoteWorkspace);
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
@@ -211,6 +213,7 @@ describe("gemini remote execution", () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-gemini-remote-resume-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const managedRemoteWorkspace = "/remote/workspace/.paperclip-runtime/runs/run-ssh-resume/workspace";
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
|
||||
await execute({
|
||||
@@ -226,13 +229,13 @@ describe("gemini remote execution", () => {
|
||||
sessionId: "session-123",
|
||||
sessionParams: {
|
||||
sessionId: "session-123",
|
||||
cwd: "/remote/workspace",
|
||||
cwd: managedRemoteWorkspace,
|
||||
remoteExecution: {
|
||||
transport: "ssh",
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
remoteCwd: managedRemoteWorkspace,
|
||||
},
|
||||
},
|
||||
sessionDisplayId: "session-123",
|
||||
|
||||
@@ -7,6 +7,7 @@ import type { AdapterExecutionContext, AdapterExecutionResult } from "@paperclip
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
overrideAdapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
@@ -17,6 +18,7 @@ import {
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
readAdapterExecutionTargetHomeDir,
|
||||
resolveAdapterExecutionTargetTimeoutSec,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
@@ -27,13 +29,13 @@ import {
|
||||
asNumber,
|
||||
asString,
|
||||
asStringArray,
|
||||
applyPaperclipWorkspaceEnv,
|
||||
buildPaperclipEnv,
|
||||
buildInvocationEnvForLogs,
|
||||
ensureAbsoluteDirectory,
|
||||
ensurePaperclipSkillSymlink,
|
||||
joinPromptSections,
|
||||
ensurePathInEnv,
|
||||
refreshPaperclipWorkspaceEnvForExecution,
|
||||
readPaperclipRuntimeSkillEntries,
|
||||
readPaperclipIssueWorkModeFromContext,
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
@@ -41,7 +43,6 @@ import {
|
||||
parseObject,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
runChildProcess,
|
||||
@@ -202,13 +203,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const useConfiguredInsteadOfAgentHome = workspaceSource === "agent_home" && configuredCwd.length > 0;
|
||||
const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
|
||||
const cwd = effectiveWorkspaceCwd || configuredCwd || process.cwd();
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceHints,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
let effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
const geminiSkillEntries = await readPaperclipRuntimeSkillEntries(config, __moduleDir);
|
||||
const desiredGeminiSkillNames = resolvePaperclipDesiredSkillNames(config, geminiSkillEntries);
|
||||
@@ -254,19 +249,21 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
refreshPaperclipWorkspaceEnvForExecution({
|
||||
env,
|
||||
envConfig,
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceHints,
|
||||
agentHome,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
for (const [key, value] of Object.entries(envConfig)) {
|
||||
if (typeof value === "string") env[key] = value;
|
||||
if (executionTargetIsRemote && typeof env.GEMINI_CLI_TRUST_WORKSPACE !== "string") {
|
||||
env.GEMINI_CLI_TRUST_WORKSPACE = "true";
|
||||
}
|
||||
if (!hasExplicitApiKey && authToken) {
|
||||
env.PAPERCLIP_API_KEY = authToken;
|
||||
@@ -282,7 +279,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
);
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const timeoutSec = resolveAdapterExecutionTargetTimeoutSec(
|
||||
executionTarget,
|
||||
asNumber(config.timeoutSec, 0),
|
||||
);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId,
|
||||
@@ -295,7 +295,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
graceSec,
|
||||
onLog,
|
||||
});
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv, { installCommand: SANDBOX_INSTALL_COMMAND });
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv, {
|
||||
installCommand: SANDBOX_INSTALL_COMMAND,
|
||||
timeoutSec,
|
||||
});
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
let loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
runtimeEnv,
|
||||
@@ -322,8 +325,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
`[paperclip] Syncing workspace and Gemini runtime assets to ${describeAdapterExecutionTarget(executionTarget)}.\n`,
|
||||
);
|
||||
const preparedExecutionTargetRuntime = await prepareAdapterExecutionTargetRuntime({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
adapterKey: "gemini",
|
||||
timeoutSec,
|
||||
workspaceLocalDir: cwd,
|
||||
installCommand: SANDBOX_INSTALL_COMMAND,
|
||||
detectCommand: command,
|
||||
@@ -334,6 +339,20 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}],
|
||||
});
|
||||
restoreRemoteWorkspace = () => preparedExecutionTargetRuntime.restoreWorkspace();
|
||||
effectiveExecutionCwd = preparedExecutionTargetRuntime.workspaceRemoteDir ?? effectiveExecutionCwd;
|
||||
refreshPaperclipWorkspaceEnvForExecution({
|
||||
env,
|
||||
envConfig,
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceHints,
|
||||
agentHome,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
remoteRuntimeRootDir = preparedExecutionTargetRuntime.runtimeRootDir;
|
||||
const managedHome = adapterExecutionTargetUsesManagedHome(executionTarget);
|
||||
if (managedHome && preparedExecutionTargetRuntime.runtimeRootDir) {
|
||||
@@ -365,12 +384,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
const runtimeExecutionTarget = overrideAdapterExecutionTargetRemoteCwd(executionTarget, effectiveExecutionCwd);
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
target: runtimeExecutionTarget,
|
||||
runtimeRootDir: remoteRuntimeRootDir,
|
||||
adapterKey: "gemini",
|
||||
timeoutSec,
|
||||
hostApiToken: env.PAPERCLIP_API_KEY,
|
||||
onLog,
|
||||
});
|
||||
@@ -391,7 +412,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const canResumeSession =
|
||||
runtimeSessionId.length > 0 &&
|
||||
(runtimeSessionCwd.length === 0 || path.resolve(runtimeSessionCwd) === path.resolve(effectiveExecutionCwd)) &&
|
||||
adapterExecutionTargetSessionMatches(runtimeRemoteExecution, executionTarget);
|
||||
adapterExecutionTargetSessionMatches(runtimeRemoteExecution, runtimeExecutionTarget);
|
||||
const sessionId = canResumeSession ? runtimeSessionId : null;
|
||||
if (executionTargetIsRemote && runtimeSessionId && !canResumeSession) {
|
||||
await onLog(
|
||||
@@ -426,6 +447,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const commandNotes = (() => {
|
||||
const notes: string[] = ["Prompt is passed to Gemini via --prompt for non-interactive execution."];
|
||||
notes.push("Added --approval-mode yolo for unattended execution.");
|
||||
if (executionTargetIsRemote) {
|
||||
notes.push("Set GEMINI_CLI_TRUST_WORKSPACE=true for remote headless execution.");
|
||||
}
|
||||
if (!instructionsFilePath) return notes;
|
||||
if (instructionsPrefix.length > 0) {
|
||||
notes.push(
|
||||
@@ -512,7 +536,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
});
|
||||
}
|
||||
|
||||
const proc = await runAdapterExecutionTargetProcess(runId, executionTarget, command, args, {
|
||||
const proc = await runAdapterExecutionTargetProcess(runId, runtimeExecutionTarget, command, args, {
|
||||
cwd,
|
||||
env,
|
||||
timeoutSec,
|
||||
@@ -586,7 +610,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
...(workspaceRepoRef ? { repoRef: workspaceRepoRef } : {}),
|
||||
...(executionTargetIsRemote
|
||||
? {
|
||||
remoteExecution: adapterExecutionTargetSessionIdentity(executionTarget),
|
||||
remoteExecution: adapterExecutionTargetSessionIdentity(runtimeExecutionTarget),
|
||||
}
|
||||
: {}),
|
||||
} as Record<string, unknown>)
|
||||
|
||||