Compare commits
44 Commits
codex/pap-
...
pap-3598/o
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bab5136645 | ||
|
|
09ed4e54cb | ||
|
|
783f4d2f28 | ||
|
|
433326ffcb | ||
|
|
3c73ed26b5 | ||
|
|
d6bee62f02 | ||
|
|
edbb670c3b | ||
|
|
fd10404374 | ||
|
|
47920f9c47 | ||
|
|
e01ffc18d3 | ||
|
|
ae23e02526 | ||
|
|
29401b231b | ||
|
|
a5430f010d | ||
|
|
6c090f84a9 | ||
|
|
90631b09b3 | ||
|
|
2dce81fbf6 | ||
|
|
0e51fa2b0d | ||
|
|
09eceb952a | ||
|
|
d22e790bd4 | ||
|
|
856c6cb192 | ||
|
|
bb7d040894 | ||
|
|
076067865f | ||
|
|
a7b45938b7 | ||
|
|
15eac43b43 | ||
|
|
57229d0f24 | ||
|
|
76f09c8eb6 | ||
|
|
685ee84e4a | ||
|
|
d7719423e9 | ||
|
|
fe401b7fa9 | ||
|
|
2d72292ad6 | ||
|
|
570a4206da | ||
|
|
3cd26a78fc | ||
|
|
e8275318ba | ||
|
|
e273d621fc | ||
|
|
42a299fb9d | ||
|
|
d2dd759caa | ||
|
|
b02e67cea5 | ||
|
|
6a7cca95ef | ||
|
|
4272c1604d | ||
|
|
ad5432fece | ||
|
|
a3de1d764d | ||
|
|
1fe1067361 | ||
|
|
c4269bab59 | ||
|
|
87f19cd9a6 |
2
.github/workflows/docker.yml
vendored
@@ -14,7 +14,7 @@ permissions:
|
||||
jobs:
|
||||
build-and-push:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
timeout-minutes: 60
|
||||
concurrency:
|
||||
group: docker-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
97
.github/workflows/pr.yml
vendored
@@ -23,7 +23,9 @@ jobs:
|
||||
- name: Block manual lockfile edits
|
||||
if: github.head_ref != 'chore/refresh-lockfile'
|
||||
run: |
|
||||
changed="$(git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}")"
|
||||
# Diff the PR branch against its merge base so recent base-branch commits
|
||||
# do not masquerade as changes made by the PR itself.
|
||||
changed="$(git diff --name-only "${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }}")"
|
||||
if printf '%s\n' "$changed" | grep -qx 'pnpm-lock.yaml'; then
|
||||
echo "Do not commit pnpm-lock.yaml in pull requests. CI owns lockfile updates."
|
||||
exit 1
|
||||
@@ -43,9 +45,18 @@ jobs:
|
||||
- name: Validate Dockerfile deps stage
|
||||
run: node ./scripts/check-docker-deps-stage.mjs
|
||||
|
||||
- name: Validate release package manifest
|
||||
run: node ./scripts/release-package-map.mjs check
|
||||
|
||||
- name: Verify release package bootstrap for changed manifests
|
||||
run: |
|
||||
mapfile -t changed_paths < <(git diff --name-only "${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }}")
|
||||
PAPERCLIP_RELEASE_BOOTSTRAP_BASE_SHA="${{ github.event.pull_request.base.sha }}" \
|
||||
node ./scripts/check-release-package-bootstrap.mjs "${changed_paths[@]}"
|
||||
|
||||
- name: Validate dependency resolution when manifests change
|
||||
run: |
|
||||
changed="$(git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}")"
|
||||
changed="$(git diff --name-only "${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }}")"
|
||||
manifest_pattern='(^|/)package\.json$|^pnpm-workspace\.yaml$|^\.npmrc$|^pnpmfile\.(cjs|js|mjs)$'
|
||||
if printf '%s\n' "$changed" | grep -Eq "$manifest_pattern"; then
|
||||
pnpm install --lockfile-only --ignore-scripts --no-frozen-lockfile
|
||||
@@ -74,11 +85,11 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Typecheck
|
||||
run: pnpm -r typecheck
|
||||
- name: Typecheck workspaces whose build scripts skip TypeScript
|
||||
run: pnpm run typecheck:build-gaps
|
||||
|
||||
- name: Run tests
|
||||
run: pnpm test:run
|
||||
- name: Run general test suites
|
||||
run: pnpm test:run:general
|
||||
|
||||
- name: Verify release registry test coverage
|
||||
run: pnpm run test:release-registry
|
||||
@@ -86,7 +97,76 @@ jobs:
|
||||
- name: Build
|
||||
run: pnpm build
|
||||
|
||||
- name: Release canary dry run
|
||||
verify_serialized_server:
|
||||
name: Verify serialized server suites (${{ matrix.shard_label }})
|
||||
needs: [policy]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- shard_index: 0
|
||||
shard_count: 4
|
||||
shard_label: 1/4
|
||||
- shard_index: 1
|
||||
shard_count: 4
|
||||
shard_label: 2/4
|
||||
- shard_index: 2
|
||||
shard_count: 4
|
||||
shard_label: 3/4
|
||||
- shard_index: 3
|
||||
shard_count: 4
|
||||
shard_label: 4/4
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9.15.4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 24
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run serialized server test shard
|
||||
run: pnpm test:run:serialized -- --shard-index ${{ matrix.shard_index }} --shard-count ${{ matrix.shard_count }}
|
||||
|
||||
canary_dry_run:
|
||||
name: Canary Dry Run
|
||||
needs: [policy]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9.15.4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 24
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
# `release.sh` always executes its Step 2/7 workspace build, even when
|
||||
# `--skip-verify` bypasses the initial verification gate.
|
||||
- name: Release canary dry run via release.sh internal build
|
||||
run: |
|
||||
git checkout -B master HEAD
|
||||
git checkout -- pnpm-lock.yaml
|
||||
@@ -115,9 +195,6 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Build
|
||||
run: pnpm build
|
||||
|
||||
- name: Install Playwright
|
||||
run: npx playwright install --with-deps chromium
|
||||
|
||||
|
||||
12
.github/workflows/release.yml
vendored
@@ -50,6 +50,9 @@ jobs:
|
||||
node-version: 24
|
||||
cache: pnpm
|
||||
|
||||
- name: Validate release package manifest
|
||||
run: node ./scripts/release-package-map.mjs check
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
|
||||
@@ -89,6 +92,9 @@ jobs:
|
||||
node-version: 24
|
||||
cache: pnpm
|
||||
|
||||
- name: Validate release package manifest
|
||||
run: node ./scripts/release-package-map.mjs check
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
|
||||
@@ -139,6 +145,9 @@ jobs:
|
||||
node-version: 24
|
||||
cache: pnpm
|
||||
|
||||
- name: Validate release package manifest
|
||||
run: node ./scripts/release-package-map.mjs check
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
|
||||
@@ -177,6 +186,9 @@ jobs:
|
||||
node-version: 24
|
||||
cache: pnpm
|
||||
|
||||
- name: Validate release package manifest
|
||||
run: node ./scripts/release-package-map.mjs check
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ COPY packages/shared/package.json packages/shared/
|
||||
COPY packages/db/package.json packages/db/
|
||||
COPY packages/adapter-utils/package.json packages/adapter-utils/
|
||||
COPY packages/mcp-server/package.json packages/mcp-server/
|
||||
COPY packages/adapters/acpx-local/package.json packages/adapters/acpx-local/
|
||||
COPY packages/adapters/claude-local/package.json packages/adapters/claude-local/
|
||||
COPY packages/adapters/codex-local/package.json packages/adapters/codex-local/
|
||||
COPY packages/adapters/cursor-local/package.json packages/adapters/cursor-local/
|
||||
|
||||
@@ -37,6 +37,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@clack/prompts": "^0.10.0",
|
||||
"@paperclipai/adapter-acpx-local": "workspace:*",
|
||||
"@paperclipai/adapter-claude-local": "workspace:*",
|
||||
"@paperclipai/adapter-codex-local": "workspace:*",
|
||||
"@paperclipai/adapter-cursor-local": "workspace:*",
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import type { CLIAdapterModule } from "@paperclipai/adapter-utils";
|
||||
import { printAcpxStreamEvent } from "@paperclipai/adapter-acpx-local/cli";
|
||||
import { printClaudeStreamEvent } from "@paperclipai/adapter-claude-local/cli";
|
||||
import { printCodexStreamEvent } from "@paperclipai/adapter-codex-local/cli";
|
||||
import { printCursorStreamEvent } from "@paperclipai/adapter-cursor-local/cli";
|
||||
@@ -14,6 +15,11 @@ const claudeLocalCLIAdapter: CLIAdapterModule = {
|
||||
formatStdoutEvent: printClaudeStreamEvent,
|
||||
};
|
||||
|
||||
const acpxLocalCLIAdapter: CLIAdapterModule = {
|
||||
type: "acpx_local",
|
||||
formatStdoutEvent: printAcpxStreamEvent,
|
||||
};
|
||||
|
||||
const codexLocalCLIAdapter: CLIAdapterModule = {
|
||||
type: "codex_local",
|
||||
formatStdoutEvent: printCodexStreamEvent,
|
||||
@@ -46,6 +52,7 @@ const openclawGatewayCLIAdapter: CLIAdapterModule = {
|
||||
|
||||
const adaptersByType = new Map<string, CLIAdapterModule>(
|
||||
[
|
||||
acpxLocalCLIAdapter,
|
||||
claudeLocalCLIAdapter,
|
||||
codexLocalCLIAdapter,
|
||||
openCodeLocalCLIAdapter,
|
||||
|
||||
@@ -176,6 +176,58 @@ That means:
|
||||
|
||||
See [doc/RELEASE-AUTOMATION-SETUP.md](RELEASE-AUTOMATION-SETUP.md) for the GitHub/npm setup steps.
|
||||
|
||||
## Release enrollment for new public packages
|
||||
|
||||
Paperclip does not auto-publish every non-private workspace package anymore.
|
||||
CI publishing is controlled by [`scripts/release-package-manifest.json`](../scripts/release-package-manifest.json).
|
||||
|
||||
When you add a new public package:
|
||||
|
||||
1. add it to the manifest and decide whether CI should publish it immediately
|
||||
2. if CI should publish it, bootstrap the package on npm before merge
|
||||
3. if CI should not publish it yet, keep `"publishFromCi": false`
|
||||
4. only enable `"publishFromCi": true` after npm trusted publishing is configured for that package
|
||||
|
||||
PR CI now checks changed release-enabled package manifests against npm. That catches a missing first-publish bootstrap before the change reaches `master`.
|
||||
|
||||
### One-time bootstrap sequence for a new package
|
||||
|
||||
The first publish of a brand-new package still needs one human maintainer with npm write access.
|
||||
After that, trusted publishing can take over.
|
||||
|
||||
Example for `@paperclipai/adapter-acpx-local` from the repo root:
|
||||
|
||||
```bash
|
||||
# safe preview
|
||||
pnpm run release:bootstrap-package -- @paperclipai/adapter-acpx-local
|
||||
|
||||
# one-time first publish from an authenticated maintainer machine
|
||||
pnpm run release:bootstrap-package -- @paperclipai/adapter-acpx-local --publish --otp 123456
|
||||
```
|
||||
|
||||
The helper script:
|
||||
|
||||
- checks that the package does not already exist on npm
|
||||
- builds the target package unless `--skip-build` is passed
|
||||
- runs `npm pack --dry-run` in the package directory
|
||||
- only runs the real `npm publish --access public` when `--publish --otp <code>` is provided
|
||||
|
||||
For the real `--publish` step, the maintainer machine must already be authenticated to npm.
|
||||
If `npm whoami` returns `401`, first run `npm logout --registry=https://registry.npmjs.org/` to clear any stale local auth, then run `npm login` or `npm adduser` locally as an npm org member, and finally rerun the helper.
|
||||
That local human auth is fine for the one-time bootstrap publish; we just do not want the same auth model inside CI.
|
||||
The helper now requires `--otp <code>` up front for `--publish`, so it fails before the real publish attempt if the one-time password is missing.
|
||||
|
||||
After that first publish succeeds:
|
||||
|
||||
1. open `https://www.npmjs.com/package/@paperclipai/adapter-acpx-local`
|
||||
2. go to `Settings` → `Trusted publishing`
|
||||
3. add repository `paperclipai/paperclip`
|
||||
4. set workflow filename to `release.yml`
|
||||
5. optionally go to `Settings` → `Publishing access` and enable `Require two-factor authentication and disallow tokens`
|
||||
6. keep `publishFromCi: true` in [`scripts/release-package-manifest.json`](../scripts/release-package-manifest.json)
|
||||
|
||||
Once those steps are done, future canary and stable publishes for that package are automated through GitHub OIDC. The manual step is only the first package creation on npm.
|
||||
|
||||
## Rollback model
|
||||
|
||||
Rollback does not unpublish anything.
|
||||
|
||||
@@ -67,6 +67,27 @@ Why:
|
||||
- the single `release.yml` workflow handles both canary and stable publishing
|
||||
- GitHub environments `npm-canary` and `npm-stable` still enforce different approval rules on the GitHub side
|
||||
|
||||
### 2.2.1. Newly added public packages need a bootstrap phase
|
||||
|
||||
Trusted publishing is configured on the npm package itself, not at the repo scope.
|
||||
That means a brand-new public package must not be auto-enrolled into CI publishing until its npm package exists and its trusted publisher has been configured.
|
||||
|
||||
Repo policy:
|
||||
|
||||
1. add every non-private package to [`scripts/release-package-manifest.json`](../scripts/release-package-manifest.json)
|
||||
2. set `"publishFromCi": true` only when CI is expected to publish that package
|
||||
3. if the package is not ready for CI publishing yet, keep `"publishFromCi": false`
|
||||
4. complete the package bootstrap before merging any PR that changes a release-enabled new package
|
||||
|
||||
Bootstrap sequence for a new package:
|
||||
|
||||
1. publish the package once from a trusted maintainer machine using normal npm auth
|
||||
2. open that package on npm and add the `paperclipai/paperclip` trusted publisher for `.github/workflows/release.yml`
|
||||
3. rerun or dry-run the release flow as needed to confirm CI publishing now works
|
||||
4. only then enable `"publishFromCi": true`
|
||||
|
||||
PR CI enforces this by checking changed release-enabled package manifests against npm. That keeps `master` canary publishing healthy while preserving the no-long-lived-token model for normal CI releases.
|
||||
|
||||
### 2.3. Verify trusted publishing before removing old auth
|
||||
|
||||
After the workflows are live:
|
||||
|
||||
@@ -150,7 +150,7 @@ Invariant: every business record belongs to exactly one company.
|
||||
- `capabilities` text null
|
||||
- `adapter_type` text; built-ins include `process`, `http`, `claude_local`, `codex_local`, `gemini_local`, `opencode_local`, `pi_local`, `cursor`, and `openclaw_gateway`
|
||||
- `adapter_config` jsonb not null
|
||||
- `runtime_config` jsonb not null default `{}`
|
||||
- `runtime_config` jsonb not null default `{}`; may include Paperclip runtime policy such as `modelProfiles.cheap.adapterConfig` for an optional low-cost model lane that does not change the primary adapter config
|
||||
- `default_environment_id` uuid fk `environments.id` null
|
||||
- `context_mode` enum: `thin | fat` default `thin`
|
||||
- `budget_monthly_cents` int not null default 0
|
||||
@@ -676,7 +676,7 @@ Per-agent schedule fields in `adapter_config`:
|
||||
|
||||
- `enabled` boolean
|
||||
- `intervalSec` integer (minimum 30)
|
||||
- `maxConcurrentRuns` integer; new agents default to `5`
|
||||
- `maxConcurrentRuns` integer; new agents default to `20`; scheduler clamps configured values to `1..50`
|
||||
|
||||
Scheduler must skip invocation when:
|
||||
|
||||
|
||||
@@ -67,13 +67,15 @@ This is the right state for:
|
||||
|
||||
- waiting on another issue
|
||||
- waiting on a human decision
|
||||
- waiting on an external dependency or system
|
||||
- waiting on an external dependency or system when Paperclip does not own a scheduled re-check
|
||||
- work that automatic recovery could not safely continue
|
||||
|
||||
### `in_review`
|
||||
|
||||
Execution work is paused because the next move belongs to a reviewer or approver, not the current executor.
|
||||
|
||||
An external review service can also be a valid review path when the issue keeps an agent assignee and has an active one-shot monitor that will wake that assignee to check the service later.
|
||||
|
||||
### `done`
|
||||
|
||||
The work is complete and terminal.
|
||||
@@ -164,6 +166,7 @@ The valid action-path primitives are:
|
||||
- a queued wake or continuation that can be delivered to the responsible agent
|
||||
- a typed execution-policy participant, such as `executionState.currentParticipant`
|
||||
- a pending issue-thread interaction or linked approval that is waiting for a specific responder
|
||||
- a one-shot issue monitor (`executionPolicy.monitor.nextCheckAt`) that will wake the assignee for a future check
|
||||
- a human owner via `assigneeUserId`
|
||||
- a first-class blocker chain whose unresolved leaf issues are themselves healthy
|
||||
- an open explicit recovery issue that names the owner and action needed to restore liveness
|
||||
@@ -188,6 +191,7 @@ A healthy active-work state means at least one of these is true:
|
||||
|
||||
- there is an active run for the issue
|
||||
- there is already a queued continuation wake
|
||||
- there is an active one-shot monitor that will wake the assignee for a future check
|
||||
- there is an open explicit recovery issue for the lost execution path
|
||||
|
||||
An agent-owned `in_progress` issue is stalled when it has no active run, no queued continuation, and no explicit recovery surface. A still-running but silent process is not automatically stalled; it is handled by the active-run watchdog contract.
|
||||
@@ -202,11 +206,34 @@ A healthy `in_review` issue has at least one valid action path:
|
||||
- a pending issue-thread interaction or linked approval waiting for a named responder
|
||||
- a human owner via `assigneeUserId`
|
||||
- an active run or queued wake that is expected to process the review state
|
||||
- an active one-shot monitor for an external service or async review loop that the assignee owns
|
||||
- an open explicit recovery issue for an ambiguous review handoff
|
||||
|
||||
Agent-assigned `in_review` with no typed participant is only healthy when one of the other paths exists. Assignment to the same agent that produced the handoff is not, by itself, a review path.
|
||||
|
||||
An `in_review` issue is stalled when it has no typed participant, no pending interaction or approval, no user owner, no active run, no queued wake, and no explicit recovery issue. Paperclip should surface that state as recovery work rather than silently completing the issue or leaving blocker chains parked indefinitely.
|
||||
An `in_review` issue is stalled when it has no typed participant, no pending interaction or approval, no user owner, no active monitor, no active run, no queued wake, and no explicit recovery issue. Paperclip should surface that state as recovery work rather than silently completing the issue or leaving blocker chains parked indefinitely.
|
||||
|
||||
### Issue monitors
|
||||
|
||||
An issue monitor is a one-shot deferred action path for agent-owned issues in `in_progress` or `in_review`.
|
||||
|
||||
Use a monitor when the current assignee owns a future check against an async system or external service. Examples include Greptile review loops, GitHub checks, Vercel deployments, or provider jobs where the agent should come back later and decide what happens next.
|
||||
|
||||
Monitor policy lives under `executionPolicy.monitor` and includes:
|
||||
|
||||
- `nextCheckAt`: when Paperclip should wake the assignee
|
||||
- `notes`: non-secret instructions for what the assignee should check
|
||||
- `serviceName`: optional non-secret external-service context
|
||||
- `externalRef`: optional external-service reference input; Paperclip treats it as secret-adjacent, redacts it before persistence/visibility, and omits it from activity and wake payloads
|
||||
- `timeoutAt`, `maxAttempts`, and `recoveryPolicy`: optional recovery hints for bounded waits
|
||||
|
||||
Monitors are not recurring intervals. When a monitor fires, Paperclip clears the scheduled monitor and queues an `issue_monitor_due` wake for the assignee. If the external service is still pending, the assignee must explicitly re-arm the monitor with a new `nextCheckAt`. If the issue moves to `done`, `cancelled`, an invalid status, or a human/unassigned owner, the monitor is cleared.
|
||||
|
||||
Because `serviceName` and `notes` remain visible in issue activity and wake context, operators should keep them short and non-secret. Put enough context for the assignee to know what to inspect, but do not include signed URLs, bearer tokens, customer secrets, tenant-private identifiers, or provider links with embedded credentials.
|
||||
|
||||
Monitor bounds are enforced. Paperclip rejects attempts to re-arm a monitor whose `timeoutAt` or `maxAttempts` is already exhausted. When a scheduled monitor reaches an exhausted bound at trigger time, Paperclip clears it and follows `recoveryPolicy`: `wake_owner` queues a bounded recovery wake for the assignee, `create_recovery_issue` opens visible recovery work, and `escalate_to_board` records a board-visible escalation comment/activity.
|
||||
|
||||
Use `blocked` instead of a monitor when no Paperclip assignee owns a responsible polling path. In that case, name the external owner/action or create first-class recovery/blocker work.
|
||||
|
||||
### `blocked`
|
||||
|
||||
|
||||
@@ -13,7 +13,9 @@ It is intentionally narrower than [PLUGIN_SPEC.md](./PLUGIN_SPEC.md). The spec i
|
||||
- Plugin database migrations are restricted to a host-derived plugin namespace.
|
||||
- Plugin-owned JSON API routes must be declared in the manifest and are mounted
|
||||
only under `/api/plugins/:pluginId/api/*`.
|
||||
- There is no host-provided shared React component kit for plugins yet.
|
||||
- The host provides a small shared React component kit through
|
||||
`@paperclipai/plugin-sdk/ui`; use it for common Paperclip controls before
|
||||
building custom versions.
|
||||
- `ctx.assets` is not supported in the current runtime.
|
||||
|
||||
## Scaffold a plugin
|
||||
@@ -168,6 +170,187 @@ Mount surfaces currently wired in the host include:
|
||||
- `commentAnnotation`
|
||||
- `commentContextMenuItem`
|
||||
|
||||
## Shared host components
|
||||
|
||||
Use shared components from `@paperclipai/plugin-sdk/ui` when the plugin needs a
|
||||
Paperclip-native control. The host owns the implementation, so plugins inherit
|
||||
the board's current styling, ordering, recent selections, and dark-mode behavior
|
||||
without importing `ui/src` internals.
|
||||
|
||||
Currently exposed components include:
|
||||
|
||||
- `MarkdownBlock` and `MarkdownEditor` for rendered and editable markdown.
|
||||
- `FileTree` for serializable file and directory trees.
|
||||
- `IssuesList` for a native company-scoped issue table.
|
||||
- `AssigneePicker` for the same agent/user selector used in the new issue pane.
|
||||
Use the controlled `value` format `agent:<id>`, `user:<id>`, or `""`.
|
||||
- `ProjectPicker` for the same project selector used in the new issue pane.
|
||||
Use the controlled project id value, or `""` for no project.
|
||||
- `ManagedRoutinesList` for plugin-owned routine settings pages.
|
||||
|
||||
```tsx
|
||||
import { AssigneePicker, ProjectPicker } from "@paperclipai/plugin-sdk/ui";
|
||||
|
||||
export function PluginAssignmentControls({ companyId }: { companyId: string }) {
|
||||
const [assignee, setAssignee] = useState("");
|
||||
const [projectId, setProjectId] = useState("");
|
||||
|
||||
return (
|
||||
<>
|
||||
<AssigneePicker
|
||||
companyId={companyId}
|
||||
value={assignee}
|
||||
onChange={(value) => setAssignee(value)}
|
||||
/>
|
||||
<ProjectPicker
|
||||
companyId={companyId}
|
||||
value={projectId}
|
||||
onChange={setProjectId}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## File and path UI
|
||||
|
||||
Plugin UI often needs to render a file tree, accept a folder path, or browse a
|
||||
project workspace. There are three different surfaces for that, and they map to
|
||||
different trust and data-flow boundaries. Pick the surface that matches the
|
||||
data the plugin actually has.
|
||||
|
||||
### When to use the shared `FileTree`
|
||||
|
||||
Use `FileTree` from `@paperclipai/plugin-sdk/ui` whenever the plugin only needs
|
||||
to render a serializable file/directory list and react to selection or
|
||||
expand/collapse. The host owns the implementation, so plugin UI inherits the
|
||||
board's icons, indent, focus ring, and dark-mode styling without importing host
|
||||
internals.
|
||||
|
||||
```tsx
|
||||
import {
|
||||
FileTree,
|
||||
type FileTreeNode,
|
||||
} from "@paperclipai/plugin-sdk/ui";
|
||||
|
||||
const nodes: FileTreeNode[] = [
|
||||
{ name: "AGENTS.md", path: "AGENTS.md", kind: "file", children: [] },
|
||||
{
|
||||
name: "wiki",
|
||||
path: "wiki",
|
||||
kind: "dir",
|
||||
children: [
|
||||
{ name: "index.md", path: "wiki/index.md", kind: "file", children: [] },
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
export function WikiTree() {
|
||||
const [expanded, setExpanded] = useState<Set<string>>(() => new Set(["wiki"]));
|
||||
const [selected, setSelected] = useState<string | null>(null);
|
||||
|
||||
return (
|
||||
<FileTree
|
||||
nodes={nodes}
|
||||
selectedFile={selected}
|
||||
expandedPaths={expanded}
|
||||
onSelectFile={(path) => setSelected(path)}
|
||||
onToggleDir={(path) =>
|
||||
setExpanded((current) => {
|
||||
const next = new Set(current);
|
||||
next.has(path) ? next.delete(path) : next.add(path);
|
||||
return next;
|
||||
})
|
||||
}
|
||||
/>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
Good fits:
|
||||
|
||||
- LLM Wiki page navigation in `packages/plugins/plugin-llm-wiki` builds a
|
||||
`FileTreeNode[]` from worker query results and renders it through `FileTree`.
|
||||
- The example `plugin-file-browser-example` lazily fetches a directory's
|
||||
children through a `loadFileList` action when `onToggleDir` fires, then
|
||||
merges the children into the local tree state — letting the shared component
|
||||
handle rendering and selection.
|
||||
|
||||
Boundary rules:
|
||||
|
||||
- Keep the prop surface serializable (`nodes`, `expandedPaths`, `checkedPaths`,
|
||||
`fileBadges`, `fileTones`). Do not pass arbitrary render functions across the
|
||||
plugin/host boundary in v1; the supported escape hatches are
|
||||
`fileBadges` (status pill keyed by path) and `fileTones` (row tone keyed by
|
||||
path).
|
||||
- Do not import the host's `FileTree.tsx` or any `ui/src/*` module. The SDK
|
||||
declaration is the only supported import path for plugin UI.
|
||||
- The shared `FileTree` is for rendering and selection. Plugin-specific editors,
|
||||
ingest flows, query forms, and lint runs stay inside the plugin and do not
|
||||
belong as `FileTree` props.
|
||||
|
||||
### When to declare `localFolders`
|
||||
|
||||
When the plugin needs operator-configured filesystem roots — typically for
|
||||
trusted local plugins like wiki tooling — declare `localFolders[]` on the
|
||||
manifest and add the `local.folders` capability. The host renders a settings
|
||||
surface for the operator to set the absolute path, validates the path
|
||||
server-side (containment, symlinks, required files/directories), and exposes
|
||||
`ctx.localFolders.readText()` and `ctx.localFolders.writeTextAtomic()` in the
|
||||
worker.
|
||||
|
||||
```ts
|
||||
export const manifest = {
|
||||
capabilities: ["local.folders"],
|
||||
localFolders: [
|
||||
{
|
||||
folderKey: "content-root",
|
||||
displayName: "Content root",
|
||||
access: "readWrite",
|
||||
requiredDirectories: ["sources", "pages"],
|
||||
requiredFiles: ["schema.md"],
|
||||
},
|
||||
],
|
||||
};
|
||||
```
|
||||
|
||||
Use this when:
|
||||
|
||||
- The data lives outside any project workspace.
|
||||
- Reads and writes need company-scoped configuration.
|
||||
- The operator picks the path once in plugin settings and the worker resolves
|
||||
files relative to that root.
|
||||
|
||||
Do not use `localFolders` to grant the UI direct browser-side access to the
|
||||
filesystem — there is no such capability. The browser still goes through the
|
||||
worker via `getData` / `performAction`, and the worker only exposes paths it
|
||||
chose to expose.
|
||||
|
||||
### When to keep worker-mediated project workspace browsing
|
||||
|
||||
When the data lives inside an existing project workspace, keep the browsing
|
||||
flow worker-mediated:
|
||||
|
||||
- The worker uses `ctx.projects.listWorkspaces()` to resolve the workspace
|
||||
path, then reads its filesystem with normal Node APIs.
|
||||
- The plugin UI calls a `getData` handler for the root listing and an action
|
||||
for lazy children, then renders them through `FileTree`.
|
||||
- The worker is the only side that touches the disk. The browser receives a
|
||||
serializable tree and never sees raw absolute paths it can replay.
|
||||
|
||||
The example `plugin-file-browser-example` is the reference for this pattern:
|
||||
the worker registers `fileList` (data) and `loadFileList` (action) over the
|
||||
same handler, and the UI uses the action for on-toggle directory loading so the
|
||||
shared `FileTree` stays the rendering surface.
|
||||
|
||||
### Mixing surfaces
|
||||
|
||||
A single plugin can use more than one of these. The LLM Wiki uses
|
||||
`localFolders` for its content root, then renders the resulting page list
|
||||
through `FileTree`. The file browser example uses `ctx.projects.listWorkspaces`
|
||||
to pick a workspace and renders its on-disk tree through `FileTree` with lazy
|
||||
loading. Pick the boundary per data source, not per plugin.
|
||||
|
||||
## Company routes
|
||||
|
||||
Plugins may declare a `page` slot with `routePath` to own a company route like:
|
||||
|
||||
@@ -27,7 +27,7 @@ Current limitations to keep in mind:
|
||||
- Published npm packages are the intended install artifact for deployed plugins.
|
||||
- The repo example plugins under `packages/plugins/examples/` are development conveniences. They work from a source checkout and should not be assumed to exist in a generic published build unless they are explicitly shipped with that build.
|
||||
- Dynamic plugin install is not yet cloud-ready for horizontally scaled or ephemeral deployments. There is no shared artifact store, install coordination, or cross-node distribution layer yet.
|
||||
- The current runtime does not yet ship a real host-provided plugin UI component kit, and it does not support plugin asset uploads/reads. Treat those as future-scope ideas in this spec, not current implementation promises.
|
||||
- The current runtime ships a small host-provided plugin UI component kit through `@paperclipai/plugin-sdk/ui`, but does not support plugin asset uploads/reads yet. Treat plugin asset APIs as future-scope ideas, not current implementation promises.
|
||||
- Scoped plugin API routes are JSON-only and must be declared in `apiRoutes`.
|
||||
They mount under `/api/plugins/:pluginId/api/*`; plugins cannot shadow core
|
||||
API routes.
|
||||
@@ -976,13 +976,23 @@ export function DashboardWidget({ context }: PluginWidgetProps) {
|
||||
|
||||
The SDK includes a `ui` subpath export that plugin frontends import. This subpath provides:
|
||||
|
||||
- **Bridge hooks**: `usePluginData(key, params)`, `usePluginAction(key)`, `useHostContext()`
|
||||
- **Bridge hooks**: `usePluginData(key, params)`, `usePluginAction(key)`, `useHostContext()`, `useHostNavigation()`
|
||||
- **Design tokens**: colors, spacing, typography, shadows matching the host theme
|
||||
- **Shared components**: `MetricCard`, `StatusBadge`, `DataTable`, `LogView`, `ActionBar`, `Spinner`, etc.
|
||||
- **Type definitions**: `PluginPageProps`, `PluginWidgetProps`, `PluginDetailTabProps`
|
||||
|
||||
Plugins are encouraged but not required to use the shared components. A plugin may render entirely custom UI as long as it communicates through the bridge.
|
||||
|
||||
`useHostNavigation()` is the supported way for plugin UI to navigate to
|
||||
Paperclip-internal pages. It exposes `resolveHref(to)`, `navigate(to,
|
||||
options?)`, and `linkProps(to, options?)`. Plugin links should prefer
|
||||
`linkProps()` so anchors keep real `href` values for copy-link, modifier-click,
|
||||
middle-click, and open-in-new-tab behavior while plain left-clicks route through
|
||||
the host SPA router. The host resolves company-scoped paths against the active
|
||||
company prefix without double-prefixing already-prefixed paths. Plugin UI should
|
||||
not use raw same-origin `href`s or `window.location.assign()` for internal
|
||||
Paperclip navigation because those can force a full document reload.
|
||||
|
||||
### 19.0.2 Bundle Isolation
|
||||
|
||||
Plugin UI bundles are loaded as standard ES modules, not iframed. This gives plugins full rendering performance and access to the host's design tokens.
|
||||
@@ -1062,6 +1072,11 @@ The host SDK ships shared components that plugins can import to quickly build UI
|
||||
| `LogView` | Scrollable log output with timestamps | Webhook deliveries, job output, process logs |
|
||||
| `JsonTree` | Collapsible JSON tree for debugging | Raw API responses, plugin state inspection |
|
||||
| `Spinner` | Loading indicator | Data fetch states |
|
||||
| `FileTree` | Host-styled file/directory tree | Wiki pages, workspace files, import previews |
|
||||
| `IssuesList` | Host issue list | Plugin pages that need a native issue view |
|
||||
| `AssigneePicker` | Host assignee picker for agents and board users | Creating issues, assigning routines, filtering work |
|
||||
| `ProjectPicker` | Host project picker | Creating issues, scoping dashboards, filtering work |
|
||||
| `ManagedRoutinesList` | Host routine list | Plugin settings pages that manage routines |
|
||||
|
||||
Plugins may also use entirely custom components. The shared components exist to reduce boilerplate and keep visual consistency, not to limit what plugins can render.
|
||||
|
||||
|
||||
BIN
docs/pr-screenshots/pap-2837/newissue-cheap-desktop.png
Normal file
|
After Width: | Height: | Size: 182 KiB |
BIN
docs/pr-screenshots/pap-2837/newissue-cheap-mobile.png
Normal file
|
After Width: | Height: | Size: 108 KiB |
BIN
docs/pr-screenshots/pap-2837/newissue-custom-desktop.png
Normal file
|
After Width: | Height: | Size: 191 KiB |
BIN
docs/pr-screenshots/pap-2837/newissue-custom-mobile.png
Normal file
|
After Width: | Height: | Size: 121 KiB |
BIN
docs/pr-screenshots/pap-2837/newissue-primary-desktop.png
Normal file
|
After Width: | Height: | Size: 183 KiB |
BIN
docs/pr-screenshots/pap-2837/newissue-primary-mobile.png
Normal file
|
After Width: | Height: | Size: 105 KiB |
BIN
docs/pr-screenshots/pap-2837/newissue-unsupported-desktop.png
Normal file
|
After Width: | Height: | Size: 188 KiB |
BIN
docs/pr-screenshots/pap-2837/newissue-unsupported-mobile.png
Normal file
|
After Width: | Height: | Size: 106 KiB |
|
After Width: | Height: | Size: 335 KiB |
BIN
docs/pr-screenshots/pap-2837/runledger-profile-badges-mobile.png
Normal file
|
After Width: | Height: | Size: 151 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-claude-dark.png
Normal file
|
After Width: | Height: | Size: 74 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-claude-light.png
Normal file
|
After Width: | Height: | Size: 74 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-codex-dark.png
Normal file
|
After Width: | Height: | Size: 74 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-codex-light.png
Normal file
|
After Width: | Height: | Size: 74 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-custom-dark.png
Normal file
|
After Width: | Height: | Size: 88 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-custom-light.png
Normal file
|
After Width: | Height: | Size: 87 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-empty-library-dark.png
Normal file
|
After Width: | Height: | Size: 41 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-empty-library-light.png
Normal file
|
After Width: | Height: | Size: 40 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-loading-dark.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
BIN
docs/pr-screenshots/pap-2944/skills-loading-light.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
BIN
docs/pr-screenshots/pap-2945/monitor-surfaces.png
Normal file
|
After Width: | Height: | Size: 180 KiB |
BIN
docs/pr-screenshots/pr-5291/after-issue-management.png
Normal file
|
After Width: | Height: | Size: 701 KiB |
BIN
docs/pr-screenshots/pr-5291/after-navigation-layout.png
Normal file
|
After Width: | Height: | Size: 316 KiB |
BIN
docs/pr-screenshots/pr-5291/after-projects-workspaces.png
Normal file
|
After Width: | Height: | Size: 694 KiB |
BIN
docs/pr-screenshots/pr-5291/after-status-language.png
Normal file
|
After Width: | Height: | Size: 546 KiB |
BIN
docs/pr-screenshots/pr-5291/before-issue-management.png
Normal file
|
After Width: | Height: | Size: 701 KiB |
BIN
docs/pr-screenshots/pr-5291/before-navigation-layout.png
Normal file
|
After Width: | Height: | Size: 316 KiB |
BIN
docs/pr-screenshots/pr-5291/before-projects-workspaces.png
Normal file
|
After Width: | Height: | Size: 694 KiB |
10
package.json
@@ -15,9 +15,12 @@
|
||||
"build-storybook": "pnpm --filter @paperclipai/ui build-storybook",
|
||||
"build": "pnpm run preflight:workspace-links && pnpm -r build",
|
||||
"typecheck": "pnpm run preflight:workspace-links && pnpm -r typecheck",
|
||||
"typecheck:build-gaps": "pnpm run preflight:workspace-links && node scripts/run-typecheck-build-gaps.mjs",
|
||||
"test": "pnpm run test:run",
|
||||
"test:watch": "pnpm run preflight:workspace-links && vitest",
|
||||
"test:run": "pnpm run preflight:workspace-links && node scripts/run-vitest-stable.mjs",
|
||||
"test:run:general": "pnpm run preflight:workspace-links && pnpm --filter @paperclipai/plugin-sdk build && node scripts/run-vitest-stable.mjs --mode general",
|
||||
"test:run:serialized": "pnpm run preflight:workspace-links && pnpm --filter @paperclipai/plugin-sdk build && node scripts/run-vitest-stable.mjs --mode serialized",
|
||||
"db:generate": "pnpm --filter @paperclipai/db generate",
|
||||
"db:migrate": "pnpm --filter @paperclipai/db migrate",
|
||||
"issue-references:backfill": "pnpm run preflight:workspace-links && tsx scripts/backfill-issue-reference-mentions.ts",
|
||||
@@ -30,19 +33,22 @@
|
||||
"release:stable": "./scripts/release.sh stable",
|
||||
"release:github": "./scripts/create-github-release.sh",
|
||||
"release:rollback": "./scripts/rollback-latest.sh",
|
||||
"release:bootstrap-package": "node scripts/bootstrap-npm-package.mjs",
|
||||
"check:tokens": "node scripts/check-forbidden-tokens.mjs",
|
||||
"docs:dev": "cd docs && npx mintlify dev",
|
||||
"smoke:openclaw-join": "./scripts/smoke/openclaw-join.sh",
|
||||
"smoke:openclaw-docker-ui": "./scripts/smoke/openclaw-docker-ui.sh",
|
||||
"smoke:openclaw-sse-standalone": "./scripts/smoke/openclaw-sse-standalone.sh",
|
||||
"test:release-registry": "node --test scripts/verify-release-registry-state.test.mjs",
|
||||
"smoke:terminal-bench-loop-skill": "node scripts/smoke/terminal-bench-loop-skill-smoke.mjs",
|
||||
"test:release-registry": "node --test scripts/verify-release-registry-state.test.mjs scripts/release-package-map.test.mjs scripts/check-release-package-bootstrap.test.mjs",
|
||||
"test:e2e": "npx playwright test --config tests/e2e/playwright.config.ts",
|
||||
"test:e2e:headed": "npx playwright test --config tests/e2e/playwright.config.ts --headed",
|
||||
"test:e2e:multiuser-authenticated": "npx playwright test --config tests/e2e/playwright-multiuser-authenticated.config.ts",
|
||||
"evals:smoke": "cd evals/promptfoo && npx promptfoo@0.103.3 eval",
|
||||
"test:release-smoke": "npx playwright test --config tests/release-smoke/playwright.config.ts",
|
||||
"test:release-smoke:headed": "npx playwright test --config tests/release-smoke/playwright.config.ts --headed",
|
||||
"metrics:paperclip-commits": "tsx scripts/paperclip-commit-metrics.ts"
|
||||
"metrics:paperclip-commits": "tsx scripts/paperclip-commit-metrics.ts",
|
||||
"perf:issue-chat-long-thread": "node scripts/measure-issue-chat-long-thread.mjs"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@playwright/test": "^1.58.2",
|
||||
|
||||
@@ -55,9 +55,15 @@ describe("command managed runtime", () => {
|
||||
...process.env,
|
||||
...input.env,
|
||||
};
|
||||
const command = input.command === "sh" ? "/bin/sh" : input.command;
|
||||
const command =
|
||||
input.command === "sh" ? "/bin/sh" : input.command === "bash" ? "/bin/bash" : input.command;
|
||||
const args = [...(input.args ?? [])];
|
||||
if (input.stdin != null && input.command === "sh" && args[0] === "-lc" && typeof args[1] === "string") {
|
||||
if (
|
||||
input.stdin != null &&
|
||||
(input.command === "sh" || input.command === "bash") &&
|
||||
args[0] === "-lc" &&
|
||||
typeof args[1] === "string"
|
||||
) {
|
||||
env.PAPERCLIP_TEST_STDIN = input.stdin;
|
||||
args[1] = `printf '%s' \"$PAPERCLIP_TEST_STDIN\" | (${args[1]})`;
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import {
|
||||
type SandboxManagedRuntimeClient,
|
||||
type SandboxRemoteExecutionSpec,
|
||||
} from "./sandbox-managed-runtime.js";
|
||||
import { preferredShellForSandbox } from "./sandbox-shell.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
|
||||
export interface CommandManagedRuntimeRunner {
|
||||
@@ -23,10 +24,10 @@ export interface CommandManagedRuntimeRunner {
|
||||
|
||||
export interface CommandManagedRuntimeSpec {
|
||||
providerKey?: string | null;
|
||||
shellCommand?: "bash" | "sh" | null;
|
||||
leaseId?: string | null;
|
||||
remoteCwd: string;
|
||||
timeoutMs?: number | null;
|
||||
paperclipApiUrl?: string | null;
|
||||
}
|
||||
|
||||
export type CommandManagedRuntimeAsset = SandboxManagedRuntimeAsset;
|
||||
@@ -58,10 +59,12 @@ export function createCommandManagedRuntimeClient(input: {
|
||||
runner: CommandManagedRuntimeRunner;
|
||||
remoteCwd: string;
|
||||
timeoutMs: number;
|
||||
shellCommand?: "bash" | "sh" | null;
|
||||
}): SandboxManagedRuntimeClient {
|
||||
const shellCommand = preferredShellForSandbox(input.shellCommand);
|
||||
const runShell = async (script: string, opts: { stdin?: string; timeoutMs?: number } = {}) => {
|
||||
const result = await input.runner.execute({
|
||||
command: "sh",
|
||||
command: shellCommand,
|
||||
args: ["-lc", script],
|
||||
cwd: input.remoteCwd,
|
||||
stdin: opts.stdin,
|
||||
@@ -112,7 +115,7 @@ export function createCommandManagedRuntimeClient(input: {
|
||||
},
|
||||
remove: async (remotePath) => {
|
||||
const result = await input.runner.execute({
|
||||
command: "sh",
|
||||
command: shellCommand,
|
||||
args: ["-lc", `rm -rf ${shellQuote(remotePath)}`],
|
||||
cwd: input.remoteCwd,
|
||||
timeoutMs: input.timeoutMs,
|
||||
@@ -121,7 +124,7 @@ export function createCommandManagedRuntimeClient(input: {
|
||||
},
|
||||
run: async (command, options) => {
|
||||
const result = await input.runner.execute({
|
||||
command: "sh",
|
||||
command: shellCommand,
|
||||
args: ["-lc", command],
|
||||
cwd: input.remoteCwd,
|
||||
timeoutMs: options.timeoutMs,
|
||||
@@ -151,17 +154,18 @@ export async function prepareCommandManagedRuntime(input: {
|
||||
remoteCwd: workspaceRemoteDir,
|
||||
timeoutMs,
|
||||
apiKey: null,
|
||||
paperclipApiUrl: input.spec.paperclipApiUrl ?? null,
|
||||
};
|
||||
const client = createCommandManagedRuntimeClient({
|
||||
runner: input.runner,
|
||||
remoteCwd: workspaceRemoteDir,
|
||||
timeoutMs,
|
||||
shellCommand: input.spec.shellCommand,
|
||||
});
|
||||
const shellCommand = preferredShellForSandbox(input.spec.shellCommand);
|
||||
|
||||
if (input.installCommand?.trim()) {
|
||||
const result = await input.runner.execute({
|
||||
command: "sh",
|
||||
command: shellCommand,
|
||||
args: ["-lc", input.installCommand.trim()],
|
||||
cwd: workspaceRemoteDir,
|
||||
timeoutMs,
|
||||
|
||||
21
packages/adapter-utils/src/command-redaction.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
export const REDACTED_COMMAND_TEXT_VALUE = "***REDACTED***";
|
||||
|
||||
const COMMAND_CLI_SECRET_OPTION_RE =
|
||||
/(\B-{1,2}(?:api[-_]?key|(?:access[-_]?|auth[-_]?)?token|token|authorization|bearer|secret|passwd|password|credential|jwt|private[-_]?key|cookie|connectionstring)(?:\s+|=)(["']?))[^\s"'`]+(\2)/gi;
|
||||
const COMMAND_ENV_SECRET_ASSIGNMENT_RE =
|
||||
/(\b[A-Za-z0-9_]*(?:TOKEN|KEY|SECRET|PASSWORD|PASSWD|AUTHORIZATION|JWT)[A-Za-z0-9_]*\s*=\s*)[^\s"'`]+/gi;
|
||||
const COMMAND_AUTHORIZATION_BEARER_RE = /(\bAuthorization\s*:\s*Bearer\s+)[^\s"'`]+/gi;
|
||||
const COMMAND_OPENAI_KEY_RE = /\bsk-[A-Za-z0-9_-]{12,}\b/g;
|
||||
const COMMAND_GITHUB_TOKEN_RE = /\bgh[pousr]_[A-Za-z0-9_]{20,}\b/g;
|
||||
const COMMAND_JWT_RE =
|
||||
/\b[A-Za-z0-9_-]{8,}\.[A-Za-z0-9_-]{8,}\.[A-Za-z0-9_-]{8,}(?:\.[A-Za-z0-9_-]{8,})?\b/g;
|
||||
|
||||
export function redactCommandText(command: string, redactedValue = REDACTED_COMMAND_TEXT_VALUE): string {
|
||||
return command
|
||||
.replace(COMMAND_AUTHORIZATION_BEARER_RE, `$1${redactedValue}`)
|
||||
.replace(COMMAND_CLI_SECRET_OPTION_RE, `$1${redactedValue}$3`)
|
||||
.replace(COMMAND_ENV_SECRET_ASSIGNMENT_RE, `$1${redactedValue}`)
|
||||
.replace(COMMAND_OPENAI_KEY_RE, redactedValue)
|
||||
.replace(COMMAND_GITHUB_TOKEN_RE, redactedValue)
|
||||
.replace(COMMAND_JWT_RE, redactedValue);
|
||||
}
|
||||
@@ -7,6 +7,7 @@ import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetToRemoteSpec,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
runAdapterExecutionTargetProcess,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
@@ -39,7 +40,8 @@ describe("sandbox adapter execution targets", () => {
|
||||
onSpawn?: (meta: { pid: number; startedAt: string }) => Promise<void>;
|
||||
}) => {
|
||||
counter += 1;
|
||||
return runChildProcess(`sandbox-run-${counter}`, input.command, input.args ?? [], {
|
||||
const command = input.command === "bash" ? "/bin/bash" : input.command;
|
||||
return runChildProcess(`sandbox-run-${counter}`, command, input.args ?? [], {
|
||||
cwd: input.cwd ?? process.cwd(),
|
||||
env: input.env ?? {},
|
||||
stdin: input.stdin,
|
||||
@@ -103,7 +105,6 @@ describe("sandbox adapter execution targets", () => {
|
||||
environmentId: "env-1",
|
||||
leaseId: "lease-1",
|
||||
remoteCwd: "/workspace",
|
||||
paperclipTransport: "bridge",
|
||||
});
|
||||
});
|
||||
|
||||
@@ -140,6 +141,68 @@ describe("sandbox adapter execution targets", () => {
|
||||
}));
|
||||
});
|
||||
|
||||
it("treats SSH targets as bridge-only", () => {
|
||||
const target = {
|
||||
kind: "remote" as const,
|
||||
transport: "ssh" as const,
|
||||
remoteCwd: "/workspace",
|
||||
spec: {
|
||||
host: "ssh.example.test",
|
||||
port: 22,
|
||||
username: "paperclip",
|
||||
remoteWorkspacePath: "/workspace",
|
||||
remoteCwd: "/workspace",
|
||||
privateKey: null,
|
||||
knownHosts: null,
|
||||
strictHostKeyChecking: true,
|
||||
},
|
||||
};
|
||||
|
||||
expect(adapterExecutionTargetUsesPaperclipBridge(target)).toBe(true);
|
||||
expect(adapterExecutionTargetSessionIdentity(target)).toEqual({
|
||||
transport: "ssh",
|
||||
host: "ssh.example.test",
|
||||
port: 22,
|
||||
username: "paperclip",
|
||||
remoteCwd: "/workspace",
|
||||
});
|
||||
});
|
||||
|
||||
it("uses the provider-declared shell for sandbox helper commands", async () => {
|
||||
const runner = {
|
||||
execute: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "/home/sandbox",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
})),
|
||||
};
|
||||
const target: AdapterSandboxExecutionTarget = {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
providerKey: "custom-provider",
|
||||
shellCommand: "bash",
|
||||
remoteCwd: "/workspace",
|
||||
runner,
|
||||
};
|
||||
|
||||
await runAdapterExecutionTargetShellCommand("run-2b", target, 'printf %s "$HOME"', {
|
||||
cwd: "/local/workspace",
|
||||
env: {},
|
||||
timeoutSec: 7,
|
||||
});
|
||||
|
||||
expect(runner.execute).toHaveBeenCalledWith(expect.objectContaining({
|
||||
command: "bash",
|
||||
args: ["-lc", 'printf %s "$HOME"'],
|
||||
cwd: "/workspace",
|
||||
timeoutMs: 7000,
|
||||
}));
|
||||
});
|
||||
|
||||
it("starts a localhost Paperclip bridge for sandbox targets in bridge mode", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-execution-target-bridge-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
@@ -174,7 +237,6 @@ describe("sandbox adapter execution targets", () => {
|
||||
environmentId: "env-1",
|
||||
leaseId: "lease-1",
|
||||
remoteCwd,
|
||||
paperclipTransport: "bridge",
|
||||
runner: createLocalSandboxRunner(),
|
||||
timeoutMs: 30_000,
|
||||
};
|
||||
@@ -252,7 +314,6 @@ describe("sandbox adapter execution targets", () => {
|
||||
environmentId: "env-1",
|
||||
leaseId: "lease-1",
|
||||
remoteCwd,
|
||||
paperclipTransport: "bridge",
|
||||
runner: createLocalSandboxRunner(),
|
||||
timeoutMs: 30_000,
|
||||
};
|
||||
|
||||
@@ -2,6 +2,7 @@ import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import * as ssh from "./ssh.js";
|
||||
import {
|
||||
adapterExecutionTargetUsesManagedHome,
|
||||
ensureAdapterExecutionTargetRuntimeCommandInstalled,
|
||||
resolveAdapterExecutionTargetCwd,
|
||||
runAdapterExecutionTargetShellCommand,
|
||||
} from "./execution-target.js";
|
||||
@@ -161,6 +162,80 @@ describe("runAdapterExecutionTargetShellCommand", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("ensureAdapterExecutionTargetRuntimeCommandInstalled", () => {
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it("runs install commands for sandbox targets", async () => {
|
||||
const runner = {
|
||||
execute: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
pid: null,
|
||||
startedAt: new Date().toISOString(),
|
||||
})),
|
||||
};
|
||||
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId: "run-install",
|
||||
target: {
|
||||
kind: "remote",
|
||||
transport: "sandbox",
|
||||
providerKey: "e2b",
|
||||
remoteCwd: "/remote/workspace",
|
||||
runner,
|
||||
},
|
||||
installCommand: "npm install -g @google/gemini-cli",
|
||||
cwd: "/local/workspace",
|
||||
env: { PATH: "/usr/bin" },
|
||||
timeoutSec: 30,
|
||||
});
|
||||
|
||||
expect(runner.execute).toHaveBeenCalledWith(expect.objectContaining({
|
||||
command: "sh",
|
||||
args: ["-lc", "npm install -g @google/gemini-cli"],
|
||||
cwd: "/remote/workspace",
|
||||
env: { PATH: "/usr/bin" },
|
||||
timeoutMs: 30_000,
|
||||
}));
|
||||
});
|
||||
|
||||
it("skips install commands for SSH targets", async () => {
|
||||
const runSshCommandSpy = vi.spyOn(ssh, "runSshCommand").mockResolvedValue({
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
});
|
||||
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId: "run-skip",
|
||||
target: {
|
||||
kind: "remote",
|
||||
transport: "ssh",
|
||||
remoteCwd: "/srv/paperclip/workspace",
|
||||
spec: {
|
||||
host: "ssh.example.test",
|
||||
port: 22,
|
||||
username: "ssh-user",
|
||||
remoteCwd: "/srv/paperclip/workspace",
|
||||
remoteWorkspacePath: "/srv/paperclip/workspace",
|
||||
privateKey: null,
|
||||
knownHosts: null,
|
||||
strictHostKeyChecking: true,
|
||||
},
|
||||
},
|
||||
installCommand: "npm install -g @google/gemini-cli",
|
||||
cwd: "/tmp/local",
|
||||
env: {},
|
||||
});
|
||||
|
||||
expect(runSshCommandSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveAdapterExecutionTargetCwd", () => {
|
||||
const sshTarget = {
|
||||
kind: "remote" as const,
|
||||
|
||||
@@ -18,7 +18,7 @@ import {
|
||||
startSandboxCallbackBridgeServer,
|
||||
startSandboxCallbackBridgeWorker,
|
||||
} from "./sandbox-callback-bridge.js";
|
||||
import { parseSshRemoteExecutionSpec, runSshCommand, shellQuote } from "./ssh.js";
|
||||
import { createSshCommandManagedRuntimeRunner, parseSshRemoteExecutionSpec, runSshCommand, shellQuote } from "./ssh.js";
|
||||
import {
|
||||
ensureCommandResolvable,
|
||||
resolveCommandForLogs,
|
||||
@@ -26,6 +26,7 @@ import {
|
||||
type RunProcessResult,
|
||||
type TerminalResultCleanupOptions,
|
||||
} from "./server-utils.js";
|
||||
import { preferredShellForSandbox } from "./sandbox-shell.js";
|
||||
|
||||
export interface AdapterLocalExecutionTarget {
|
||||
kind: "local";
|
||||
@@ -39,7 +40,6 @@ export interface AdapterSshExecutionTarget {
|
||||
environmentId?: string | null;
|
||||
leaseId?: string | null;
|
||||
remoteCwd: string;
|
||||
paperclipApiUrl?: string | null;
|
||||
spec: SshRemoteExecutionSpec;
|
||||
}
|
||||
|
||||
@@ -47,11 +47,10 @@ export interface AdapterSandboxExecutionTarget {
|
||||
kind: "remote";
|
||||
transport: "sandbox";
|
||||
providerKey?: string | null;
|
||||
shellCommand?: "bash" | "sh" | null;
|
||||
environmentId?: string | null;
|
||||
leaseId?: string | null;
|
||||
remoteCwd: string;
|
||||
paperclipApiUrl?: string | null;
|
||||
paperclipTransport?: "direct" | "bridge";
|
||||
timeoutMs?: number | null;
|
||||
runner?: CommandManagedRuntimeRunner;
|
||||
}
|
||||
@@ -126,13 +125,9 @@ function resolveDefaultPaperclipApiUrl(): string {
|
||||
return `http://${runtimeHost}:${runtimePort}`;
|
||||
}
|
||||
|
||||
function resolveSandboxPaperclipTransport(
|
||||
target: Pick<AdapterSandboxExecutionTarget, "paperclipTransport" | "paperclipApiUrl">,
|
||||
): "direct" | "bridge" {
|
||||
if (target.paperclipTransport === "direct" || target.paperclipTransport === "bridge") {
|
||||
return target.paperclipTransport;
|
||||
}
|
||||
return target.paperclipApiUrl ? "direct" : "bridge";
|
||||
function isBridgeDebugEnabled(env: NodeJS.ProcessEnv): boolean {
|
||||
const value = env.PAPERCLIP_BRIDGE_DEBUG?.trim().toLowerCase();
|
||||
return value === "1" || value === "true" || value === "yes";
|
||||
}
|
||||
|
||||
function isAdapterExecutionTargetInstance(value: unknown): value is AdapterExecutionTarget {
|
||||
@@ -180,21 +175,10 @@ export function resolveAdapterExecutionTargetCwd(
|
||||
return adapterExecutionTargetRemoteCwd(target, localFallbackCwd);
|
||||
}
|
||||
|
||||
export function adapterExecutionTargetPaperclipApiUrl(
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
): string | null {
|
||||
if (target?.kind !== "remote") return null;
|
||||
if (target.transport === "ssh") return target.paperclipApiUrl ?? target.spec.paperclipApiUrl ?? null;
|
||||
if (resolveSandboxPaperclipTransport(target) === "bridge") return null;
|
||||
return target.paperclipApiUrl ?? null;
|
||||
}
|
||||
|
||||
export function adapterExecutionTargetUsesPaperclipBridge(
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
): boolean {
|
||||
return target?.kind === "remote" &&
|
||||
target.transport === "sandbox" &&
|
||||
resolveSandboxPaperclipTransport(target) === "bridge";
|
||||
return target?.kind === "remote";
|
||||
}
|
||||
|
||||
export function describeAdapterExecutionTarget(
|
||||
@@ -214,6 +198,33 @@ function requireSandboxRunner(target: AdapterSandboxExecutionTarget): CommandMan
|
||||
);
|
||||
}
|
||||
|
||||
function preferredSandboxShell(target: AdapterSandboxExecutionTarget): "bash" | "sh" {
|
||||
return preferredShellForSandbox(target.shellCommand);
|
||||
}
|
||||
|
||||
type AdapterCommandCapableExecutionTarget = AdapterSshExecutionTarget | AdapterSandboxExecutionTarget;
|
||||
|
||||
function adapterExecutionTargetCommandRunner(target: AdapterCommandCapableExecutionTarget): CommandManagedRuntimeRunner {
|
||||
if (target.transport === "ssh") {
|
||||
return createSshCommandManagedRuntimeRunner({
|
||||
spec: target.spec,
|
||||
defaultCwd: target.remoteCwd,
|
||||
maxBufferBytes: DEFAULT_SANDBOX_CALLBACK_BRIDGE_MAX_BODY_BYTES * 4,
|
||||
});
|
||||
}
|
||||
return requireSandboxRunner(target);
|
||||
}
|
||||
|
||||
function adapterExecutionTargetShellCommand(target: AdapterCommandCapableExecutionTarget): "bash" | "sh" {
|
||||
return target.transport === "ssh" ? "sh" : preferredSandboxShell(target);
|
||||
}
|
||||
|
||||
function adapterExecutionTargetTimeoutMs(
|
||||
target: AdapterCommandCapableExecutionTarget,
|
||||
): number | null | undefined {
|
||||
return target.transport === "sandbox" ? target.timeoutMs : undefined;
|
||||
}
|
||||
|
||||
export async function ensureAdapterExecutionTargetCommandResolvable(
|
||||
command: string,
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
@@ -341,8 +352,9 @@ export async function runAdapterExecutionTargetShellCommand(
|
||||
}
|
||||
}
|
||||
|
||||
const shellCommand = preferredSandboxShell(target);
|
||||
return await requireSandboxRunner(target).execute({
|
||||
command: "sh",
|
||||
command: shellCommand,
|
||||
args: ["-lc", command],
|
||||
cwd: target.remoteCwd,
|
||||
env: options.env,
|
||||
@@ -381,6 +393,60 @@ export async function readAdapterExecutionTargetHomeDir(
|
||||
return homeDir.length > 0 ? homeDir : null;
|
||||
}
|
||||
|
||||
export async function ensureAdapterExecutionTargetRuntimeCommandInstalled(input: {
|
||||
runId: string;
|
||||
target: AdapterExecutionTarget | null | undefined;
|
||||
installCommand?: string | null;
|
||||
detectCommand?: string | null;
|
||||
cwd: string;
|
||||
env: Record<string, string>;
|
||||
timeoutSec?: number;
|
||||
graceSec?: number;
|
||||
onLog?: AdapterExecutionTargetShellOptions["onLog"];
|
||||
}): Promise<void> {
|
||||
const installCommand = input.installCommand?.trim();
|
||||
if (!installCommand || input.target?.kind !== "remote" || input.target.transport !== "sandbox") {
|
||||
return;
|
||||
}
|
||||
|
||||
const detectCommand = input.detectCommand?.trim();
|
||||
if (detectCommand) {
|
||||
const probe = await runAdapterExecutionTargetShellCommand(
|
||||
input.runId,
|
||||
input.target,
|
||||
`command -v ${shellQuote(detectCommand)} >/dev/null 2>&1`,
|
||||
{
|
||||
cwd: input.cwd,
|
||||
env: input.env,
|
||||
timeoutSec: input.timeoutSec,
|
||||
graceSec: input.graceSec,
|
||||
},
|
||||
);
|
||||
if (!probe.timedOut && probe.exitCode === 0) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const result = await runAdapterExecutionTargetShellCommand(
|
||||
input.runId,
|
||||
input.target,
|
||||
installCommand,
|
||||
{
|
||||
cwd: input.cwd,
|
||||
env: input.env,
|
||||
timeoutSec: input.timeoutSec,
|
||||
graceSec: input.graceSec,
|
||||
onLog: input.onLog,
|
||||
},
|
||||
);
|
||||
if (result.timedOut) {
|
||||
throw new Error(`Timed out while installing the adapter runtime command via: ${installCommand}`);
|
||||
}
|
||||
if ((result.exitCode ?? 0) !== 0) {
|
||||
throw new Error(`Failed to install the adapter runtime command via: ${installCommand}`);
|
||||
}
|
||||
}
|
||||
|
||||
export async function ensureAdapterExecutionTargetFile(
|
||||
runId: string,
|
||||
target: AdapterExecutionTarget | null | undefined,
|
||||
@@ -458,15 +524,12 @@ export function adapterExecutionTargetSessionIdentity(
|
||||
): Record<string, unknown> | null {
|
||||
if (!target || target.kind === "local") return null;
|
||||
if (target.transport === "ssh") return buildRemoteExecutionSessionIdentity(target.spec);
|
||||
const paperclipTransport = resolveSandboxPaperclipTransport(target);
|
||||
return {
|
||||
transport: "sandbox",
|
||||
providerKey: target.providerKey ?? null,
|
||||
environmentId: target.environmentId ?? null,
|
||||
leaseId: target.leaseId ?? null,
|
||||
remoteCwd: target.remoteCwd,
|
||||
paperclipTransport,
|
||||
...(paperclipTransport === "direct" && target.paperclipApiUrl ? { paperclipApiUrl: target.paperclipApiUrl } : {}),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -485,9 +548,7 @@ export function adapterExecutionTargetSessionMatches(
|
||||
readStringMeta(parsedSaved, "providerKey") === current?.providerKey &&
|
||||
readStringMeta(parsedSaved, "environmentId") === current?.environmentId &&
|
||||
readStringMeta(parsedSaved, "leaseId") === current?.leaseId &&
|
||||
readStringMeta(parsedSaved, "remoteCwd") === current?.remoteCwd &&
|
||||
readStringMeta(parsedSaved, "paperclipTransport") === (current?.paperclipTransport ?? null) &&
|
||||
readStringMeta(parsedSaved, "paperclipApiUrl") === (current?.paperclipApiUrl ?? null)
|
||||
readStringMeta(parsedSaved, "remoteCwd") === current?.remoteCwd
|
||||
);
|
||||
}
|
||||
|
||||
@@ -512,14 +573,12 @@ export function parseAdapterExecutionTarget(value: unknown): AdapterExecutionTar
|
||||
environmentId: readStringMeta(parsed, "environmentId"),
|
||||
leaseId: readStringMeta(parsed, "leaseId"),
|
||||
remoteCwd: spec.remoteCwd,
|
||||
paperclipApiUrl: readStringMeta(parsed, "paperclipApiUrl") ?? spec.paperclipApiUrl ?? null,
|
||||
spec,
|
||||
};
|
||||
}
|
||||
|
||||
if (kind === "remote" && readStringMeta(parsed, "transport") === "sandbox") {
|
||||
const remoteCwd = readStringMeta(parsed, "remoteCwd");
|
||||
const paperclipTransport = readStringMeta(parsed, "paperclipTransport");
|
||||
if (!remoteCwd) return null;
|
||||
return {
|
||||
kind: "remote",
|
||||
@@ -528,11 +587,6 @@ export function parseAdapterExecutionTarget(value: unknown): AdapterExecutionTar
|
||||
environmentId: readStringMeta(parsed, "environmentId"),
|
||||
leaseId: readStringMeta(parsed, "leaseId"),
|
||||
remoteCwd,
|
||||
paperclipApiUrl: readStringMeta(parsed, "paperclipApiUrl"),
|
||||
paperclipTransport:
|
||||
paperclipTransport === "direct" || paperclipTransport === "bridge"
|
||||
? paperclipTransport
|
||||
: undefined,
|
||||
timeoutMs: typeof parsed.timeoutMs === "number" ? parsed.timeoutMs : null,
|
||||
};
|
||||
}
|
||||
@@ -553,7 +607,6 @@ export function adapterExecutionTargetFromRemoteExecution(
|
||||
environmentId: metadata.environmentId ?? null,
|
||||
leaseId: metadata.leaseId ?? null,
|
||||
remoteCwd: ssh.remoteCwd,
|
||||
paperclipApiUrl: ssh.paperclipApiUrl ?? null,
|
||||
spec: ssh,
|
||||
};
|
||||
}
|
||||
@@ -612,10 +665,10 @@ export async function prepareAdapterExecutionTargetRuntime(input: {
|
||||
runner: requireSandboxRunner(target),
|
||||
spec: {
|
||||
providerKey: target.providerKey,
|
||||
shellCommand: target.shellCommand,
|
||||
leaseId: target.leaseId,
|
||||
remoteCwd: target.remoteCwd,
|
||||
timeoutMs: target.timeoutMs,
|
||||
paperclipApiUrl: target.paperclipApiUrl,
|
||||
},
|
||||
adapterKey: input.adapterKey,
|
||||
workspaceLocalDir: input.workspaceLocalDir,
|
||||
@@ -703,7 +756,7 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
if (!adapterExecutionTargetUsesPaperclipBridge(input.target)) {
|
||||
return null;
|
||||
}
|
||||
if (!input.target || input.target.kind !== "remote" || input.target.transport !== "sandbox") {
|
||||
if (!input.target || input.target.kind !== "remote") {
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -731,6 +784,8 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
process.env.PAPERCLIP_RUNTIME_API_URL?.trim() ||
|
||||
process.env.PAPERCLIP_API_URL?.trim() ||
|
||||
resolveDefaultPaperclipApiUrl();
|
||||
const shellCommand = adapterExecutionTargetShellCommand(target);
|
||||
const runner = adapterExecutionTargetCommandRunner(target);
|
||||
|
||||
await onLog(
|
||||
"stdout",
|
||||
@@ -742,15 +797,30 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
let worker: Awaited<ReturnType<typeof startSandboxCallbackBridgeWorker>> | null = null;
|
||||
try {
|
||||
const client = createCommandManagedSandboxCallbackBridgeQueueClient({
|
||||
runner: requireSandboxRunner(target),
|
||||
runner,
|
||||
remoteCwd: target.remoteCwd,
|
||||
timeoutMs: target.timeoutMs,
|
||||
timeoutMs: adapterExecutionTargetTimeoutMs(target),
|
||||
shellCommand,
|
||||
});
|
||||
// PAPERCLIP_BRIDGE_DEBUG opts into verbose stdout logs of every bridge
|
||||
// proxy request/response. The query string is logged verbatim, so callers
|
||||
// who pass auth tokens or other sensitive values as query parameters
|
||||
// should be aware those values appear in the host process's stdout when
|
||||
// this flag is enabled. Only intended for active debugging in trusted
|
||||
// environments.
|
||||
const bridgeDebugEnabled = isBridgeDebugEnabled(process.env);
|
||||
worker = await startSandboxCallbackBridgeWorker({
|
||||
client,
|
||||
queueDir,
|
||||
maxBodyBytes,
|
||||
handleRequest: async (request) => {
|
||||
const method = request.method.trim().toUpperCase() || "GET";
|
||||
if (bridgeDebugEnabled) {
|
||||
await onLog(
|
||||
"stdout",
|
||||
`[paperclip] Bridge proxy ${method} ${request.path}${request.query ? `?${request.query}` : ""}\n`,
|
||||
);
|
||||
}
|
||||
const headers = new Headers();
|
||||
for (const [key, value] of Object.entries(request.headers)) {
|
||||
if (value.trim().length === 0) continue;
|
||||
@@ -758,13 +828,18 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
}
|
||||
headers.set("authorization", `Bearer ${hostApiToken}`);
|
||||
headers.set("x-paperclip-run-id", input.runId);
|
||||
const method = request.method.trim().toUpperCase() || "GET";
|
||||
const response = await fetch(buildBridgeForwardUrl(hostApiUrl, request), {
|
||||
method,
|
||||
headers,
|
||||
...(method === "GET" || method === "HEAD" ? {} : { body: request.body }),
|
||||
signal: AbortSignal.timeout(30_000),
|
||||
});
|
||||
if (bridgeDebugEnabled) {
|
||||
await onLog(
|
||||
"stdout",
|
||||
`[paperclip] Bridge proxy response ${response.status} for ${method} ${request.path}${request.query ? `?${request.query}` : ""}\n`,
|
||||
);
|
||||
}
|
||||
return {
|
||||
status: response.status,
|
||||
headers: buildBridgeResponseHeaders(response),
|
||||
@@ -773,14 +848,15 @@ export async function startAdapterExecutionTargetPaperclipBridge(input: {
|
||||
},
|
||||
});
|
||||
server = await startSandboxCallbackBridgeServer({
|
||||
runner: requireSandboxRunner(target),
|
||||
runner,
|
||||
remoteCwd: target.remoteCwd,
|
||||
assetRemoteDir,
|
||||
queueDir,
|
||||
bridgeToken,
|
||||
bridgeAsset,
|
||||
timeoutMs: target.timeoutMs,
|
||||
timeoutMs: adapterExecutionTargetTimeoutMs(target),
|
||||
maxBodyBytes,
|
||||
shellCommand,
|
||||
});
|
||||
} catch (error) {
|
||||
await Promise.allSettled([
|
||||
|
||||
@@ -20,11 +20,14 @@ export type {
|
||||
AdapterSkillContext,
|
||||
AdapterSessionCodec,
|
||||
AdapterModel,
|
||||
AdapterModelProfileKey,
|
||||
AdapterModelProfileDefinition,
|
||||
HireApprovedPayload,
|
||||
HireApprovedHookResult,
|
||||
ConfigFieldOption,
|
||||
ConfigFieldSchema,
|
||||
AdapterConfigSchema,
|
||||
AdapterRuntimeCommandSpec,
|
||||
ServerAdapterModule,
|
||||
QuotaWindow,
|
||||
ProviderQuotaResult,
|
||||
@@ -53,6 +56,10 @@ export {
|
||||
redactHomePathUserSegmentsInValue,
|
||||
redactTranscriptEntryPaths,
|
||||
} from "./log-redaction.js";
|
||||
export {
|
||||
REDACTED_COMMAND_TEXT_VALUE,
|
||||
redactCommandText,
|
||||
} from "./command-redaction.js";
|
||||
export { inferOpenAiCompatibleBiller } from "./billing.js";
|
||||
// Keep the root adapter-utils entry browser-safe because the UI imports it.
|
||||
// The sandbox callback bridge stays available via its dedicated subpath export.
|
||||
|
||||
@@ -44,7 +44,6 @@ export function buildRemoteExecutionSessionIdentity(spec: SshRemoteExecutionSpec
|
||||
port: spec.port,
|
||||
username: spec.username,
|
||||
remoteCwd: spec.remoteCwd,
|
||||
...(spec.paperclipApiUrl ? { paperclipApiUrl: spec.paperclipApiUrl } : {}),
|
||||
} as const;
|
||||
}
|
||||
|
||||
@@ -58,8 +57,7 @@ export function remoteExecutionSessionMatches(saved: unknown, current: SshRemote
|
||||
asString(parsedSaved.host) === currentIdentity.host &&
|
||||
asNumber(parsedSaved.port) === currentIdentity.port &&
|
||||
asString(parsedSaved.username) === currentIdentity.username &&
|
||||
asString(parsedSaved.remoteCwd) === currentIdentity.remoteCwd &&
|
||||
asString(parsedSaved.paperclipApiUrl) === asString(currentIdentity.paperclipApiUrl)
|
||||
asString(parsedSaved.remoteCwd) === currentIdentity.remoteCwd
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -37,9 +37,15 @@ describe("sandbox callback bridge", () => {
|
||||
...process.env,
|
||||
...input.env,
|
||||
};
|
||||
const command = input.command === "sh" ? "/bin/sh" : input.command;
|
||||
const command =
|
||||
input.command === "sh" ? "/bin/sh" : input.command === "bash" ? "/bin/bash" : input.command;
|
||||
const args = [...(input.args ?? [])];
|
||||
if (input.stdin != null && input.command === "sh" && args[0] === "-lc" && typeof args[1] === "string") {
|
||||
if (
|
||||
input.stdin != null &&
|
||||
(input.command === "sh" || input.command === "bash") &&
|
||||
args[0] === "-lc" &&
|
||||
typeof args[1] === "string"
|
||||
) {
|
||||
env.PAPERCLIP_TEST_STDIN = input.stdin;
|
||||
args[1] = `printf '%s' \"$PAPERCLIP_TEST_STDIN\" | (${args[1]})`;
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import os from "node:os";
|
||||
import path from "node:path";
|
||||
|
||||
import type { CommandManagedRuntimeRunner } from "./command-managed-runtime.js";
|
||||
import { preferredShellForSandbox } from "./sandbox-shell.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
|
||||
const DEFAULT_BRIDGE_TOKEN_BYTES = 24;
|
||||
@@ -133,9 +134,10 @@ async function runShell(
|
||||
cwd: string,
|
||||
script: string,
|
||||
timeoutMs: number,
|
||||
shellCommand: "bash" | "sh" = "sh",
|
||||
): Promise<RunProcessResult> {
|
||||
return await runner.execute({
|
||||
command: "sh",
|
||||
command: shellCommand,
|
||||
args: ["-lc", script],
|
||||
cwd,
|
||||
timeoutMs,
|
||||
@@ -266,10 +268,12 @@ export function createCommandManagedSandboxCallbackBridgeQueueClient(input: {
|
||||
runner: CommandManagedRuntimeRunner;
|
||||
remoteCwd: string;
|
||||
timeoutMs?: number | null;
|
||||
shellCommand?: "bash" | "sh" | null;
|
||||
}): SandboxCallbackBridgeQueueClient {
|
||||
const timeoutMs = normalizeTimeoutMs(input.timeoutMs, DEFAULT_BRIDGE_RESPONSE_TIMEOUT_MS);
|
||||
const shellCommand = preferredShellForSandbox(input.shellCommand);
|
||||
const runChecked = async (action: string, script: string) =>
|
||||
requireSuccessfulResult(action, await runShell(input.runner, input.remoteCwd, script, timeoutMs));
|
||||
requireSuccessfulResult(action, await runShell(input.runner, input.remoteCwd, script, timeoutMs, shellCommand));
|
||||
|
||||
return {
|
||||
makeDir: async (remotePath) => {
|
||||
@@ -288,6 +292,7 @@ export function createCommandManagedSandboxCallbackBridgeQueueClient(input: {
|
||||
"fi",
|
||||
].join("\n"),
|
||||
timeoutMs,
|
||||
shellCommand,
|
||||
);
|
||||
requireSuccessfulResult(`list ${remotePath}`, result);
|
||||
return result.stdout
|
||||
@@ -525,10 +530,12 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
responseTimeoutMs?: number | null;
|
||||
timeoutMs?: number | null;
|
||||
nodeCommand?: string;
|
||||
shellCommand?: "bash" | "sh" | null;
|
||||
maxQueueDepth?: number | null;
|
||||
maxBodyBytes?: number | null;
|
||||
}): Promise<StartedSandboxCallbackBridgeServer> {
|
||||
const timeoutMs = normalizeTimeoutMs(input.timeoutMs, DEFAULT_BRIDGE_RESPONSE_TIMEOUT_MS);
|
||||
const shellCommand = preferredShellForSandbox(input.shellCommand);
|
||||
const directories = sandboxCallbackBridgeDirectories(input.queueDir);
|
||||
const remoteEntrypoint = path.posix.join(input.assetRemoteDir, SANDBOX_CALLBACK_BRIDGE_ENTRYPOINT);
|
||||
if (input.bridgeAsset) {
|
||||
@@ -536,6 +543,7 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
runner: input.runner,
|
||||
remoteCwd: input.remoteCwd,
|
||||
timeoutMs,
|
||||
shellCommand,
|
||||
});
|
||||
await assetClient.makeDir(input.assetRemoteDir);
|
||||
const entrypointSource = await fs.readFile(input.bridgeAsset.entrypoint, "utf8");
|
||||
@@ -553,7 +561,7 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
});
|
||||
const nodeCommand = input.nodeCommand?.trim() || "node";
|
||||
const startResult = await input.runner.execute({
|
||||
command: "sh",
|
||||
command: shellCommand,
|
||||
args: [
|
||||
"-lc",
|
||||
[
|
||||
@@ -594,6 +602,7 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
"exit 1",
|
||||
].join("\n"),
|
||||
timeoutMs,
|
||||
shellCommand,
|
||||
);
|
||||
requireSuccessfulResult("wait for sandbox callback bridge readiness", readyResult);
|
||||
|
||||
@@ -626,7 +635,7 @@ export async function startSandboxCallbackBridgeServer(input: {
|
||||
directories,
|
||||
stop: async () => {
|
||||
const stopResult = await input.runner.execute({
|
||||
command: "sh",
|
||||
command: shellCommand,
|
||||
args: [
|
||||
"-lc",
|
||||
[
|
||||
|
||||
@@ -13,7 +13,6 @@ export interface SandboxRemoteExecutionSpec {
|
||||
remoteCwd: string;
|
||||
timeoutMs: number;
|
||||
apiKey: string | null;
|
||||
paperclipApiUrl?: string | null;
|
||||
}
|
||||
|
||||
export interface SandboxManagedRuntimeAsset {
|
||||
@@ -85,7 +84,6 @@ export function parseSandboxRemoteExecutionSpec(value: unknown): SandboxRemoteEx
|
||||
remoteCwd,
|
||||
timeoutMs,
|
||||
apiKey: asString(parsed.apiKey).trim() || null,
|
||||
paperclipApiUrl: asString(parsed.paperclipApiUrl).trim() || null,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -96,7 +94,6 @@ export function buildSandboxExecutionSessionIdentity(spec: SandboxRemoteExecutio
|
||||
provider: spec.provider,
|
||||
sandboxId: spec.sandboxId,
|
||||
remoteCwd: spec.remoteCwd,
|
||||
...(spec.paperclipApiUrl ? { paperclipApiUrl: spec.paperclipApiUrl } : {}),
|
||||
} as const;
|
||||
}
|
||||
|
||||
@@ -108,8 +105,7 @@ export function sandboxExecutionSessionMatches(saved: unknown, current: SandboxR
|
||||
asString(parsedSaved.transport) === currentIdentity.transport &&
|
||||
asString(parsedSaved.provider) === currentIdentity.provider &&
|
||||
asString(parsedSaved.sandboxId) === currentIdentity.sandboxId &&
|
||||
asString(parsedSaved.remoteCwd) === currentIdentity.remoteCwd &&
|
||||
asString(parsedSaved.paperclipApiUrl) === asString(currentIdentity.paperclipApiUrl)
|
||||
asString(parsedSaved.remoteCwd) === currentIdentity.remoteCwd
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
3
packages/adapter-utils/src/sandbox-shell.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
export function preferredShellForSandbox(shellCommand: string | null | undefined): "bash" | "sh" {
|
||||
return shellCommand === "bash" ? "bash" : "sh";
|
||||
}
|
||||
@@ -1,12 +1,19 @@
|
||||
import { randomUUID } from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
applyPaperclipWorkspaceEnv,
|
||||
appendWithByteCap,
|
||||
buildInvocationEnvForLogs,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
materializePaperclipSkillCopy,
|
||||
renderPaperclipWakePrompt,
|
||||
runningProcesses,
|
||||
runChildProcess,
|
||||
sanitizeSshRemoteEnv,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
} from "./server-utils.js";
|
||||
|
||||
@@ -39,6 +46,162 @@ async function waitForTextMatch(read: () => string, pattern: RegExp, timeoutMs =
|
||||
return read().match(pattern);
|
||||
}
|
||||
|
||||
describe("buildInvocationEnvForLogs", () => {
|
||||
it("redacts inline secrets from resolved command metadata", () => {
|
||||
const loggedEnv = buildInvocationEnvForLogs(
|
||||
{ SAFE_VALUE: "visible" },
|
||||
{
|
||||
resolvedCommand: "env OPENAI_API_KEY=sk-live-example custom-acp --token ghp_example_secret",
|
||||
},
|
||||
);
|
||||
|
||||
expect(loggedEnv.SAFE_VALUE).toBe("visible");
|
||||
expect(loggedEnv.PAPERCLIP_RESOLVED_COMMAND).toBe(
|
||||
"env OPENAI_API_KEY=***REDACTED*** custom-acp --token ***REDACTED***",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("sanitizeSshRemoteEnv", () => {
|
||||
it("drops inherited host shell identity variables for SSH remote execution", () => {
|
||||
expect(
|
||||
sanitizeSshRemoteEnv(
|
||||
{
|
||||
PATH: "/host/bin:/usr/bin",
|
||||
HOME: "/Users/local",
|
||||
NVM_DIR: "/Users/local/.nvm",
|
||||
TMPDIR: "/var/folders/local/T",
|
||||
XDG_CONFIG_HOME: "/Users/local/.config",
|
||||
SAFE_VALUE: "visible",
|
||||
},
|
||||
{
|
||||
PATH: "/host/bin:/usr/bin",
|
||||
HOME: "/Users/local",
|
||||
NVM_DIR: "/Users/local/.nvm",
|
||||
TMPDIR: "/var/folders/local/T",
|
||||
XDG_CONFIG_HOME: "/Users/local/.config",
|
||||
},
|
||||
),
|
||||
).toEqual({
|
||||
SAFE_VALUE: "visible",
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves explicit remote overrides even for filtered key names", () => {
|
||||
expect(
|
||||
sanitizeSshRemoteEnv(
|
||||
{
|
||||
PATH: "/custom/remote/bin:/usr/bin",
|
||||
HOME: "/home/agent",
|
||||
TMPDIR: "/tmp",
|
||||
SAFE_VALUE: "visible",
|
||||
},
|
||||
{
|
||||
PATH: "/host/bin:/usr/bin",
|
||||
HOME: "/Users/local",
|
||||
TMPDIR: "/var/folders/local/T",
|
||||
},
|
||||
),
|
||||
).toEqual({
|
||||
PATH: "/custom/remote/bin:/usr/bin",
|
||||
HOME: "/home/agent",
|
||||
TMPDIR: "/tmp",
|
||||
SAFE_VALUE: "visible",
|
||||
});
|
||||
});
|
||||
|
||||
it("filters identity keys via case-insensitive match against the inherited env", () => {
|
||||
expect(
|
||||
sanitizeSshRemoteEnv(
|
||||
{
|
||||
// Caller passed PATH in upper case while the inherited (Windows-style)
|
||||
// host env exposes it as Path. The lookup must still treat them as
|
||||
// equal so the leaked host PATH gets stripped.
|
||||
PATH: "/host/bin:/usr/bin",
|
||||
HOME: "/host/home",
|
||||
},
|
||||
{
|
||||
Path: "/host/bin:/usr/bin",
|
||||
home: "/host/home",
|
||||
},
|
||||
),
|
||||
).toEqual({});
|
||||
});
|
||||
|
||||
it("preserves explicitly-set identity keys when the inherited env disagrees in case but not in value", () => {
|
||||
expect(
|
||||
sanitizeSshRemoteEnv(
|
||||
{
|
||||
PATH: "/explicit/remote/bin",
|
||||
},
|
||||
{
|
||||
Path: "/host/bin:/usr/bin",
|
||||
},
|
||||
),
|
||||
).toEqual({ PATH: "/explicit/remote/bin" });
|
||||
});
|
||||
});
|
||||
|
||||
describe("materializePaperclipSkillCopy", () => {
|
||||
it("refuses to materialize into an ancestor of the source", async () => {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-skill-copy-"));
|
||||
try {
|
||||
const source = path.join(root, "parent", "skill");
|
||||
await fs.mkdir(source, { recursive: true });
|
||||
await fs.writeFile(path.join(source, "SKILL.md"), "# skill\n", "utf8");
|
||||
|
||||
await expect(materializePaperclipSkillCopy(source, path.join(root, "parent"))).rejects.toThrow(
|
||||
/ancestor/,
|
||||
);
|
||||
await expect(fs.readFile(path.join(source, "SKILL.md"), "utf8")).resolves.toBe("# skill\n");
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("does not delete and recopy an unchanged materialized skill target", async () => {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-skill-copy-"));
|
||||
try {
|
||||
const source = path.join(root, "source");
|
||||
const target = path.join(root, "target");
|
||||
await fs.mkdir(source, { recursive: true });
|
||||
await fs.writeFile(path.join(source, "SKILL.md"), "# skill\n", "utf8");
|
||||
|
||||
const first = await materializePaperclipSkillCopy(source, target);
|
||||
expect(first.copiedFiles).toBe(1);
|
||||
await fs.writeFile(path.join(target, "local-marker.txt"), "keep\n", "utf8");
|
||||
|
||||
const second = await materializePaperclipSkillCopy(source, target);
|
||||
expect(second.copiedFiles).toBe(0);
|
||||
await expect(fs.readFile(path.join(target, "local-marker.txt"), "utf8")).resolves.toBe("keep\n");
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("breaks stale materialization locks left by dead processes", async () => {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-skill-copy-"));
|
||||
try {
|
||||
const source = path.join(root, "source");
|
||||
const target = path.join(root, "target");
|
||||
const lock = `${target}.lock`;
|
||||
await fs.mkdir(source, { recursive: true });
|
||||
await fs.writeFile(path.join(source, "SKILL.md"), "# skill\n", "utf8");
|
||||
await fs.mkdir(lock, { recursive: true });
|
||||
await fs.writeFile(
|
||||
path.join(lock, "owner.json"),
|
||||
JSON.stringify({ pid: 999_999_999, createdAt: "2000-01-01T00:00:00.000Z" }),
|
||||
"utf8",
|
||||
);
|
||||
|
||||
await expect(materializePaperclipSkillCopy(source, target)).resolves.toMatchObject({ copiedFiles: 1 });
|
||||
await expect(fs.readFile(path.join(target, "SKILL.md"), "utf8")).resolves.toBe("# skill\n");
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe("runChildProcess", () => {
|
||||
it("does not arm a timeout when timeoutSec is 0", async () => {
|
||||
const result = await runChildProcess(
|
||||
@@ -470,6 +633,70 @@ describe("applyPaperclipWorkspaceEnv", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("shapePaperclipWorkspaceEnvForExecution", () => {
|
||||
it("rewrites workspace env paths for remote execution", () => {
|
||||
const shaped = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: "/tmp/workspace",
|
||||
workspaceWorktreePath: "/tmp/worktree",
|
||||
workspaceHints: [
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/tmp/workspace",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
cwd: "/tmp/other-workspace",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-3",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
},
|
||||
],
|
||||
executionTargetIsRemote: true,
|
||||
executionCwd: "/remote/workspace",
|
||||
});
|
||||
|
||||
expect(shaped).toEqual({
|
||||
workspaceCwd: "/remote/workspace",
|
||||
workspaceWorktreePath: null,
|
||||
workspaceHints: [
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-3",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
},
|
||||
],
|
||||
});
|
||||
});
|
||||
|
||||
it("leaves local execution workspace paths unchanged", () => {
|
||||
const workspaceHints = [{ workspaceId: "workspace-1", cwd: "/tmp/workspace" }];
|
||||
const shaped = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: "/tmp/workspace",
|
||||
workspaceWorktreePath: "/tmp/worktree",
|
||||
workspaceHints,
|
||||
executionTargetIsRemote: false,
|
||||
executionCwd: "/remote/workspace",
|
||||
});
|
||||
|
||||
expect(shaped).toEqual({
|
||||
workspaceCwd: "/tmp/workspace",
|
||||
workspaceWorktreePath: "/tmp/worktree",
|
||||
workspaceHints,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("appendWithByteCap", () => {
|
||||
it("keeps valid UTF-8 when trimming through multibyte text", () => {
|
||||
const output = appendWithByteCap("prefix ", "hello — world", 7);
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import { spawn, type ChildProcess } from "node:child_process";
|
||||
import { createHash, randomUUID } from "node:crypto";
|
||||
import { constants as fsConstants, promises as fs, type Dirent } from "node:fs";
|
||||
import path from "node:path";
|
||||
import { buildSshSpawnTarget, type SshRemoteExecutionSpec } from "./ssh.js";
|
||||
import { redactCommandText } from "./command-redaction.js";
|
||||
import type {
|
||||
AdapterSkillEntry,
|
||||
AdapterSkillSnapshot,
|
||||
@@ -76,10 +78,14 @@ export const MAX_CAPTURE_BYTES = 4 * 1024 * 1024;
|
||||
export const MAX_EXCERPT_BYTES = 32 * 1024;
|
||||
const TERMINAL_RESULT_SCAN_OVERLAP_CHARS = 64 * 1024;
|
||||
const SENSITIVE_ENV_KEY = /(key|token|secret|password|passwd|authorization|cookie)/i;
|
||||
const REDACTED_LOG_VALUE = "***REDACTED***";
|
||||
const PAPERCLIP_SKILL_ROOT_RELATIVE_CANDIDATES = [
|
||||
"../../skills",
|
||||
"../../../../../skills",
|
||||
];
|
||||
const MATERIALIZED_SKILL_SENTINEL = ".paperclip-materialized-skill.json";
|
||||
const MATERIALIZED_SKILL_LOCK_OWNER = "owner.json";
|
||||
const MATERIALIZED_SKILL_LOCK_STALE_MS = 30_000;
|
||||
|
||||
export const DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE = [
|
||||
"You are agent {{agent.id}} ({{agent.name}}). Continue your Paperclip work.",
|
||||
@@ -111,6 +117,11 @@ export interface InstalledSkillTarget {
|
||||
kind: "symlink" | "directory" | "file";
|
||||
}
|
||||
|
||||
export interface MaterializedPaperclipSkillCopyResult {
|
||||
copiedFiles: number;
|
||||
skippedSymlinks: string[];
|
||||
}
|
||||
|
||||
interface PersistentSkillSnapshotOptions {
|
||||
adapterType: string;
|
||||
availableEntries: PaperclipSkillEntry[];
|
||||
@@ -780,11 +791,15 @@ export function renderPaperclipWakePrompt(
|
||||
export function redactEnvForLogs(env: Record<string, string>): Record<string, string> {
|
||||
const redacted: Record<string, string> = {};
|
||||
for (const [key, value] of Object.entries(env)) {
|
||||
redacted[key] = SENSITIVE_ENV_KEY.test(key) ? "***REDACTED***" : value;
|
||||
redacted[key] = SENSITIVE_ENV_KEY.test(key) ? REDACTED_LOG_VALUE : value;
|
||||
}
|
||||
return redacted;
|
||||
}
|
||||
|
||||
export function redactCommandTextForLogs(command: string): string {
|
||||
return redactCommandText(command, REDACTED_LOG_VALUE);
|
||||
}
|
||||
|
||||
export function buildInvocationEnvForLogs(
|
||||
env: Record<string, string>,
|
||||
options: {
|
||||
@@ -806,7 +821,7 @@ export function buildInvocationEnvForLogs(
|
||||
|
||||
const resolvedCommand = options.resolvedCommand?.trim();
|
||||
if (resolvedCommand) {
|
||||
merged[options.resolvedCommandEnvKey ?? "PAPERCLIP_RESOLVED_COMMAND"] = resolvedCommand;
|
||||
merged[options.resolvedCommandEnvKey ?? "PAPERCLIP_RESOLVED_COMMAND"] = redactCommandTextForLogs(resolvedCommand);
|
||||
}
|
||||
|
||||
return redactEnvForLogs(merged);
|
||||
@@ -870,6 +885,79 @@ export function applyPaperclipWorkspaceEnv(
|
||||
return env;
|
||||
}
|
||||
|
||||
export function shapePaperclipWorkspaceEnvForExecution(input: {
|
||||
workspaceCwd?: string | null;
|
||||
workspaceWorktreePath?: string | null;
|
||||
workspaceHints?: Array<Record<string, unknown>>;
|
||||
executionTargetIsRemote?: boolean;
|
||||
executionCwd?: string | null;
|
||||
}): {
|
||||
workspaceCwd: string | null;
|
||||
workspaceWorktreePath: string | null;
|
||||
workspaceHints: Array<Record<string, unknown>>;
|
||||
} {
|
||||
const workspaceCwd =
|
||||
typeof input.workspaceCwd === "string" && input.workspaceCwd.trim().length > 0
|
||||
? input.workspaceCwd.trim()
|
||||
: null;
|
||||
const workspaceWorktreePath =
|
||||
typeof input.workspaceWorktreePath === "string" && input.workspaceWorktreePath.trim().length > 0
|
||||
? input.workspaceWorktreePath.trim()
|
||||
: null;
|
||||
const workspaceHints = Array.isArray(input.workspaceHints) ? input.workspaceHints : [];
|
||||
|
||||
if (!input.executionTargetIsRemote) {
|
||||
return {
|
||||
workspaceCwd,
|
||||
workspaceWorktreePath,
|
||||
workspaceHints,
|
||||
};
|
||||
}
|
||||
|
||||
const executionCwd =
|
||||
typeof input.executionCwd === "string" && input.executionCwd.trim().length > 0
|
||||
? input.executionCwd.trim()
|
||||
: null;
|
||||
// On a remote target we must never fall back to the local workspaceCwd —
|
||||
// doing so leaks host paths into the remote env (the exact failure mode
|
||||
// this helper exists to prevent). Callers are expected to resolve
|
||||
// executionCwd via adapterExecutionTargetRemoteCwd before calling this
|
||||
// helper, which always returns a non-empty string. Surface a warning so
|
||||
// future callers don't silently regress to the leak.
|
||||
if (executionCwd === null) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.warn(
|
||||
"[paperclip] shapePaperclipWorkspaceEnvForExecution called with executionCwd=null on a remote target; " +
|
||||
"stripping workspaceCwd to avoid leaking local paths into the remote environment.",
|
||||
);
|
||||
}
|
||||
const realizedWorkspaceCwd = executionCwd;
|
||||
const localWorkspaceCwd = workspaceCwd ? path.resolve(workspaceCwd) : null;
|
||||
const shapedWorkspaceHints = workspaceHints.map((hint) => {
|
||||
const nextHint = { ...hint };
|
||||
const hintCwd = typeof nextHint.cwd === "string" ? nextHint.cwd.trim() : "";
|
||||
if (!hintCwd) return nextHint;
|
||||
|
||||
if (localWorkspaceCwd && path.resolve(hintCwd) === localWorkspaceCwd) {
|
||||
if (realizedWorkspaceCwd) {
|
||||
nextHint.cwd = realizedWorkspaceCwd;
|
||||
} else {
|
||||
delete nextHint.cwd;
|
||||
}
|
||||
return nextHint;
|
||||
}
|
||||
|
||||
delete nextHint.cwd;
|
||||
return nextHint;
|
||||
});
|
||||
|
||||
return {
|
||||
workspaceCwd: realizedWorkspaceCwd,
|
||||
workspaceWorktreePath: null,
|
||||
workspaceHints: shapedWorkspaceHints,
|
||||
};
|
||||
}
|
||||
|
||||
export function sanitizeInheritedPaperclipEnv(baseEnv: NodeJS.ProcessEnv): NodeJS.ProcessEnv {
|
||||
const env: NodeJS.ProcessEnv = { ...baseEnv };
|
||||
for (const key of Object.keys(env)) {
|
||||
@@ -951,6 +1039,56 @@ function quoteForCmd(arg: string) {
|
||||
return /[\s"&<>|^()]/.test(escaped) ? `"${escaped}"` : escaped;
|
||||
}
|
||||
|
||||
const SSH_REMOTE_ENV_IDENTITY_KEYS = new Set([
|
||||
"PATH",
|
||||
"HOME",
|
||||
"PWD",
|
||||
"SHELL",
|
||||
"USER",
|
||||
"LOGNAME",
|
||||
"NVM_DIR",
|
||||
"TMPDIR",
|
||||
"TMP",
|
||||
"TEMP",
|
||||
"XDG_CONFIG_HOME",
|
||||
"XDG_CACHE_HOME",
|
||||
"XDG_DATA_HOME",
|
||||
"XDG_STATE_HOME",
|
||||
"XDG_RUNTIME_DIR",
|
||||
]);
|
||||
|
||||
function readEnvValueCaseInsensitive(env: NodeJS.ProcessEnv, key: string): string | undefined {
|
||||
const direct = env[key];
|
||||
if (typeof direct === "string") return direct;
|
||||
const upper = key.toUpperCase();
|
||||
for (const [candidateKey, candidateValue] of Object.entries(env)) {
|
||||
if (candidateKey.toUpperCase() === upper && typeof candidateValue === "string") {
|
||||
return candidateValue;
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export function sanitizeSshRemoteEnv(
|
||||
env: Record<string, string>,
|
||||
inheritedEnv: NodeJS.ProcessEnv = process.env,
|
||||
): Record<string, string> {
|
||||
const sanitized: Record<string, string> = {};
|
||||
for (const [key, value] of Object.entries(env)) {
|
||||
const normalizedKey = key.toUpperCase();
|
||||
if (!SSH_REMOTE_ENV_IDENTITY_KEYS.has(normalizedKey)) {
|
||||
sanitized[key] = value;
|
||||
continue;
|
||||
}
|
||||
const inheritedValue = readEnvValueCaseInsensitive(inheritedEnv, key);
|
||||
if (typeof inheritedValue === "string" && inheritedValue === value) {
|
||||
continue;
|
||||
}
|
||||
sanitized[key] = value;
|
||||
}
|
||||
return sanitized;
|
||||
}
|
||||
|
||||
function resolveWindowsCmdShell(env: NodeJS.ProcessEnv): string {
|
||||
const fallbackRoot = env.SystemRoot || process.env.SystemRoot || "C:\\Windows";
|
||||
return path.join(fallbackRoot, "System32", "cmd.exe");
|
||||
@@ -976,9 +1114,9 @@ async function resolveSpawnTarget(
|
||||
spec: remote,
|
||||
command,
|
||||
args,
|
||||
env: Object.fromEntries(
|
||||
env: sanitizeSshRemoteEnv(Object.fromEntries(
|
||||
Object.entries(options.remoteEnv ?? {}).filter((entry): entry is [string, string] => typeof entry[1] === "string"),
|
||||
),
|
||||
)),
|
||||
});
|
||||
return {
|
||||
command: sshResolved,
|
||||
@@ -1395,6 +1533,190 @@ export async function ensurePaperclipSkillSymlink(
|
||||
return "repaired";
|
||||
}
|
||||
|
||||
async function hashSkillDirectory(root: string): Promise<string> {
|
||||
const hash = createHash("sha256");
|
||||
|
||||
async function visit(candidate: string, relativePath: string): Promise<void> {
|
||||
const stat = await fs.lstat(candidate);
|
||||
if (stat.isSymbolicLink()) {
|
||||
hash.update(`symlink:${relativePath}\n`);
|
||||
return;
|
||||
}
|
||||
if (stat.isDirectory()) {
|
||||
hash.update(`dir:${relativePath}\n`);
|
||||
const entries = await fs.readdir(candidate, { withFileTypes: true });
|
||||
entries.sort((left, right) => left.name.localeCompare(right.name));
|
||||
for (const entry of entries) {
|
||||
const childRelativePath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
|
||||
await visit(path.join(candidate, entry.name), childRelativePath);
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (stat.isFile()) {
|
||||
hash.update(`file:${relativePath}:${stat.mode}\n`);
|
||||
hash.update(await fs.readFile(candidate));
|
||||
hash.update("\n");
|
||||
return;
|
||||
}
|
||||
hash.update(`other:${relativePath}:${stat.mode}\n`);
|
||||
}
|
||||
|
||||
await visit(root, "");
|
||||
return hash.digest("hex");
|
||||
}
|
||||
|
||||
async function materializedSkillFingerprintMatches(targetRoot: string, sourceFingerprint: string): Promise<boolean> {
|
||||
try {
|
||||
const raw = JSON.parse(await fs.readFile(path.join(targetRoot, MATERIALIZED_SKILL_SENTINEL), "utf8")) as unknown;
|
||||
const parsed = parseObject(raw);
|
||||
return parsed.version === 1 && parsed.sourceFingerprint === sourceFingerprint;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function acquireMaterializeLock(lockDir: string): Promise<() => Promise<void>> {
|
||||
await fs.mkdir(path.dirname(lockDir), { recursive: true });
|
||||
const deadline = Date.now() + MATERIALIZED_SKILL_LOCK_STALE_MS;
|
||||
while (true) {
|
||||
try {
|
||||
await fs.mkdir(lockDir);
|
||||
await fs.writeFile(
|
||||
path.join(lockDir, MATERIALIZED_SKILL_LOCK_OWNER),
|
||||
`${JSON.stringify({ pid: process.pid, createdAt: new Date().toISOString() })}\n`,
|
||||
"utf8",
|
||||
);
|
||||
return async () => {
|
||||
await fs.rm(lockDir, { recursive: true, force: true });
|
||||
};
|
||||
} catch (err) {
|
||||
const code = err && typeof err === "object" ? (err as { code?: unknown }).code : null;
|
||||
if (code !== "EEXIST") throw err;
|
||||
if (await removeStaleMaterializeLock(lockDir, MATERIALIZED_SKILL_LOCK_STALE_MS)) continue;
|
||||
if (Date.now() >= deadline) {
|
||||
throw new Error(`Timed out waiting for Paperclip skill materialization lock at ${lockDir}`);
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function isPidAlive(pid: number): boolean {
|
||||
if (!Number.isInteger(pid) || pid <= 0) return false;
|
||||
try {
|
||||
process.kill(pid, 0);
|
||||
return true;
|
||||
} catch (err) {
|
||||
const code = err && typeof err === "object" ? (err as { code?: unknown }).code : null;
|
||||
return code === "EPERM";
|
||||
}
|
||||
}
|
||||
|
||||
async function removeStaleMaterializeLock(lockDir: string, staleMs: number): Promise<boolean> {
|
||||
const ownerPath = path.join(lockDir, MATERIALIZED_SKILL_LOCK_OWNER);
|
||||
let shouldRemove = false;
|
||||
try {
|
||||
const raw = JSON.parse(await fs.readFile(ownerPath, "utf8")) as unknown;
|
||||
const owner = parseObject(raw);
|
||||
const pid = typeof owner.pid === "number" ? owner.pid : 0;
|
||||
const createdAt = typeof owner.createdAt === "string" ? Date.parse(owner.createdAt) : Number.NaN;
|
||||
const ageMs = Number.isFinite(createdAt) ? Date.now() - createdAt : staleMs + 1;
|
||||
shouldRemove = !isPidAlive(pid) || ageMs > staleMs;
|
||||
} catch {
|
||||
const stat = await fs.stat(lockDir).catch(() => null);
|
||||
shouldRemove = !stat || Date.now() - stat.mtimeMs > staleMs;
|
||||
}
|
||||
if (!shouldRemove) return false;
|
||||
await fs.rm(lockDir, { recursive: true, force: true }).catch(() => {});
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
 * Copies the skill directory at `source` to `target` atomically and
 * idempotently:
 * - Safety: refuses when source and target overlap (same path, ancestor, or
 *   descendant), when the source root is a symlink, or when it is not a
 *   directory.
 * - Concurrency: serializes against other processes via a `<target>.lock`
 *   directory (see `acquireMaterializeLock`).
 * - Idempotence: a sentinel file recording a content fingerprint lets the
 *   call short-circuit when the target already matches the source.
 * - Atomicity: the copy is staged into a unique temp directory and swapped
 *   into place with rename; the temp directory is always cleaned up.
 *
 * Symlinks inside the source are skipped (recorded in `skippedSymlinks`),
 * never followed or recreated.
 *
 * @returns counts of copied files and the relative paths of skipped symlinks.
 */
export async function materializePaperclipSkillCopy(
  source: string,
  target: string,
): Promise<MaterializedPaperclipSkillCopyResult> {
  const sourceRoot = path.resolve(source);
  const targetRoot = path.resolve(target);
  // A relative path that is empty means the roots match; one that neither
  // starts with ".." nor is absolute means one root contains the other.
  const relativeTarget = path.relative(sourceRoot, targetRoot);
  const relativeSource = path.relative(targetRoot, sourceRoot);
  if (
    !relativeTarget ||
    (!relativeTarget.startsWith("..") && !path.isAbsolute(relativeTarget)) ||
    !relativeSource ||
    (!relativeSource.startsWith("..") && !path.isAbsolute(relativeSource))
  ) {
    throw new Error("Refusing to materialize a skill into itself, an ancestor, or one of its descendants.");
  }

  // lstat (not stat) so a symlinked root is detected instead of followed.
  const rootStat = await fs.lstat(sourceRoot);
  if (rootStat.isSymbolicLink()) {
    throw new Error("Refusing to materialize a skill root that is itself a symlink.");
  }
  if (!rootStat.isDirectory()) {
    throw new Error("Paperclip skills must be directories.");
  }

  const result: MaterializedPaperclipSkillCopyResult = {
    copiedFiles: 0,
    skippedSymlinks: [],
  };

  const lockDir = `${targetRoot}.lock`;
  const releaseLock = await acquireMaterializeLock(lockDir);
  // Unique per process + UUID so concurrent or aborted runs never collide.
  const tempRoot = `${targetRoot}.tmp-${process.pid}-${randomUUID()}`;

  // Recursively copies one entry, skipping (and recording) symlinks.
  async function copyEntry(sourcePath: string, targetPath: string, relativePath: string): Promise<void> {
    const stat = await fs.lstat(sourcePath);
    if (stat.isSymbolicLink()) {
      result.skippedSymlinks.push(relativePath || ".");
      return;
    }

    if (stat.isDirectory()) {
      await fs.mkdir(targetPath, { recursive: true });
      const entries = await fs.readdir(sourcePath, { withFileTypes: true });
      // Deterministic traversal order keeps results reproducible.
      entries.sort((left, right) => left.name.localeCompare(right.name));
      for (const entry of entries) {
        const childRelativePath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
        await copyEntry(path.join(sourcePath, entry.name), path.join(targetPath, entry.name), childRelativePath);
      }
      return;
    }

    if (stat.isFile()) {
      await fs.mkdir(path.dirname(targetPath), { recursive: true });
      // Prefer a copy-on-write clone where the filesystem supports it,
      // falling back to a plain byte copy otherwise.
      await fs.copyFile(sourcePath, targetPath, fsConstants.COPYFILE_FICLONE).catch(async () => {
        await fs.copyFile(sourcePath, targetPath);
      });
      // Permission propagation is best-effort only.
      await fs.chmod(targetPath, stat.mode).catch(() => {});
      result.copiedFiles += 1;
    }
  }

  try {
    const sourceFingerprint = await hashSkillDirectory(sourceRoot);
    // Fast path: target already holds this exact content.
    if (await materializedSkillFingerprintMatches(targetRoot, sourceFingerprint)) return result;
    await copyEntry(sourceRoot, tempRoot, "");
    await fs.writeFile(
      path.join(tempRoot, MATERIALIZED_SKILL_SENTINEL),
      `${JSON.stringify({
        version: 1,
        sourceFingerprint,
        copiedFiles: result.copiedFiles,
        skippedSymlinks: result.skippedSymlinks,
      }, null, 2)}\n`,
      "utf8",
    );
    // Re-check under the lock in case another process finished first.
    if (await materializedSkillFingerprintMatches(targetRoot, sourceFingerprint)) return result;
    await fs.rm(targetRoot, { recursive: true, force: true });
    await fs.rename(tempRoot, targetRoot);
    return result;
  } finally {
    // Remove the staging directory (a no-op after a successful rename).
    await fs.rm(tempRoot, { recursive: true, force: true }).catch(() => {});
    await releaseLock();
  }
}
|
||||
|
||||
export async function removeMaintainerOnlySkillSymlinks(
|
||||
skillsHome: string,
|
||||
allowedSkillNames: Iterable<string>,
|
||||
|
||||
@@ -37,6 +37,7 @@ const ADAPTER_MANAGED_SESSION_POLICY: SessionCompactionPolicy = {
|
||||
};
|
||||
|
||||
export const LEGACY_SESSIONED_ADAPTER_TYPES = new Set([
|
||||
"acpx_local",
|
||||
"claude_local",
|
||||
"codex_local",
|
||||
"cursor",
|
||||
@@ -47,6 +48,11 @@ export const LEGACY_SESSIONED_ADAPTER_TYPES = new Set([
|
||||
]);
|
||||
|
||||
export const ADAPTER_SESSION_MANAGEMENT: Record<string, AdapterSessionManagement> = {
|
||||
acpx_local: {
|
||||
supportsSessionResume: true,
|
||||
nativeContextManagement: "confirmed",
|
||||
defaultSessionCompaction: ADAPTER_MANAGED_SESSION_POLICY,
|
||||
},
|
||||
claude_local: {
|
||||
supportsSessionResume: true,
|
||||
nativeContextManagement: "confirmed",
|
||||
|
||||
@@ -3,6 +3,8 @@ import { constants as fsConstants, createReadStream, createWriteStream, promises
|
||||
import net from "node:net";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { CommandManagedRuntimeRunner } from "./command-managed-runtime.js";
|
||||
import type { RunProcessResult } from "./server-utils.js";
|
||||
|
||||
export interface SshConnectionConfig {
|
||||
host: string;
|
||||
@@ -21,7 +23,86 @@ export interface SshCommandResult {
|
||||
|
||||
export interface SshRemoteExecutionSpec extends SshConnectionConfig {
|
||||
remoteCwd: string;
|
||||
paperclipApiUrl?: string | null;
|
||||
}
|
||||
|
||||
/**
 * Builds a CommandManagedRuntimeRunner that executes commands on a remote
 * host over SSH instead of spawning locally.
 *
 * Each execute() call wraps the requested command in a login shell on the
 * remote side (`sh -lc` / `bash -lc`), cd's into the effective cwd, and
 * injects environment variables either via `export`s (for `sh -lc SCRIPT` /
 * `bash -lc SCRIPT` invocations, so the script sees them) or an `env` prefix
 * (for plain exec'd commands). stdout/stderr are forwarded to the caller's
 * onLog hook and returned in the result.
 *
 * @param input.spec SSH connection details plus the remote working directory.
 * @param input.defaultCwd optional cwd fallback; defaults to spec.remoteCwd.
 * @param input.maxBufferBytes output buffer cap; defaults to 1 MiB.
 */
export function createSshCommandManagedRuntimeRunner(input: {
  spec: SshRemoteExecutionSpec;
  defaultCwd?: string | null;
  maxBufferBytes?: number | null;
}): CommandManagedRuntimeRunner {
  const defaultCwd = input.defaultCwd?.trim() || input.spec.remoteCwd;
  // Only accept a positive finite byte count; anything else uses the 1 MiB default.
  const maxBufferBytes =
    typeof input.maxBufferBytes === "number" && Number.isFinite(input.maxBufferBytes) && input.maxBufferBytes > 0
      ? Math.trunc(input.maxBufferBytes)
      : 1024 * 1024;

  return {
    execute: async (commandInput): Promise<RunProcessResult> => {
      const startedAt = new Date().toISOString();
      const command = commandInput.command.trim();
      const args = commandInput.args ?? [];
      const cwd = commandInput.cwd?.trim() || defaultCwd;
      // Keep only string-valued env entries; others cannot be serialized safely.
      const envEntries = Object.entries(commandInput.env ?? {})
        .filter((entry): entry is [string, string] => typeof entry[1] === "string");
      // `env K=V ... cmd` prefix for exec'd commands.
      const envPrefix = envEntries.length > 0
        ? `env ${envEntries.map(([key, value]) => `${key}=${shellQuote(value)}`).join(" ")} `
        : "";
      // `export K=V; ...` prefix for inline shell scripts.
      const exportPrefix = envEntries.length > 0
        ? envEntries.map(([key, value]) => `export ${key}=${shellQuote(value)};`).join(" ") + " "
        : "";
      // `sh -lc SCRIPT` / `bash -lc SCRIPT` runs the script inline with
      // exported env; every other invocation is quoted and exec'd.
      const commandScript = command === "sh" || command === "bash"
        ? args[0] === "-lc" && typeof args[1] === "string"
          ? `${exportPrefix}${args[1]}`
          : `${envPrefix}exec ${[shellQuote(command), ...args.map((arg) => shellQuote(arg))].join(" ")}`
        : `${envPrefix}exec ${[shellQuote(command), ...args.map((arg) => shellQuote(arg))].join(" ")}`;
      // Outer login shell gives the remote session a sane PATH before cd'ing.
      const remoteCommand = `${command === "bash" ? "bash" : "sh"} -lc ${
        shellQuote(`cd ${shellQuote(cwd)} && ${commandScript}`)
      }`;

      try {
        const result = await runSshCommand(input.spec, remoteCommand, {
          timeoutMs: commandInput.timeoutMs,
          maxBuffer: maxBufferBytes,
        });
        if (result.stdout) await commandInput.onLog?.("stdout", result.stdout);
        if (result.stderr) await commandInput.onLog?.("stderr", result.stderr);
        return {
          exitCode: 0,
          signal: null,
          timedOut: false,
          stdout: result.stdout,
          stderr: result.stderr,
          // No local process backs a remote run, so there is no pid to report.
          pid: null,
          startedAt,
        };
      } catch (error) {
        // Failures from child-process-style errors carry captured output and
        // exit metadata; surface whatever is present.
        const failure = error as {
          stdout?: unknown;
          stderr?: unknown;
          code?: unknown;
          signal?: unknown;
          killed?: unknown;
        };
        const stdout = typeof failure.stdout === "string" ? failure.stdout : "";
        const stderr = typeof failure.stderr === "string"
          ? failure.stderr
          : error instanceof Error
            ? error.message
            : String(error);
        if (stdout) await commandInput.onLog?.("stdout", stdout);
        if (stderr) await commandInput.onLog?.("stderr", stderr);
        return {
          exitCode: typeof failure.code === "number" ? failure.code : null,
          signal: typeof failure.signal === "string" ? failure.signal : null,
          // `killed === true` here is treated as a timeout kill — NOTE(review):
          // assumes runSshCommand only kills on timeout; confirm.
          timedOut: failure.killed === true,
          stdout,
          stderr,
          pid: null,
          startedAt,
        };
      }
    },
  };
}
|
||||
|
||||
export interface SshEnvLabSupport {
|
||||
@@ -83,10 +164,6 @@ export function parseSshRemoteExecutionSpec(value: unknown): SshRemoteExecutionS
|
||||
port: portValue,
|
||||
username,
|
||||
remoteCwd,
|
||||
paperclipApiUrl:
|
||||
typeof parsed.paperclipApiUrl === "string" && parsed.paperclipApiUrl.trim().length > 0
|
||||
? parsed.paperclipApiUrl.trim()
|
||||
: null,
|
||||
remoteWorkspacePath:
|
||||
typeof parsed.remoteWorkspacePath === "string" && parsed.remoteWorkspacePath.trim().length > 0
|
||||
? parsed.remoteWorkspacePath.trim()
|
||||
@@ -98,50 +175,6 @@ export function parseSshRemoteExecutionSpec(value: unknown): SshRemoteExecutionS
|
||||
};
|
||||
}
|
||||
|
||||
function normalizeHttpUrlCandidate(value: string): string | null {
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) return null;
|
||||
try {
|
||||
const parsed = new URL(trimmed);
|
||||
if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
|
||||
return null;
|
||||
}
|
||||
return parsed.origin;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Probes each candidate base URL from the remote host's perspective and
 * returns the first one whose `/api/health` endpoint responds successfully.
 *
 * Candidates are normalized to http(s) origins and de-duplicated while
 * preserving order; each probe runs `curl` over SSH so reachability is
 * tested from the remote machine, not from this process. Returns null when
 * no candidate is reachable.
 */
export async function findReachablePaperclipApiUrlOverSsh(input: {
  config: SshConnectionConfig;
  candidates: string[];
  timeoutMs?: number;
}): Promise<string | null> {
  const uniqueCandidates = Array.from(
    new Set(
      input.candidates
        .map((candidate) => normalizeHttpUrlCandidate(candidate))
        .filter((candidate): candidate is string => candidate !== null),
    ),
  );

  for (const candidate of uniqueCandidates) {
    const healthUrl = new URL("/api/health", candidate).toString();
    try {
      // curl -f fails on HTTP errors; -m mirrors the SSH-side timeout in
      // whole seconds (minimum 1s).
      await runSshCommand(
        input.config,
        `sh -lc ${shellQuote(`curl -fsS -m ${Math.max(1, Math.ceil((input.timeoutMs ?? 5_000) / 1000))} ${shellQuote(healthUrl)} >/dev/null`)}`,
        { timeoutMs: input.timeoutMs ?? 5_000 },
      );
      return candidate;
    } catch {
      // Unreachable or unhealthy: try the next candidate.
      continue;
    }
  }

  return null;
}
|
||||
|
||||
async function execFileText(
|
||||
file: string,
|
||||
args: string[],
|
||||
|
||||
@@ -125,6 +125,7 @@ export interface AdapterExecutionContext {
|
||||
runtime: AdapterRuntime;
|
||||
config: Record<string, unknown>;
|
||||
context: Record<string, unknown>;
|
||||
runtimeCommandSpec?: AdapterRuntimeCommandSpec | null;
|
||||
executionTarget?: AdapterExecutionTarget | null;
|
||||
/**
|
||||
* Legacy remote transport view. Prefer `executionTarget`, which is the
|
||||
@@ -144,6 +145,16 @@ export interface AdapterModel {
|
||||
label: string;
|
||||
}
|
||||
|
||||
export type AdapterModelProfileKey = "cheap";
|
||||
|
||||
export interface AdapterModelProfileDefinition {
|
||||
key: AdapterModelProfileKey;
|
||||
label: string;
|
||||
description?: string;
|
||||
adapterConfig: Record<string, unknown>;
|
||||
source?: "adapter_default" | "discovered";
|
||||
}
|
||||
|
||||
export type AdapterEnvironmentCheckLevel = "info" | "warn" | "error";
|
||||
|
||||
export interface AdapterEnvironmentCheck {
|
||||
@@ -318,6 +329,23 @@ export interface AdapterConfigSchema {
|
||||
fields: ConfigFieldSchema[];
|
||||
}
|
||||
|
||||
export interface AdapterRuntimeCommandSpec {
|
||||
/**
|
||||
* The command Paperclip should execute for this adapter in the current config.
|
||||
*/
|
||||
command: string;
|
||||
/**
|
||||
* Optional command name/path to probe for availability before launch.
|
||||
* Defaults to `command` when omitted by the consumer.
|
||||
*/
|
||||
detectCommand?: string | null;
|
||||
/**
|
||||
* Optional shell snippet that can install or expose the adapter command in a
|
||||
* fresh remote runtime. It should be idempotent.
|
||||
*/
|
||||
installCommand?: string | null;
|
||||
}
|
||||
|
||||
export interface ServerAdapterModule {
|
||||
type: string;
|
||||
execute(ctx: AdapterExecutionContext): Promise<AdapterExecutionResult>;
|
||||
@@ -329,6 +357,8 @@ export interface ServerAdapterModule {
|
||||
supportsLocalAgentJwt?: boolean;
|
||||
models?: AdapterModel[];
|
||||
listModels?: () => Promise<AdapterModel[]>;
|
||||
modelProfiles?: AdapterModelProfileDefinition[];
|
||||
listModelProfiles?: () => Promise<AdapterModelProfileDefinition[]>;
|
||||
/**
|
||||
* Optional explicit refresh hook for model discovery.
|
||||
* Use this when the adapter caches discovered models and needs a bypass path
|
||||
@@ -394,6 +424,11 @@ export interface ServerAdapterModule {
|
||||
* rather than reading config.paperclipRuntimeSkills.
|
||||
*/
|
||||
requiresMaterializedRuntimeSkills?: boolean;
|
||||
/**
|
||||
* Optional: describe how this adapter's runtime command should be launched
|
||||
* and provisioned in fresh remote environments such as sandboxes.
|
||||
*/
|
||||
getRuntimeCommandSpec?: (config: Record<string, unknown>) => AdapterRuntimeCommandSpec | null;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -435,6 +470,14 @@ export interface CreateConfigValues {
|
||||
promptTemplate: string;
|
||||
model: string;
|
||||
thinkingEffort: string;
|
||||
/**
|
||||
* Optional cheap model profile config for new agents on adapters that
|
||||
* support model profiles. Persisted under
|
||||
* `runtimeConfig.modelProfiles.cheap.adapterConfig`, never on the primary
|
||||
* `adapterConfig`.
|
||||
*/
|
||||
cheapModel?: string;
|
||||
cheapModelEnabled?: boolean;
|
||||
chrome: boolean;
|
||||
dangerouslySkipPermissions: boolean;
|
||||
search: boolean;
|
||||
|
||||
64
packages/adapters/acpx-local/package.json
Normal file
@@ -0,0 +1,64 @@
|
||||
{
|
||||
"name": "@paperclipai/adapter-acpx-local",
|
||||
"version": "0.3.1",
|
||||
"license": "MIT",
|
||||
"homepage": "https://github.com/paperclipai/paperclip",
|
||||
"bugs": {
|
||||
"url": "https://github.com/paperclipai/paperclip/issues"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/paperclipai/paperclip",
|
||||
"directory": "packages/adapters/acpx-local"
|
||||
},
|
||||
"type": "module",
|
||||
"exports": {
|
||||
".": "./src/index.ts",
|
||||
"./server": "./src/server/index.ts",
|
||||
"./ui": "./src/ui/index.ts",
|
||||
"./cli": "./src/cli/index.ts"
|
||||
},
|
||||
"publishConfig": {
|
||||
"access": "public",
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.js"
|
||||
},
|
||||
"./server": {
|
||||
"types": "./dist/server/index.d.ts",
|
||||
"import": "./dist/server/index.js"
|
||||
},
|
||||
"./ui": {
|
||||
"types": "./dist/ui/index.d.ts",
|
||||
"import": "./dist/ui/index.js"
|
||||
},
|
||||
"./cli": {
|
||||
"types": "./dist/cli/index.d.ts",
|
||||
"import": "./dist/cli/index.js"
|
||||
}
|
||||
},
|
||||
"main": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"skills"
|
||||
],
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rm -rf dist",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"@agentclientprotocol/claude-agent-acp": "^0.31.4",
|
||||
"@paperclipai/adapter-utils": "workspace:*",
|
||||
"@zed-industries/codex-acp": "^0.12.0",
|
||||
"acpx": "^0.6.1",
|
||||
"picocolors": "^1.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.6.0",
|
||||
"typescript": "^5.7.3"
|
||||
}
|
||||
}
|
||||
121
packages/adapters/acpx-local/src/cli/format-event.test.ts
Normal file
@@ -0,0 +1,121 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { printAcpxStreamEvent } from "./format-event.js";
|
||||
|
||||
function emit(payload: Record<string, unknown>): string {
|
||||
return JSON.stringify(payload);
|
||||
}
|
||||
|
||||
interface CapturedOutput {
|
||||
log: string[];
|
||||
stdout: string[];
|
||||
}
|
||||
|
||||
/**
 * Intercepts console.log and process.stdout.write so tests can assert on
 * rendered output. Captured strings are collected per sink; `restore` must
 * be called (in afterEach) to undo both spies.
 */
function captureOutput(): { capture: CapturedOutput; restore: () => void } {
  const log: string[] = [];
  const stdout: string[] = [];
  const logSpy = vi.spyOn(console, "log").mockImplementation((value?: unknown) => {
    log.push(String(value ?? ""));
  });
  const stdoutSpy = vi.spyOn(process.stdout, "write").mockImplementation(((chunk: unknown) => {
    stdout.push(String(chunk ?? ""));
    // Mimic the real write() contract, which returns a backpressure boolean.
    return true;
  }) as typeof process.stdout.write);
  return {
    capture: { log, stdout },
    restore: () => {
      logSpy.mockRestore();
      stdoutSpy.mockRestore();
    },
  };
}
|
||||
|
||||
function strip(value: string): string {
|
||||
return value.replace(/\x1b\[[0-9;]*m/g, "");
|
||||
}
|
||||
|
||||
// Behavior coverage for the CLI stream-event renderer. Every case captures
// console/stdout output via captureOutput() and strips ANSI color codes
// before asserting, so assertions stay stable across color support changes.
describe("printAcpxStreamEvent", () => {
  let captured: CapturedOutput;
  let restore: () => void;

  beforeEach(() => {
    const result = captureOutput();
    captured = result.capture;
    restore = result.restore;
  });

  afterEach(() => {
    restore();
  });

  it("renders acpx.session as a labeled session header", () => {
    printAcpxStreamEvent(
      emit({
        type: "acpx.session",
        agent: "claude",
        acpSessionId: "acp-1",
        mode: "persistent",
        permissionMode: "approve-all",
      }),
      false,
    );
    expect(captured.log.map(strip)).toEqual(["claude session: acp-1 [persistent / approve-all]"]);
  });

  it("streams output text_delta to stdout for live progress", () => {
    // Output deltas must bypass console.log and go straight to stdout.
    printAcpxStreamEvent(
      emit({ type: "acpx.text_delta", text: "hello", channel: "output" }),
      false,
    );
    expect(captured.log).toEqual([]);
    expect(captured.stdout.map(strip)).toEqual(["hello"]);
  });

  it("renders thought text_delta on its own line", () => {
    printAcpxStreamEvent(
      emit({ type: "acpx.text_delta", text: "thinking…", channel: "thought" }),
      false,
    );
    expect(captured.log.map(strip)).toEqual(["thinking…"]);
  });

  it("renders tool_call with status and id", () => {
    printAcpxStreamEvent(
      emit({
        type: "acpx.tool_call",
        name: "read",
        toolCallId: "tool-1",
        status: "running",
        text: "read README.md",
      }),
      false,
    );
    expect(captured.log.map(strip)).toEqual([
      "tool_call: read [running] (tool-1)",
      "read README.md",
    ]);
  });

  it("renders status events with optional context window", () => {
    printAcpxStreamEvent(
      emit({ type: "acpx.status", tag: "context_window", used: 100, size: 200000 }),
      false,
    );
    expect(captured.log.map(strip)).toEqual(["status: context_window (100/200000 ctx)"]);
  });

  it("renders acpx.result and acpx.error", () => {
    printAcpxStreamEvent(emit({ type: "acpx.result", summary: "completed", stopReason: "end_turn" }), false);
    printAcpxStreamEvent(emit({ type: "acpx.error", message: "auth required" }), false);
    expect(captured.log.map(strip)).toEqual(["result: completed", "error: auth required"]);
  });

  it("falls back to plain output for non-JSON lines", () => {
    printAcpxStreamEvent("not json", false);
    expect(captured.log).toEqual(["not json"]);
  });

  it("still emits unknown / non-JSON lines when debug is enabled", () => {
    // Debug mode colorizes the line, so compare after stripping ANSI codes.
    printAcpxStreamEvent("not json", true);
    expect(strip(captured.log[0])).toBe("not json");
  });
});
|
||||
121
packages/adapters/acpx-local/src/cli/format-event.ts
Normal file
@@ -0,0 +1,121 @@
|
||||
import pc from "picocolors";
|
||||
|
||||
function parseJson(line: string): Record<string, unknown> | null {
|
||||
try {
|
||||
const parsed = JSON.parse(line);
|
||||
if (typeof parsed !== "object" || parsed === null || Array.isArray(parsed)) return null;
|
||||
return parsed as Record<string, unknown>;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function asString(value: unknown, fallback = ""): string {
|
||||
return typeof value === "string" ? value : fallback;
|
||||
}
|
||||
|
||||
function asNumber(value: unknown, fallback = 0): number {
|
||||
return typeof value === "number" && Number.isFinite(value) ? value : fallback;
|
||||
}
|
||||
|
||||
function stringify(value: unknown): string {
|
||||
if (typeof value === "string") return value;
|
||||
if (value === null || value === undefined) return "";
|
||||
try {
|
||||
return JSON.stringify(value, null, 2);
|
||||
} catch {
|
||||
return String(value);
|
||||
}
|
||||
}
|
||||
|
||||
function pickToolUseId(parsed: Record<string, unknown>): string {
|
||||
return (
|
||||
asString(parsed.toolCallId) ||
|
||||
asString(parsed.toolUseId) ||
|
||||
asString(parsed.id)
|
||||
);
|
||||
}
|
||||
|
||||
function statusLine(parsed: Record<string, unknown>): string {
|
||||
const text = asString(parsed.text).trim();
|
||||
const tag = asString(parsed.tag).trim();
|
||||
const used = asNumber(parsed.used, -1);
|
||||
const size = asNumber(parsed.size, -1);
|
||||
const parts: string[] = [];
|
||||
if (text) parts.push(text);
|
||||
if (tag && !text) parts.push(tag);
|
||||
if (used >= 0 && size > 0) parts.push(`(${used}/${size} ctx)`);
|
||||
return parts.join(" ") || tag || "status";
|
||||
}
|
||||
|
||||
/**
 * Renders one line of the acpx JSON event stream for CLI display.
 *
 * Known `acpx.*` event types get colorized, structured formatting; non-JSON
 * lines and unknown types are echoed as-is (dimmed when `debug` is true).
 * Output text deltas stream to stdout without a trailing newline so the
 * agent's output appears live; everything else goes through console.log.
 */
export function printAcpxStreamEvent(raw: string, debug: boolean): void {
  const line = raw.trim();
  if (!line) return;
  const parsed = parseJson(line);
  if (!parsed) {
    // Not JSON: pass the raw line through verbatim (gray in debug mode).
    if (debug) console.log(pc.gray(line));
    else console.log(line);
    return;
  }

  const type = asString(parsed.type);
  if (type === "acpx.session") {
    const agent = asString(parsed.agent, "acpx");
    // Prefer the ACP session id, then the generic/runtime identifiers.
    const session =
      asString(parsed.acpSessionId) ||
      asString(parsed.sessionId) ||
      asString(parsed.runtimeSessionName);
    const mode = asString(parsed.mode);
    const permissionMode = asString(parsed.permissionMode);
    const tail = [mode, permissionMode].filter(Boolean).join(" / ");
    const suffix = tail ? ` [${tail}]` : "";
    console.log(pc.blue(`${agent} session${session ? `: ${session}` : ""}${suffix}`));
    return;
  }
  if (type === "acpx.text_delta") {
    const text = asString(parsed.text);
    if (!text) return;
    const channel = asString(parsed.channel) || asString(parsed.stream);
    const isThought = channel === "thought" || channel === "thinking";
    // Thoughts get their own dimmed line; output streams inline on stdout.
    if (isThought) console.log(pc.gray(text));
    else process.stdout.write(pc.green(text));
    return;
  }
  if (type === "acpx.tool_call") {
    const name = asString(parsed.name, "acp_tool");
    const status = asString(parsed.status);
    const id = pickToolUseId(parsed);
    const header = status ? `tool_call: ${name} [${status}]` : `tool_call: ${name}`;
    const idSuffix = id ? ` (${id})` : "";
    const isError = status === "failed" || status === "cancelled";
    console.log((isError ? pc.red : pc.yellow)(`${header}${idSuffix}`));
    // Show structured input when present, otherwise the free-text summary.
    if (parsed.input !== undefined) {
      console.log(pc.gray(stringify(parsed.input)));
    } else {
      const text = asString(parsed.text).trim();
      if (text) console.log(pc.gray(text));
    }
    return;
  }
  if (type === "acpx.tool_result") {
    const isError = parsed.isError === true || parsed.error !== undefined;
    console.log((isError ? pc.red : pc.cyan)(`tool_result: ${asString(parsed.name, "acp_tool")}`));
    // First non-nullish of content/output/error is the displayable payload.
    const content = stringify(parsed.content ?? parsed.output ?? parsed.error);
    if (content) console.log((isError ? pc.red : pc.gray)(content));
    return;
  }
  if (type === "acpx.status") {
    console.log(pc.gray(`status: ${statusLine(parsed)}`));
    return;
  }
  if (type === "acpx.result") {
    // summary > stopReason > subtype > "complete".
    const summary = asString(parsed.summary, asString(parsed.stopReason, asString(parsed.subtype, "complete")));
    console.log(pc.blue(`result: ${summary}`));
    return;
  }
  if (type === "acpx.error") {
    console.log(pc.red(`error: ${asString(parsed.message, line)}`));
    return;
  }
  // Unknown acpx.* type: echo the raw JSON line.
  console.log(debug ? pc.gray(line) : line);
}
|
||||
1
packages/adapters/acpx-local/src/cli/index.ts
Normal file
@@ -0,0 +1 @@
|
||||
export { printAcpxStreamEvent } from "./format-event.js";
|
||||
47
packages/adapters/acpx-local/src/index.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
// Adapter identity used by Paperclip adapter registries and the config UI.
export const type = "acpx_local";
export const label = "ACPX (local)";

// Defaults applied when an agent config omits the corresponding field.
export const DEFAULT_ACPX_LOCAL_AGENT = "claude";
export const DEFAULT_ACPX_LOCAL_MODE = "persistent";
export const DEFAULT_ACPX_LOCAL_PERMISSION_MODE = "approve-all";
export const DEFAULT_ACPX_LOCAL_NON_INTERACTIVE_PERMISSIONS = "deny";
// 0 means "no adapter-enforced timeout".
export const DEFAULT_ACPX_LOCAL_TIMEOUT_SEC = 0;

// Selectable ACP agents surfaced in the adapter's config schema.
export const acpxAgentOptions = [
  { id: "claude", label: "Claude via ACPX" },
  { id: "codex", label: "Codex via ACPX" },
  { id: "custom", label: "Custom ACP command" },
] as const;
|
||||
|
||||
export const agentConfigurationDoc = `# acpx_local agent configuration
|
||||
|
||||
Adapter: acpx_local
|
||||
|
||||
Use when:
|
||||
- The agent should run through Agent Client Protocol via ACPX on the Paperclip host or a managed execution environment.
|
||||
- You want one built-in adapter that can target Claude, Codex, or a custom ACP server command.
|
||||
- You need Paperclip-managed session identity and live streamed ACP events in later ACPX runtime phases.
|
||||
|
||||
Don't use when:
|
||||
- You need today's stable Claude Code or Codex CLI wrapper behavior. Use claude_local or codex_local until acpx_local runtime execution is enabled.
|
||||
- The host cannot satisfy ACPX's Node >=22.12.0 prerequisite.
|
||||
- The agent runtime is not an ACP server and cannot be launched through ACPX.
|
||||
|
||||
Core fields:
|
||||
- agent (string, optional): claude, codex, or custom. Defaults to claude.
|
||||
- agentCommand (string, optional): custom ACP command when agent=custom, or an override for a built-in ACP agent command.
|
||||
- mode (string, optional): persistent or oneshot. Defaults to persistent.
|
||||
- cwd (string, optional): default absolute working directory fallback for the agent process.
|
||||
- permissionMode (string, optional): defaults to approve-all, meaning ACPX permission requests are auto-approved.
|
||||
- nonInteractivePermissions (string, optional): fallback behavior when ACPX cannot ask interactively. Supported values are deny and fail.
|
||||
- stateDir (string, optional): ACPX state directory. Defaults to a Paperclip-managed company/agent scoped location.
|
||||
- instructionsFilePath (string, optional): absolute path to a markdown instructions file used by Paperclip prompt construction.
|
||||
- promptTemplate (string, optional): run prompt template.
|
||||
- bootstrapPromptTemplate (string, optional): first-run bootstrap prompt template.
|
||||
- timeoutSec (number, optional): run timeout in seconds. Defaults to 0, meaning no adapter timeout.
|
||||
- env (object, optional): KEY=VALUE environment variables or secret bindings.
|
||||
|
||||
Dependency decision:
|
||||
- acpx_local declares direct dependencies on acpx, @agentclientprotocol/claude-agent-acp, and @zed-industries/codex-acp so the built-in adapter has deterministic package resolution instead of relying on globally installed ACP commands.
|
||||
- ACPX currently requires Node >=22.12.0. Paperclip keeps the repo-wide Node >=20 engine and surfaces the stricter runtime prerequisite through acpx_local diagnostics.
|
||||
`;
|
||||
102
packages/adapters/acpx-local/src/server/config-schema.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
import type { AdapterConfigSchema } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
DEFAULT_ACPX_LOCAL_AGENT,
|
||||
DEFAULT_ACPX_LOCAL_MODE,
|
||||
DEFAULT_ACPX_LOCAL_NON_INTERACTIVE_PERMISSIONS,
|
||||
DEFAULT_ACPX_LOCAL_PERMISSION_MODE,
|
||||
DEFAULT_ACPX_LOCAL_TIMEOUT_SEC,
|
||||
acpxAgentOptions,
|
||||
} from "../index.js";
|
||||
|
||||
/**
 * Declares the acpx_local adapter's configuration form: field keys, labels,
 * input types, defaults, select options, and user-facing hints. Consumed by
 * Paperclip to render and validate adapter settings.
 */
export function getConfigSchema(): AdapterConfigSchema {
  return {
    fields: [
      {
        key: "agent",
        label: "ACP agent",
        type: "select",
        default: DEFAULT_ACPX_LOCAL_AGENT,
        required: true,
        // Options are derived from the shared list so UI and runtime agree.
        options: acpxAgentOptions.map((agent) => ({ value: agent.id, label: agent.label })),
        hint: "Choose the ACP agent launched through ACPX.",
      },
      {
        key: "agentCommand",
        label: "Agent command",
        type: "text",
        hint: "Required for custom agents; optional override for built-in Claude or Codex ACP commands.",
      },
      {
        key: "mode",
        label: "Session mode",
        type: "select",
        default: DEFAULT_ACPX_LOCAL_MODE,
        options: [
          { value: "persistent", label: "Persistent" },
          { value: "oneshot", label: "One shot" },
        ],
      },
      {
        key: "permissionMode",
        label: "Permission mode",
        type: "select",
        default: DEFAULT_ACPX_LOCAL_PERMISSION_MODE,
        options: [
          { value: "approve-all", label: "Approve all" },
          { value: "default", label: "Approve reads" },
        ],
        hint: "Defaults to maximum permissions. Approve reads grants read-only requests and asks for approval on writes.",
      },
      {
        key: "nonInteractivePermissions",
        label: "Non-interactive permissions",
        type: "select",
        default: DEFAULT_ACPX_LOCAL_NON_INTERACTIVE_PERMISSIONS,
        options: [
          { value: "deny", label: "Deny" },
          { value: "fail", label: "Fail" },
        ],
      },
      {
        key: "cwd",
        label: "Working directory",
        type: "text",
        hint: "Absolute fallback directory. Paperclip execution workspaces can override this at runtime.",
      },
      {
        key: "stateDir",
        label: "State directory",
        type: "text",
        hint: "Optional ACPX session state directory. Defaults to Paperclip-managed company/agent scoped storage.",
      },
      {
        key: "instructionsFilePath",
        label: "Instructions file",
        type: "text",
        hint: "Optional absolute path to markdown instructions injected into the run prompt.",
      },
      {
        key: "promptTemplate",
        label: "Prompt template",
        type: "textarea",
      },
      {
        key: "bootstrapPromptTemplate",
        label: "Bootstrap prompt template",
        type: "textarea",
      },
      {
        key: "timeoutSec",
        label: "Timeout seconds",
        type: "number",
        default: DEFAULT_ACPX_LOCAL_TIMEOUT_SEC,
      },
      {
        key: "env",
        label: "Environment JSON",
        type: "textarea",
        hint: "Optional JSON object of environment values or secret bindings.",
      },
    ],
  };
}
|
||||
425
packages/adapters/acpx-local/src/server/execute.test.ts
Normal file
@@ -0,0 +1,425 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
import { createAcpxLocalExecutor } from "./execute.js";
|
||||
|
||||
const tempRoots: string[] = [];
|
||||
|
||||
async function makeTempRoot() {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-acpx-skills-"));
|
||||
tempRoots.push(root);
|
||||
return root;
|
||||
}
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(tempRoots.splice(0).map((root) => fs.rm(root, { recursive: true, force: true })));
|
||||
});
|
||||
|
||||
async function pathExists(candidate: string): Promise<boolean> {
|
||||
return fs.access(candidate).then(() => true).catch(() => false);
|
||||
}
|
||||
|
||||
async function onlyChildDir(parent: string): Promise<string> {
|
||||
const entries = await fs.readdir(parent);
|
||||
expect(entries).toHaveLength(1);
|
||||
return path.join(parent, entries[0]!);
|
||||
}
|
||||
|
||||
async function createSkill(root: string, name: string, body = `---\nrequired: false\n---\n# ${name}\n`) {
|
||||
const skillDir = path.join(root, name);
|
||||
await fs.mkdir(skillDir, { recursive: true });
|
||||
await fs.writeFile(path.join(skillDir, "SKILL.md"), body, "utf8");
|
||||
return {
|
||||
key: `paperclipai/test/${name}`,
|
||||
runtimeName: name,
|
||||
source: skillDir,
|
||||
required: false,
|
||||
};
|
||||
}
|
||||
|
||||
function buildRuntime() {
|
||||
return {
|
||||
ensureSession: async () => ({
|
||||
backendSessionId: "backend-session",
|
||||
agentSessionId: "agent-session",
|
||||
runtimeSessionName: "runtime-session",
|
||||
}),
|
||||
startTurn: () => ({
|
||||
events: (async function* () {
|
||||
yield { type: "done", stopReason: "end_turn" };
|
||||
})(),
|
||||
result: Promise.resolve({ status: "completed", stopReason: "end_turn" }),
|
||||
cancel: async () => {},
|
||||
}),
|
||||
close: async () => {},
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Drives the acpx_local executor once with the in-memory stub runtime and
 * returns the captured side channels (logs, meta payloads, runtime factory
 * options) plus the run result. Asserts the run exits cleanly (exitCode 0).
 */
async function runExecutor(
  config: Record<string, unknown>,
  options: {
    context?: Record<string, unknown>;
    executionTransport?: Record<string, unknown>;
  } = {},
) {
  // Captured in call order so tests can assert on ordering/content.
  const runtimeOptions: Record<string, unknown>[] = [];
  const meta: Record<string, unknown>[] = [];
  const logs: Array<{ stream: string; text: string }> = [];
  // Replace the real ACP runtime with the stub; record the factory options.
  const execute = createAcpxLocalExecutor({
    createRuntime: (options) => {
      runtimeOptions.push(options as unknown as Record<string, unknown>);
      return buildRuntime() as never;
    },
  });

  const result = await execute({
    runId: "run-1",
    agent: {
      id: "agent-1",
      companyId: "company-1",
    },
    runtime: {},
    config,
    context: options.context ?? {},
    executionTransport: options.executionTransport,
    onLog: async (stream: "stdout" | "stderr", text: string) => {
      logs.push({ stream, text });
    },
    onMeta: async (payload: unknown) => {
      meta.push(payload as Record<string, unknown>);
    },
  } as never);

  expect(result.exitCode).toBe(0);
  return { logs, meta, runtimeOptions, result };
}
|
||||
|
||||
describe("acpx_local runtime skill isolation", () => {
|
||||
it.skipIf(process.platform === "win32")("materializes ACPX Claude skills without symlinked descendants", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const skillRoot = path.join(root, "skills");
|
||||
const outsideRoot = path.join(root, "outside");
|
||||
await fs.mkdir(outsideRoot, { recursive: true });
|
||||
await fs.writeFile(path.join(outsideRoot, "secret.txt"), "do not expose", "utf8");
|
||||
const skill = await createSkill(skillRoot, "danger");
|
||||
await fs.symlink(path.join(outsideRoot, "secret.txt"), path.join(skill.source, "leak.txt"));
|
||||
await fs.symlink(outsideRoot, path.join(skill.source, "leak-dir"));
|
||||
|
||||
const stateDir = path.join(root, "state");
|
||||
const { meta } = await runExecutor({
|
||||
agent: "claude",
|
||||
stateDir,
|
||||
paperclipRuntimeSkills: [skill],
|
||||
paperclipSkillSync: { desiredSkills: [skill.key] },
|
||||
});
|
||||
|
||||
const mountedRoot = await onlyChildDir(path.join(stateDir, "runtime-skills", "claude"));
|
||||
const skillsHome = path.join(mountedRoot, ".claude", "skills");
|
||||
const materializedSkill = path.join(skillsHome, skill.runtimeName);
|
||||
expect(await fs.readFile(path.join(materializedSkill, "SKILL.md"), "utf8")).toContain("# danger");
|
||||
expect(await pathExists(path.join(materializedSkill, "leak.txt"))).toBe(false);
|
||||
expect(await pathExists(path.join(materializedSkill, "leak-dir"))).toBe(false);
|
||||
expect(String(meta[0]?.prompt ?? "")).toContain(`Skill root: ${skillsHome}`);
|
||||
});
|
||||
|
||||
it.skipIf(process.platform === "win32")("revokes removed ACPX Codex skills and skips symlinked descendants", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const skillRoot = path.join(root, "skills");
|
||||
const outsideRoot = path.join(root, "outside");
|
||||
const codexHome = path.join(root, "codex-home");
|
||||
await fs.mkdir(outsideRoot, { recursive: true });
|
||||
await fs.writeFile(path.join(outsideRoot, "secret.txt"), "do not expose", "utf8");
|
||||
const keep = await createSkill(skillRoot, "keep");
|
||||
const remove = await createSkill(skillRoot, "remove");
|
||||
await fs.symlink(path.join(outsideRoot, "secret.txt"), path.join(keep.source, "leak.txt"));
|
||||
await fs.symlink(outsideRoot, path.join(keep.source, "leak-dir"));
|
||||
|
||||
const baseConfig = {
|
||||
agent: "codex",
|
||||
stateDir: path.join(root, "state"),
|
||||
env: { CODEX_HOME: codexHome },
|
||||
paperclipRuntimeSkills: [keep, remove],
|
||||
};
|
||||
|
||||
await runExecutor({
|
||||
...baseConfig,
|
||||
paperclipSkillSync: { desiredSkills: [keep.key, remove.key] },
|
||||
});
|
||||
expect(await pathExists(path.join(codexHome, "skills", remove.runtimeName, "SKILL.md"))).toBe(true);
|
||||
|
||||
await runExecutor({
|
||||
...baseConfig,
|
||||
paperclipSkillSync: { desiredSkills: [keep.key] },
|
||||
});
|
||||
|
||||
expect(await pathExists(path.join(codexHome, "skills", keep.runtimeName, "SKILL.md"))).toBe(true);
|
||||
expect(await pathExists(path.join(codexHome, "skills", keep.runtimeName, "leak.txt"))).toBe(false);
|
||||
expect(await pathExists(path.join(codexHome, "skills", keep.runtimeName, "leak-dir"))).toBe(false);
|
||||
expect(await pathExists(path.join(codexHome, "skills", remove.runtimeName))).toBe(false);
|
||||
});
|
||||
|
||||
// A symlink left behind by an older sync (skill linked directly from its
// source dir) must be removed once the skill is no longer desired.
it.skipIf(process.platform === "win32")("removes legacy ACPX Codex skill symlinks when a skill is no longer desired", async () => {
  const root = await makeTempRoot();
  const skillRoot = path.join(root, "skills");
  const codexHome = path.join(root, "codex-home");
  const legacy = await createSkill(skillRoot, "legacy");
  // Simulate the legacy layout: CODEX_HOME/skills/<name> -> skill source dir.
  const skillsHome = path.join(codexHome, "skills");
  await fs.mkdir(skillsHome, { recursive: true });
  await fs.symlink(legacy.source, path.join(skillsHome, legacy.runtimeName));

  // Run with the skill available but not desired; sync should revoke it.
  await runExecutor({
    agent: "codex",
    stateDir: path.join(root, "state"),
    env: { CODEX_HOME: codexHome },
    paperclipRuntimeSkills: [legacy],
    paperclipSkillSync: { desiredSkills: [] },
  });

  expect(await pathExists(path.join(skillsHome, legacy.runtimeName))).toBe(false);
});
|
||||
|
||||
it.skipIf(process.platform === "win32")("replaces stale managed Codex auth files with source symlinks", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const sourceCodexHome = path.join(root, "source-codex-home");
|
||||
const paperclipHome = path.join(root, "paperclip-home");
|
||||
const paperclipInstanceId = "test-instance";
|
||||
const managedCodexHome = path.join(
|
||||
paperclipHome,
|
||||
"instances",
|
||||
paperclipInstanceId,
|
||||
"companies",
|
||||
"company-1",
|
||||
"codex-home",
|
||||
);
|
||||
await fs.mkdir(sourceCodexHome, { recursive: true });
|
||||
await fs.mkdir(managedCodexHome, { recursive: true });
|
||||
const sourceAuth = path.join(sourceCodexHome, "auth.json");
|
||||
const managedAuth = path.join(managedCodexHome, "auth.json");
|
||||
await fs.writeFile(sourceAuth, "{\"source\":true}", "utf8");
|
||||
await fs.writeFile(managedAuth, "{\"stale\":true}", "utf8");
|
||||
|
||||
const previousCodexHome = process.env.CODEX_HOME;
|
||||
const previousPaperclipHome = process.env.PAPERCLIP_HOME;
|
||||
const previousPaperclipInstanceId = process.env.PAPERCLIP_INSTANCE_ID;
|
||||
try {
|
||||
process.env.CODEX_HOME = sourceCodexHome;
|
||||
process.env.PAPERCLIP_HOME = paperclipHome;
|
||||
process.env.PAPERCLIP_INSTANCE_ID = paperclipInstanceId;
|
||||
await runExecutor({
|
||||
agent: "codex",
|
||||
stateDir: path.join(root, "state"),
|
||||
paperclipRuntimeSkills: [],
|
||||
paperclipSkillSync: { desiredSkills: [] },
|
||||
});
|
||||
} finally {
|
||||
if (previousCodexHome === undefined) delete process.env.CODEX_HOME;
|
||||
else process.env.CODEX_HOME = previousCodexHome;
|
||||
if (previousPaperclipHome === undefined) delete process.env.PAPERCLIP_HOME;
|
||||
else process.env.PAPERCLIP_HOME = previousPaperclipHome;
|
||||
if (previousPaperclipInstanceId === undefined) delete process.env.PAPERCLIP_INSTANCE_ID;
|
||||
else process.env.PAPERCLIP_INSTANCE_ID = previousPaperclipInstanceId;
|
||||
}
|
||||
|
||||
const authStat = await fs.lstat(managedAuth);
|
||||
expect(authStat.isSymbolicLink()).toBe(true);
|
||||
expect(path.resolve(path.dirname(managedAuth), await fs.readlink(managedAuth))).toBe(sourceAuth);
|
||||
});
|
||||
|
||||
it("keeps fresh credential wrapper scripts across ACPX agent changes", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const stateDir = path.join(root, "state");
|
||||
const baseConfig = {
|
||||
agentCommand: "node ./fake-acp.js",
|
||||
stateDir,
|
||||
};
|
||||
|
||||
await runExecutor({
|
||||
...baseConfig,
|
||||
agent: "custom-a",
|
||||
env: { PAPERCLIP_API_KEY: "old-key" },
|
||||
});
|
||||
await runExecutor({
|
||||
...baseConfig,
|
||||
agent: "custom-b",
|
||||
env: { PAPERCLIP_API_KEY: "new-key" },
|
||||
});
|
||||
|
||||
const wrappers = await fs.readdir(path.join(stateDir, "wrappers"));
|
||||
expect(wrappers.filter((name) => name.endsWith(".sh"))).toHaveLength(2);
|
||||
expect(wrappers.filter((name) => name.endsWith(".env"))).toHaveLength(2);
|
||||
expect(wrappers.some((name) => name.startsWith("custom-a-"))).toBe(true);
|
||||
expect(wrappers.some((name) => name.startsWith("custom-b-"))).toBe(true);
|
||||
const wrapperPath = path.join(stateDir, "wrappers", wrappers.find((name) => name.startsWith("custom-b-") && name.endsWith(".sh"))!);
|
||||
const envPath = path.join(stateDir, "wrappers", wrappers.find((name) => name.startsWith("custom-b-") && name.endsWith(".env"))!);
|
||||
const wrapper = await fs.readFile(wrapperPath, "utf8");
|
||||
const env = await fs.readFile(envPath, "utf8");
|
||||
expect((await fs.stat(envPath)).mode & 0o777).toBe(0o600);
|
||||
expect((await fs.stat(wrapperPath)).mode & 0o777).toBe(0o700);
|
||||
expect(wrapper).toContain("node ./fake-acp.js");
|
||||
expect(wrapper).not.toContain("PAPERCLIP_API_KEY");
|
||||
expect(wrapper).not.toContain("new-key");
|
||||
expect(wrapper).not.toContain("old-key");
|
||||
expect(env).toContain("PAPERCLIP_API_KEY='new-key'");
|
||||
expect(env).not.toContain("old-key");
|
||||
});
|
||||
|
||||
it("shapes ACPX wrapper workspace env for remote execution identities", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const stateDir = path.join(root, "state");
|
||||
const workspaceDir = path.join(root, "workspace");
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
|
||||
await runExecutor(
|
||||
{
|
||||
agentCommand: "node ./fake-acp.js",
|
||||
stateDir,
|
||||
},
|
||||
{
|
||||
context: {
|
||||
paperclipWorkspace: {
|
||||
cwd: workspaceDir,
|
||||
source: "project_primary",
|
||||
strategy: "git_worktree",
|
||||
workspaceId: "workspace-1",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
branchName: "feature/remote-acpx",
|
||||
worktreePath: workspaceDir,
|
||||
},
|
||||
},
|
||||
executionTransport: {
|
||||
remoteExecution: {
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteWorkspacePath: "/remote/workspace",
|
||||
remoteCwd: "/remote/workspace",
|
||||
privateKey: "PRIVATE KEY",
|
||||
knownHosts: "[127.0.0.1]:2222 ssh-ed25519 AAAA",
|
||||
strictHostKeyChecking: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
const wrappers = await fs.readdir(path.join(stateDir, "wrappers"));
|
||||
const envPath = path.join(
|
||||
stateDir,
|
||||
"wrappers",
|
||||
wrappers.find((name) => name.endsWith(".env"))!,
|
||||
);
|
||||
const env = await fs.readFile(envPath, "utf8");
|
||||
|
||||
expect(env).toContain("PAPERCLIP_WORKSPACE_CWD='/remote/workspace'");
|
||||
expect(env).not.toContain("PAPERCLIP_WORKSPACE_WORKTREE_PATH=");
|
||||
});
|
||||
|
||||
it("cleans aged credential wrapper scripts across ACPX agent changes", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const stateDir = path.join(root, "state");
|
||||
const wrappersDir = path.join(stateDir, "wrappers");
|
||||
const baseConfig = {
|
||||
agentCommand: "node ./fake-acp.js",
|
||||
stateDir,
|
||||
};
|
||||
|
||||
await runExecutor({
|
||||
...baseConfig,
|
||||
agent: "custom-a",
|
||||
env: { PAPERCLIP_API_KEY: "old-key" },
|
||||
});
|
||||
const oldDate = new Date(Date.now() - 16 * 60 * 1000);
|
||||
await Promise.all(
|
||||
(await fs.readdir(wrappersDir))
|
||||
.filter((name) => name.startsWith("custom-a-"))
|
||||
.map((name) => fs.utimes(path.join(wrappersDir, name), oldDate, oldDate)),
|
||||
);
|
||||
|
||||
await runExecutor({
|
||||
...baseConfig,
|
||||
agent: "custom-b",
|
||||
env: { PAPERCLIP_API_KEY: "new-key" },
|
||||
});
|
||||
|
||||
const wrappers = await fs.readdir(wrappersDir);
|
||||
expect(wrappers.filter((name) => name.endsWith(".sh"))).toHaveLength(1);
|
||||
expect(wrappers.filter((name) => name.endsWith(".env"))).toHaveLength(1);
|
||||
expect(wrappers.some((name) => name.startsWith("custom-a-"))).toBe(false);
|
||||
expect(wrappers.some((name) => name.startsWith("custom-b-"))).toBe(true);
|
||||
});
|
||||
|
||||
it("keeps distinct wrapper env files for concurrent runs with different credentials", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const stateDir = path.join(root, "state");
|
||||
const baseConfig = {
|
||||
agent: "custom-a",
|
||||
agentCommand: "node ./fake-acp.js",
|
||||
stateDir,
|
||||
};
|
||||
|
||||
await runExecutor({
|
||||
...baseConfig,
|
||||
env: { PAPERCLIP_API_KEY: "first-key" },
|
||||
});
|
||||
await runExecutor({
|
||||
...baseConfig,
|
||||
env: { PAPERCLIP_API_KEY: "second-key" },
|
||||
});
|
||||
|
||||
const envFileNames = (await fs.readdir(path.join(stateDir, "wrappers"))).filter((name) => name.endsWith(".env"));
|
||||
expect(envFileNames).toHaveLength(2);
|
||||
const envFiles = await Promise.all(
|
||||
envFileNames.map(async (name) => fs.readFile(path.join(stateDir, "wrappers", name), "utf8")),
|
||||
);
|
||||
expect(envFiles.filter((contents) => contents.includes("PAPERCLIP_API_KEY='first-key'"))).toHaveLength(1);
|
||||
expect(envFiles.filter((contents) => contents.includes("PAPERCLIP_API_KEY='second-key'"))).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("passes Paperclip env through the ACP agent wrapper instead of process.env", async () => {
|
||||
let observedApiKeyDuringStream: string | undefined;
|
||||
const execute = createAcpxLocalExecutor({
|
||||
createRuntime: () => ({
|
||||
ensureSession: async () => ({
|
||||
backendSessionId: "backend-session",
|
||||
agentSessionId: "agent-session",
|
||||
runtimeSessionName: "runtime-session",
|
||||
}),
|
||||
startTurn: () => ({
|
||||
events: (async function* () {
|
||||
await Promise.resolve();
|
||||
observedApiKeyDuringStream = process.env.PAPERCLIP_API_KEY;
|
||||
yield { type: "done", stopReason: "end_turn" };
|
||||
})(),
|
||||
result: Promise.resolve({ status: "completed", stopReason: "end_turn" }),
|
||||
cancel: async () => {},
|
||||
}),
|
||||
close: async () => {},
|
||||
}) as never,
|
||||
});
|
||||
|
||||
const previousApiKey = process.env.PAPERCLIP_API_KEY;
|
||||
try {
|
||||
delete process.env.PAPERCLIP_API_KEY;
|
||||
const result = await execute({
|
||||
runId: "run-1",
|
||||
agent: {
|
||||
id: "agent-1",
|
||||
companyId: "company-1",
|
||||
},
|
||||
runtime: {},
|
||||
config: { agent: "custom", agentCommand: "node ./fake-acp.js" },
|
||||
context: {},
|
||||
authToken: "runtime-key",
|
||||
onLog: async () => {},
|
||||
onMeta: async () => {},
|
||||
} as never);
|
||||
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(observedApiKeyDuringStream).toBeUndefined();
|
||||
} finally {
|
||||
if (previousApiKey === undefined) delete process.env.PAPERCLIP_API_KEY;
|
||||
else process.env.PAPERCLIP_API_KEY = previousApiKey;
|
||||
}
|
||||
});
|
||||
});
|
||||
1223
packages/adapters/acpx-local/src/server/execute.ts
Normal file
5
packages/adapters/acpx-local/src/server/index.ts
Normal file
@@ -0,0 +1,5 @@
|
||||
// Server entry point for the acpx_local adapter: re-exports the run executor,
// environment diagnostics, config schema, session codec, and skill helpers.
export { execute, createAcpxLocalExecutor } from "./execute.js";
export { testEnvironment } from "./test.js";
export { getConfigSchema } from "./config-schema.js";
export { sessionCodec } from "./session-codec.js";
export { listAcpxSkills, syncAcpxSkills } from "./skills.js";
|
||||
50
packages/adapters/acpx-local/src/server/session-codec.ts
Normal file
@@ -0,0 +1,50 @@
|
||||
import type { AdapterSessionCodec } from "@paperclipai/adapter-utils";
|
||||
|
||||
function readString(value: unknown): string | null {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
function readRecord(value: unknown): Record<string, unknown> | null {
|
||||
return typeof value === "object" && value !== null && !Array.isArray(value) ? { ...(value as Record<string, unknown>) } : null;
|
||||
}
|
||||
|
||||
export const sessionCodec: AdapterSessionCodec = {
|
||||
deserialize(raw: unknown) {
|
||||
if (typeof raw !== "object" || raw === null || Array.isArray(raw)) return null;
|
||||
const record = raw as Record<string, unknown>;
|
||||
const runtimeSessionName = readString(record.runtimeSessionName);
|
||||
const acpSessionId = readString(record.acpSessionId);
|
||||
const agentSessionId = readString(record.agentSessionId);
|
||||
const remoteExecution = readRecord(record.remoteExecution);
|
||||
if (!runtimeSessionName && !acpSessionId && !agentSessionId) return null;
|
||||
|
||||
return {
|
||||
...(runtimeSessionName ? { runtimeSessionName } : {}),
|
||||
...(readString(record.sessionKey) ? { sessionKey: readString(record.sessionKey) } : {}),
|
||||
...(readString(record.acpxRecordId) ? { acpxRecordId: readString(record.acpxRecordId) } : {}),
|
||||
...(acpSessionId ? { acpSessionId } : {}),
|
||||
...(agentSessionId ? { agentSessionId } : {}),
|
||||
...(readString(record.agent) ? { agent: readString(record.agent) } : {}),
|
||||
...(readString(record.cwd) ? { cwd: readString(record.cwd) } : {}),
|
||||
...(readString(record.mode) ? { mode: readString(record.mode) } : {}),
|
||||
...(readString(record.stateDir) ? { stateDir: readString(record.stateDir) } : {}),
|
||||
...(readString(record.configFingerprint) ? { configFingerprint: readString(record.configFingerprint) } : {}),
|
||||
...(readString(record.workspaceId) ? { workspaceId: readString(record.workspaceId) } : {}),
|
||||
...(readString(record.repoUrl) ? { repoUrl: readString(record.repoUrl) } : {}),
|
||||
...(readString(record.repoRef) ? { repoRef: readString(record.repoRef) } : {}),
|
||||
...(remoteExecution ? { remoteExecution } : {}),
|
||||
};
|
||||
},
|
||||
serialize(params: Record<string, unknown> | null) {
|
||||
if (!params) return null;
|
||||
return this.deserialize(params);
|
||||
},
|
||||
getDisplayId(params: Record<string, unknown> | null) {
|
||||
if (!params) return null;
|
||||
return (
|
||||
readString(params.runtimeSessionName) ??
|
||||
readString(params.acpSessionId) ??
|
||||
readString(params.agentSessionId)
|
||||
);
|
||||
},
|
||||
};
|
||||
106
packages/adapters/acpx-local/src/server/skills.ts
Normal file
@@ -0,0 +1,106 @@
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import type {
|
||||
AdapterSkillContext,
|
||||
AdapterSkillEntry,
|
||||
AdapterSkillSnapshot,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
readPaperclipRuntimeSkillEntries,
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
const __moduleDir = path.dirname(fileURLToPath(import.meta.url));
|
||||
|
||||
type AcpxSkillAgent = "claude" | "codex" | "custom";
|
||||
|
||||
function normalizeAcpxSkillAgent(config: Record<string, unknown>): AcpxSkillAgent {
|
||||
const configured = typeof config.agent === "string" ? config.agent.trim() : "";
|
||||
if (configured === "codex" || configured === "custom") return configured;
|
||||
if (configured === "claude" || configured === "") return "claude";
|
||||
return "claude";
|
||||
}
|
||||
|
||||
function configuredDetail(agent: AcpxSkillAgent): string {
|
||||
if (agent === "codex") {
|
||||
return "Will be linked into the effective CODEX_HOME/skills/ directory for the next ACPX Codex session.";
|
||||
}
|
||||
return "Will be mounted into the next ACPX Claude session.";
|
||||
}
|
||||
|
||||
function unsupportedDetail(): string {
|
||||
return "Desired state is stored in Paperclip only; custom ACP commands need an explicit skill integration contract before runtime sync is available.";
|
||||
}
|
||||
|
||||
/**
 * Computes the ACPX skill snapshot for the given adapter config: one entry
 * per skill available in the Paperclip runtime skills directory, plus
 * placeholder "missing" entries for desired skills that cannot be found,
 * with warnings for the unsupported custom-agent case and missing skills.
 */
async function buildAcpxSkillSnapshot(config: Record<string, unknown>): Promise<AdapterSkillSnapshot> {
  const acpxAgent = normalizeAcpxSkillAgent(config);
  const availableEntries = await readPaperclipRuntimeSkillEntries(config, __moduleDir);
  const availableByKey = new Map(availableEntries.map((entry) => [entry.key, entry]));
  const desiredSkills = resolvePaperclipDesiredSkillNames(config, availableEntries);
  const desiredSet = new Set(desiredSkills);
  // Custom ACP commands have no runtime skill contract; sync is unsupported.
  const supported = acpxAgent !== "custom";
  const warnings: string[] = supported
    ? []
    : [
        "Custom ACP commands do not expose a Paperclip skill integration contract yet; selected skills are tracked only.",
      ];

  // One entry per locally available skill; "configured" when it is desired.
  const entries: AdapterSkillEntry[] = availableEntries.map((entry) => {
    const desired = desiredSet.has(entry.key);
    return {
      key: entry.key,
      runtimeName: entry.runtimeName,
      desired,
      managed: true,
      state: desired ? "configured" : "available",
      origin: entry.required ? "paperclip_required" : "company_managed",
      originLabel: entry.required ? "Required by Paperclip" : "Managed by Paperclip",
      readOnly: false,
      sourcePath: entry.source,
      targetPath: null,
      detail: desired ? (supported ? configuredDetail(acpxAgent) : unsupportedDetail()) : null,
      required: Boolean(entry.required),
      requiredReason: entry.requiredReason ?? null,
    };
  });

  // Desired skills with no local source become "missing" entries + warnings.
  for (const desiredSkill of desiredSkills) {
    if (availableByKey.has(desiredSkill)) continue;
    warnings.push(`Desired skill "${desiredSkill}" is not available from the Paperclip skills directory.`);
    entries.push({
      key: desiredSkill,
      runtimeName: null,
      desired: true,
      managed: true,
      state: "missing",
      origin: "external_unknown",
      originLabel: "External or unavailable",
      readOnly: false,
      sourcePath: null,
      targetPath: null,
      detail: "Paperclip cannot find this skill in the local runtime skills directory.",
    });
  }

  // Key-sorted for a deterministic snapshot ordering.
  entries.sort((left, right) => left.key.localeCompare(right.key));

  return {
    adapterType: "acpx_local",
    supported,
    mode: supported ? "ephemeral" : "unsupported",
    desiredSkills,
    entries,
    warnings,
  };
}
|
||||
|
||||
export async function listAcpxSkills(ctx: AdapterSkillContext): Promise<AdapterSkillSnapshot> {
|
||||
return buildAcpxSkillSnapshot(ctx.config);
|
||||
}
|
||||
|
||||
export async function syncAcpxSkills(
|
||||
ctx: AdapterSkillContext,
|
||||
_desiredSkills: string[],
|
||||
): Promise<AdapterSkillSnapshot> {
|
||||
return buildAcpxSkillSnapshot(ctx.config);
|
||||
}
|
||||
49
packages/adapters/acpx-local/src/server/test.test.ts
Normal file
@@ -0,0 +1,49 @@
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
import { testEnvironment } from "./test.js";
|
||||
|
||||
const originalNodeVersion = process.version;
|
||||
|
||||
function setNodeVersion(version: string): void {
|
||||
Object.defineProperty(process, "version", {
|
||||
configurable: true,
|
||||
enumerable: true,
|
||||
value: version,
|
||||
});
|
||||
}
|
||||
|
||||
// Restore the genuine Node version string after every diagnostics test.
afterEach(() => {
  setNodeVersion(originalNodeVersion);
});
|
||||
|
||||
describe("acpx_local environment diagnostics", () => {
  // Regression guard: a healthy default Claude setup on a supported Node
  // release must report "pass", not a spurious warning.
  it("does not force healthy default Claude diagnostics to warn", async () => {
    // Exactly the minimum supported Node version.
    setNodeVersion("v22.12.0");

    const result = await testEnvironment({
      adapterType: "acpx_local",
      companyId: "test-company",
      config: { agent: "claude" },
    });

    expect(result.status).toBe("pass");
    expect(result.checks).toContainEqual(
      expect.objectContaining({
        code: "acpx_agent_selected",
        level: "info",
        message: "ACP agent selected: claude",
      }),
    );
    expect(result.checks).toContainEqual(
      expect.objectContaining({
        code: "acpx_runtime_scaffold",
        level: "info",
      }),
    );
    // The scaffold check must not be downgraded to a warning on this path.
    expect(result.checks).not.toContainEqual(
      expect.objectContaining({
        code: "acpx_runtime_scaffold",
        level: "warn",
      }),
    );
  });
});
|
||||
295
packages/adapters/acpx-local/src/server/test.ts
Normal file
@@ -0,0 +1,295 @@
|
||||
import { createRequire } from "node:module";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type {
|
||||
AdapterEnvironmentCheck,
|
||||
AdapterEnvironmentTestContext,
|
||||
AdapterEnvironmentTestResult,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
asString,
|
||||
parseObject,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
const require = createRequire(import.meta.url);
|
||||
const MIN_NODE_MAJOR = 22;
|
||||
const MIN_NODE_MINOR = 12;
|
||||
const MIN_NODE_PATCH = 0;
|
||||
|
||||
function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentTestResult["status"] {
|
||||
if (checks.some((check) => check.level === "error")) return "fail";
|
||||
if (checks.some((check) => check.level === "warn")) return "warn";
|
||||
return "pass";
|
||||
}
|
||||
|
||||
function nodeVersionMeetsMinimum(version: string): boolean {
|
||||
const [major = 0, minor = 0, patch = 0] = version
|
||||
.replace(/^v/, "")
|
||||
.split(".")
|
||||
.map((part) => Number.parseInt(part, 10));
|
||||
if (major > MIN_NODE_MAJOR) return true;
|
||||
if (major < MIN_NODE_MAJOR) return false;
|
||||
if (minor > MIN_NODE_MINOR) return true;
|
||||
if (minor < MIN_NODE_MINOR) return false;
|
||||
return patch >= MIN_NODE_PATCH;
|
||||
}
|
||||
|
||||
function isNonEmpty(value: unknown): value is string {
|
||||
return typeof value === "string" && value.trim().length > 0;
|
||||
}
|
||||
|
||||
function getStringEnv(configEnv: Record<string, string>, key: string): string | undefined {
|
||||
const configured = configEnv[key];
|
||||
if (typeof configured === "string") return configured;
|
||||
return process.env[key];
|
||||
}
|
||||
|
||||
function credentialSource(configEnv: Record<string, string>, key: string): string {
|
||||
return typeof configEnv[key] === "string" ? "adapter config env" : "server environment";
|
||||
}
|
||||
|
||||
async function readJsonObject(filePath: string): Promise<Record<string, unknown> | null> {
|
||||
try {
|
||||
const parsed = JSON.parse(await fs.readFile(filePath, "utf8")) as unknown;
|
||||
return typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)
|
||||
? parsed as Record<string, unknown>
|
||||
: null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function readNestedString(record: Record<string, unknown>, pathSegments: string[]): string | null {
|
||||
let current: unknown = record;
|
||||
for (const segment of pathSegments) {
|
||||
if (typeof current !== "object" || current === null || Array.isArray(current)) return null;
|
||||
current = (current as Record<string, unknown>)[segment];
|
||||
}
|
||||
return isNonEmpty(current) ? current.trim() : null;
|
||||
}
|
||||
|
||||
async function hasClaudeSubscriptionCredentials(configDir: string): Promise<boolean> {
|
||||
for (const filename of [".credentials.json", "credentials.json"]) {
|
||||
const credentials = await readJsonObject(path.join(configDir, filename));
|
||||
if (!credentials) continue;
|
||||
if (readNestedString(credentials, ["claudeAiOauth", "accessToken"])) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
async function hasCodexNativeCredentials(codexHome: string): Promise<boolean> {
|
||||
const auth = await readJsonObject(path.join(codexHome, "auth.json"));
|
||||
if (!auth) return false;
|
||||
return Boolean(
|
||||
readNestedString(auth, ["accessToken"]) ||
|
||||
readNestedString(auth, ["tokens", "access_token"]) ||
|
||||
readNestedString(auth, ["OPENAI_API_KEY"]),
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Produces informational credential-hint checks for the selected ACP agent.
 * All checks are level "info" — hints never fail the environment test; they
 * only tell the operator where credentials were (or were not) detected.
 * Agents other than "claude"/"codex" yield no hints.
 */
async function buildCredentialHintChecks(
  agent: string,
  configEnv: Record<string, string>,
): Promise<AdapterEnvironmentCheck[]> {
  if (agent === "claude") {
    // Bedrock is indicated by the opt-in flag ("1"/"true") or a base URL.
    const bedrockFlag = getStringEnv(configEnv, "CLAUDE_CODE_USE_BEDROCK");
    const bedrockBaseUrl = getStringEnv(configEnv, "ANTHROPIC_BEDROCK_BASE_URL");
    const hasBedrock =
      bedrockFlag === "1" ||
      /^true$/i.test(bedrockFlag ?? "") ||
      isNonEmpty(bedrockBaseUrl);
    // Which env var the "detected in …" detail should attribute Bedrock to.
    const bedrockSourceKey = isNonEmpty(bedrockFlag)
      ? "CLAUDE_CODE_USE_BEDROCK"
      : "ANTHROPIC_BEDROCK_BASE_URL";
    const anthropicApiKey = getStringEnv(configEnv, "ANTHROPIC_API_KEY");
    // Subscription credentials default to ~/.claude unless CLAUDE_CONFIG_DIR is set.
    const claudeConfigDir = isNonEmpty(getStringEnv(configEnv, "CLAUDE_CONFIG_DIR"))
      ? path.resolve(getStringEnv(configEnv, "CLAUDE_CONFIG_DIR") as string)
      : path.join(os.homedir(), ".claude");

    // Precedence: Bedrock > API key > local subscription > nothing found.
    if (hasBedrock) {
      return [{
        code: "acpx_claude_bedrock_auth_detected",
        level: "info",
        message: "Claude credential hint: Bedrock auth indicators are configured.",
        detail: `Detected in ${credentialSource(configEnv, bedrockSourceKey)}.`,
        hint: "Ensure AWS credentials and AWS_REGION are available to the ACPX-launched Claude agent.",
      }];
    }

    if (isNonEmpty(anthropicApiKey)) {
      return [{
        code: "acpx_claude_anthropic_api_key_detected",
        level: "info",
        message: "Claude credential hint: ANTHROPIC_API_KEY is set.",
        detail: `Detected in ${credentialSource(configEnv, "ANTHROPIC_API_KEY")}.`,
      }];
    }

    if (await hasClaudeSubscriptionCredentials(claudeConfigDir)) {
      return [{
        code: "acpx_claude_subscription_auth_detected",
        level: "info",
        message: "Claude credential hint: local Claude subscription credentials were found.",
        detail: `Credentials found in ${claudeConfigDir}.`,
      }];
    }

    return [{
      code: "acpx_claude_credentials_missing",
      level: "info",
      message: "Claude credential hint: no Claude API, Bedrock, or local subscription credentials were detected.",
      hint: "Set ANTHROPIC_API_KEY, configure Bedrock, or run `claude login` before starting an ACPX Claude agent.",
    }];
  }

  if (agent === "codex") {
    const openAiApiKey = getStringEnv(configEnv, "OPENAI_API_KEY");
    // Codex native auth defaults to ~/.codex unless CODEX_HOME is set.
    const codexHome = isNonEmpty(getStringEnv(configEnv, "CODEX_HOME"))
      ? path.resolve(getStringEnv(configEnv, "CODEX_HOME") as string)
      : path.join(os.homedir(), ".codex");

    // Precedence: API key > local Codex auth > nothing found.
    if (isNonEmpty(openAiApiKey)) {
      return [{
        code: "acpx_codex_openai_api_key_detected",
        level: "info",
        message: "Codex credential hint: OPENAI_API_KEY is set.",
        detail: `Detected in ${credentialSource(configEnv, "OPENAI_API_KEY")}.`,
      }];
    }

    if (await hasCodexNativeCredentials(codexHome)) {
      return [{
        code: "acpx_codex_native_auth_detected",
        level: "info",
        message: "Codex credential hint: local Codex auth configuration was found.",
        detail: `Credentials found in ${path.join(codexHome, "auth.json")}.`,
      }];
    }

    return [{
      code: "acpx_codex_credentials_missing",
      level: "info",
      message: "Codex credential hint: no OpenAI API key or local Codex auth configuration was detected.",
      hint: "Set OPENAI_API_KEY or run `codex login` before starting an ACPX Codex agent.",
    }];
  }

  // Custom/unknown agents: no credential hints apply.
  return [];
}
|
||||
|
||||
function resolvePackage(name: string): AdapterEnvironmentCheck {
|
||||
try {
|
||||
const resolved = require.resolve(`${name}/package.json`);
|
||||
return {
|
||||
code: `acpx_package_${name.replace(/[^a-z0-9]+/gi, "_").toLowerCase()}_present`,
|
||||
level: "info",
|
||||
message: `${name} is resolvable.`,
|
||||
detail: resolved,
|
||||
};
|
||||
} catch {
|
||||
return {
|
||||
code: `acpx_package_${name.replace(/[^a-z0-9]+/gi, "_").toLowerCase()}_missing`,
|
||||
level: "error",
|
||||
message: `${name} is not resolvable from the acpx_local adapter package.`,
|
||||
hint: "Run pnpm install so the ACPX adapter dependencies are installed.",
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
async function checkDirectory(pathValue: string, code: string, label: string): Promise<AdapterEnvironmentCheck | null> {
|
||||
const dir = pathValue.trim();
|
||||
if (!dir) return null;
|
||||
try {
|
||||
await fs.mkdir(dir, { recursive: true });
|
||||
await fs.access(dir);
|
||||
return {
|
||||
code,
|
||||
level: "info",
|
||||
message: `${label} is writable: ${dir}`,
|
||||
};
|
||||
} catch (err) {
|
||||
return {
|
||||
code: `${code}_invalid`,
|
||||
level: "error",
|
||||
message: err instanceof Error ? err.message : `${label} is not writable.`,
|
||||
detail: dir,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export async function testEnvironment(
|
||||
ctx: AdapterEnvironmentTestContext,
|
||||
): Promise<AdapterEnvironmentTestResult> {
|
||||
const config = parseObject(ctx.config);
|
||||
const envConfig = parseObject(config.env);
|
||||
const configEnv: Record<string, string> = {};
|
||||
for (const [key, value] of Object.entries(envConfig)) {
|
||||
if (typeof value === "string") configEnv[key] = value;
|
||||
}
|
||||
const checks: AdapterEnvironmentCheck[] = [];
|
||||
const nodeVersion = process.version;
|
||||
|
||||
checks.push({
|
||||
code: nodeVersionMeetsMinimum(nodeVersion) ? "acpx_node_supported" : "acpx_node_unsupported",
|
||||
level: nodeVersionMeetsMinimum(nodeVersion) ? "info" : "error",
|
||||
message: nodeVersionMeetsMinimum(nodeVersion)
|
||||
? `Node ${nodeVersion} satisfies ACPX's >=22.12.0 requirement.`
|
||||
: `Node ${nodeVersion} does not satisfy ACPX's >=22.12.0 requirement.`,
|
||||
hint: nodeVersionMeetsMinimum(nodeVersion)
|
||||
? undefined
|
||||
: "Run acpx_local agents with Node >=22.12.0 or use claude_local/codex_local on Node 20.",
|
||||
});
|
||||
|
||||
checks.push(resolvePackage("acpx"));
|
||||
checks.push(resolvePackage("@agentclientprotocol/claude-agent-acp"));
|
||||
checks.push(resolvePackage("@zed-industries/codex-acp"));
|
||||
|
||||
const agent = asString(config.agent, "claude");
|
||||
if (!["claude", "codex", "custom"].includes(agent)) {
|
||||
checks.push({
|
||||
code: "acpx_agent_invalid",
|
||||
level: "error",
|
||||
message: `Unsupported ACP agent: ${agent}`,
|
||||
hint: "Use agent=claude, agent=codex, or agent=custom.",
|
||||
});
|
||||
} else {
|
||||
checks.push({
|
||||
code: "acpx_agent_selected",
|
||||
level: "info",
|
||||
message: `ACP agent selected: ${agent}`,
|
||||
});
|
||||
checks.push(...await buildCredentialHintChecks(agent, configEnv));
|
||||
}
|
||||
|
||||
if (agent === "custom" && !asString(config.agentCommand, "")) {
|
||||
checks.push({
|
||||
code: "acpx_custom_command_missing",
|
||||
level: "error",
|
||||
message: "agentCommand is required when agent=custom.",
|
||||
});
|
||||
}
|
||||
|
||||
const stateDirCheck = await checkDirectory(asString(config.stateDir, ""), "acpx_state_dir_writable", "ACPX state directory");
|
||||
if (stateDirCheck) checks.push(stateDirCheck);
|
||||
|
||||
const permissionMode = asString(config.permissionMode, "approve-all");
|
||||
checks.push({
|
||||
code: "acpx_permission_mode",
|
||||
level: "info",
|
||||
message: `Effective permission mode: ${permissionMode || "approve-all"}`,
|
||||
});
|
||||
|
||||
checks.push({
|
||||
code: "acpx_runtime_scaffold",
|
||||
level: "info",
|
||||
message: "acpx_local runtime execution is available through the bundled ACPX runtime.",
|
||||
});
|
||||
|
||||
return {
|
||||
adapterType: ctx.adapterType,
|
||||
status: summarizeStatus(checks),
|
||||
checks,
|
||||
testedAt: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
139
packages/adapters/acpx-local/src/ui/build-config.ts
Normal file
@@ -0,0 +1,139 @@
|
||||
import type { CreateConfigValues } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
DEFAULT_ACPX_LOCAL_AGENT,
|
||||
DEFAULT_ACPX_LOCAL_MODE,
|
||||
DEFAULT_ACPX_LOCAL_NON_INTERACTIVE_PERMISSIONS,
|
||||
DEFAULT_ACPX_LOCAL_PERMISSION_MODE,
|
||||
DEFAULT_ACPX_LOCAL_TIMEOUT_SEC,
|
||||
} from "../index.js";
|
||||
|
||||
function parseCommaArgs(value: string): string[] {
|
||||
return value
|
||||
.split(",")
|
||||
.map((item) => item.trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
|
||||
function parseEnvVars(text: string): Record<string, string> {
|
||||
const env: Record<string, string> = {};
|
||||
for (const line of text.split(/\r?\n/)) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith("#")) continue;
|
||||
const eq = trimmed.indexOf("=");
|
||||
if (eq <= 0) continue;
|
||||
const key = trimmed.slice(0, eq).trim();
|
||||
const value = trimmed.slice(eq + 1);
|
||||
if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(key)) continue;
|
||||
env[key] = value;
|
||||
}
|
||||
return env;
|
||||
}
|
||||
|
||||
function parseEnvBindings(bindings: unknown): Record<string, unknown> {
|
||||
if (typeof bindings !== "object" || bindings === null || Array.isArray(bindings)) return {};
|
||||
const env: Record<string, unknown> = {};
|
||||
for (const [key, raw] of Object.entries(bindings)) {
|
||||
if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(key)) continue;
|
||||
if (typeof raw === "string") {
|
||||
env[key] = { type: "plain", value: raw };
|
||||
continue;
|
||||
}
|
||||
if (typeof raw !== "object" || raw === null || Array.isArray(raw)) continue;
|
||||
const rec = raw as Record<string, unknown>;
|
||||
if (rec.type === "plain" && typeof rec.value === "string") {
|
||||
env[key] = { type: "plain", value: rec.value };
|
||||
continue;
|
||||
}
|
||||
if (rec.type === "secret_ref" && typeof rec.secretId === "string") {
|
||||
env[key] = {
|
||||
type: "secret_ref",
|
||||
secretId: rec.secretId,
|
||||
...(typeof rec.version === "number" || rec.version === "latest"
|
||||
? { version: rec.version }
|
||||
: {}),
|
||||
};
|
||||
}
|
||||
}
|
||||
return env;
|
||||
}
|
||||
|
||||
function parseJsonObject(text: string): Record<string, unknown> | null {
|
||||
const trimmed = text.trim();
|
||||
if (!trimmed) return null;
|
||||
try {
|
||||
const parsed = JSON.parse(trimmed);
|
||||
if (typeof parsed !== "object" || parsed === null || Array.isArray(parsed)) return null;
|
||||
return parsed as Record<string, unknown>;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function readNumber(value: unknown, fallback: number): number {
|
||||
if (typeof value === "number" && Number.isFinite(value)) return value;
|
||||
if (typeof value === "string" && value.trim()) {
|
||||
const parsed = Number(value);
|
||||
if (Number.isFinite(parsed)) return parsed;
|
||||
}
|
||||
return fallback;
|
||||
}
|
||||
|
||||
export function buildAcpxLocalConfig(v: CreateConfigValues): Record<string, unknown> {
|
||||
const schemaValues = v.adapterSchemaValues ?? {};
|
||||
const ac: Record<string, unknown> = {
|
||||
agent: schemaValues.agent || DEFAULT_ACPX_LOCAL_AGENT,
|
||||
mode: schemaValues.mode || DEFAULT_ACPX_LOCAL_MODE,
|
||||
permissionMode: schemaValues.permissionMode || DEFAULT_ACPX_LOCAL_PERMISSION_MODE,
|
||||
nonInteractivePermissions:
|
||||
schemaValues.nonInteractivePermissions || DEFAULT_ACPX_LOCAL_NON_INTERACTIVE_PERMISSIONS,
|
||||
timeoutSec: readNumber(schemaValues.timeoutSec, DEFAULT_ACPX_LOCAL_TIMEOUT_SEC),
|
||||
};
|
||||
|
||||
for (const key of [
|
||||
"agentCommand",
|
||||
"cwd",
|
||||
"stateDir",
|
||||
"instructionsFilePath",
|
||||
"promptTemplate",
|
||||
"bootstrapPromptTemplate",
|
||||
]) {
|
||||
const value = schemaValues[key];
|
||||
if (typeof value === "string" && value.trim()) ac[key] = value.trim();
|
||||
}
|
||||
|
||||
if (!ac.cwd && v.cwd) ac.cwd = v.cwd;
|
||||
if (!ac.instructionsFilePath && v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (!ac.promptTemplate && v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (!ac.bootstrapPromptTemplate && v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
|
||||
const env = parseEnvBindings(v.envBindings);
|
||||
const legacy = parseEnvVars(v.envVars);
|
||||
for (const [key, value] of Object.entries(legacy)) {
|
||||
if (!Object.prototype.hasOwnProperty.call(env, key)) {
|
||||
env[key] = { type: "plain", value };
|
||||
}
|
||||
}
|
||||
if (typeof schemaValues.env === "string") {
|
||||
const schemaEnv = parseJsonObject(schemaValues.env);
|
||||
if (schemaEnv) Object.assign(env, schemaEnv);
|
||||
} else if (typeof schemaValues.env === "object" && schemaValues.env !== null && !Array.isArray(schemaValues.env)) {
|
||||
Object.assign(env, schemaValues.env as Record<string, unknown>);
|
||||
}
|
||||
if (Object.keys(env).length > 0) ac.env = env;
|
||||
|
||||
if (v.workspaceStrategyType === "git_worktree") {
|
||||
ac.workspaceStrategy = {
|
||||
type: "git_worktree",
|
||||
...(v.workspaceBaseRef ? { baseRef: v.workspaceBaseRef } : {}),
|
||||
...(v.workspaceBranchTemplate ? { branchTemplate: v.workspaceBranchTemplate } : {}),
|
||||
...(v.worktreeParentDir ? { worktreeParentDir: v.worktreeParentDir } : {}),
|
||||
};
|
||||
}
|
||||
const runtimeServices = parseJsonObject(v.runtimeServicesJson ?? "");
|
||||
if (runtimeServices && Array.isArray(runtimeServices.services)) {
|
||||
ac.workspaceRuntime = runtimeServices;
|
||||
}
|
||||
if (v.command) ac.command = v.command;
|
||||
if (v.extraArgs) ac.extraArgs = parseCommaArgs(v.extraArgs);
|
||||
return ac;
|
||||
}
|
||||
2
packages/adapters/acpx-local/src/ui/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export { parseAcpxStdoutLine } from "./parse-stdout.js";
|
||||
export { buildAcpxLocalConfig } from "./build-config.js";
|
||||
160
packages/adapters/acpx-local/src/ui/parse-stdout.test.ts
Normal file
@@ -0,0 +1,160 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { parseAcpxStdoutLine } from "./parse-stdout.js";
|
||||
|
||||
const TS = "2026-04-30T00:00:00.000Z";
|
||||
|
||||
function emit(payload: Record<string, unknown>): string {
|
||||
return JSON.stringify(payload);
|
||||
}
|
||||
|
||||
describe("parseAcpxStdoutLine", () => {
|
||||
it("renders an init entry from acpx.session", () => {
|
||||
const entries = parseAcpxStdoutLine(
|
||||
emit({
|
||||
type: "acpx.session",
|
||||
agent: "claude",
|
||||
acpSessionId: "acp-1",
|
||||
runtimeSessionName: "runtime-1",
|
||||
mode: "persistent",
|
||||
permissionMode: "approve-all",
|
||||
}),
|
||||
TS,
|
||||
);
|
||||
expect(entries).toEqual([
|
||||
{
|
||||
kind: "init",
|
||||
ts: TS,
|
||||
model: "claude (persistent / approve-all)",
|
||||
sessionId: "acp-1",
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it("routes output text_delta to the assistant transcript", () => {
|
||||
const entries = parseAcpxStdoutLine(
|
||||
emit({ type: "acpx.text_delta", text: "hello", channel: "output", tag: "agent_message_chunk" }),
|
||||
TS,
|
||||
);
|
||||
expect(entries).toEqual([
|
||||
{ kind: "assistant", ts: TS, text: "hello", delta: true },
|
||||
]);
|
||||
});
|
||||
|
||||
it("routes thought text_delta to the thinking transcript", () => {
|
||||
const entries = parseAcpxStdoutLine(
|
||||
emit({ type: "acpx.text_delta", text: "thinking…", channel: "thought" }),
|
||||
TS,
|
||||
);
|
||||
expect(entries).toEqual([
|
||||
{ kind: "thinking", ts: TS, text: "thinking…", delta: true },
|
||||
]);
|
||||
});
|
||||
|
||||
it("falls back to stream when channel is missing", () => {
|
||||
const entries = parseAcpxStdoutLine(
|
||||
emit({ type: "acpx.text_delta", text: "thinking…", stream: "thought" }),
|
||||
TS,
|
||||
);
|
||||
expect(entries[0]).toMatchObject({ kind: "thinking" });
|
||||
});
|
||||
|
||||
it("renders status events as system text with optional ctx usage", () => {
|
||||
expect(
|
||||
parseAcpxStdoutLine(
|
||||
emit({ type: "acpx.status", text: "thinking", tag: "agent_thought_chunk" }),
|
||||
TS,
|
||||
),
|
||||
).toEqual([{ kind: "system", ts: TS, text: "thinking" }]);
|
||||
|
||||
expect(
|
||||
parseAcpxStdoutLine(
|
||||
emit({ type: "acpx.status", tag: "context_window", used: 12000, size: 200000 }),
|
||||
TS,
|
||||
),
|
||||
).toEqual([{ kind: "system", ts: TS, text: "context_window (12000/200000 ctx)" }]);
|
||||
});
|
||||
|
||||
it("emits a tool_call entry that preserves toolCallId, status, and input", () => {
|
||||
const entries = parseAcpxStdoutLine(
|
||||
emit({
|
||||
type: "acpx.tool_call",
|
||||
name: "read",
|
||||
toolCallId: "tool-1",
|
||||
status: "running",
|
||||
text: "read README.md",
|
||||
}),
|
||||
TS,
|
||||
);
|
||||
expect(entries).toEqual([
|
||||
{
|
||||
kind: "tool_call",
|
||||
ts: TS,
|
||||
name: "read",
|
||||
toolUseId: "tool-1",
|
||||
input: { text: "read README.md", status: "running" },
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it("emits a paired tool_result entry when a tool_call reports terminal status", () => {
|
||||
const completed = parseAcpxStdoutLine(
|
||||
emit({
|
||||
type: "acpx.tool_call",
|
||||
name: "read",
|
||||
toolCallId: "tool-1",
|
||||
status: "completed",
|
||||
text: "ok",
|
||||
}),
|
||||
TS,
|
||||
);
|
||||
expect(completed[1]).toEqual({
|
||||
kind: "tool_result",
|
||||
ts: TS,
|
||||
toolUseId: "tool-1",
|
||||
toolName: "read",
|
||||
content: "ok",
|
||||
isError: false,
|
||||
});
|
||||
|
||||
const failed = parseAcpxStdoutLine(
|
||||
emit({
|
||||
type: "acpx.tool_call",
|
||||
name: "edit",
|
||||
toolCallId: "tool-2",
|
||||
status: "failed",
|
||||
text: "permission denied",
|
||||
}),
|
||||
TS,
|
||||
);
|
||||
expect(failed[1]).toMatchObject({ kind: "tool_result", isError: true, content: "permission denied" });
|
||||
});
|
||||
|
||||
it("renders acpx.result with summary fallback to stopReason", () => {
|
||||
const entries = parseAcpxStdoutLine(
|
||||
emit({ type: "acpx.result", summary: "completed", stopReason: "end_turn" }),
|
||||
TS,
|
||||
);
|
||||
expect(entries[0]).toMatchObject({ kind: "result", text: "completed", subtype: "end_turn", isError: false });
|
||||
});
|
||||
|
||||
it("treats acpx.error as a stderr entry", () => {
|
||||
const entries = parseAcpxStdoutLine(
|
||||
emit({ type: "acpx.error", message: "auth required", code: "ACP_AUTH" }),
|
||||
TS,
|
||||
);
|
||||
expect(entries).toEqual([{ kind: "stderr", ts: TS, text: "auth required" }]);
|
||||
});
|
||||
|
||||
it("renders unknown acpx.* events as system entries", () => {
|
||||
const entries = parseAcpxStdoutLine(
|
||||
emit({ type: "acpx.misc", message: "unhandled" }),
|
||||
TS,
|
||||
);
|
||||
expect(entries).toEqual([{ kind: "system", ts: TS, text: "unhandled" }]);
|
||||
});
|
||||
|
||||
it("falls back to a stdout entry for non-JSON lines", () => {
|
||||
const entries = parseAcpxStdoutLine("not json", TS);
|
||||
expect(entries).toEqual([{ kind: "stdout", ts: TS, text: "not json" }]);
|
||||
});
|
||||
});
|
||||
158
packages/adapters/acpx-local/src/ui/parse-stdout.ts
Normal file
@@ -0,0 +1,158 @@
|
||||
import type { TranscriptEntry } from "@paperclipai/adapter-utils";
|
||||
|
||||
function parseJson(line: string): Record<string, unknown> | null {
|
||||
try {
|
||||
const parsed = JSON.parse(line);
|
||||
if (typeof parsed !== "object" || parsed === null || Array.isArray(parsed)) return null;
|
||||
return parsed as Record<string, unknown>;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function asString(value: unknown, fallback = ""): string {
|
||||
return typeof value === "string" ? value : fallback;
|
||||
}
|
||||
|
||||
function asNumber(value: unknown, fallback = 0): number {
|
||||
return typeof value === "number" && Number.isFinite(value) ? value : fallback;
|
||||
}
|
||||
|
||||
function stringify(value: unknown): string {
|
||||
if (typeof value === "string") return value;
|
||||
if (value === null || value === undefined) return "";
|
||||
try {
|
||||
return JSON.stringify(value, null, 2);
|
||||
} catch {
|
||||
return String(value);
|
||||
}
|
||||
}
|
||||
|
||||
function pickToolUseId(parsed: Record<string, unknown>): string {
|
||||
return (
|
||||
asString(parsed.toolCallId) ||
|
||||
asString(parsed.toolUseId) ||
|
||||
asString(parsed.id)
|
||||
);
|
||||
}
|
||||
|
||||
function statusText(parsed: Record<string, unknown>): string {
|
||||
const text = asString(parsed.text).trim();
|
||||
const tag = asString(parsed.tag).trim();
|
||||
const used = asNumber(parsed.used, -1);
|
||||
const size = asNumber(parsed.size, -1);
|
||||
const parts: string[] = [];
|
||||
if (text) parts.push(text);
|
||||
if (tag && !text) parts.push(tag);
|
||||
if (used >= 0 && size > 0) parts.push(`(${used}/${size} ctx)`);
|
||||
return parts.join(" ") || tag || "status";
|
||||
}
|
||||
|
||||
export function parseAcpxStdoutLine(line: string, ts: string): TranscriptEntry[] {
|
||||
const parsed = parseJson(line);
|
||||
if (!parsed) return [{ kind: "stdout", ts, text: line }];
|
||||
|
||||
const type = asString(parsed.type);
|
||||
if (type === "acpx.session") {
|
||||
const agent = asString(parsed.agent, "acpx");
|
||||
const mode = asString(parsed.mode);
|
||||
const permissionMode = asString(parsed.permissionMode);
|
||||
const tail = [mode, permissionMode].filter(Boolean).join(" / ");
|
||||
return [{
|
||||
kind: "init",
|
||||
ts,
|
||||
model: tail ? `${agent} (${tail})` : agent,
|
||||
sessionId:
|
||||
asString(parsed.acpSessionId) ||
|
||||
asString(parsed.sessionId) ||
|
||||
asString(parsed.runtimeSessionName),
|
||||
}];
|
||||
}
|
||||
|
||||
if (type === "acpx.text_delta") {
|
||||
const text = asString(parsed.text);
|
||||
if (!text) return [];
|
||||
const channel = asString(parsed.channel) || asString(parsed.stream);
|
||||
return [{
|
||||
kind: channel === "thought" || channel === "thinking" ? "thinking" : "assistant",
|
||||
ts,
|
||||
text,
|
||||
delta: true,
|
||||
}];
|
||||
}
|
||||
|
||||
if (type === "acpx.tool_call") {
|
||||
const status = asString(parsed.status);
|
||||
const text = asString(parsed.text);
|
||||
const name = asString(parsed.name, "acp_tool");
|
||||
const toolUseId = pickToolUseId(parsed);
|
||||
const input =
|
||||
parsed.input !== undefined
|
||||
? parsed.input
|
||||
: text || status
|
||||
? { ...(text ? { text } : {}), ...(status ? { status } : {}) }
|
||||
: {};
|
||||
const entries: TranscriptEntry[] = [
|
||||
{
|
||||
kind: "tool_call",
|
||||
ts,
|
||||
name,
|
||||
toolUseId: toolUseId || undefined,
|
||||
input,
|
||||
},
|
||||
];
|
||||
if (status === "completed" || status === "failed" || status === "cancelled") {
|
||||
entries.push({
|
||||
kind: "tool_result",
|
||||
ts,
|
||||
toolUseId: toolUseId || name,
|
||||
toolName: name,
|
||||
content: text || status,
|
||||
isError: status !== "completed",
|
||||
});
|
||||
}
|
||||
return entries;
|
||||
}
|
||||
|
||||
if (type === "acpx.tool_result") {
|
||||
return [{
|
||||
kind: "tool_result",
|
||||
ts,
|
||||
toolUseId: pickToolUseId(parsed) || asString(parsed.name, "acp_tool"),
|
||||
toolName: asString(parsed.name) || undefined,
|
||||
content: stringify(parsed.content ?? parsed.output ?? parsed.error),
|
||||
isError: parsed.isError === true || parsed.error !== undefined,
|
||||
}];
|
||||
}
|
||||
|
||||
if (type === "acpx.status") {
|
||||
return [{ kind: "system", ts, text: statusText(parsed) }];
|
||||
}
|
||||
|
||||
if (type === "acpx.result") {
|
||||
return [{
|
||||
kind: "result",
|
||||
ts,
|
||||
text: asString(parsed.summary, asString(parsed.stopReason, asString(parsed.text))),
|
||||
inputTokens: asNumber(parsed.inputTokens),
|
||||
outputTokens: asNumber(parsed.outputTokens),
|
||||
cachedTokens: asNumber(parsed.cachedTokens),
|
||||
costUsd: asNumber(parsed.costUsd),
|
||||
subtype: asString(parsed.subtype, asString(parsed.stopReason, "acpx.result")),
|
||||
isError: parsed.isError === true,
|
||||
errors: Array.isArray(parsed.errors)
|
||||
? parsed.errors.map((error) => stringify(error)).filter(Boolean)
|
||||
: [],
|
||||
}];
|
||||
}
|
||||
|
||||
if (type === "acpx.error") {
|
||||
return [{ kind: "stderr", ts, text: asString(parsed.message, line) }];
|
||||
}
|
||||
|
||||
if (type.startsWith("acpx.")) {
|
||||
return [{ kind: "system", ts, text: asString(parsed.message, type) }];
|
||||
}
|
||||
|
||||
return [{ kind: "stdout", ts, text: line }];
|
||||
}
|
||||
8
packages/adapters/acpx-local/tsconfig.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"extends": "../../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src"
|
||||
},
|
||||
"include": ["src"]
|
||||
}
|
||||
7
packages/adapters/acpx-local/vitest.config.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
import { defineConfig } from "vitest/config";
|
||||
|
||||
export default defineConfig({
|
||||
test: {
|
||||
environment: "node",
|
||||
},
|
||||
});
|
||||
@@ -1,3 +1,5 @@
|
||||
import type { AdapterModelProfileDefinition } from "@paperclipai/adapter-utils";
|
||||
|
||||
export const type = "claude_local";
|
||||
export const label = "Claude Code (local)";
|
||||
|
||||
@@ -10,6 +12,19 @@ export const models = [
|
||||
{ id: "claude-haiku-4-5-20251001", label: "Claude Haiku 4.5" },
|
||||
];
|
||||
|
||||
export const modelProfiles: AdapterModelProfileDefinition[] = [
|
||||
{
|
||||
key: "cheap",
|
||||
label: "Cheap",
|
||||
description: "Use Claude Sonnet as the lower-cost Claude Code lane while preserving the agent's primary model.",
|
||||
adapterConfig: {
|
||||
model: "claude-sonnet-4-6",
|
||||
effort: "low",
|
||||
},
|
||||
source: "adapter_default",
|
||||
},
|
||||
];
|
||||
|
||||
export const agentConfigurationDoc = `# claude_local agent configuration
|
||||
|
||||
Adapter: claude_local
|
||||
|
||||
@@ -10,6 +10,7 @@ const {
|
||||
prepareWorkspaceForSshExecution,
|
||||
restoreWorkspaceFromSshExecution,
|
||||
syncDirectoryToSsh,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} = vi.hoisted(() => ({
|
||||
runChildProcess: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
@@ -29,6 +30,14 @@ const {
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => undefined),
|
||||
restoreWorkspaceFromSshExecution: vi.fn(async () => undefined),
|
||||
syncDirectoryToSsh: vi.fn(async () => undefined),
|
||||
startAdapterExecutionTargetPaperclipBridge: vi.fn(async () => ({
|
||||
env: {
|
||||
PAPERCLIP_API_URL: "http://127.0.0.1:4310",
|
||||
PAPERCLIP_API_KEY: "bridge-token",
|
||||
PAPERCLIP_API_BRIDGE_MODE: "queue_v1",
|
||||
},
|
||||
stop: async () => {},
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/server-utils", async () => {
|
||||
@@ -55,6 +64,16 @@ vi.mock("@paperclipai/adapter-utils/ssh", async () => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/execution-target", async () => {
|
||||
const actual = await vi.importActual<typeof import("@paperclipai/adapter-utils/execution-target")>(
|
||||
"@paperclipai/adapter-utils/execution-target",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
};
|
||||
});
|
||||
|
||||
import { execute } from "./execute.js";
|
||||
|
||||
describe("claude remote execution", () => {
|
||||
@@ -73,8 +92,10 @@ describe("claude remote execution", () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-claude-remote-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const alternateWorkspaceDir = path.join(rootDir, "workspace-other");
|
||||
const instructionsPath = path.join(rootDir, "instructions.md");
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(alternateWorkspaceDir, { recursive: true });
|
||||
await writeFile(instructionsPath, "Use the remote workspace.\n", "utf8");
|
||||
|
||||
await execute({
|
||||
@@ -100,7 +121,27 @@ describe("claude remote execution", () => {
|
||||
paperclipWorkspace: {
|
||||
cwd: workspaceDir,
|
||||
source: "project_primary",
|
||||
strategy: "git_worktree",
|
||||
workspaceId: "workspace-1",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
branchName: "feature/remote-claude",
|
||||
worktreePath: workspaceDir,
|
||||
},
|
||||
paperclipWorkspaces: [
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: workspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
cwd: alternateWorkspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
],
|
||||
},
|
||||
executionTransport: {
|
||||
remoteExecution: {
|
||||
@@ -112,7 +153,6 @@ describe("claude remote execution", () => {
|
||||
privateKey: "PRIVATE KEY",
|
||||
knownHosts: "[127.0.0.1]:2222 ssh-ed25519 AAAA",
|
||||
strictHostKeyChecking: true,
|
||||
paperclipApiUrl: "http://198.51.100.10:3102",
|
||||
},
|
||||
},
|
||||
onLog: async () => {},
|
||||
@@ -136,8 +176,25 @@ describe("claude remote execution", () => {
|
||||
expect(call?.[2]).toContain("/remote/workspace/.paperclip-runtime/claude/skills/agent-instructions.md");
|
||||
expect(call?.[2]).toContain("--add-dir");
|
||||
expect(call?.[2]).toContain("/remote/workspace/.paperclip-runtime/claude/skills");
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://198.51.100.10:3102");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_WORKTREE_PATH).toBeUndefined();
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledWith(expect.objectContaining({
|
||||
localDir: workspaceDir,
|
||||
|
||||
@@ -5,7 +5,6 @@ import type { AdapterExecutionContext, AdapterExecutionResult } from "@paperclip
|
||||
import type { RunProcessResult } from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetPaperclipApiUrl,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
@@ -13,6 +12,7 @@ import {
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetRuntimeCommandInstalled,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
@@ -36,6 +36,7 @@ import {
|
||||
ensurePathInEnv,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
@@ -61,8 +62,10 @@ interface ClaudeExecutionInput {
|
||||
agent: AdapterExecutionContext["agent"];
|
||||
config: Record<string, unknown>;
|
||||
context: Record<string, unknown>;
|
||||
runtimeCommandSpec?: AdapterExecutionContext["runtimeCommandSpec"];
|
||||
executionTarget?: ReturnType<typeof readAdapterExecutionTarget>;
|
||||
authToken?: string;
|
||||
onLog?: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
}
|
||||
|
||||
interface ClaudeRuntimeConfig {
|
||||
@@ -112,7 +115,8 @@ function resolveClaudeBillingType(env: Record<string, string>): "api" | "subscri
|
||||
}
|
||||
|
||||
async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<ClaudeRuntimeConfig> {
|
||||
const { runId, agent, config, context, executionTarget, authToken } = input;
|
||||
const { runId, agent, config, context, runtimeCommandSpec, executionTarget, authToken } = input;
|
||||
const onLog = input.onLog ?? (async () => {});
|
||||
|
||||
const command = asString(config.command, "claude");
|
||||
const workspaceContext = parseObject(context.paperclipWorkspace);
|
||||
@@ -145,6 +149,15 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
const useConfiguredInsteadOfAgentHome = workspaceSource === "agent_home" && configuredCwd.length > 0;
|
||||
const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
|
||||
const cwd = effectiveWorkspaceCwd || configuredCwd || process.cwd();
|
||||
const executionTargetIsRemote = adapterExecutionTargetIsRemote(executionTarget);
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceWorktreePath,
|
||||
workspaceHints,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
|
||||
const envConfig = parseObject(config.env);
|
||||
@@ -200,18 +213,18 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceStrategy,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceBranch,
|
||||
workspaceWorktreePath,
|
||||
workspaceWorktreePath: shapedWorkspaceEnv.workspaceWorktreePath,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
if (runtimeServiceIntents.length > 0) {
|
||||
env.PAPERCLIP_RUNTIME_SERVICE_INTENTS_JSON = JSON.stringify(runtimeServiceIntents);
|
||||
@@ -222,11 +235,6 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
if (runtimePrimaryUrl) {
|
||||
env.PAPERCLIP_RUNTIME_PRIMARY_URL = runtimePrimaryUrl;
|
||||
}
|
||||
const targetPaperclipApiUrl = adapterExecutionTargetPaperclipApiUrl(executionTarget);
|
||||
if (targetPaperclipApiUrl) {
|
||||
env.PAPERCLIP_API_URL = targetPaperclipApiUrl;
|
||||
}
|
||||
|
||||
for (const [key, value] of Object.entries(envConfig)) {
|
||||
if (typeof value === "string") env[key] = value;
|
||||
}
|
||||
@@ -235,7 +243,24 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
env.PAPERCLIP_API_KEY = authToken;
|
||||
}
|
||||
|
||||
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
|
||||
const runtimeEnv = Object.fromEntries(
|
||||
Object.entries(ensurePathInEnv({ ...process.env, ...env })).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
);
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
installCommand: runtimeCommandSpec?.installCommand,
|
||||
detectCommand: runtimeCommandSpec?.detectCommand,
|
||||
cwd,
|
||||
env: runtimeEnv,
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
onLog,
|
||||
});
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
const loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
@@ -244,8 +269,6 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
resolvedCommand,
|
||||
});
|
||||
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
const extraArgs = (() => {
|
||||
const fromExtraArgs = asStringArray(config.extraArgs);
|
||||
if (fromExtraArgs.length > 0) return fromExtraArgs;
|
||||
@@ -331,8 +354,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
agent,
|
||||
config,
|
||||
context,
|
||||
runtimeCommandSpec: ctx.runtimeCommandSpec,
|
||||
executionTarget,
|
||||
authToken,
|
||||
onLog,
|
||||
});
|
||||
const {
|
||||
command,
|
||||
@@ -777,6 +802,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const transientUpstream =
|
||||
failed &&
|
||||
!loginMeta.requiresLogin &&
|
||||
!clearSessionForMaxTurns &&
|
||||
isClaudeTransientUpstreamError({
|
||||
parsed,
|
||||
stdout: proc.stdout,
|
||||
@@ -793,11 +819,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
: null;
|
||||
const resolvedErrorCode = loginMeta.requiresLogin
|
||||
? "claude_auth_required"
|
||||
: failed && clearSessionForMaxTurns
|
||||
? "max_turns_exhausted"
|
||||
: transientUpstream
|
||||
? "claude_transient_upstream"
|
||||
: null;
|
||||
const mergedResultJson: Record<string, unknown> = {
|
||||
...parsed,
|
||||
...(failed && clearSessionForMaxTurns ? { stopReason: "max_turns_exhausted" } : {}),
|
||||
...(transientUpstream ? { errorFamily: "transient_upstream" } : {}),
|
||||
...(transientRetryNotBefore ? { retryNotBefore: transientRetryNotBefore.toISOString() } : {}),
|
||||
...(transientRetryNotBefore ? { transientRetryNotBefore: transientRetryNotBefore.toISOString() } : {}),
|
||||
|
||||
@@ -170,11 +170,19 @@ export function isClaudeMaxTurnsResult(parsed: Record<string, unknown> | null |
|
||||
const subtype = asString(parsed.subtype, "").trim().toLowerCase();
|
||||
if (subtype === "error_max_turns") return true;
|
||||
|
||||
const stopReason = asString(parsed.stop_reason, "").trim().toLowerCase();
|
||||
if (stopReason === "max_turns") return true;
|
||||
const structuredStopReasons = [
|
||||
parsed.stop_reason,
|
||||
parsed.stopReason,
|
||||
parsed.error_code,
|
||||
parsed.errorCode,
|
||||
].map((value) => asString(value, "").trim().toLowerCase());
|
||||
|
||||
const resultText = asString(parsed.result, "").trim();
|
||||
return /max(?:imum)?\s+turns?/i.test(resultText);
|
||||
return structuredStopReasons.some((reason) =>
|
||||
reason === "max_turns" ||
|
||||
reason === "max_turns_exhausted" ||
|
||||
reason === "turn_limit" ||
|
||||
reason === "turn_limit_exhausted",
|
||||
);
|
||||
}
|
||||
|
||||
export function isClaudeUnknownSessionError(parsed: Record<string, unknown>): boolean {
|
||||
|
||||
@@ -66,8 +66,6 @@ export function buildClaudeLocalConfig(v: CreateConfigValues): Record<string, un
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
if (v.model) ac.model = v.model;
|
||||
if (v.thinkingEffort) ac.effort = v.thinkingEffort;
|
||||
if (v.chrome) ac.chrome = true;
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import type { AdapterModelProfileDefinition } from "@paperclipai/adapter-utils";
|
||||
|
||||
export const type = "codex_local";
|
||||
export const label = "Codex (local)";
|
||||
|
||||
export const DEFAULT_CODEX_LOCAL_MODEL = "gpt-5.3-codex";
|
||||
export const DEFAULT_CODEX_LOCAL_BYPASS_APPROVALS_AND_SANDBOX = true;
|
||||
export const CODEX_LOCAL_FAST_MODE_SUPPORTED_MODELS = ["gpt-5.4"] as const;
|
||||
@@ -40,6 +43,19 @@ export const models = [
|
||||
{ id: "codex-mini-latest", label: "Codex Mini" },
|
||||
];
|
||||
|
||||
export const modelProfiles: AdapterModelProfileDefinition[] = [
|
||||
{
|
||||
key: "cheap",
|
||||
label: "Cheap",
|
||||
description: "Use the lowest-cost known Codex local model lane without changing the primary model.",
|
||||
adapterConfig: {
|
||||
model: "gpt-5.3-codex-spark",
|
||||
modelReasoningEffort: "low",
|
||||
},
|
||||
source: "adapter_default",
|
||||
},
|
||||
];
|
||||
|
||||
export const agentConfigurationDoc = `# codex_local agent configuration
|
||||
|
||||
Adapter: codex_local
|
||||
|
||||
@@ -10,6 +10,7 @@ const {
|
||||
prepareWorkspaceForSshExecution,
|
||||
restoreWorkspaceFromSshExecution,
|
||||
syncDirectoryToSsh,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} = vi.hoisted(() => ({
|
||||
runChildProcess: vi.fn(async () => ({
|
||||
exitCode: 1,
|
||||
@@ -25,6 +26,14 @@ const {
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => undefined),
|
||||
restoreWorkspaceFromSshExecution: vi.fn(async () => undefined),
|
||||
syncDirectoryToSsh: vi.fn(async () => undefined),
|
||||
startAdapterExecutionTargetPaperclipBridge: vi.fn(async () => ({
|
||||
env: {
|
||||
PAPERCLIP_API_URL: "http://127.0.0.1:4310",
|
||||
PAPERCLIP_API_KEY: "bridge-token",
|
||||
PAPERCLIP_API_BRIDGE_MODE: "queue_v1",
|
||||
},
|
||||
stop: async () => {},
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/server-utils", async () => {
|
||||
@@ -51,6 +60,16 @@ vi.mock("@paperclipai/adapter-utils/ssh", async () => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/execution-target", async () => {
|
||||
const actual = await vi.importActual<typeof import("@paperclipai/adapter-utils/execution-target")>(
|
||||
"@paperclipai/adapter-utils/execution-target",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
};
|
||||
});
|
||||
|
||||
import { execute } from "./execute.js";
|
||||
|
||||
describe("codex remote execution", () => {
|
||||
@@ -74,6 +93,8 @@ describe("codex remote execution", () => {
|
||||
await mkdir(codexHomeDir, { recursive: true });
|
||||
await writeFile(path.join(rootDir, "instructions.md"), "Use the remote workspace.\n", "utf8");
|
||||
await writeFile(path.join(codexHomeDir, "auth.json"), "{}", "utf8");
|
||||
const alternateWorkspaceDir = path.join(rootDir, "alternate-workspace");
|
||||
await mkdir(alternateWorkspaceDir, { recursive: true });
|
||||
|
||||
await execute({
|
||||
runId: "run-1",
|
||||
@@ -100,7 +121,27 @@ describe("codex remote execution", () => {
|
||||
paperclipWorkspace: {
|
||||
cwd: workspaceDir,
|
||||
source: "project_primary",
|
||||
strategy: "git_worktree",
|
||||
workspaceId: "workspace-1",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
branchName: "feature/remote-codex",
|
||||
worktreePath: workspaceDir,
|
||||
},
|
||||
paperclipWorkspaces: [
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: workspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
cwd: alternateWorkspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
],
|
||||
},
|
||||
executionTransport: {
|
||||
remoteExecution: {
|
||||
@@ -134,7 +175,25 @@ describe("codex remote execution", () => {
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(call?.[3].env.CODEX_HOME).toBe("/remote/workspace/.paperclip-runtime/codex/home");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_WORKTREE_PATH).toBeUndefined();
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledWith(expect.objectContaining({
|
||||
localDir: workspaceDir,
|
||||
|
||||
@@ -4,13 +4,13 @@ import { fileURLToPath } from "node:url";
|
||||
import { inferOpenAiCompatibleBiller, type AdapterExecutionContext, type AdapterExecutionResult } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetPaperclipApiUrl,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetRuntimeCommandInstalled,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
resolveAdapterExecutionTargetCommandForLogs,
|
||||
@@ -31,6 +31,7 @@ import {
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
joinPromptSections,
|
||||
@@ -348,6 +349,13 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
},
|
||||
);
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceWorktreePath,
|
||||
workspaceHints,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
const preparedExecutionTargetRuntime = executionTargetIsRemote
|
||||
? await (async () => {
|
||||
await onLog(
|
||||
@@ -426,18 +434,18 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceStrategy,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
workspaceBranch,
|
||||
workspaceWorktreePath,
|
||||
workspaceWorktreePath: shapedWorkspaceEnv.workspaceWorktreePath,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
if (runtimeServiceIntents.length > 0) {
|
||||
env.PAPERCLIP_RUNTIME_SERVICE_INTENTS_JSON = JSON.stringify(runtimeServiceIntents);
|
||||
@@ -448,10 +456,6 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (runtimePrimaryUrl) {
|
||||
env.PAPERCLIP_RUNTIME_PRIMARY_URL = runtimePrimaryUrl;
|
||||
}
|
||||
const targetPaperclipApiUrl = adapterExecutionTargetPaperclipApiUrl(executionTarget);
|
||||
if (targetPaperclipApiUrl) {
|
||||
env.PAPERCLIP_API_URL = targetPaperclipApiUrl;
|
||||
}
|
||||
for (const [k, v] of Object.entries(envConfig)) {
|
||||
if (typeof v === "string") env[k] = v;
|
||||
}
|
||||
@@ -478,7 +482,22 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
),
|
||||
);
|
||||
const billingType = resolveCodexBillingType(effectiveEnv);
|
||||
const runtimeEnv = ensurePathInEnv(effectiveEnv);
|
||||
const runtimeEnv = Object.fromEntries(
|
||||
Object.entries(ensurePathInEnv(effectiveEnv)).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
);
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
installCommand: ctx.runtimeCommandSpec?.installCommand,
|
||||
detectCommand: ctx.runtimeCommandSpec?.detectCommand,
|
||||
cwd,
|
||||
env: runtimeEnv,
|
||||
timeoutSec: asNumber(config.timeoutSec, 0),
|
||||
graceSec: asNumber(config.graceSec, 20),
|
||||
onLog,
|
||||
});
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
const loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
|
||||
@@ -70,8 +70,6 @@ export function buildCodexLocalConfig(v: CreateConfigValues): Record<string, unk
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
ac.model = v.model || DEFAULT_CODEX_LOCAL_MODEL;
|
||||
if (v.thinkingEffort) ac.modelReasoningEffort = v.thinkingEffort;
|
||||
ac.timeoutSec = 0;
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import type { AdapterModelProfileDefinition } from "@paperclipai/adapter-utils";
|
||||
|
||||
export const type = "cursor";
|
||||
export const label = "Cursor CLI (local)";
|
||||
|
||||
export const DEFAULT_CURSOR_LOCAL_MODEL = "auto";
|
||||
|
||||
const CURSOR_FALLBACK_MODEL_IDS = [
|
||||
@@ -46,6 +49,18 @@ const CURSOR_FALLBACK_MODEL_IDS = [
|
||||
|
||||
export const models = CURSOR_FALLBACK_MODEL_IDS.map((id) => ({ id, label: id }));
|
||||
|
||||
export const modelProfiles: AdapterModelProfileDefinition[] = [
|
||||
{
|
||||
key: "cheap",
|
||||
label: "Cheap",
|
||||
description: "Use Cursor's known Codex mini model as the budget lane instead of assuming auto is cheap.",
|
||||
adapterConfig: {
|
||||
model: "gpt-5.1-codex-mini",
|
||||
},
|
||||
source: "adapter_default",
|
||||
},
|
||||
];
|
||||
|
||||
export const agentConfigurationDoc = `# cursor agent configuration
|
||||
|
||||
Adapter: cursor
|
||||
|
||||
@@ -11,6 +11,7 @@ const {
|
||||
restoreWorkspaceFromSshExecution,
|
||||
runSshCommand,
|
||||
syncDirectoryToSsh,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} = vi.hoisted(() => ({
|
||||
runChildProcess: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
@@ -35,6 +36,14 @@ const {
|
||||
exitCode: 0,
|
||||
})),
|
||||
syncDirectoryToSsh: vi.fn(async () => undefined),
|
||||
startAdapterExecutionTargetPaperclipBridge: vi.fn(async () => ({
|
||||
env: {
|
||||
PAPERCLIP_API_URL: "http://127.0.0.1:4310",
|
||||
PAPERCLIP_API_KEY: "bridge-token",
|
||||
PAPERCLIP_API_BRIDGE_MODE: "queue_v1",
|
||||
},
|
||||
stop: async () => {},
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/server-utils", async () => {
|
||||
@@ -62,6 +71,16 @@ vi.mock("@paperclipai/adapter-utils/ssh", async () => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/execution-target", async () => {
|
||||
const actual = await vi.importActual<typeof import("@paperclipai/adapter-utils/execution-target")>(
|
||||
"@paperclipai/adapter-utils/execution-target",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
};
|
||||
});
|
||||
|
||||
import { execute } from "./execute.js";
|
||||
|
||||
describe("cursor remote execution", () => {
|
||||
@@ -80,7 +99,9 @@ describe("cursor remote execution", () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-cursor-remote-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const alternateWorkspaceDir = path.join(rootDir, "workspace-other");
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(alternateWorkspaceDir, { recursive: true });
|
||||
|
||||
const result = await execute({
|
||||
runId: "run-1",
|
||||
@@ -105,6 +126,20 @@ describe("cursor remote execution", () => {
|
||||
cwd: workspaceDir,
|
||||
source: "project_primary",
|
||||
},
|
||||
paperclipWorkspaces: [
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: workspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
cwd: alternateWorkspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
],
|
||||
},
|
||||
executionTransport: {
|
||||
remoteExecution: {
|
||||
@@ -116,7 +151,6 @@ describe("cursor remote execution", () => {
|
||||
privateKey: "PRIVATE KEY",
|
||||
knownHosts: "[127.0.0.1]:2222 ssh-ed25519 AAAA",
|
||||
strictHostKeyChecking: true,
|
||||
paperclipApiUrl: "http://198.51.100.10:3102",
|
||||
},
|
||||
},
|
||||
onLog: async () => {},
|
||||
@@ -131,7 +165,6 @@ describe("cursor remote execution", () => {
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
paperclipApiUrl: "http://198.51.100.10:3102",
|
||||
},
|
||||
});
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledTimes(1);
|
||||
@@ -150,8 +183,24 @@ describe("cursor remote execution", () => {
|
||||
| undefined;
|
||||
expect(call?.[2]).toContain("--workspace");
|
||||
expect(call?.[2]).toContain("/remote/workspace");
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://198.51.100.10:3102");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import { fileURLToPath } from "node:url";
|
||||
import { inferOpenAiCompatibleBiller, type AdapterExecutionContext, type AdapterExecutionResult } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetPaperclipApiUrl,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
@@ -13,6 +12,7 @@ import {
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetRuntimeCommandInstalled,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
readAdapterExecutionTargetHomeDir,
|
||||
@@ -37,6 +37,7 @@ import {
|
||||
removeMaintainerOnlySkillSymlinks,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
joinPromptSections,
|
||||
@@ -222,6 +223,13 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const useConfiguredInsteadOfAgentHome = workspaceSource === "agent_home" && configuredCwd.length > 0;
|
||||
const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
|
||||
const cwd = effectiveWorkspaceCwd || configuredCwd || process.cwd();
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceHints,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
const cursorSkillEntries = await readPaperclipRuntimeSkillEntries(config, __moduleDir);
|
||||
const desiredCursorSkillNames = resolvePaperclipDesiredSkillNames(config, cursorSkillEntries);
|
||||
@@ -282,19 +290,15 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
}
|
||||
const targetPaperclipApiUrl = adapterExecutionTargetPaperclipApiUrl(executionTarget);
|
||||
if (targetPaperclipApiUrl) {
|
||||
env.PAPERCLIP_API_URL = targetPaperclipApiUrl;
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
for (const [k, v] of Object.entries(envConfig)) {
|
||||
if (typeof v === "string") env[k] = v;
|
||||
@@ -304,6 +308,17 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
installCommand: ctx.runtimeCommandSpec?.installCommand,
|
||||
detectCommand: ctx.runtimeCommandSpec?.detectCommand,
|
||||
cwd,
|
||||
env,
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
onLog,
|
||||
});
|
||||
// Probe the sandbox before the managed-home override so we discover
|
||||
// cursor-agent from the real system HOME (e.g. ~/.local/bin/cursor-agent).
|
||||
// The managed HOME set later is for runtime isolation, not for finding the CLI.
|
||||
@@ -339,7 +354,6 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
return asStringArray(config.args);
|
||||
})();
|
||||
const autoTrustEnabled = !hasCursorTrustBypassArg(extraArgs);
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
let restoreRemoteWorkspace: (() => Promise<void>) | null = null;
|
||||
let localSkillsDir: string | null = null;
|
||||
let remoteRuntimeRootDir: string | null = null;
|
||||
|
||||
@@ -61,8 +61,6 @@ export function buildCursorLocalConfig(v: CreateConfigValues): Record<string, un
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
ac.model = v.model || DEFAULT_CURSOR_LOCAL_MODEL;
|
||||
const mode = normalizeMode(v.thinkingEffort);
|
||||
if (mode) ac.mode = mode;
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import type { AdapterModelProfileDefinition } from "@paperclipai/adapter-utils";
|
||||
|
||||
export const type = "gemini_local";
|
||||
export const label = "Gemini CLI (local)";
|
||||
|
||||
export const DEFAULT_GEMINI_LOCAL_MODEL = "auto";
|
||||
|
||||
export const models = [
|
||||
@@ -11,6 +14,18 @@ export const models = [
|
||||
{ id: "gemini-2.0-flash-lite", label: "Gemini 2.0 Flash Lite" },
|
||||
];
|
||||
|
||||
export const modelProfiles: AdapterModelProfileDefinition[] = [
|
||||
{
|
||||
key: "cheap",
|
||||
label: "Cheap",
|
||||
description: "Use Gemini Flash Lite as the budget Gemini CLI lane while preserving the primary model.",
|
||||
adapterConfig: {
|
||||
model: "gemini-2.5-flash-lite",
|
||||
},
|
||||
source: "adapter_default",
|
||||
},
|
||||
];
|
||||
|
||||
export const agentConfigurationDoc = `# gemini_local agent configuration
|
||||
|
||||
Adapter: gemini_local
|
||||
|
||||
@@ -11,6 +11,7 @@ const {
|
||||
restoreWorkspaceFromSshExecution,
|
||||
runSshCommand,
|
||||
syncDirectoryToSsh,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} = vi.hoisted(() => ({
|
||||
runChildProcess: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
@@ -41,6 +42,14 @@ const {
|
||||
exitCode: 0,
|
||||
})),
|
||||
syncDirectoryToSsh: vi.fn(async () => undefined),
|
||||
startAdapterExecutionTargetPaperclipBridge: vi.fn(async () => ({
|
||||
env: {
|
||||
PAPERCLIP_API_URL: "http://127.0.0.1:4310",
|
||||
PAPERCLIP_API_KEY: "bridge-token",
|
||||
PAPERCLIP_API_BRIDGE_MODE: "queue_v1",
|
||||
},
|
||||
stop: async () => {},
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/server-utils", async () => {
|
||||
@@ -68,6 +77,16 @@ vi.mock("@paperclipai/adapter-utils/ssh", async () => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/execution-target", async () => {
|
||||
const actual = await vi.importActual<typeof import("@paperclipai/adapter-utils/execution-target")>(
|
||||
"@paperclipai/adapter-utils/execution-target",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
};
|
||||
});
|
||||
|
||||
import { execute } from "./execute.js";
|
||||
|
||||
describe("gemini remote execution", () => {
|
||||
@@ -86,7 +105,9 @@ describe("gemini remote execution", () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-gemini-remote-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const alternateWorkspaceDir = path.join(rootDir, "workspace-other");
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(alternateWorkspaceDir, { recursive: true });
|
||||
|
||||
const result = await execute({
|
||||
runId: "run-1",
|
||||
@@ -111,6 +132,20 @@ describe("gemini remote execution", () => {
|
||||
cwd: workspaceDir,
|
||||
source: "project_primary",
|
||||
},
|
||||
paperclipWorkspaces: [
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: workspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
cwd: alternateWorkspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
],
|
||||
},
|
||||
executionTransport: {
|
||||
remoteExecution: {
|
||||
@@ -122,7 +157,6 @@ describe("gemini remote execution", () => {
|
||||
privateKey: "PRIVATE KEY",
|
||||
knownHosts: "[127.0.0.1]:2222 ssh-ed25519 AAAA",
|
||||
strictHostKeyChecking: true,
|
||||
paperclipApiUrl: "http://198.51.100.10:3102",
|
||||
},
|
||||
},
|
||||
onLog: async () => {},
|
||||
@@ -137,7 +171,6 @@ describe("gemini remote execution", () => {
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
paperclipApiUrl: "http://198.51.100.10:3102",
|
||||
},
|
||||
});
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledTimes(1);
|
||||
@@ -154,8 +187,24 @@ describe("gemini remote execution", () => {
|
||||
const call = runChildProcess.mock.calls[0] as unknown as
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://198.51.100.10:3102");
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ import { fileURLToPath } from "node:url";
|
||||
import type { AdapterExecutionContext, AdapterExecutionResult } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetPaperclipApiUrl,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
@@ -14,6 +13,7 @@ import {
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetRuntimeCommandInstalled,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
readAdapterExecutionTargetHomeDir,
|
||||
@@ -40,6 +40,7 @@ import {
|
||||
parseObject,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
runChildProcess,
|
||||
@@ -200,6 +201,13 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const useConfiguredInsteadOfAgentHome = workspaceSource === "agent_home" && configuredCwd.length > 0;
|
||||
const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
|
||||
const cwd = effectiveWorkspaceCwd || configuredCwd || process.cwd();
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceHints,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
const geminiSkillEntries = await readPaperclipRuntimeSkillEntries(config, __moduleDir);
|
||||
const desiredGeminiSkillNames = resolvePaperclipDesiredSkillNames(config, geminiSkillEntries);
|
||||
@@ -244,17 +252,16 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
const targetPaperclipApiUrl = adapterExecutionTargetPaperclipApiUrl(executionTarget);
|
||||
if (targetPaperclipApiUrl) env.PAPERCLIP_API_URL = targetPaperclipApiUrl;
|
||||
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
for (const [key, value] of Object.entries(envConfig)) {
|
||||
if (typeof value === "string") env[key] = value;
|
||||
}
|
||||
@@ -267,7 +274,24 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
),
|
||||
);
|
||||
const billingType = resolveGeminiBillingType(effectiveEnv);
|
||||
const runtimeEnv = ensurePathInEnv(effectiveEnv);
|
||||
const runtimeEnv = Object.fromEntries(
|
||||
Object.entries(ensurePathInEnv(effectiveEnv)).filter(
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
);
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
installCommand: ctx.runtimeCommandSpec?.installCommand,
|
||||
detectCommand: ctx.runtimeCommandSpec?.detectCommand,
|
||||
cwd,
|
||||
env: runtimeEnv,
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
onLog,
|
||||
});
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
let loggedEnv = buildInvocationEnvForLogs(env, {
|
||||
@@ -276,14 +300,11 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
resolvedCommand,
|
||||
});
|
||||
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
const extraArgs = (() => {
|
||||
const fromExtraArgs = asStringArray(config.extraArgs);
|
||||
if (fromExtraArgs.length > 0) return fromExtraArgs;
|
||||
return asStringArray(config.args);
|
||||
})();
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
let restoreRemoteWorkspace: (() => Promise<void>) | null = null;
|
||||
let remoteSkillsDir: string | null = null;
|
||||
let localSkillsDir: string | null = null;
|
||||
@@ -531,7 +552,21 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
};
|
||||
}
|
||||
|
||||
const clearSessionForTurnLimit = isGeminiTurnLimitResult(attempt.parsed.resultEvent, attempt.proc.exitCode);
|
||||
const parsedError = typeof attempt.parsed.errorMessage === "string" ? attempt.parsed.errorMessage.trim() : "";
|
||||
const stderrLine = firstNonEmptyLine(attempt.proc.stderr);
|
||||
const structuredFailure = attempt.parsed.resultEvent
|
||||
? describeGeminiFailure(attempt.parsed.resultEvent)
|
||||
: null;
|
||||
const fallbackErrorMessage =
|
||||
parsedError ||
|
||||
structuredFailure ||
|
||||
stderrLine ||
|
||||
`Gemini exited with code ${attempt.proc.exitCode ?? -1}`;
|
||||
const failed = (attempt.proc.exitCode ?? 0) !== 0;
|
||||
const clearSessionForTurnLimit = isGeminiTurnLimitResult(
|
||||
attempt.parsed.resultEvent,
|
||||
attempt.proc.exitCode,
|
||||
);
|
||||
|
||||
// On retry, don't fall back to old session ID — the old session was stale
|
||||
const canFallbackToRuntimeSession = !isRetry;
|
||||
@@ -551,23 +586,24 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
: {}),
|
||||
} as Record<string, unknown>)
|
||||
: null;
|
||||
const parsedError = typeof attempt.parsed.errorMessage === "string" ? attempt.parsed.errorMessage.trim() : "";
|
||||
const stderrLine = firstNonEmptyLine(attempt.proc.stderr);
|
||||
const structuredFailure = attempt.parsed.resultEvent
|
||||
? describeGeminiFailure(attempt.parsed.resultEvent)
|
||||
: null;
|
||||
const fallbackErrorMessage =
|
||||
parsedError ||
|
||||
structuredFailure ||
|
||||
stderrLine ||
|
||||
`Gemini exited with code ${attempt.proc.exitCode ?? -1}`;
|
||||
const resultJson: Record<string, unknown> = {
|
||||
...(attempt.parsed.resultEvent ?? {
|
||||
stdout: attempt.proc.stdout,
|
||||
stderr: attempt.proc.stderr,
|
||||
}),
|
||||
...(failed && clearSessionForTurnLimit ? { stopReason: "max_turns_exhausted" } : {}),
|
||||
};
|
||||
|
||||
return {
|
||||
exitCode: attempt.proc.exitCode,
|
||||
signal: attempt.proc.signal,
|
||||
timedOut: false,
|
||||
errorMessage: (attempt.proc.exitCode ?? 0) === 0 ? null : fallbackErrorMessage,
|
||||
errorCode: (attempt.proc.exitCode ?? 0) !== 0 && authMeta.requiresAuth ? "gemini_auth_required" : null,
|
||||
errorMessage: failed ? fallbackErrorMessage : null,
|
||||
errorCode: failed && authMeta.requiresAuth
|
||||
? "gemini_auth_required"
|
||||
: failed && clearSessionForTurnLimit
|
||||
? "max_turns_exhausted"
|
||||
: null,
|
||||
usage: attempt.parsed.usage,
|
||||
sessionId: resolvedSessionId,
|
||||
sessionParams: resolvedSessionParams,
|
||||
@@ -577,10 +613,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
model,
|
||||
billingType,
|
||||
costUsd: attempt.parsed.costUsd,
|
||||
resultJson: attempt.parsed.resultEvent ?? {
|
||||
stdout: attempt.proc.stdout,
|
||||
stderr: attempt.proc.stderr,
|
||||
},
|
||||
resultJson,
|
||||
summary: attempt.parsed.summary,
|
||||
question: attempt.parsed.question,
|
||||
clearSession: clearSessionForTurnLimit || Boolean(clearSessionOnMissingSession && !resolvedSessionId),
|
||||
|
||||
46
packages/adapters/gemini-local/src/server/parse.test.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { parseGeminiJsonl } from "./parse.js";
|
||||
|
||||
describe("parseGeminiJsonl", () => {
|
||||
it("collects assistant text from message events with string content", () => {
|
||||
const stdout = [
|
||||
'{"type":"init","session_id":"session-1"}',
|
||||
'{"type":"message","role":"user","content":"Respond with hello."}',
|
||||
'{"type":"message","role":"assistant","content":"hello","delta":true}',
|
||||
'{"type":"result","status":"success"}',
|
||||
].join("\n");
|
||||
|
||||
const parsed = parseGeminiJsonl(stdout);
|
||||
|
||||
expect(parsed.sessionId).toBe("session-1");
|
||||
expect(parsed.summary).toBe("hello");
|
||||
expect(parsed.errorMessage).toBeNull();
|
||||
});
|
||||
|
||||
it("collects assistant text from message events with structured object content", () => {
|
||||
const stdout = [
|
||||
'{"type":"init","session_id":"session-2"}',
|
||||
'{"type":"message","role":"assistant","content":{"content":[{"type":"text","text":"first part"},{"type":"text","text":"second part"}]}}',
|
||||
'{"type":"result","status":"success"}',
|
||||
].join("\n");
|
||||
|
||||
const parsed = parseGeminiJsonl(stdout);
|
||||
|
||||
expect(parsed.sessionId).toBe("session-2");
|
||||
expect(parsed.summary).toBe("first part\n\nsecond part");
|
||||
expect(parsed.errorMessage).toBeNull();
|
||||
});
|
||||
|
||||
it("ignores non-assistant message events", () => {
|
||||
const stdout = [
|
||||
'{"type":"message","role":"user","content":"hidden user input"}',
|
||||
'{"type":"message","role":"system","content":"hidden system note"}',
|
||||
'{"type":"message","role":"assistant","content":"visible response"}',
|
||||
'{"type":"result","status":"success"}',
|
||||
].join("\n");
|
||||
|
||||
const parsed = parseGeminiJsonl(stdout);
|
||||
|
||||
expect(parsed.summary).toBe("visible response");
|
||||
});
|
||||
});
|
||||
@@ -121,6 +121,19 @@ export function parseGeminiJsonl(stdout: string) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (type === "message") {
|
||||
const role = asString(event.role, "").trim().toLowerCase();
|
||||
if (role === "assistant") {
|
||||
// Mirror the assistant-event handling above: collect every assistant
|
||||
// message including deltas. Gemini CLI emits these as discrete final
|
||||
// messages (one per assistant turn), not as cumulative streaming
|
||||
// tokens, so collecting all of them produces the expected concatenated
|
||||
// turn-by-turn summary rather than duplicated text.
|
||||
messages.push(...collectMessageText(event.content));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (type === "result") {
|
||||
resultEvent = event;
|
||||
accumulateUsage(usage, event.usage ?? event.usageMetadata);
|
||||
@@ -273,9 +286,18 @@ export function isGeminiTurnLimitResult(
|
||||
if (exitCode === 53) return true;
|
||||
if (!parsed) return false;
|
||||
|
||||
const status = asString(parsed.status, "").trim().toLowerCase();
|
||||
if (status === "turn_limit" || status === "max_turns") return true;
|
||||
const structuredStopReasons = [
|
||||
parsed.status,
|
||||
parsed.stopReason,
|
||||
parsed.stop_reason,
|
||||
parsed.errorCode,
|
||||
parsed.error_code,
|
||||
].map((value) => asString(value, "").trim().toLowerCase());
|
||||
|
||||
const error = asString(parsed.error, "").trim();
|
||||
return /turn\s*limit|max(?:imum)?\s+turns?/i.test(error);
|
||||
return structuredStopReasons.some((reason) =>
|
||||
reason === "turn_limit" ||
|
||||
reason === "max_turns" ||
|
||||
reason === "max_turns_exhausted" ||
|
||||
reason === "turn_limit_exhausted",
|
||||
);
|
||||
}
|
||||
|
||||
@@ -55,8 +55,6 @@ export function buildGeminiLocalConfig(v: CreateConfigValues): Record<string, un
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
if (v.bootstrapPrompt) ac.bootstrapPromptTemplate = v.bootstrapPrompt;
|
||||
ac.model = v.model || DEFAULT_GEMINI_LOCAL_MODEL;
|
||||
ac.timeoutSec = 0;
|
||||
ac.graceSec = 15;
|
||||
|
||||
@@ -1,8 +1,17 @@
|
||||
import type { AdapterModelProfileDefinition } from "@paperclipai/adapter-utils";
|
||||
|
||||
export const type = "opencode_local";
|
||||
export const label = "OpenCode (local)";
|
||||
|
||||
export const DEFAULT_OPENCODE_LOCAL_MODEL = "openai/gpt-5.2-codex";
|
||||
|
||||
export function isValidOpenCodeModelId(value: unknown): value is string {
|
||||
if (typeof value !== "string") return false;
|
||||
const trimmed = value.trim();
|
||||
const slashIndex = trimmed.indexOf("/");
|
||||
return Boolean(trimmed) && slashIndex > 0 && slashIndex !== trimmed.length - 1;
|
||||
}
|
||||
|
||||
export const models: Array<{ id: string; label: string }> = [
|
||||
{ id: DEFAULT_OPENCODE_LOCAL_MODEL, label: DEFAULT_OPENCODE_LOCAL_MODEL },
|
||||
{ id: "openai/gpt-5.4", label: "openai/gpt-5.4" },
|
||||
@@ -11,6 +20,19 @@ export const models: Array<{ id: string; label: string }> = [
|
||||
{ id: "openai/gpt-5.1-codex-mini", label: "openai/gpt-5.1-codex-mini" },
|
||||
];
|
||||
|
||||
export const modelProfiles: AdapterModelProfileDefinition[] = [
|
||||
{
|
||||
key: "cheap",
|
||||
label: "Cheap",
|
||||
description: "Use OpenCode's known Codex mini model as the budget lane.",
|
||||
adapterConfig: {
|
||||
model: "openai/gpt-5.1-codex-mini",
|
||||
variant: "low",
|
||||
},
|
||||
source: "adapter_default",
|
||||
},
|
||||
];
|
||||
|
||||
export const agentConfigurationDoc = `# opencode_local agent configuration
|
||||
|
||||
Adapter: opencode_local
|
||||
|
||||
@@ -11,24 +11,38 @@ const {
|
||||
restoreWorkspaceFromSshExecution,
|
||||
runSshCommand,
|
||||
syncDirectoryToSsh,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
} = vi.hoisted(() => ({
|
||||
runChildProcess: vi.fn(async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: [
|
||||
JSON.stringify({ type: "step_start", sessionID: "session_123" }),
|
||||
JSON.stringify({ type: "text", sessionID: "session_123", part: { text: "hello" } }),
|
||||
JSON.stringify({
|
||||
type: "step_finish",
|
||||
sessionID: "session_123",
|
||||
part: { cost: 0.001, tokens: { input: 1, output: 1, reasoning: 0, cache: { read: 0, write: 0 } } },
|
||||
}),
|
||||
].join("\n"),
|
||||
stderr: "",
|
||||
pid: 123,
|
||||
startedAt: new Date().toISOString(),
|
||||
})),
|
||||
runChildProcess: vi.fn(async (_runId: string, _command: string, args: string[]) => {
|
||||
if (args.includes("models")) {
|
||||
return {
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "opencode/gpt-5-nano\nopenai/gpt-4.1\n",
|
||||
stderr: "",
|
||||
pid: 122,
|
||||
startedAt: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
return {
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: [
|
||||
JSON.stringify({ type: "step_start", sessionID: "session_123" }),
|
||||
JSON.stringify({ type: "text", sessionID: "session_123", part: { text: "hello" } }),
|
||||
JSON.stringify({
|
||||
type: "step_finish",
|
||||
sessionID: "session_123",
|
||||
part: { cost: 0.001, tokens: { input: 1, output: 1, reasoning: 0, cache: { read: 0, write: 0 } } },
|
||||
}),
|
||||
].join("\n"),
|
||||
stderr: "",
|
||||
pid: 123,
|
||||
startedAt: new Date().toISOString(),
|
||||
};
|
||||
}),
|
||||
ensureCommandResolvable: vi.fn(async () => undefined),
|
||||
resolveCommandForLogs: vi.fn(async () => "ssh://fixture@127.0.0.1:2222/remote/workspace :: opencode"),
|
||||
prepareWorkspaceForSshExecution: vi.fn(async () => undefined),
|
||||
@@ -39,6 +53,14 @@ const {
|
||||
exitCode: 0,
|
||||
})),
|
||||
syncDirectoryToSsh: vi.fn(async () => undefined),
|
||||
startAdapterExecutionTargetPaperclipBridge: vi.fn(async () => ({
|
||||
env: {
|
||||
PAPERCLIP_API_URL: "http://127.0.0.1:4310",
|
||||
PAPERCLIP_API_KEY: "bridge-token",
|
||||
PAPERCLIP_API_BRIDGE_MODE: "queue_v1",
|
||||
},
|
||||
stop: async () => {},
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/server-utils", async () => {
|
||||
@@ -66,6 +88,16 @@ vi.mock("@paperclipai/adapter-utils/ssh", async () => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("@paperclipai/adapter-utils/execution-target", async () => {
|
||||
const actual = await vi.importActual<typeof import("@paperclipai/adapter-utils/execution-target")>(
|
||||
"@paperclipai/adapter-utils/execution-target",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
startAdapterExecutionTargetPaperclipBridge,
|
||||
};
|
||||
});
|
||||
|
||||
import { execute } from "./execute.js";
|
||||
|
||||
describe("opencode remote execution", () => {
|
||||
@@ -84,7 +116,9 @@ describe("opencode remote execution", () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-opencode-remote-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
const alternateWorkspaceDir = path.join(rootDir, "workspace-other");
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
await mkdir(alternateWorkspaceDir, { recursive: true });
|
||||
|
||||
const result = await execute({
|
||||
runId: "run-1",
|
||||
@@ -110,6 +144,20 @@ describe("opencode remote execution", () => {
|
||||
cwd: workspaceDir,
|
||||
source: "project_primary",
|
||||
},
|
||||
paperclipWorkspaces: [
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: workspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
cwd: alternateWorkspaceDir,
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
],
|
||||
},
|
||||
executionTransport: {
|
||||
remoteExecution: {
|
||||
@@ -121,7 +169,6 @@ describe("opencode remote execution", () => {
|
||||
privateKey: "PRIVATE KEY",
|
||||
knownHosts: "[127.0.0.1]:2222 ssh-ed25519 AAAA",
|
||||
strictHostKeyChecking: true,
|
||||
paperclipApiUrl: "http://198.51.100.10:3102",
|
||||
},
|
||||
},
|
||||
onLog: async () => {},
|
||||
@@ -136,7 +183,6 @@ describe("opencode remote execution", () => {
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteCwd: "/remote/workspace",
|
||||
paperclipApiUrl: "http://198.51.100.10:3102",
|
||||
},
|
||||
});
|
||||
expect(prepareWorkspaceForSshExecution).toHaveBeenCalledTimes(1);
|
||||
@@ -153,15 +199,105 @@ describe("opencode remote execution", () => {
|
||||
expect.stringContaining(".claude/skills"),
|
||||
expect.anything(),
|
||||
);
|
||||
const call = runChildProcess.mock.calls[0] as unknown as
|
||||
const runCall = runChildProcess.mock.calls.find((entry) => Array.isArray(entry[2]) && entry[2].includes("run")) as
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://198.51.100.10:3102");
|
||||
const modelProbeCall = runChildProcess.mock.calls.find((entry) => Array.isArray(entry[2]) && entry[2].includes("models")) as
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(modelProbeCall?.[2]).toEqual(["models"]);
|
||||
expect(modelProbeCall?.[3].env.XDG_CONFIG_HOME).toBe(
|
||||
"/remote/workspace/.paperclip-runtime/opencode/xdgConfig",
|
||||
);
|
||||
expect(modelProbeCall?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
const call = runCall as
|
||||
| [string, string, string[], { env: Record<string, string>; remoteExecution?: { remoteCwd: string } | null }]
|
||||
| undefined;
|
||||
expect(call?.[3].env.PAPERCLIP_WORKSPACE_CWD).toBe("/remote/workspace");
|
||||
expect(JSON.parse(call?.[3].env.PAPERCLIP_WORKSPACES_JSON ?? "[]")).toEqual([
|
||||
{
|
||||
workspaceId: "workspace-1",
|
||||
cwd: "/remote/workspace",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "main",
|
||||
},
|
||||
{
|
||||
workspaceId: "workspace-2",
|
||||
repoUrl: "https://github.com/paperclipai/paperclip.git",
|
||||
repoRef: "feature/other",
|
||||
},
|
||||
]);
|
||||
expect(call?.[3].env.PAPERCLIP_API_URL).toBe("http://127.0.0.1:4310");
|
||||
expect(call?.[3].env.PAPERCLIP_API_BRIDGE_MODE).toBe("queue_v1");
|
||||
expect(call?.[3].env.XDG_CONFIG_HOME).toBe("/remote/workspace/.paperclip-runtime/opencode/xdgConfig");
|
||||
expect(call?.[3].remoteExecution?.remoteCwd).toBe("/remote/workspace");
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).toHaveBeenCalledTimes(1);
|
||||
expect(restoreWorkspaceFromSshExecution).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("fails before the remote run when the configured model is unavailable on the SSH target", async () => {
|
||||
runChildProcess.mockImplementationOnce(async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
stdout: "openai/gpt-4.1\n",
|
||||
stderr: "",
|
||||
pid: 456,
|
||||
startedAt: new Date().toISOString(),
|
||||
}));
|
||||
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-opencode-remote-model-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
const workspaceDir = path.join(rootDir, "workspace");
|
||||
await mkdir(workspaceDir, { recursive: true });
|
||||
|
||||
await expect(() =>
|
||||
execute({
|
||||
runId: "run-ssh-model-missing",
|
||||
agent: {
|
||||
id: "agent-1",
|
||||
companyId: "company-1",
|
||||
name: "OpenCode Builder",
|
||||
adapterType: "opencode_local",
|
||||
adapterConfig: {},
|
||||
},
|
||||
runtime: {
|
||||
sessionId: null,
|
||||
sessionParams: null,
|
||||
sessionDisplayId: null,
|
||||
taskKey: null,
|
||||
},
|
||||
config: {
|
||||
command: "opencode",
|
||||
model: "opencode/gpt-5-nano",
|
||||
},
|
||||
context: {
|
||||
paperclipWorkspace: {
|
||||
cwd: workspaceDir,
|
||||
source: "project_primary",
|
||||
},
|
||||
},
|
||||
executionTransport: {
|
||||
remoteExecution: {
|
||||
host: "127.0.0.1",
|
||||
port: 2222,
|
||||
username: "fixture",
|
||||
remoteWorkspacePath: "/remote/workspace",
|
||||
remoteCwd: "/remote/workspace",
|
||||
privateKey: "PRIVATE KEY",
|
||||
knownHosts: "[127.0.0.1]:2222 ssh-ed25519 AAAA",
|
||||
strictHostKeyChecking: true,
|
||||
},
|
||||
},
|
||||
onLog: async () => {},
|
||||
}),
|
||||
).rejects.toThrow("Configured OpenCode model is unavailable on the remote execution target");
|
||||
|
||||
expect(runChildProcess).toHaveBeenCalledTimes(1);
|
||||
expect((runChildProcess.mock.calls[0]?.[2] as string[] | undefined) ?? []).toEqual(["models"]);
|
||||
expect(startAdapterExecutionTargetPaperclipBridge).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("resumes saved OpenCode sessions for remote SSH execution only when the identity matches", async () => {
|
||||
const rootDir = await mkdtemp(path.join(os.tmpdir(), "paperclip-opencode-remote-resume-"));
|
||||
cleanupDirs.push(rootDir);
|
||||
@@ -218,7 +354,9 @@ describe("opencode remote execution", () => {
|
||||
onLog: async () => {},
|
||||
});
|
||||
|
||||
const call = runChildProcess.mock.calls[0] as unknown as [string, string, string[]] | undefined;
|
||||
const call = runChildProcess.mock.calls.find((entry) => Array.isArray(entry[2]) && entry[2].includes("run")) as
|
||||
| [string, string, string[]]
|
||||
| undefined;
|
||||
expect(call?.[2]).toContain("--session");
|
||||
expect(call?.[2]).toContain("session-123");
|
||||
});
|
||||
|
||||
@@ -5,7 +5,6 @@ import { fileURLToPath } from "node:url";
|
||||
import { inferOpenAiCompatibleBiller, type AdapterExecutionContext, type AdapterExecutionResult } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
adapterExecutionTargetIsRemote,
|
||||
adapterExecutionTargetPaperclipApiUrl,
|
||||
adapterExecutionTargetRemoteCwd,
|
||||
adapterExecutionTargetSessionIdentity,
|
||||
adapterExecutionTargetSessionMatches,
|
||||
@@ -13,6 +12,7 @@ import {
|
||||
adapterExecutionTargetUsesPaperclipBridge,
|
||||
describeAdapterExecutionTarget,
|
||||
ensureAdapterExecutionTargetCommandResolvable,
|
||||
ensureAdapterExecutionTargetRuntimeCommandInstalled,
|
||||
prepareAdapterExecutionTargetRuntime,
|
||||
readAdapterExecutionTarget,
|
||||
readAdapterExecutionTargetHomeDir,
|
||||
@@ -35,6 +35,7 @@ import {
|
||||
ensurePathInEnv,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
shapePaperclipWorkspaceEnvForExecution,
|
||||
stringifyPaperclipWakePayload,
|
||||
DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE,
|
||||
runChildProcess,
|
||||
@@ -42,7 +43,11 @@ import {
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import { isOpenCodeUnknownSessionError, parseOpenCodeJsonl } from "./parse.js";
|
||||
import { ensureOpenCodeModelConfiguredAndAvailable } from "./models.js";
|
||||
import {
|
||||
ensureOpenCodeModelConfiguredAndAvailable,
|
||||
parseOpenCodeModelsOutput,
|
||||
requireOpenCodeModelId,
|
||||
} from "./models.js";
|
||||
import { removeMaintainerOnlySkillSymlinks } from "@paperclipai/adapter-utils/server-utils";
|
||||
import { prepareOpenCodeRuntimeConfig } from "./runtime-config.js";
|
||||
|
||||
@@ -68,6 +73,64 @@ function resolveOpenCodeBiller(env: Record<string, string>, provider: string | n
|
||||
return inferOpenAiCompatibleBiller(env, null) ?? provider ?? "unknown";
|
||||
}
|
||||
|
||||
const REMOTE_OPENCODE_MODELS_PROBE_DEFAULT_TIMEOUT_SEC = 20;
|
||||
|
||||
async function ensureRemoteOpenCodeModelConfiguredAndAvailable(input: {
|
||||
runId: string;
|
||||
executionTarget: NonNullable<AdapterExecutionContext["executionTarget"]>;
|
||||
command: string;
|
||||
model: string;
|
||||
cwd: string;
|
||||
env: Record<string, string>;
|
||||
timeoutSec: number;
|
||||
graceSec: number;
|
||||
}) {
|
||||
const model = requireOpenCodeModelId(input.model);
|
||||
const probeTimeoutSec = input.timeoutSec > 0
|
||||
? Math.min(input.timeoutSec, REMOTE_OPENCODE_MODELS_PROBE_DEFAULT_TIMEOUT_SEC)
|
||||
: REMOTE_OPENCODE_MODELS_PROBE_DEFAULT_TIMEOUT_SEC;
|
||||
const probe = await runAdapterExecutionTargetProcess(
|
||||
input.runId,
|
||||
input.executionTarget,
|
||||
input.command,
|
||||
["models"],
|
||||
{
|
||||
cwd: input.cwd,
|
||||
env: input.env,
|
||||
timeoutSec: probeTimeoutSec,
|
||||
graceSec: input.graceSec,
|
||||
onLog: async () => {},
|
||||
},
|
||||
);
|
||||
|
||||
if (probe.timedOut) {
|
||||
throw new Error(`\`opencode models\` timed out on the remote execution target after ${probeTimeoutSec}s.`);
|
||||
}
|
||||
|
||||
if ((probe.exitCode ?? 1) !== 0) {
|
||||
const detail = firstNonEmptyLine(probe.stderr) || firstNonEmptyLine(probe.stdout);
|
||||
throw new Error(
|
||||
detail
|
||||
? `\`opencode models\` failed on the remote execution target: ${detail}`
|
||||
: "`opencode models` failed on the remote execution target.",
|
||||
);
|
||||
}
|
||||
|
||||
const models = parseOpenCodeModelsOutput(probe.stdout);
|
||||
if (models.length === 0) {
|
||||
throw new Error(
|
||||
"OpenCode returned no models on the remote execution target. Run `opencode models` there and verify provider auth.",
|
||||
);
|
||||
}
|
||||
|
||||
if (!models.some((entry) => entry.id === model)) {
|
||||
const sample = models.slice(0, 12).map((entry) => entry.id).join(", ");
|
||||
throw new Error(
|
||||
`Configured OpenCode model is unavailable on the remote execution target: ${model}. Available models: ${sample}${models.length > 12 ? ", ..." : ""}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function claudeSkillsHome(): string {
|
||||
return path.join(os.homedir(), ".claude", "skills");
|
||||
}
|
||||
@@ -155,6 +218,13 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const useConfiguredInsteadOfAgentHome = workspaceSource === "agent_home" && configuredCwd.length > 0;
|
||||
const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
|
||||
const cwd = effectiveWorkspaceCwd || configuredCwd || process.cwd();
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
const shapedWorkspaceEnv = shapePaperclipWorkspaceEnvForExecution({
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceHints,
|
||||
executionTargetIsRemote,
|
||||
executionCwd: effectiveExecutionCwd,
|
||||
});
|
||||
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
|
||||
const openCodeSkillEntries = await readPaperclipRuntimeSkillEntries(config, __moduleDir);
|
||||
const desiredOpenCodeSkillNames = resolvePaperclipDesiredSkillNames(config, openCodeSkillEntries);
|
||||
@@ -203,17 +273,16 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
applyPaperclipWorkspaceEnv(env, {
|
||||
workspaceCwd: effectiveWorkspaceCwd,
|
||||
workspaceCwd: shapedWorkspaceEnv.workspaceCwd,
|
||||
workspaceSource,
|
||||
workspaceId,
|
||||
workspaceRepoUrl,
|
||||
workspaceRepoRef,
|
||||
agentHome,
|
||||
});
|
||||
if (workspaceHints.length > 0) env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
|
||||
const targetPaperclipApiUrl = adapterExecutionTargetPaperclipApiUrl(executionTarget);
|
||||
if (targetPaperclipApiUrl) env.PAPERCLIP_API_URL = targetPaperclipApiUrl;
|
||||
|
||||
if (shapedWorkspaceEnv.workspaceHints.length > 0) {
|
||||
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(shapedWorkspaceEnv.workspaceHints);
|
||||
}
|
||||
for (const [key, value] of Object.entries(envConfig)) {
|
||||
if (typeof value === "string") env[key] = value;
|
||||
}
|
||||
@@ -234,6 +303,19 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
(entry): entry is [string, string] => typeof entry[1] === "string",
|
||||
),
|
||||
);
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
await ensureAdapterExecutionTargetRuntimeCommandInstalled({
|
||||
runId,
|
||||
target: executionTarget,
|
||||
installCommand: ctx.runtimeCommandSpec?.installCommand,
|
||||
detectCommand: ctx.runtimeCommandSpec?.detectCommand,
|
||||
cwd,
|
||||
env: runtimeEnv,
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
onLog,
|
||||
});
|
||||
await ensureAdapterExecutionTargetCommandResolvable(command, executionTarget, cwd, runtimeEnv);
|
||||
const resolvedCommand = await resolveAdapterExecutionTargetCommandForLogs(command, executionTarget, cwd, runtimeEnv);
|
||||
let loggedEnv = buildInvocationEnvForLogs(preparedRuntimeConfig.env, {
|
||||
@@ -241,7 +323,6 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
includeRuntimeKeys: ["HOME"],
|
||||
resolvedCommand,
|
||||
});
|
||||
|
||||
if (!executionTargetIsRemote) {
|
||||
await ensureOpenCodeModelConfiguredAndAvailable({
|
||||
model,
|
||||
@@ -251,20 +332,17 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
});
|
||||
}
|
||||
|
||||
const timeoutSec = asNumber(config.timeoutSec, 0);
|
||||
const graceSec = asNumber(config.graceSec, 20);
|
||||
const extraArgs = (() => {
|
||||
const fromExtraArgs = asStringArray(config.extraArgs);
|
||||
if (fromExtraArgs.length > 0) return fromExtraArgs;
|
||||
return asStringArray(config.args);
|
||||
})();
|
||||
const effectiveExecutionCwd = adapterExecutionTargetRemoteCwd(executionTarget, cwd);
|
||||
let restoreRemoteWorkspace: (() => Promise<void>) | null = null;
|
||||
let localSkillsDir: string | null = null;
|
||||
let remoteRuntimeRootDir: string | null = null;
|
||||
let paperclipBridge: Awaited<ReturnType<typeof startAdapterExecutionTargetPaperclipBridge>> = null;
|
||||
|
||||
if (executionTargetIsRemote) {
|
||||
if (executionTarget?.kind === "remote") {
|
||||
localSkillsDir = await buildOpenCodeSkillsDir(config);
|
||||
await onLog(
|
||||
"stdout",
|
||||
@@ -315,6 +393,16 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
{ cwd, env: preparedRuntimeConfig.env, timeoutSec, graceSec, onLog },
|
||||
);
|
||||
}
|
||||
await ensureRemoteOpenCodeModelConfiguredAndAvailable({
|
||||
runId,
|
||||
executionTarget,
|
||||
command,
|
||||
model,
|
||||
cwd,
|
||||
env: preparedRuntimeConfig.env,
|
||||
timeoutSec,
|
||||
graceSec,
|
||||
});
|
||||
}
|
||||
if (executionTargetIsRemote && adapterExecutionTargetUsesPaperclipBridge(executionTarget)) {
|
||||
paperclipBridge = await startAdapterExecutionTargetPaperclipBridge({
|
||||
|
||||
@@ -67,6 +67,7 @@ export {
|
||||
listOpenCodeModels,
|
||||
discoverOpenCodeModels,
|
||||
ensureOpenCodeModelConfiguredAndAvailable,
|
||||
requireOpenCodeModelId,
|
||||
resetOpenCodeModelsCacheForTests,
|
||||
} from "./models.js";
|
||||
export { parseOpenCodeJsonl, isOpenCodeUnknownSessionError } from "./parse.js";
|
||||
|
||||