mirror of
https://github.com/paperclipai/paperclip
synced 2026-04-26 01:35:18 +02:00
Compare commits
190 Commits
PAPA-45-up
...
pap-1177-r
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b6fe9ebcbc | ||
|
|
26ebe3b002 | ||
|
|
aec88f10dd | ||
|
|
45f18d1bee | ||
|
|
2329a33f32 | ||
|
|
74481b1d1e | ||
|
|
cae7cda463 | ||
|
|
2c2e13eac2 | ||
|
|
502d60b2a8 | ||
|
|
f3e5c55f45 | ||
|
|
448e9f2be3 | ||
|
|
48704c6586 | ||
|
|
3e0ab97b12 | ||
|
|
bb980bfb33 | ||
|
|
1e4d252661 | ||
|
|
ac473820a3 | ||
|
|
2c8cb7f519 | ||
|
|
51414be269 | ||
|
|
1de1393413 | ||
|
|
669e5c87cc | ||
|
|
9a150eee65 | ||
|
|
a3ecc086d9 | ||
|
|
85ca675311 | ||
|
|
622a8e44bf | ||
|
|
d71ff903e4 | ||
|
|
492e49e1c0 | ||
|
|
f1bb175584 | ||
|
|
4b654fc81e | ||
|
|
5136381d8f | ||
|
|
0edac73a68 | ||
|
|
b3b9d99519 | ||
|
|
c414790404 | ||
|
|
1b55474a9b | ||
|
|
bf3fba36f2 | ||
|
|
dc842ff7ea | ||
|
|
758219d53f | ||
|
|
2775a5652b | ||
|
|
bd0f56e523 | ||
|
|
977e9f3e9a | ||
|
|
365b6d9bd8 | ||
|
|
6b4f3b56e4 | ||
|
|
c1d0c52985 | ||
|
|
5d6217b70b | ||
|
|
eda127a2b2 | ||
|
|
93e8e6447d | ||
|
|
13ada98e78 | ||
|
|
54ac2c6fe9 | ||
|
|
962a882799 | ||
|
|
2ac1c62ab1 | ||
|
|
2278d96d5a | ||
|
|
aff56c2297 | ||
|
|
612bab1eb6 | ||
|
|
68499eb2f4 | ||
|
|
e9c8bd4805 | ||
|
|
517fe5093e | ||
|
|
bdc8e27bf4 | ||
|
|
8cdba3ce18 | ||
|
|
1a3aee9ee1 | ||
|
|
9a8a169e95 | ||
|
|
bfa60338cc | ||
|
|
1e76bbe38c | ||
|
|
42b326bcc6 | ||
|
|
8f23270f35 | ||
|
|
97d4ce41b3 | ||
|
|
0a9a8b5a44 | ||
|
|
37d2d5ef02 | ||
|
|
55d756f9a3 | ||
|
|
7e34d6c66b | ||
|
|
8be6fe987b | ||
|
|
15bd2ef349 | ||
|
|
08fea10ce1 | ||
|
|
b74d94ba1e | ||
|
|
8f722c5751 | ||
|
|
b6e40fec54 | ||
|
|
f3ad1fc301 | ||
|
|
eefe9f39f1 | ||
|
|
5a252020d5 | ||
|
|
4c01a45d2a | ||
|
|
467f3a749a | ||
|
|
9499d0df97 | ||
|
|
dde4cc070e | ||
|
|
2f73346a64 | ||
|
|
785ce54e5e | ||
|
|
73e7007e7c | ||
|
|
c5f3b8e40a | ||
|
|
47299c511e | ||
|
|
ed97432fae | ||
|
|
0593b9b0c5 | ||
|
|
855d895a12 | ||
|
|
39d001c9b5 | ||
|
|
89ad6767c7 | ||
|
|
c171ff901c | ||
|
|
2e09570ce0 | ||
|
|
23eea392c8 | ||
|
|
3513b60dbc | ||
|
|
42989115a7 | ||
|
|
7623f679cf | ||
|
|
9be1b3f8a9 | ||
|
|
b380d6000f | ||
|
|
e23d148be1 | ||
|
|
58a1a20f5b | ||
|
|
12011fa9de | ||
|
|
11643941e6 | ||
|
|
8cdb65febb | ||
|
|
6c8569156c | ||
|
|
c19208010a | ||
|
|
8ae4c0e765 | ||
|
|
22af797ca3 | ||
|
|
27accb1bdb | ||
|
|
b9b2bf3b5b | ||
|
|
4dea302791 | ||
|
|
b825a121cb | ||
|
|
91e040a696 | ||
|
|
e75960f284 | ||
|
|
94d4a01b76 | ||
|
|
fe61e650c2 | ||
|
|
c89349687f | ||
|
|
f515f2aa12 | ||
|
|
5a9a2a9112 | ||
|
|
65818c3447 | ||
|
|
4993b5338c | ||
|
|
d3401c0518 | ||
|
|
dbb5f0c4a9 | ||
|
|
3d685335eb | ||
|
|
2615450afc | ||
|
|
35f2fc7230 | ||
|
|
d9476abecb | ||
|
|
d12650e5ac | ||
|
|
d202631016 | ||
|
|
cd2be692e9 | ||
|
|
c6d2dc8b56 | ||
|
|
80b81459a7 | ||
|
|
a07237779b | ||
|
|
21dd6acb81 | ||
|
|
b81d765d2e | ||
|
|
4efe018a8f | ||
|
|
0651f48f6c | ||
|
|
01c05b5f1b | ||
|
|
c36ea1de6f | ||
|
|
3c4b8711ec | ||
|
|
ef2cbb838f | ||
|
|
fb3aabb743 | ||
|
|
2a2fa31a03 | ||
|
|
8adae848e4 | ||
|
|
00898e8194 | ||
|
|
199a2178cf | ||
|
|
ed95fc1dda | ||
|
|
c757a07708 | ||
|
|
acfd7c260a | ||
|
|
388650afc7 | ||
|
|
d7a7bda209 | ||
|
|
47f3cdc1bb | ||
|
|
69a1593ff8 | ||
|
|
f884cbab78 | ||
|
|
14d59da316 | ||
|
|
e13c3f7c6c | ||
|
|
f8452a4520 | ||
|
|
68b2fe20bb | ||
|
|
1ce800c158 | ||
|
|
aa256fee03 | ||
|
|
112eeafd62 | ||
|
|
258c7ccd21 | ||
|
|
728fbdd199 | ||
|
|
8e42c6cdac | ||
|
|
2af64b6068 | ||
|
|
9b3ad6e616 | ||
|
|
f749efd412 | ||
|
|
f2925ae0df | ||
|
|
37b6ad42ea | ||
|
|
6d73a8a1cb | ||
|
|
acb2bc6b3b | ||
|
|
21ee44e29c | ||
|
|
58db67c318 | ||
|
|
87d46bba57 | ||
|
|
045a3d54b9 | ||
|
|
f467f3d826 | ||
|
|
2ac40aba56 | ||
|
|
8db0c7fd2f | ||
|
|
993a3262f6 | ||
|
|
a13a67de54 | ||
|
|
422dd51a87 | ||
|
|
a80edfd6d9 | ||
|
|
931678db83 | ||
|
|
dda63a4324 | ||
|
|
43fa9c3a9a | ||
|
|
c9ee8e7a7e | ||
|
|
620a5395d7 | ||
|
|
1350753f5f | ||
|
|
77faf8c668 | ||
|
|
2fca400dd9 |
33
.github/workflows/refresh-lockfile.yml
vendored
33
.github/workflows/refresh-lockfile.yml
vendored
@@ -54,10 +54,11 @@ jobs:
|
||||
id: upsert-pr
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
REPO_OWNER: ${{ github.repository_owner }}
|
||||
run: |
|
||||
if git diff --quiet -- pnpm-lock.yaml; then
|
||||
echo "Lockfile unchanged, nothing to do."
|
||||
echo "pr_created=false" >> "$GITHUB_OUTPUT"
|
||||
echo "pr_url=" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
@@ -70,28 +71,26 @@ jobs:
|
||||
git commit -m "chore(lockfile): refresh pnpm-lock.yaml"
|
||||
git push --force origin "$BRANCH"
|
||||
|
||||
# Create PR if one doesn't already exist
|
||||
existing=$(gh pr list --head "$BRANCH" --json number --jq '.[0].number')
|
||||
if [ -z "$existing" ]; then
|
||||
gh pr create \
|
||||
# Only reuse an open PR from this repository owner, not a fork with the same branch name.
|
||||
pr_url="$(
|
||||
gh pr list --state open --head "$BRANCH" --json url,headRepositoryOwner \
|
||||
--jq ".[] | select(.headRepositoryOwner.login == \"$REPO_OWNER\") | .url" |
|
||||
head -n 1
|
||||
)"
|
||||
if [ -z "$pr_url" ]; then
|
||||
pr_url="$(gh pr create \
|
||||
--head "$BRANCH" \
|
||||
--title "chore(lockfile): refresh pnpm-lock.yaml" \
|
||||
--body "Auto-generated lockfile refresh after dependencies changed on master. This PR only updates pnpm-lock.yaml."
|
||||
echo "Created new PR."
|
||||
--body "Auto-generated lockfile refresh after dependencies changed on master. This PR only updates pnpm-lock.yaml.")"
|
||||
echo "Created new PR: $pr_url"
|
||||
else
|
||||
echo "PR #$existing already exists, branch updated via force push."
|
||||
echo "PR already exists: $pr_url"
|
||||
fi
|
||||
echo "pr_created=true" >> "$GITHUB_OUTPUT"
|
||||
echo "pr_url=$pr_url" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Enable auto-merge for lockfile PR
|
||||
if: steps.upsert-pr.outputs.pr_created == 'true'
|
||||
if: steps.upsert-pr.outputs.pr_url != ''
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
pr_url="$(gh pr list --head chore/refresh-lockfile --json url --jq '.[0].url')"
|
||||
if [ -z "$pr_url" ]; then
|
||||
echo "Error: lockfile PR was not found." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
gh pr merge --auto --squash --delete-branch "$pr_url"
|
||||
gh pr merge --auto --squash --delete-branch "${{ steps.upsert-pr.outputs.pr_url }}"
|
||||
|
||||
55
AGENTS.md
55
AGENTS.md
@@ -138,7 +138,18 @@ When adding endpoints:
|
||||
- Use company selection context for company-scoped pages
|
||||
- Surface failures clearly; do not silently ignore API errors
|
||||
|
||||
## 10. Definition of Done
|
||||
## 10. Pull Request Requirements
|
||||
|
||||
When creating a pull request (via `gh pr create` or any other method), you **must** read and fill in every section of [`.github/PULL_REQUEST_TEMPLATE.md`](.github/PULL_REQUEST_TEMPLATE.md). Do not craft ad-hoc PR bodies — use the template as the structure for your PR description. Required sections:
|
||||
|
||||
- **Thinking Path** — trace reasoning from project context to this change (see `CONTRIBUTING.md` for examples)
|
||||
- **What Changed** — bullet list of concrete changes
|
||||
- **Verification** — how a reviewer can confirm it works
|
||||
- **Risks** — what could go wrong
|
||||
- **Model Used** — the AI model that produced or assisted with the change (provider, exact model ID, context window, capabilities). Write "None — human-authored" if no AI was used.
|
||||
- **Checklist** — all items checked
|
||||
|
||||
## 11. Definition of Done
|
||||
|
||||
A change is done when all are true:
|
||||
|
||||
@@ -146,3 +157,45 @@ A change is done when all are true:
|
||||
2. Typecheck, tests, and build pass
|
||||
3. Contracts are synced across db/shared/server/ui
|
||||
4. Docs updated when behavior or commands change
|
||||
5. PR description follows the [PR template](.github/PULL_REQUEST_TEMPLATE.md) with all sections filled in (including Model Used)
|
||||
|
||||
## 11. Fork-Specific: HenkDz/paperclip
|
||||
|
||||
This is a fork of `paperclipai/paperclip` with QoL patches and an **external-only** Hermes adapter story on branch `feat/externalize-hermes-adapter` ([tree](https://github.com/HenkDz/paperclip/tree/feat/externalize-hermes-adapter)).
|
||||
|
||||
### Branch Strategy
|
||||
|
||||
- `feat/externalize-hermes-adapter` → core has **no** `hermes-paperclip-adapter` dependency and **no** built-in `hermes_local` registration. Install Hermes via the Adapter Plugin manager (`@henkey/hermes-paperclip-adapter` or a `file:` path).
|
||||
- Older fork branches may still document built-in Hermes; treat this file as authoritative for the externalize branch.
|
||||
|
||||
### Hermes (plugin only)
|
||||
|
||||
- Register through **Board → Adapter manager** (same as Droid). Type remains `hermes_local` once the package is loaded.
|
||||
- UI uses generic **config-schema** + **ui-parser.js** from the package — no Hermes imports in `server/` or `ui/` source.
|
||||
- Optional: `file:` entry in `~/.paperclip/adapter-plugins.json` for local dev of the adapter repo.
|
||||
|
||||
### Local Dev
|
||||
|
||||
- Fork runs on port 3101+ (auto-detects if 3100 is taken by upstream instance)
|
||||
- `npx vite build` hangs on NTFS — use `node node_modules/vite/bin/vite.js build` instead
|
||||
- Server startup from NTFS takes 30-60s — don't assume failure immediately
|
||||
- Kill ALL paperclip processes before starting: `pkill -f "paperclip"; pkill -f "tsx.*index.ts"`
|
||||
- Vite cache survives `rm -rf dist` — delete both: `rm -rf ui/dist ui/node_modules/.vite`
|
||||
|
||||
### Fork QoL Patches (not in upstream)
|
||||
|
||||
These are local modifications in the fork's UI. If re-copying source, these must be re-applied:
|
||||
|
||||
1. **stderr_group** — amber accordion for MCP init noise in `RunTranscriptView.tsx`
|
||||
2. **tool_group** — accordion for consecutive non-terminal tools (write, read, search, browser)
|
||||
3. **Dashboard excerpt** — `LatestRunCard` strips markdown, shows first 3 lines/280 chars
|
||||
|
||||
### Plugin System
|
||||
|
||||
PR #2218 (`feat/external-adapter-phase1`) adds external adapter support. See root `AGENTS.md` for full details.
|
||||
|
||||
- Adapters can be loaded as external plugins via `~/.paperclip/adapter-plugins.json`
|
||||
- The plugin-loader should have ZERO hardcoded adapter imports — pure dynamic loading
|
||||
- `createServerAdapter()` must include ALL optional fields (especially `detectModel`)
|
||||
- Built-in UI adapters can shadow external plugin parsers — remove built-in when fully externalizing
|
||||
- Reference external adapters: Hermes (`@henkey/hermes-paperclip-adapter` or `file:`) and Droid (npm)
|
||||
|
||||
@@ -11,8 +11,9 @@ We really appreciate both small fixes and thoughtful larger changes.
|
||||
- Pick **one** clear thing to fix/improve
|
||||
- Touch the **smallest possible number of files**
|
||||
- Make sure the change is very targeted and easy to review
|
||||
- All automated checks pass (including Greptile comments)
|
||||
- No new lint/test failures
|
||||
- All tests pass and CI is green
|
||||
- Greptile score is 5/5 with all comments addressed
|
||||
- Use the [PR template](.github/PULL_REQUEST_TEMPLATE.md)
|
||||
|
||||
These almost always get merged quickly when they're clean.
|
||||
|
||||
@@ -26,11 +27,30 @@ These almost always get merged quickly when they're clean.
|
||||
- Before / After screenshots (or short video if UI/behavior change)
|
||||
- Clear description of what & why
|
||||
- Proof it works (manual testing notes)
|
||||
- All tests passing
|
||||
- All Greptile + other PR comments addressed
|
||||
- All tests passing and CI green
|
||||
- Greptile score 5/5 with all comments addressed
|
||||
- [PR template](.github/PULL_REQUEST_TEMPLATE.md) fully filled out
|
||||
|
||||
PRs that follow this path are **much** more likely to be accepted, even when they're large.
|
||||
|
||||
## PR Requirements (all PRs)
|
||||
|
||||
### Use the PR Template
|
||||
|
||||
Every pull request **must** follow the PR template at [`.github/PULL_REQUEST_TEMPLATE.md`](.github/PULL_REQUEST_TEMPLATE.md). If you create a PR via the GitHub API or other tooling that bypasses the template, copy its contents into your PR description manually. The template includes required sections: Thinking Path, What Changed, Verification, Risks, Model Used, and a Checklist.
|
||||
|
||||
### Model Used (Required)
|
||||
|
||||
Every PR must include a **Model Used** section specifying which AI model produced or assisted with the change. Include the provider, exact model ID/version, context window size, and any relevant capability details (e.g., reasoning mode, tool use). If no AI was used, write "None — human-authored". This applies to all contributors — human and AI alike.
|
||||
|
||||
### Tests Must Pass
|
||||
|
||||
All tests must pass before a PR can be merged. Run them locally first and verify CI is green after pushing.
|
||||
|
||||
### Greptile Review
|
||||
|
||||
We use [Greptile](https://greptile.com) for automated code review. Your PR must achieve a **5/5 Greptile score** with **all Greptile comments addressed** before it can be merged. If Greptile leaves comments, fix or respond to each one and request a re-review.
|
||||
|
||||
## General Rules (both paths)
|
||||
|
||||
- Write clear commit messages
|
||||
@@ -41,7 +61,7 @@ PRs that follow this path are **much** more likely to be accepted, even when the
|
||||
|
||||
## Writing a Good PR message
|
||||
|
||||
Please include a "thinking path" at the top of your PR message that explains from the top of the project down to what you fixed. E.g.:
|
||||
Your PR description must follow the [PR template](.github/PULL_REQUEST_TEMPLATE.md). All sections are required. The "thinking path" at the top explains from the top of the project down to what you fixed. E.g.:
|
||||
|
||||
### Thinking Path Example 1:
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ COPY ui/package.json ui/
|
||||
COPY packages/shared/package.json packages/shared/
|
||||
COPY packages/db/package.json packages/db/
|
||||
COPY packages/adapter-utils/package.json packages/adapter-utils/
|
||||
COPY packages/mcp-server/package.json packages/mcp-server/
|
||||
COPY packages/adapters/claude-local/package.json packages/adapters/claude-local/
|
||||
COPY packages/adapters/codex-local/package.json packages/adapters/codex-local/
|
||||
COPY packages/adapters/cursor-local/package.json packages/adapters/cursor-local/
|
||||
|
||||
143
adapter-plugin.md
Normal file
143
adapter-plugin.md
Normal file
@@ -0,0 +1,143 @@
|
||||
- Created branch: feat/external-adapter-phase1
|
||||
|
||||
I started phase 1 in the most merge-friendly way I could: small central changes, reusing existing registry patterns instead of inventing a whole new plugin system up front.
|
||||
|
||||
What I changed
|
||||
|
||||
1. Server adapter registry is now mutable
|
||||
Files:
|
||||
- server/src/adapters/registry.ts
|
||||
- server/src/adapters/index.ts
|
||||
|
||||
Added:
|
||||
- registerServerAdapter(adapter)
|
||||
- unregisterServerAdapter(type)
|
||||
- requireServerAdapter(type)
|
||||
|
||||
Kept the existing built-in registry shape, but changed initialization so built-ins are registered into a mutable map on startup.
|
||||
|
||||
Why this is merge-friendly:
|
||||
- existing built-in adapter definitions stay where they already are
|
||||
- existing lookup helpers still exist
|
||||
- no big architectural rewrite yet
|
||||
|
||||
1. Runtime adapter validation moved to server routes
|
||||
File:
|
||||
- server/src/routes/agents.ts
|
||||
|
||||
Added:
|
||||
- assertKnownAdapterType(...)
|
||||
|
||||
Used it in:
|
||||
- /companies/:companyId/adapters/:type/models
|
||||
- /companies/:companyId/adapters/:type/detect-model
|
||||
- /companies/:companyId/adapters/:type/test-environment
|
||||
- POST /companies/:companyId/agents
|
||||
- POST /companies/:companyId/agent-hires
|
||||
- PATCH /agents/:id when adapterType is touched
|
||||
|
||||
Why:
|
||||
- shared schemas can now allow external adapter strings
|
||||
- server becomes the real source of truth for “is this adapter actually registered?”
|
||||
|
||||
1. Shared adapterType validation is now open-ended for inputs
|
||||
Files:
|
||||
- packages/shared/src/adapter-type.ts
|
||||
- packages/shared/src/validators/agent.ts
|
||||
- packages/shared/src/validators/access.ts
|
||||
- packages/shared/src/index.ts
|
||||
|
||||
Changed input validation from hardcoded z.enum(AGENT_ADAPTER_TYPES) to:
|
||||
- agentAdapterTypeSchema
|
||||
- optionalAgentAdapterTypeSchema
|
||||
|
||||
These accept any non-empty string.
|
||||
|
||||
Important:
|
||||
- I did not remove AGENT_ADAPTER_TYPES from constants
|
||||
- I did not change the built-in type unions yet
|
||||
- this keeps the change minimal and limits blast radius
|
||||
|
||||
So:
|
||||
- input payloads can carry external adapter types
|
||||
- actual acceptance still depends on server registry validation
|
||||
|
||||
1. UI adapter registry is now mutable too
|
||||
Files:
|
||||
- ui/src/adapters/registry.ts
|
||||
- ui/src/adapters/index.ts
|
||||
|
||||
Added:
|
||||
- registerUIAdapter(adapter)
|
||||
- unregisterUIAdapter(type)
|
||||
- findUIAdapter(type)
|
||||
|
||||
Built-ins are still registered from the same file, same pattern as server.
|
||||
|
||||
1. Began switching UI adapter lists toward the registry
|
||||
Files:
|
||||
- ui/src/adapters/metadata.ts
|
||||
- ui/src/components/AgentConfigForm.tsx
|
||||
- ui/src/pages/NewAgent.tsx
|
||||
|
||||
Changed:
|
||||
- AgentConfigForm adapter list now comes from registered UI adapters via metadata helper
|
||||
- NewAgent supported adapter set now derives from listUIAdapters()
|
||||
|
||||
Why:
|
||||
- phase 1 should start reducing hardcoded UI lists
|
||||
- but without rewriting every page at once
|
||||
|
||||
1. Tests added first, then implementation
|
||||
New tests:
|
||||
- server/src/__tests__/adapter-registry.test.ts
|
||||
- server/src/__tests__/agent-adapter-validation-routes.test.ts
|
||||
- ui/src/adapters/registry.test.ts
|
||||
- packages/shared/src/adapter-types.test.ts
|
||||
|
||||
Small test mock fix:
|
||||
- server/src/__tests__/agent-skills-routes.test.ts
|
||||
|
||||
Current status
|
||||
|
||||
The branch is created and the phase-1 code is started.
|
||||
|
||||
I also followed TDD in spirit:
|
||||
- wrote failing tests first for missing register/unregister APIs
|
||||
- then implemented the registry changes
|
||||
|
||||
What is not finished yet
|
||||
|
||||
I have not finished the green/stabilization pass yet.
|
||||
|
||||
I still need to:
|
||||
- run the targeted tests again after the implementation changes
|
||||
- fix any TS / mocking fallout
|
||||
- decide how far to take phase-1 UI wiring in this branch before stopping
|
||||
|
||||
Also, I intentionally did not yet touch the bigger hardcoded surfaces like:
|
||||
- NewAgentDialog
|
||||
- OnboardingWizard
|
||||
- InviteLanding
|
||||
- duplicated adapterLabels maps
|
||||
- plugin manifest / plugin loader adapter contributions
|
||||
|
||||
That is deliberate.
|
||||
For merge safety, I started with the smallest useful seam:
|
||||
- dynamic server registry
|
||||
- dynamic UI registry
|
||||
- runtime validation based on registry
|
||||
- first couple of UI consumers
|
||||
|
||||
One note
|
||||
There was already an unrelated dirty change in:
|
||||
- server/scripts/dev-watch.ts
|
||||
|
||||
I left that alone and did not make phase-1 changes there.
|
||||
|
||||
Recommended next move
|
||||
I should keep going on this same branch and do the stabilization pass:
|
||||
1. rerun server/ui/shared targeted tests
|
||||
2. fix failures
|
||||
3. run typechecks
|
||||
4. then extend the same registry-driven approach to the next safest UI surfaces
|
||||
@@ -220,6 +220,7 @@ describe("renderCompanyImportPreview", () => {
|
||||
status: null,
|
||||
executionWorkspacePolicy: null,
|
||||
workspaces: [],
|
||||
env: null,
|
||||
metadata: null,
|
||||
},
|
||||
],
|
||||
@@ -250,6 +251,7 @@ describe("renderCompanyImportPreview", () => {
|
||||
key: "OPENAI_API_KEY",
|
||||
description: null,
|
||||
agentSlug: "ceo",
|
||||
projectSlug: null,
|
||||
kind: "secret",
|
||||
requirement: "required",
|
||||
defaultValue: null,
|
||||
@@ -265,6 +267,7 @@ describe("renderCompanyImportPreview", () => {
|
||||
key: "OPENAI_API_KEY",
|
||||
description: null,
|
||||
agentSlug: "ceo",
|
||||
projectSlug: null,
|
||||
kind: "secret",
|
||||
requirement: "required",
|
||||
defaultValue: null,
|
||||
@@ -432,6 +435,7 @@ describe("import selection catalog", () => {
|
||||
status: null,
|
||||
executionWorkspacePolicy: null,
|
||||
workspaces: [],
|
||||
env: null,
|
||||
metadata: null,
|
||||
},
|
||||
],
|
||||
|
||||
@@ -184,6 +184,11 @@ Invariant: at least one root `company` level goal per company.
|
||||
- `status` enum: `backlog | planned | in_progress | completed | cancelled`
|
||||
- `lead_agent_id` uuid fk `agents.id` null
|
||||
- `target_date` date null
|
||||
- `env` jsonb null (same secret-aware env binding format used by agent config)
|
||||
|
||||
Invariant:
|
||||
|
||||
- project env is merged into run environment for issues in that project and overrides conflicting agent env keys before Paperclip runtime-owned keys are injected
|
||||
|
||||
## 7.6 `issues` (core task entity)
|
||||
|
||||
@@ -491,7 +496,7 @@ All endpoints are under `/api` and return JSON.
|
||||
```json
|
||||
{
|
||||
"agentId": "uuid",
|
||||
"expectedStatuses": ["todo", "backlog", "blocked"]
|
||||
"expectedStatuses": ["todo", "backlog", "blocked", "in_review"]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
362
doc/plans/2026-04-06-smart-model-routing.md
Normal file
362
doc/plans/2026-04-06-smart-model-routing.md
Normal file
@@ -0,0 +1,362 @@
|
||||
# 2026-04-06 Smart Model Routing
|
||||
|
||||
Status: Proposed
|
||||
Date: 2026-04-06
|
||||
Audience: Product and engineering
|
||||
Related:
|
||||
- `doc/SPEC-implementation.md`
|
||||
- `doc/PRODUCT.md`
|
||||
- `doc/plans/2026-03-14-adapter-skill-sync-rollout.md`
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
This document defines a V1 plan for "smart model routing" in Paperclip.
|
||||
|
||||
The goal is not to build a generic cross-provider router in the server. The goal is:
|
||||
|
||||
- let supported adapters use a cheaper model for lightweight heartbeat orchestration work
|
||||
- keep the main task execution on the adapter's normal primary model
|
||||
- preserve Paperclip's existing task, session, and audit invariants
|
||||
- report cost and model usage truthfully when more than one model participates in a single heartbeat
|
||||
|
||||
The motivating use case is a local coding adapter where a cheap model can handle the first fast pass:
|
||||
|
||||
- read the wake context
|
||||
- orient to the task and workspace
|
||||
- leave an immediate progress comment when appropriate
|
||||
- perform bounded lightweight triage
|
||||
|
||||
Then the primary model does the substantive work.
|
||||
|
||||
## 2. Hermes Findings
|
||||
|
||||
Hermes does have a real "smart model routing" feature, but it is narrower than the name suggests.
|
||||
|
||||
Observed behavior:
|
||||
|
||||
- `agent/smart_model_routing.py` implements a conservative classifier for "simple" turns
|
||||
- the cheap path only triggers for short, single-line, non-code, non-URL, non-tool-heavy messages
|
||||
- complexity is detected with hardcoded thresholds plus a keyword denylist like `debug`, `implement`, `test`, `plan`, `tool`, `docker`, and similar terms
|
||||
- if the cheap route cannot be resolved, Hermes silently falls back to the primary model
|
||||
|
||||
Important architectural detail:
|
||||
|
||||
- Hermes applies this routing before constructing the agent for that turn
|
||||
- the route is resolved in `cron/scheduler.py` and passed into agent creation as the active provider/model/runtime
|
||||
|
||||
More useful than the routing heuristic itself is Hermes' broader model-slot design:
|
||||
|
||||
- main conversational model
|
||||
- fallback model for failover
|
||||
- auxiliary model slots for side tasks like compression and classification
|
||||
|
||||
That separation is a better fit for Paperclip than copying Hermes' exact keyword heuristic.
|
||||
|
||||
## 3. Current Paperclip State
|
||||
|
||||
Paperclip already has the right execution shape for adapter-specific routing, but it currently assumes one model per heartbeat run.
|
||||
|
||||
Current implementation facts:
|
||||
|
||||
- `server/src/services/heartbeat.ts` builds rich run context, including `paperclipWake`, workspace metadata, and session handoff context
|
||||
- each adapter receives a single resolved `config` object and executes once
|
||||
- built-in local adapters read one `config.model` and pass it directly to the underlying CLI
|
||||
- UI config today exposes one main `model` field plus adapter-specific thinking-effort controls
|
||||
- cost accounting currently records one provider/model tuple per run via `AdapterExecutionResult`
|
||||
|
||||
What this means:
|
||||
|
||||
- there is no shared routing layer in the server today
|
||||
- model choice already lives at the adapter boundary, which is good
|
||||
- multi-model execution in a single heartbeat needs explicit contract work or cost reporting will become misleading
|
||||
|
||||
## 4. Product Decision
|
||||
|
||||
Paperclip should implement smart model routing as an adapter-local, opt-in execution pattern.
|
||||
|
||||
V1 decision:
|
||||
|
||||
1. Do not add a global server-side router that tries to understand every adapter.
|
||||
2. Do not copy Hermes' prompt-keyword classifier as Paperclip's default routing policy.
|
||||
3. Add an adapter-specific "cheap preflight" phase for supported adapters.
|
||||
4. Keep the primary model as the canonical work model.
|
||||
5. Persist only the primary session unless an adapter can prove that cross-model session resume is safe.
|
||||
|
||||
Rationale:
|
||||
|
||||
- Paperclip heartbeats are structured, issue-scoped, and already include wake metadata
|
||||
- routing by execution phase is more reliable than routing by free-text prompt complexity
|
||||
- session semantics differ by adapter, so resume behavior must stay adapter-owned
|
||||
|
||||
## 5. Proposed V1 Behavior
|
||||
|
||||
## 5.1 Config shape
|
||||
|
||||
Supported adapters should add an optional routing block to `adapterConfig`.
|
||||
|
||||
Proposed shape:
|
||||
|
||||
```ts
|
||||
smartModelRouting?: {
|
||||
enabled: boolean;
|
||||
cheapModel: string;
|
||||
cheapThinkingEffort?: string;
|
||||
maxPreflightTurns?: number;
|
||||
allowInitialProgressComment?: boolean;
|
||||
}
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- keep existing `model` as the primary model
|
||||
- `cheapModel` is adapter-specific, not global
|
||||
- adapters that cannot safely support this block simply ignore it
|
||||
|
||||
For adapters with provider-specific model fields later, the shape can expand to include provider/base-url overrides. V1 should start simple.
|
||||
|
||||
## 5.2 Routing policy
|
||||
|
||||
Supported adapters should run cheap preflight only when all are true:
|
||||
|
||||
- `smartModelRouting.enabled` is true
|
||||
- `cheapModel` is configured
|
||||
- the run is issue-scoped
|
||||
- the adapter is starting a fresh session, not resuming a persisted one
|
||||
- the run is expected to do real task work rather than just resume an existing thread
|
||||
|
||||
Supported adapters should skip cheap preflight when any are true:
|
||||
|
||||
- a persisted task session already exists
|
||||
- the adapter cannot safely isolate preflight from the primary session
|
||||
- the issue or wake type implies the task is already mid-flight and continuity matters more than first-response speed
|
||||
|
||||
This is intentionally phase-based, not text-heuristic-based.
|
||||
|
||||
## 5.3 Cheap preflight responsibilities
|
||||
|
||||
The cheap phase should be narrow and bounded.
|
||||
|
||||
Allowed responsibilities:
|
||||
|
||||
- ingest wake context and issue summary
|
||||
- inspect the workspace at a shallow level
|
||||
- leave a short "starting investigation" style comment when appropriate
|
||||
- collect a compact handoff summary for the primary phase
|
||||
|
||||
Not allowed in V1:
|
||||
|
||||
- long tool loops
|
||||
- risky file mutations
|
||||
- being the canonical persisted task session
|
||||
- deciding final completion without either explicit adapter support or a trivial success case
|
||||
|
||||
Implementation detail:
|
||||
|
||||
- the adapter should inject an explicit preflight prompt telling the model this is a bounded orchestration pass
|
||||
- preflight should use a very small turn budget, for example 1-2 turns
|
||||
|
||||
## 5.4 Primary execution responsibilities
|
||||
|
||||
After preflight, the adapter launches the normal primary execution using the existing prompt and primary model.
|
||||
|
||||
The primary phase should receive:
|
||||
|
||||
- the normal Paperclip prompt
|
||||
- any preflight-generated handoff summary
|
||||
- normal workspace and wake context
|
||||
|
||||
The primary phase remains the source of truth for:
|
||||
|
||||
- persisted session state
|
||||
- final task completion
|
||||
- most file changes
|
||||
- most cost
|
||||
|
||||
## 6. Required Contract Changes
|
||||
|
||||
The current `AdapterExecutionResult` is too narrow for truthful multi-model accounting.
|
||||
|
||||
Add an optional segmented execution report, for example:
|
||||
|
||||
```ts
|
||||
executionSegments?: Array<{
|
||||
phase: "cheap_preflight" | "primary";
|
||||
provider?: string | null;
|
||||
biller?: string | null;
|
||||
model?: string | null;
|
||||
billingType?: AdapterBillingType | null;
|
||||
usage?: UsageSummary;
|
||||
costUsd?: number | null;
|
||||
summary?: string | null;
|
||||
}>
|
||||
```
|
||||
|
||||
V1 server behavior:
|
||||
|
||||
- if `executionSegments` is absent, keep current single-result behavior unchanged
|
||||
- if present, write one `cost_events` row per segment that has cost or token usage
|
||||
- store the segment array in run usage/result metadata for later UI inspection
|
||||
- keep the existing top-level `provider` / `model` fields as a summary, preferably the primary phase when present
|
||||
|
||||
This avoids breaking existing adapters while giving routed adapters truthful reporting.
|
||||
|
||||
## 7. Adapter Rollout Plan
|
||||
|
||||
## 7.1 Phase 1: contract and server plumbing
|
||||
|
||||
Work:
|
||||
|
||||
1. Extend adapter result types with segmented execution metadata.
|
||||
2. Update heartbeat cost recording to emit multiple cost events when segments are present.
|
||||
3. Include segment summaries in run metadata for transcript/debug views.
|
||||
|
||||
Success criteria:
|
||||
|
||||
- existing adapters behave exactly as before
|
||||
- a routed adapter can report cheap plus primary usage without collapsing them into one fake model
|
||||
|
||||
## 7.2 Phase 2: `codex_local`
|
||||
|
||||
Why first:
|
||||
|
||||
- Codex already has rich prompt/handoff handling
|
||||
- the adapter already injects Paperclip skills and workspace metadata cleanly
|
||||
- the current implementation already distinguishes bootstrap, wake delta, and handoff prompt sections
|
||||
|
||||
Implementation work:
|
||||
|
||||
1. Add config support for `smartModelRouting`.
|
||||
2. Add a cheap-preflight prompt builder.
|
||||
3. Run cheap preflight only on fresh sessions.
|
||||
4. Pass a compact preflight handoff note into the primary prompt.
|
||||
5. Report segmented usage and model metadata.
|
||||
|
||||
Important guardrail:
|
||||
|
||||
- do not resume the cheap-model session as the primary session in V1
|
||||
|
||||
## 7.3 Phase 3: `claude_local`
|
||||
|
||||
Implementation work is similar, but the session model-switch risk is even less attractive.
|
||||
|
||||
Same rule:
|
||||
|
||||
- cheap preflight is ephemeral
|
||||
- primary Claude session remains canonical
|
||||
|
||||
## 7.4 Phase 4: other adapters
|
||||
|
||||
Candidates:
|
||||
|
||||
- `cursor`
|
||||
- `gemini_local`
|
||||
- `opencode_local`
|
||||
- external plugin adapters through `createServerAdapter()`
|
||||
|
||||
These should come later because each runtime has different session and model-switch semantics.
|
||||
|
||||
## 8. UI and Config Changes
|
||||
|
||||
For supported built-in adapters, the agent config UI should expose:
|
||||
|
||||
- `model` as the primary model
|
||||
- `smart model routing` toggle
|
||||
- `cheap model`
|
||||
- optional cheap thinking effort
|
||||
- optional `allow initial progress comment` toggle
|
||||
|
||||
The run detail UI should also show when routing occurred, for example:
|
||||
|
||||
- cheap preflight model
|
||||
- primary model
|
||||
- token/cost split
|
||||
|
||||
This matters because Paperclip's board UI is supposed to make cost and behavior legible.
|
||||
|
||||
## 9. Why Not Copy Hermes Exactly
|
||||
|
||||
Hermes' cheap-route heuristic is useful precedent, but Paperclip should not start there.
|
||||
|
||||
Reasons:
|
||||
|
||||
- Hermes is optimizing free-form conversational turns
|
||||
- Paperclip agents run structured, issue-scoped heartbeats with explicit task and workspace context
|
||||
- Paperclip already knows whether a run is fresh vs resumed, issue-scoped vs approval follow-up, and what workspace/session exists
|
||||
- those execution facts are stronger routing signals than prompt keyword matching
|
||||
|
||||
If Paperclip later wants a cheap-only completion path for trivial runs, that can be a second-stage feature built on observed run data, not the first implementation.
|
||||
|
||||
## 10. Risks
|
||||
|
||||
## 10.1 Duplicate or noisy comments
|
||||
|
||||
If the cheap phase posts an update and the primary phase posts another near-identical update, the issue thread gets worse.
|
||||
|
||||
Mitigation:
|
||||
|
||||
- keep cheap comments optional
|
||||
- make the preflight prompt explicitly avoid repeating status if a useful comment was already posted
|
||||
|
||||
## 10.2 Misleading cost reporting
|
||||
|
||||
If we only record the primary model, the board loses visibility into the routing cost tradeoff.
|
||||
|
||||
Mitigation:
|
||||
|
||||
- add segmented execution reporting before shipping adapter behavior
|
||||
|
||||
## 10.3 Session corruption
|
||||
|
||||
Cross-model session reuse may fail or degrade context quality.
|
||||
|
||||
Mitigation:
|
||||
|
||||
- V1 does not persist or resume cheap preflight sessions
|
||||
|
||||
## 10.4 Cheap model overreach
|
||||
|
||||
A cheap model with full tools and permissions may do too much low-quality work.
|
||||
|
||||
Mitigation:
|
||||
|
||||
- hard cap preflight turns
|
||||
- use an explicit orchestration-only prompt
|
||||
- start with supported adapters where we can test the behavior well
|
||||
|
||||
## 11. Verification Plan
|
||||
|
||||
Required tests:
|
||||
|
||||
- adapter unit tests for route eligibility
|
||||
- adapter unit tests for "fresh session -> cheap preflight + primary"
|
||||
- adapter unit tests for "resumed session -> primary only"
|
||||
- heartbeat tests for segmented cost-event creation
|
||||
- UI tests for config save/load of cheap-model fields
|
||||
|
||||
Manual checks:
|
||||
|
||||
- create a fresh issue for a routed Codex or Claude agent
|
||||
- verify the run metadata shows both phases
|
||||
- verify only the primary session is persisted
|
||||
- verify cost rows reflect both models
|
||||
- verify the issue thread does not get duplicate kickoff comments
|
||||
|
||||
## 12. Recommended Sequence
|
||||
|
||||
1. Add segmented execution reporting to the adapter/server contract.
|
||||
2. Implement `codex_local` cheap preflight.
|
||||
3. Validate cost visibility and transcript UX.
|
||||
4. Implement `claude_local` cheap preflight.
|
||||
5. Decide later whether any adapters need Hermes-style text heuristics in addition to phase-based routing.
|
||||
|
||||
## 13. Recommendation
|
||||
|
||||
Paperclip should ship smart model routing as:
|
||||
|
||||
- adapter-specific
|
||||
- opt-in
|
||||
- phase-based
|
||||
- session-safe
|
||||
- cost-truthful
|
||||
|
||||
The right V1 is not "choose the cheapest model for simple prompts." The right V1 is "use a cheap model for bounded orchestration work on fresh runs, then hand off to the primary model for the real task."
|
||||
209
doc/plans/2026-04-06-subissue-creation-on-issue-detail.md
Normal file
209
doc/plans/2026-04-06-subissue-creation-on-issue-detail.md
Normal file
@@ -0,0 +1,209 @@
|
||||
# 2026-04-06 Sub-issue Creation On Issue Detail Plan
|
||||
|
||||
Status: Proposed
|
||||
Date: 2026-04-06
|
||||
Audience: Product and engineering
|
||||
Related:
|
||||
- `ui/src/pages/IssueDetail.tsx`
|
||||
- `ui/src/components/IssueProperties.tsx`
|
||||
- `ui/src/components/NewIssueDialog.tsx`
|
||||
- `ui/src/context/DialogContext.tsx`
|
||||
- `packages/shared/src/validators/issue.ts`
|
||||
- `server/src/services/issues.ts`
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
This document defines the implementation plan for adding manual sub-issue creation from the issue detail page.
|
||||
|
||||
Requested UX:
|
||||
|
||||
- the `Sub-issues` tab should always show an `Add sub-issue` action, even when there are no children yet
|
||||
- the properties pane should also expose a `Sub-issues` section with the same `Add sub-issue` entry point
|
||||
- both entry points should open the existing new-issue dialog in a "create sub-issue" mode
|
||||
- the dialog should only show sub-issue-specific UI when it was opened from one of those entry points
|
||||
|
||||
This is a UI-first change. The backend already supports child issue creation with `parentId`.
|
||||
|
||||
## 2. Current State
|
||||
|
||||
### 2.1 Existing child issue display
|
||||
|
||||
`ui/src/pages/IssueDetail.tsx` already derives `childIssues` by filtering the company issue list on `parentId === issue.id`.
|
||||
|
||||
Current limitation:
|
||||
|
||||
- the `Sub-issues` tab only renders the empty state or the child issue list
|
||||
- there is no action to create a child issue from that tab
|
||||
|
||||
### 2.2 Existing properties pane
|
||||
|
||||
`ui/src/components/IssueProperties.tsx` shows `Blocked by`, `Blocking`, and `Parent`, but it has no sub-issue section or child issue affordance.
|
||||
|
||||
### 2.3 Existing dialog state
|
||||
|
||||
`ui/src/context/DialogContext.tsx` can open the global new-issue dialog with defaults such as status, priority, project, assignee, title, and description.
|
||||
|
||||
Current limitation:
|
||||
|
||||
- there is no way to pass sub-issue context like `parentId`
|
||||
- `ui/src/components/NewIssueDialog.tsx` therefore cannot submit a child issue or render parent-specific context
|
||||
|
||||
### 2.4 Backend contract already exists
|
||||
|
||||
The create-issue validator already accepts `parentId`.
|
||||
|
||||
`server/src/services/issues.ts` already uses:
|
||||
|
||||
- `parentId` for parent-child issue relationships
|
||||
- `parentId` as the default workspace inheritance source when `inheritExecutionWorkspaceFromIssueId` is not provided
|
||||
|
||||
That means the required API and workspace inheritance behavior already exist. No server or schema change is required for the first pass.
|
||||
|
||||
## 3. Proposed Implementation
|
||||
|
||||
## 3.1 Extend dialog defaults for sub-issue context
|
||||
|
||||
Extend `NewIssueDefaults` in `ui/src/context/DialogContext.tsx` with:
|
||||
|
||||
- `parentId?: string`
|
||||
- optional parent display metadata for the dialog header, for example:
|
||||
- `parentIdentifier?: string`
|
||||
- `parentTitle?: string`
|
||||
|
||||
This keeps the dialog self-contained and avoids re-fetching parent context purely for presentation.
|
||||
|
||||
## 3.2 Add issue-detail entry points
|
||||
|
||||
Use `openNewIssue(...)` from `ui/src/pages/IssueDetail.tsx` in two places:
|
||||
|
||||
1. `Sub-issues` tab
|
||||
2. properties pane via props passed into `IssueProperties`
|
||||
|
||||
Both entry points should pass:
|
||||
|
||||
- `parentId: issue.id`
|
||||
- `parentIdentifier: issue.identifier ?? issue.id`
|
||||
- `parentTitle: issue.title`
|
||||
- `projectId: issue.projectId ?? undefined`
|
||||
|
||||
Using the current issue's `projectId` preserves the common expectation that sub-issues stay inside the same project unless the operator changes it in the dialog.
|
||||
|
||||
No special assignee default should be forced in V1.
|
||||
|
||||
## 3.3 Add a dedicated properties-pane section
|
||||
|
||||
Extend `IssueProperties` to accept:
|
||||
|
||||
- `childIssues: Issue[]`
|
||||
- `onCreateSubissue: () => void`
|
||||
|
||||
Render a new `Sub-issues` section near `Blocked by` / `Blocking`:
|
||||
|
||||
- if children exist, show compact links or pills to the existing sub-issues
|
||||
- always show an `Add sub-issue` button
|
||||
|
||||
This keeps the child issue affordance visible in the property area without requiring a generic parent selector.
|
||||
|
||||
## 3.4 Update the sub-issues tab layout
|
||||
|
||||
Refactor the `Sub-issues` tab in `IssueDetail` to render:
|
||||
|
||||
- a small header row with child count
|
||||
- an `Add sub-issue` button
|
||||
- the existing empty state or child issue list beneath it
|
||||
|
||||
This satisfies the requirement that the action is visible whether or not sub-issues already exist.
|
||||
|
||||
## 3.5 Add sub-issue mode to the new-issue dialog
|
||||
|
||||
Update `ui/src/components/NewIssueDialog.tsx` so that when `newIssueDefaults.parentId` is present:
|
||||
|
||||
- the dialog submits `parentId`
|
||||
- the header/button copy can switch to `New sub-issue` / `Create sub-issue`
|
||||
- a compact parent context row is shown, for example `Parent: PAP-1150 add the ability...`
|
||||
|
||||
Important constraint:
|
||||
|
||||
- this parent context row should only render when the dialog was opened with sub-issue defaults
|
||||
- opening the dialog from global create actions should remain unchanged and should not expose a generic parent control
|
||||
|
||||
That preserves the requested UX boundary: sub-issue creation is intentional, not part of the default create-issue surface.
|
||||
|
||||
## 3.6 Query invalidation and refresh behavior
|
||||
|
||||
No new data-fetch path is needed.
|
||||
|
||||
The existing create success handler in `NewIssueDialog` already invalidates:
|
||||
|
||||
- `queryKeys.issues.list(companyId)`
|
||||
- issue-related list badges
|
||||
|
||||
That should be enough for the parent `IssueDetail` view to recompute `childIssues` after creation because it derives children from the company issue list query.
|
||||
|
||||
If the detail page ever moves away from the full company issue list, this should be revisited, but it does not require additional work for the current architecture.
|
||||
|
||||
## 4. Implementation Order
|
||||
|
||||
1. Extend `DialogContext` issue defaults with sub-issue fields.
|
||||
2. Wire `IssueDetail` to open the dialog in sub-issue mode from the `Sub-issues` tab.
|
||||
3. Extend `IssueProperties` to display child issues and the `Add sub-issue` action.
|
||||
4. Update `NewIssueDialog` submission and header UI for sub-issue mode.
|
||||
5. Add UI tests for the new entry points and payload behavior.
|
||||
|
||||
## 5. Testing Plan
|
||||
|
||||
Add focused UI tests covering:
|
||||
|
||||
1. `IssueDetail`
|
||||
- `Sub-issues` tab shows `Add sub-issue` when there are zero children
|
||||
- clicking the action opens the dialog with parent defaults
|
||||
|
||||
2. `IssueProperties`
|
||||
- the properties pane renders the sub-issue section
|
||||
- `Add sub-issue` remains available when there are no child issues
|
||||
|
||||
3. `NewIssueDialog`
|
||||
- when opened with `parentId`, submit payload includes `parentId`
|
||||
- sub-issue-specific copy appears only in that mode
|
||||
- when opened normally, no parent UI is shown and payload is unchanged
|
||||
|
||||
No backend test expansion is required unless implementation discovers a client/server contract gap.
|
||||
|
||||
## 6. Risks And Decisions
|
||||
|
||||
### 6.1 Parent metadata source
|
||||
|
||||
Decision: pass parent label metadata through dialog defaults instead of making `NewIssueDialog` fetch the parent issue.
|
||||
|
||||
Reason:
|
||||
|
||||
- less coupling
|
||||
- no loading state inside the dialog
|
||||
- simpler tests
|
||||
|
||||
### 6.2 Project inheritance
|
||||
|
||||
Decision: prefill `projectId` from the parent issue, but keep it editable.
|
||||
|
||||
Reason:
|
||||
|
||||
- matches expected operator behavior
|
||||
- avoids silently moving a sub-issue outside the current project by default
|
||||
|
||||
### 6.3 Keep parent selection out of the generic dialog
|
||||
|
||||
Decision: do not add a freeform parent picker in this change.
|
||||
|
||||
Reason:
|
||||
|
||||
- the request explicitly wants sub-issue controls only when the flow starts from a sub-issue action
|
||||
- this keeps the default issue creation surface simpler
|
||||
|
||||
## 7. Success Criteria
|
||||
|
||||
This plan is complete when an operator can:
|
||||
|
||||
1. open any issue detail page
|
||||
2. click `Add sub-issue` from either the `Sub-issues` tab or the properties pane
|
||||
3. land in the existing new-issue dialog with clear parent context
|
||||
4. create the child issue and see it appear under the parent without a page reload
|
||||
287
docs/adapters/adapter-ui-parser.md
Normal file
287
docs/adapters/adapter-ui-parser.md
Normal file
@@ -0,0 +1,287 @@
|
||||
---
|
||||
title: Adapter UI Parser Contract
|
||||
summary: Ship a custom run-log parser so the Paperclip UI renders your adapter's output correctly
|
||||
---
|
||||
|
||||
When Paperclip runs an agent, stdout is streamed to the UI in real time. The UI needs a **parser** to convert raw stdout lines into structured transcript entries (tool calls, tool results, assistant messages, system events). Without a custom parser, the UI falls back to the generic `process` parser, which treats every non-system line as `assistant` output — tool commands leak as plain text, durations are lost, and errors are invisible.
|
||||
|
||||
## The Problem
|
||||
|
||||
Most agent CLIs emit structured stdout with tool calls, progress indicators, and multi-line output. For example:
|
||||
|
||||
```
|
||||
[hermes] Session resumed: abc123
|
||||
┊ 💬 Thinking about how to approach this...
|
||||
┊ $ ls /home/user/project
|
||||
┊ [done] $ ls /home/user/project — /src /README.md 0.3s
|
||||
┊ 💬 I see the project structure. Let me read the README.
|
||||
┊ read /home/user/project/README.md
|
||||
┊ [done] read — Project Overview: A CLI tool for... 1.2s
|
||||
The project is a CLI tool. Here's what I found:
|
||||
- It uses TypeScript
|
||||
- Tests are in /tests
|
||||
```
|
||||
|
||||
Without a parser, the UI shows all of this as raw `assistant` text — the tool calls and results are indistinguishable from the agent's actual response.
|
||||
|
||||
With a parser, the UI renders:
|
||||
|
||||
- `Thinking about how to approach this...` as a collapsible thinking block
|
||||
- `$ ls /home/user/project` as a tool call card (collapsed)
|
||||
- the `[done] $ ls ...` line as a tool result card showing the `0.3s` duration
|
||||
- `The project is a CLI tool...` as the assistant's response
|
||||
|
||||
## How It Works
|
||||
|
||||
```
|
||||
┌──────────────────┐ package.json ┌──────────────────┐
|
||||
│ Adapter Package │─── exports["./ui-parser"] ──→│ dist/ui-parser.js │
|
||||
│ (npm / local) │ │ (zero imports) │
|
||||
└──────────────────┘ └────────┬─────────┘
|
||||
│ plugin-loader reads at startup
|
||||
▼
|
||||
┌──────────────────┐ GET /api/:type/ui-parser.js ┌──────────────────┐
|
||||
│ Paperclip Server │◄────────────────────────────────│ uiParserCache │
|
||||
│ (in-memory) │ └──────────────────┘
|
||||
└────────┬─────────┘
|
||||
│ serves JS to browser
|
||||
▼
|
||||
┌──────────────────┐ fetch() + eval ┌──────────────────┐
|
||||
│ Paperclip UI │─────────────────────→│ parseStdoutLine │
|
||||
│ (dynamic loader) │ registers parser │ (per-adapter) │
|
||||
└──────────────────┘ └──────────────────┘
|
||||
```
|
||||
|
||||
1. **Build time** — You compile `src/ui-parser.ts` to `dist/ui-parser.js` (zero runtime imports)
|
||||
2. **Server startup** — Plugin loader reads the file and caches it in memory
|
||||
3. **UI load** — When the user opens a run, the UI fetches the parser from `GET /api/:type/ui-parser.js`
|
||||
4. **Runtime** — The fetched module is eval'd and registered. All subsequent lines use the real parser
|
||||
|
||||
## Contract: package.json
|
||||
|
||||
### 1. `paperclip.adapterUiParser` — contract version
|
||||
|
||||
```json
|
||||
{
|
||||
"paperclip": {
|
||||
"adapterUiParser": "1.0.0"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The Paperclip host checks this field. If the major version is unsupported, the host logs a warning and falls back to the generic parser instead of executing potentially incompatible code.
|
||||
|
||||
| Host expects | Adapter declares | Result |
|
||||
|---|---|---|
|
||||
| `1.x` | `1.0.0` | Parser loaded |
|
||||
| `1.x` | `2.0.0` | Warning logged, generic parser used |
|
||||
| `1.x` | (missing) | Parser loaded (grace period — future versions may require it) |
|
||||
|
||||
### 2. `exports["./ui-parser"]` — file path
|
||||
|
||||
```json
|
||||
{
|
||||
"exports": {
|
||||
".": "./dist/server/index.js",
|
||||
"./ui-parser": "./dist/ui-parser.js"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Contract: Module Exports
|
||||
|
||||
Your `dist/ui-parser.js` must export **at least one** of:
|
||||
|
||||
### `parseStdoutLine(line: string, ts: string): TranscriptEntry[]`
|
||||
|
||||
Static parser. Called for each line of adapter stdout.
|
||||
|
||||
```ts
|
||||
export function parseStdoutLine(line: string, ts: string): TranscriptEntry[] {
|
||||
if (line.startsWith("[my-agent]")) {
|
||||
return [{ kind: "system", ts, text: line }];
|
||||
}
|
||||
return [{ kind: "assistant", ts, text: line }];
|
||||
}
|
||||
```
|
||||
|
||||
### `createStdoutParser(): { parseLine(line, ts): TranscriptEntry[]; reset(): void }`
|
||||
|
||||
Stateful parser factory. Preferred if your parser needs to track multi-line continuation, command nesting, or other cross-call state.
|
||||
|
||||
```ts
|
||||
let counter = 0;
|
||||
|
||||
export function createStdoutParser() {
|
||||
let suppressContinuation = false;
|
||||
|
||||
function parseLine(line: string, ts: string): TranscriptEntry[] {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed) return [];
|
||||
|
||||
if (suppressContinuation) {
|
||||
if (/^[\d.]+s$/.test(trimmed)) {
|
||||
suppressContinuation = false;
|
||||
return [];
|
||||
}
|
||||
return []; // swallow continuation lines
|
||||
}
|
||||
|
||||
if (trimmed.startsWith("[tool-done]")) {
|
||||
const id = `tool-${++counter}`;
|
||||
suppressContinuation = true;
|
||||
return [
|
||||
{ kind: "tool_call", ts, name: "shell", input: {}, toolUseId: id },
|
||||
{ kind: "tool_result", ts, toolUseId: id, content: trimmed, isError: false },
|
||||
];
|
||||
}
|
||||
|
||||
return [{ kind: "assistant", ts, text: trimmed }];
|
||||
}
|
||||
|
||||
function reset() {
|
||||
suppressContinuation = false;
|
||||
}
|
||||
|
||||
return { parseLine, reset };
|
||||
}
|
||||
```
|
||||
|
||||
If both are exported, `createStdoutParser` takes priority.
|
||||
|
||||
## Contract: TranscriptEntry
|
||||
|
||||
Each entry must match one of these discriminated union shapes:
|
||||
|
||||
```ts
|
||||
// Assistant message
|
||||
{ kind: "assistant"; ts: string; text: string; delta?: boolean }
|
||||
|
||||
// Thinking / reasoning
|
||||
{ kind: "thinking"; ts: string; text: string; delta?: boolean }
|
||||
|
||||
// User message (rare — usually from agent-initiated prompts)
|
||||
{ kind: "user"; ts: string; text: string }
|
||||
|
||||
// Tool invocation
|
||||
{ kind: "tool_call"; ts: string; name: string; input: unknown; toolUseId?: string }
|
||||
|
||||
// Tool result
|
||||
{ kind: "tool_result"; ts: string; toolUseId: string; content: string; isError: boolean }
|
||||
|
||||
// System / adapter messages
|
||||
{ kind: "system"; ts: string; text: string }
|
||||
|
||||
// Stderr / errors
|
||||
{ kind: "stderr"; ts: string; text: string }
|
||||
|
||||
// Raw stdout (fallback)
|
||||
{ kind: "stdout"; ts: string; text: string }
|
||||
```
|
||||
|
||||
### Linking tool calls to results
|
||||
|
||||
Use `toolUseId` to pair `tool_call` and `tool_result` entries. The UI renders them as collapsible cards.
|
||||
|
||||
```ts
|
||||
const id = `my-tool-${++counter}`;
|
||||
return [
|
||||
{ kind: "tool_call", ts, name: "read", input: { path: "/src/main.ts" }, toolUseId: id },
|
||||
{ kind: "tool_result", ts, toolUseId: id, content: "const main = () => {...}", isError: false },
|
||||
];
|
||||
```
|
||||
|
||||
### Error handling
|
||||
|
||||
Set `isError: true` on tool results to show a red indicator:
|
||||
|
||||
```ts
|
||||
{ kind: "tool_result", ts, toolUseId: id, content: "ENOENT: no such file", isError: true }
|
||||
```
|
||||
|
||||
## Constraints
|
||||
|
||||
1. **Zero runtime imports.** Your file is loaded via `URL.createObjectURL` + dynamic `import()` in the browser. No `import`, no `require`, no top-level `await`.
|
||||
|
||||
2. **No DOM / Node.js APIs.** Runs in a browser sandbox. Use only vanilla JS (ES2020+).
|
||||
|
||||
3. **No side effects.** Module-level code must not modify globals, access `window`, or perform I/O. Only declare and export functions.
|
||||
|
||||
4. **Deterministic.** Given the same `(line, ts)` input, the same output must be produced. This matters for log replay.
|
||||
|
||||
5. **Error-tolerant.** Never throw. Return `[{ kind: "stdout", ts, text: line }]` for any line you can't parse, rather than crashing the transcript.
|
||||
|
||||
6. **File size.** Keep under 50 KB. This is served per-request and eval'd in the browser.
|
||||
|
||||
## Lifecycle
|
||||
|
||||
| Event | What happens |
|
||||
|---|---|
|
||||
| Server starts | Plugin loader reads `exports["./ui-parser"]`, reads the file, caches in memory |
|
||||
| UI opens run | `getUIAdapter(type)` called. If no built-in parser, kicks off async `fetch(/api/:type/ui-parser.js)` |
|
||||
| First lines arrive | Generic process parser handles them immediately (no blocking). Dynamic parser loads in background |
|
||||
| Parser loads | `registerUIAdapter()` called. All subsequent line parsing uses the real parser |
|
||||
| Parser fails (404, eval error) | Warning logged to console. Generic parser continues. Failed type is cached — no retries |
|
||||
| Server restart | In-memory cache is repopulated from adapter packages |
|
||||
|
||||
## Error Behavior
|
||||
|
||||
| Failure | What happens |
|
||||
|---|---|
|
||||
| Module syntax error (import fails) | Caught, logged, falls back to generic parser. No retries. |
|
||||
| Returns wrong shape | Individual entries with missing fields are silently ignored by the transcript builder. |
|
||||
| Throws at runtime | Caught per-line. That line falls back to generic. Parser stays registered for future lines. |
|
||||
| 404 (no ui-parser export) | Type added to failed-loads set. Generic parser from first call onward. |
|
||||
| Contract version mismatch | Server logs warning, skips loading. Generic parser used. |
|
||||
|
||||
## Building
|
||||
|
||||
```sh
|
||||
# Compile TypeScript to JavaScript
|
||||
tsc src/ui-parser.ts --outDir dist --target ES2020 --module ES2020 --declaration false
|
||||
```
|
||||
|
||||
Your `tsconfig.json` can handle this automatically — just make sure `ui-parser.ts` is included in the build and outputs to `dist/ui-parser.js`.
|
||||
|
||||
## Testing
|
||||
|
||||
Test your parser locally by running it against sample stdout:
|
||||
|
||||
```ts
|
||||
// test-parser.ts
|
||||
import { createStdoutParser } from "./dist/ui-parser.js";
|
||||
|
||||
const parser = createStdoutParser();
|
||||
const sampleLines = [
|
||||
"[my-agent] Starting session abc123",
|
||||
"Thinking about the task...",
|
||||
"$ ls /home/user/project",
|
||||
"[done] $ ls — /src /README.md 0.3s",
|
||||
"I'll read the README now.",
|
||||
"Error: file not found",
|
||||
];
|
||||
|
||||
for (const line of sampleLines) {
|
||||
const entries = parser.parseLine(line, new Date().toISOString());
|
||||
for (const entry of entries) {
|
||||
console.log(` ${entry.kind}:`, entry.text ?? entry.name ?? entry.content);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Run with: `npx tsx test-parser.ts`
|
||||
|
||||
## Skipping the UI Parser
|
||||
|
||||
If your adapter's stdout is simple (no tool markers, no special formatting), you can skip the UI parser entirely. The generic `process` parser will handle it — every non-system line becomes `assistant` output. This is fine for:
|
||||
|
||||
- Agents that output plain text responses
|
||||
- Custom scripts that just print results
|
||||
- Simple CLIs without structured output
|
||||
|
||||
To skip it, simply don't include `exports["./ui-parser"]` in your `package.json`.
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [External Adapters](/adapters/external-adapters) — full guide to building adapter packages
|
||||
- [Creating an Adapter](/adapters/creating-an-adapter) — adapter internals and built-in integration
|
||||
@@ -20,8 +20,8 @@ The `claude_local` adapter runs Anthropic's Claude Code CLI locally. It supports
|
||||
| `env` | object | No | Environment variables (supports secret refs) |
|
||||
| `timeoutSec` | number | No | Process timeout (0 = no timeout) |
|
||||
| `graceSec` | number | No | Grace period before force-kill |
|
||||
| `maxTurnsPerRun` | number | No | Max agentic turns per heartbeat (defaults to `1000`) |
|
||||
| `dangerouslySkipPermissions` | boolean | No | Skip permission prompts (dev only) |
|
||||
| `maxTurnsPerRun` | number | No | Max agentic turns per heartbeat (defaults to `300`) |
|
||||
| `dangerouslySkipPermissions` | boolean | No | Skip permission prompts (default: `true`); required for headless runs where interactive approval is impossible |
|
||||
|
||||
## Prompt Templates
|
||||
|
||||
|
||||
@@ -9,23 +9,40 @@ Build a custom adapter to connect Paperclip to any agent runtime.
|
||||
If you're using Claude Code, the `.agents/skills/create-agent-adapter` skill can guide you through the full adapter creation process interactively. Just ask Claude to create a new adapter and it will walk you through each step.
|
||||
</Tip>
|
||||
|
||||
## Two Paths
|
||||
|
||||
| | Built-in | External Plugin |
|
||||
|---|---|---|
|
||||
| Source | Inside `paperclip-fork` | Separate npm package |
|
||||
| Distribution | Ships with Paperclip | Independent npm publish |
|
||||
| UI parser | Static import | Dynamic load from API |
|
||||
| Registration | Edit 3 registries | Auto-loaded at startup |
|
||||
| Best for | Core adapters, contributors | Third-party adapters, internal tools |
|
||||
|
||||
For most cases, **build an external adapter plugin**. It's cleaner, independently versioned, and doesn't require modifying Paperclip's source. See [External Adapters](/adapters/external-adapters) for the full guide.
|
||||
|
||||
The rest of this page covers the shared internals that both paths use.
|
||||
|
||||
## Package Structure
|
||||
|
||||
```
|
||||
packages/adapters/<name>/
|
||||
packages/adapters/<name>/ # built-in
|
||||
── or ──
|
||||
my-adapter/ # external plugin
|
||||
package.json
|
||||
tsconfig.json
|
||||
src/
|
||||
index.ts # Shared metadata
|
||||
server/
|
||||
index.ts # Server exports
|
||||
index.ts # Server exports (createServerAdapter)
|
||||
execute.ts # Core execution logic
|
||||
parse.ts # Output parsing
|
||||
test.ts # Environment diagnostics
|
||||
ui/
|
||||
index.ts # UI exports
|
||||
parse-stdout.ts # Transcript parser
|
||||
index.ts # UI exports (built-in only)
|
||||
parse-stdout.ts # Transcript parser (built-in only)
|
||||
build-config.ts # Config builder
|
||||
ui-parser.ts # Self-contained UI parser (external — see [UI Parser Contract](/adapters/adapter-ui-parser))
|
||||
cli/
|
||||
index.ts # CLI exports
|
||||
format-event.ts # Terminal formatter
|
||||
@@ -46,6 +63,9 @@ Use when: ...
|
||||
Don't use when: ...
|
||||
Core fields: ...
|
||||
`;
|
||||
|
||||
// Required for external adapters (plugin-loader convention)
|
||||
export { createServerAdapter } from "./server/index.js";
|
||||
```
|
||||
|
||||
## Step 2: Server Execute
|
||||
@@ -54,7 +74,7 @@ Core fields: ...
|
||||
|
||||
Key responsibilities:
|
||||
|
||||
1. Read config using safe helpers (`asString`, `asNumber`, etc.)
|
||||
1. Read config using safe helpers (`asString`, `asNumber`, etc.) from `@paperclipai/adapter-utils/server-utils`
|
||||
2. Build environment with `buildPaperclipEnv(agent)` plus context vars
|
||||
3. Resolve session state from `runtime.sessionParams`
|
||||
4. Render prompt with `renderTemplate(template, data)`
|
||||
@@ -62,27 +82,102 @@ Key responsibilities:
|
||||
6. Parse output for usage, costs, session state, errors
|
||||
7. Handle unknown session errors (retry fresh, set `clearSession: true`)
|
||||
|
||||
### Available Helpers
|
||||
|
||||
| Helper | Source | Purpose |
|
||||
|--------|--------|---------|
|
||||
| `runChildProcess(cmd, opts)` | `@paperclipai/adapter-utils/server-utils` | Spawn with timeout, grace, streaming |
|
||||
| `buildPaperclipEnv(agent)` | `@paperclipai/adapter-utils/server-utils` | Inject `PAPERCLIP_*` env vars |
|
||||
| `renderTemplate(tpl, data)` | `@paperclipai/adapter-utils/server-utils` | `{{variable}}` substitution |
|
||||
| `asString(v)` | `@paperclipai/adapter-utils` | Safe config value extraction |
|
||||
| `asNumber(v)` | `@paperclipai/adapter-utils` | Safe number extraction |
|
||||
|
||||
### AdapterExecutionContext
|
||||
|
||||
```ts
|
||||
interface AdapterExecutionContext {
|
||||
runId: string;
|
||||
agent: { id: string; companyId: string; name: string; adapterConfig: unknown };
|
||||
runtime: { sessionId: string | null; sessionParams: Record<string, unknown> | null };
|
||||
config: Record<string, unknown>; // agent's adapterConfig
|
||||
context: Record<string, unknown>; // task, wake reason, etc.
|
||||
onLog: (stream: "stdout" | "stderr", chunk: string) => Promise<void>;
|
||||
onMeta?: (meta: AdapterInvocationMeta) => Promise<void>;
|
||||
onSpawn?: (meta: { pid: number; startedAt: string }) => Promise<void>;
|
||||
}
|
||||
```
|
||||
|
||||
### AdapterExecutionResult
|
||||
|
||||
```ts
|
||||
interface AdapterExecutionResult {
|
||||
exitCode: number | null;
|
||||
signal: string | null;
|
||||
timedOut: boolean;
|
||||
errorMessage?: string | null;
|
||||
usage?: { inputTokens: number; outputTokens: number };
|
||||
sessionParams?: Record<string, unknown> | null; // persist across heartbeats
|
||||
sessionDisplayId?: string | null;
|
||||
provider?: string | null;
|
||||
model?: string | null;
|
||||
costUsd?: number | null;
|
||||
clearSession?: boolean; // set true to force fresh session on next wake
|
||||
}
|
||||
```
|
||||
|
||||
## Step 3: Environment Test
|
||||
|
||||
`src/server/test.ts` validates the adapter config before running.
|
||||
|
||||
Return structured diagnostics:
|
||||
|
||||
- `error` for invalid/unusable setup
|
||||
- `warn` for non-blocking issues
|
||||
- `info` for successful checks
|
||||
| Level | Meaning | Effect |
|
||||
|-------|---------|--------|
|
||||
| `error` | Invalid or unusable setup | Blocks execution |
|
||||
| `warn` | Non-blocking issue | Shown with yellow indicator |
|
||||
| `info` | Successful check | Shown in test results |
|
||||
|
||||
## Step 4: UI Module
|
||||
```ts
|
||||
export async function testEnvironment(
|
||||
ctx: AdapterEnvironmentTestContext,
|
||||
): Promise<AdapterEnvironmentTestResult> {
|
||||
return {
|
||||
adapterType: ctx.adapterType,
|
||||
status: "pass", // "pass" | "warn" | "fail"
|
||||
checks: [
|
||||
{ level: "info", message: "CLI v1.2.0 detected", code: "cli_detected" },
|
||||
{ level: "warn", message: "No API key found", hint: "Set ANTHROPIC_API_KEY", code: "no_key" },
|
||||
],
|
||||
testedAt: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Step 4: UI Module (Built-in Only)
|
||||
|
||||
For built-in adapters registered in Paperclip's source:
|
||||
|
||||
- `parse-stdout.ts` — converts stdout lines to `TranscriptEntry[]` for the run viewer
|
||||
- `build-config.ts` — converts form values to `adapterConfig` JSON
|
||||
- Config fields React component in `ui/src/adapters/<name>/config-fields.tsx`
|
||||
|
||||
For external adapters, use a self-contained `ui-parser.ts` instead. See the [UI Parser Contract](/adapters/adapter-ui-parser).
|
||||
|
||||
## Step 5: CLI Module
|
||||
|
||||
`format-event.ts` — pretty-prints stdout for `paperclipai run --watch` using `picocolors`.
|
||||
|
||||
## Step 6: Register
|
||||
```ts
|
||||
export function formatStdoutEvent(line: string, debug: boolean): void {
|
||||
if (line.startsWith("[tool-done]")) {
|
||||
console.log(pc.green(` ✓ ${line}`));
|
||||
} else {
|
||||
console.log(` ${line}`);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Step 6: Register (Built-in Only)
|
||||
|
||||
Add the adapter to all three registries:
|
||||
|
||||
@@ -90,6 +185,24 @@ Add the adapter to all three registries:
|
||||
2. `ui/src/adapters/registry.ts`
|
||||
3. `cli/src/adapters/registry.ts`
|
||||
|
||||
For external adapters, registration is automatic — the plugin loader handles it.
|
||||
|
||||
## Session Persistence
|
||||
|
||||
If your agent runtime supports conversation continuity across heartbeats:
|
||||
|
||||
1. Return `sessionParams` from `execute()` (e.g., `{ sessionId: "abc123" }`)
|
||||
2. Read `runtime.sessionParams` on the next wake to resume
|
||||
3. Optionally implement a `sessionCodec` for validation and display
|
||||
|
||||
```ts
|
||||
export const sessionCodec: AdapterSessionCodec = {
|
||||
deserialize(raw) { /* validate raw session data */ },
|
||||
serialize(params) { /* serialize for storage */ },
|
||||
getDisplayId(params) { /* human-readable session label */ },
|
||||
};
|
||||
```
|
||||
|
||||
## Skills Injection
|
||||
|
||||
Make Paperclip skills discoverable to your agent runtime without writing to the agent's working directory:
|
||||
@@ -105,3 +218,10 @@ Make Paperclip skills discoverable to your agent runtime without writing to the
|
||||
- Inject secrets via environment variables, not prompts
|
||||
- Configure network access controls if the runtime supports them
|
||||
- Always enforce timeout and grace period
|
||||
- The UI parser module runs in a browser sandbox — zero runtime imports, no side effects
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [External Adapters](/adapters/external-adapters) — build a standalone adapter plugin
|
||||
- [UI Parser Contract](/adapters/adapter-ui-parser) — ship a custom run-log parser
|
||||
- [How Agents Work](/guides/agent-developer/how-agents-work) — the heartbeat lifecycle
|
||||
|
||||
392
docs/adapters/external-adapters.md
Normal file
392
docs/adapters/external-adapters.md
Normal file
@@ -0,0 +1,392 @@
|
||||
---
|
||||
title: External Adapters
|
||||
summary: Build, package, and distribute adapters as plugins without modifying Paperclip source
|
||||
---
|
||||
|
||||
Paperclip supports external adapter plugins that can be installed from npm packages or local directories. External adapters work exactly like built-in adapters — they execute agents, parse output, and render transcripts — but they live in their own package and don't require changes to Paperclip's source code.
|
||||
|
||||
## Built-in vs External
|
||||
|
||||
| | Built-in | External |
|
||||
|---|---|---|
|
||||
| Source location | Inside `paperclip-fork/packages/adapters/` | Separate npm package or local directory |
|
||||
| Registration | Hardcoded in three registries | Loaded at startup via plugin system |
|
||||
| UI parser | Static import at build time | Dynamically loaded from API (see [UI Parser](/adapters/adapter-ui-parser)) |
|
||||
| Distribution | Ships with Paperclip | Published to npm or linked via `file:` |
|
||||
| Updates | Requires Paperclip release | Independent versioning |
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Minimal Package Structure
|
||||
|
||||
```
|
||||
my-adapter/
|
||||
package.json
|
||||
tsconfig.json
|
||||
src/
|
||||
index.ts # Shared metadata (type, label, models)
|
||||
server/
|
||||
index.ts # createServerAdapter() factory
|
||||
execute.ts # Core execution logic
|
||||
parse.ts # Output parsing
|
||||
test.ts # Environment diagnostics
|
||||
ui-parser.ts # Self-contained UI transcript parser
|
||||
```
|
||||
|
||||
### package.json
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "my-paperclip-adapter",
|
||||
"version": "1.0.0",
|
||||
"type": "module",
|
||||
"license": "MIT",
|
||||
"paperclip": {
|
||||
"adapterUiParser": "1.0.0"
|
||||
},
|
||||
"exports": {
|
||||
".": "./dist/index.js",
|
||||
"./server": "./dist/server/index.js",
|
||||
"./ui-parser": "./dist/ui-parser.js"
|
||||
},
|
||||
"files": ["dist"],
|
||||
"scripts": {
|
||||
"build": "tsc"
|
||||
},
|
||||
"dependencies": {
|
||||
"@paperclipai/adapter-utils": "^2026.325.0",
|
||||
"picocolors": "^1.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^22.0.0",
|
||||
"typescript": "^5.7.0"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Key fields:
|
||||
|
||||
| Field | Purpose |
|
||||
|-------|---------|
|
||||
| `exports["."]` | Entry point — must export `createServerAdapter` |
|
||||
| `exports["./ui-parser"]` | Self-contained UI parser module (optional but recommended) |
|
||||
| `paperclip.adapterUiParser` | Contract version for the UI parser (`"1.0.0"`) |
|
||||
| `files` | Limits what gets published — only `dist/` |
|
||||
|
||||
### tsconfig.json
|
||||
|
||||
```json
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2022",
|
||||
"module": "Node16",
|
||||
"moduleResolution": "Node16",
|
||||
"outDir": "dist",
|
||||
"rootDir": "src",
|
||||
"declaration": true,
|
||||
"strict": true,
|
||||
"esModuleInterop": true,
|
||||
"skipLibCheck": true
|
||||
},
|
||||
"include": ["src"]
|
||||
}
|
||||
```
|
||||
|
||||
## Server Module
|
||||
|
||||
The plugin loader calls `createServerAdapter()` from your package root. This function must return a `ServerAdapterModule`.
|
||||
|
||||
### src/index.ts
|
||||
|
||||
```ts
|
||||
export const type = "my_adapter"; // snake_case, globally unique
|
||||
export const label = "My Agent (local)";
|
||||
|
||||
export const models = [
|
||||
{ id: "model-a", label: "Model A" },
|
||||
];
|
||||
|
||||
export const agentConfigurationDoc = `# my_adapter configuration
|
||||
Use when: ...
|
||||
Don't use when: ...
|
||||
`;
|
||||
|
||||
// Required by plugin-loader convention
|
||||
export { createServerAdapter } from "./server/index.js";
|
||||
```
|
||||
|
||||
### src/server/index.ts
|
||||
|
||||
```ts
|
||||
import type { ServerAdapterModule } from "@paperclipai/adapter-utils";
|
||||
import { type, models, agentConfigurationDoc } from "../index.js";
|
||||
import { execute } from "./execute.js";
|
||||
import { testEnvironment } from "./test.js";
|
||||
|
||||
export function createServerAdapter(): ServerAdapterModule {
|
||||
return {
|
||||
type,
|
||||
execute,
|
||||
testEnvironment,
|
||||
models,
|
||||
agentConfigurationDoc,
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### src/server/execute.ts
|
||||
|
||||
The core execution function. Receives an `AdapterExecutionContext` and returns an `AdapterExecutionResult`.
|
||||
|
||||
```ts
|
||||
import type {
|
||||
AdapterExecutionContext,
|
||||
AdapterExecutionResult,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
|
||||
import {
|
||||
runChildProcess,
|
||||
buildPaperclipEnv,
|
||||
renderTemplate,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
|
||||
export async function execute(
|
||||
ctx: AdapterExecutionContext,
|
||||
): Promise<AdapterExecutionResult> {
|
||||
const { config, agent, runtime, context, onLog, onMeta } = ctx;
|
||||
|
||||
// 1. Read config values (use asString/asNumber from adapter-utils for stricter extraction)
|
||||
const cwd = String(config.cwd ?? "/tmp");
|
||||
const command = String(config.command ?? "my-agent");
|
||||
const timeoutSec = Number(config.timeoutSec ?? 300);
|
||||
|
||||
// 2. Build environment with Paperclip vars injected
|
||||
const env = buildPaperclipEnv(agent);
|
||||
|
||||
// 3. Render prompt template
|
||||
const prompt = config.promptTemplate
|
||||
? renderTemplate(String(config.promptTemplate), {
|
||||
agentId: agent.id,
|
||||
agentName: agent.name,
|
||||
companyId: agent.companyId,
|
||||
runId: ctx.runId,
|
||||
taskId: context.taskId ?? "",
|
||||
taskTitle: context.taskTitle ?? "",
|
||||
})
|
||||
: "Continue your work.";
|
||||
|
||||
// 4. Spawn process
|
||||
const result = await runChildProcess(command, {
|
||||
args: [prompt],
|
||||
cwd,
|
||||
env,
|
||||
timeout: timeoutSec * 1000,
|
||||
graceMs: 10_000,
|
||||
onStdout: (chunk) => onLog("stdout", chunk),
|
||||
onStderr: (chunk) => onLog("stderr", chunk),
|
||||
});
|
||||
|
||||
// 5. Return structured result
|
||||
return {
|
||||
exitCode: result.exitCode,
|
||||
timedOut: result.timedOut,
|
||||
// Include session state for persistence
|
||||
sessionParams: { /* ... */ },
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
#### Available Helpers from `@paperclipai/adapter-utils`
|
||||
|
||||
| Helper | Purpose |
|
||||
|--------|---------|
|
||||
| `runChildProcess(command, opts)` | Spawn a child process with timeout, grace period, and streaming callbacks |
|
||||
| `buildPaperclipEnv(agent)` | Inject `PAPERCLIP_*` environment variables |
|
||||
| `renderTemplate(template, data)` | `{{variable}}` substitution in prompt templates |
|
||||
| `asString(v)`, `asNumber(v)`, `asBoolean(v)` | Safe config value extraction |
|
||||
|
||||
### src/server/test.ts
|
||||
|
||||
Validates the adapter configuration before running. Returns structured diagnostics.
|
||||
|
||||
```ts
|
||||
import type {
|
||||
AdapterEnvironmentTestContext,
|
||||
AdapterEnvironmentTestResult,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
|
||||
export async function testEnvironment(
|
||||
ctx: AdapterEnvironmentTestContext,
|
||||
): Promise<AdapterEnvironmentTestResult> {
|
||||
const checks: AdapterEnvironmentTestResult["checks"] = [];
|
||||
|
||||
// Example: check CLI is installed
|
||||
checks.push({
|
||||
level: "info",
|
||||
message: "My Agent CLI v1.2.0 detected",
|
||||
code: "cli_detected",
|
||||
});
|
||||
|
||||
// Example: check working directory
|
||||
const cwd = String(ctx.config.cwd ?? "");
|
||||
if (!cwd.startsWith("/")) {
|
||||
checks.push({
|
||||
level: "error",
|
||||
message: `Working directory must be absolute: "${cwd}"`,
|
||||
hint: "Use /home/user/project or /workspace",
|
||||
code: "invalid_cwd",
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
adapterType: ctx.adapterType,
|
||||
status: checks.some(c => c.level === "error") ? "fail" : checks.some(c => c.level === "warn") ? "warn" : "pass",
|
||||
checks,
|
||||
testedAt: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Check levels:
|
||||
|
||||
| Level | Meaning | Effect |
|
||||
|-------|---------|--------|
|
||||
| `info` | Informational | Shown in test results |
|
||||
| `warn` | Non-blocking issue | Shown with yellow indicator |
|
||||
| `error` | Blocks execution | Prevents agent from running |
|
||||
|
||||
## Installation
|
||||
|
||||
### From npm
|
||||
|
||||
```sh
|
||||
# Via the Paperclip UI
|
||||
# Settings → Adapters → Install from npm → "my-paperclip-adapter"
|
||||
|
||||
# Or via API
|
||||
curl -X POST http://localhost:3102/api/adapters \
|
||||
-H "Authorization: Bearer <token>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"packageName": "my-paperclip-adapter"}'
|
||||
```
|
||||
|
||||
### From local directory
|
||||
|
||||
```sh
|
||||
curl -X POST http://localhost:3102/api/adapters \
|
||||
-H "Authorization: Bearer <token>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"localPath": "/home/user/my-adapter"}'
|
||||
```
|
||||
|
||||
Local adapters are symlinked into Paperclip's adapter directory. Changes to the source are picked up on server restart.
|
||||
|
||||
### Via adapter-plugins.json
|
||||
|
||||
For development, you can also edit `~/.paperclip/adapter-plugins.json` directly:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"packageName": "my-paperclip-adapter",
|
||||
"localPath": "/home/user/my-adapter",
|
||||
"type": "my_adapter",
|
||||
"installedAt": "2026-03-30T12:00:00.000Z"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Optional: Session Persistence
|
||||
|
||||
If your agent runtime supports sessions (conversation continuity across heartbeats), implement a session codec:
|
||||
|
||||
```ts
|
||||
import type { AdapterSessionCodec } from "@paperclipai/adapter-utils";
|
||||
|
||||
export const sessionCodec: AdapterSessionCodec = {
|
||||
deserialize(raw) {
|
||||
if (typeof raw !== "object" || raw === null) return null;
|
||||
const r = raw as Record<string, unknown>;
|
||||
return r.sessionId ? { sessionId: String(r.sessionId) } : null;
|
||||
},
|
||||
serialize(params) {
|
||||
return params?.sessionId ? { sessionId: String(params.sessionId) } : null;
|
||||
},
|
||||
getDisplayId(params) {
|
||||
return params?.sessionId ? String(params.sessionId) : null;
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
Include it in `createServerAdapter()`:
|
||||
|
||||
```ts
|
||||
return { type, execute, testEnvironment, sessionCodec, /* ... */ };
|
||||
```
|
||||
|
||||
## Optional: Skills Sync
|
||||
|
||||
If your agent runtime supports skills/plugins, implement `listSkills` and `syncSkills`:
|
||||
|
||||
```ts
|
||||
return {
|
||||
type,
|
||||
execute,
|
||||
testEnvironment,
|
||||
async listSkills(ctx) {
|
||||
return {
|
||||
adapterType: ctx.adapterType,
|
||||
supported: true,
|
||||
mode: "ephemeral",
|
||||
desiredSkills: [],
|
||||
entries: [],
|
||||
warnings: [],
|
||||
};
|
||||
},
|
||||
async syncSkills(ctx, desiredSkills) {
|
||||
// Install desired skills into the runtime
|
||||
return { /* same shape as listSkills */ };
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
## Optional: Model Detection
|
||||
|
||||
If your runtime has a local config file that specifies the default model:
|
||||
|
||||
```ts
|
||||
async function detectModel() {
|
||||
// Read ~/.my-agent/config.yaml or similar
|
||||
return {
|
||||
model: "anthropic/claude-sonnet-4",
|
||||
provider: "anthropic",
|
||||
source: "~/.my-agent/config.yaml",
|
||||
candidates: ["anthropic/claude-sonnet-4", "openai/gpt-4o"],
|
||||
};
|
||||
}
|
||||
|
||||
return { type, execute, testEnvironment, detectModel: () => detectModel() };
|
||||
```
|
||||
|
||||
## Publishing
|
||||
|
||||
```sh
|
||||
npm run build
|
||||
npm publish
|
||||
```
|
||||
|
||||
Other Paperclip users can then install your adapter by package name from the UI or API.
|
||||
|
||||
## Security
|
||||
|
||||
- Treat agent output as untrusted — parse defensively, never `eval()` agent output
|
||||
- Inject secrets via environment variables, not in prompts
|
||||
- Configure network access controls if the runtime supports them
|
||||
- Always enforce timeout and grace period — don't let agents run forever
|
||||
- The UI parser module runs in a browser sandbox — it must have zero runtime imports and no side effects
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [UI Parser Contract](/adapters/adapter-ui-parser) — add a custom run-log parser so the UI renders your adapter's output correctly
|
||||
- [Creating an Adapter](/adapters/creating-an-adapter) — full walkthrough of adapter internals
|
||||
- [How Agents Work](/guides/agent-developer/how-agents-work) — understand the heartbeat lifecycle your adapter serves
|
||||
@@ -22,43 +22,67 @@ When a heartbeat fires, Paperclip:
|
||||
| [Codex Local](/adapters/codex-local) | `codex_local` | Runs OpenAI Codex CLI locally |
|
||||
| [Gemini Local](/adapters/gemini-local) | `gemini_local` | Runs Gemini CLI locally (experimental — adapter package exists, not yet in stable type enum) |
|
||||
| OpenCode Local | `opencode_local` | Runs OpenCode CLI locally (multi-provider `provider/model`) |
|
||||
| Hermes Local | `hermes_local` | Runs Hermes CLI locally |
|
||||
| Cursor | `cursor` | Runs Cursor in background mode |
|
||||
| Pi Local | `pi_local` | Runs an embedded Pi agent locally |
|
||||
| Hermes Local | `hermes_local` | Runs Hermes CLI locally (`hermes-paperclip-adapter`) |
|
||||
| OpenClaw Gateway | `openclaw_gateway` | Connects to an OpenClaw gateway endpoint |
|
||||
| [Process](/adapters/process) | `process` | Executes arbitrary shell commands |
|
||||
| [HTTP](/adapters/http) | `http` | Sends webhooks to external agents |
|
||||
|
||||
### External (plugin) adapters
|
||||
|
||||
These adapters ship as standalone npm packages and are installed via the plugin system:
|
||||
|
||||
| Adapter | Package | Type Key | Description |
|
||||
|---------|---------|----------|-------------|
|
||||
| Droid Local | `@henkey/droid-paperclip-adapter` | `droid_local` | Runs Factory Droid locally |
|
||||
|
||||
## External Adapters
|
||||
|
||||
You can build and distribute adapters as standalone packages — no changes to Paperclip's source code required. External adapters are loaded at startup via the plugin system.
|
||||
|
||||
```sh
|
||||
# Install from npm via API
|
||||
curl -X POST http://localhost:3102/api/adapters \
|
||||
-d '{"packageName": "my-paperclip-adapter"}'
|
||||
|
||||
# Or link from a local directory
|
||||
curl -X POST http://localhost:3102/api/adapters \
|
||||
-d '{"localPath": "/home/user/my-adapter"}'
|
||||
```
|
||||
|
||||
See [External Adapters](/adapters/external-adapters) for the full guide.
|
||||
|
||||
## Adapter Architecture
|
||||
|
||||
Each adapter is a package with three modules:
|
||||
Each adapter is a package with modules consumed by three registries:
|
||||
|
||||
```
|
||||
packages/adapters/<name>/
|
||||
my-adapter/
|
||||
src/
|
||||
index.ts # Shared metadata (type, label, models)
|
||||
server/
|
||||
execute.ts # Core execution logic
|
||||
parse.ts # Output parsing
|
||||
test.ts # Environment diagnostics
|
||||
ui/
|
||||
parse-stdout.ts # Stdout -> transcript entries for run viewer
|
||||
build-config.ts # Form values -> adapterConfig JSON
|
||||
ui-parser.ts # Self-contained UI transcript parser (for external adapters)
|
||||
cli/
|
||||
format-event.ts # Terminal output for `paperclipai run --watch`
|
||||
```
|
||||
|
||||
Three registries consume these modules:
|
||||
|
||||
| Registry | What it does |
|
||||
|----------|-------------|
|
||||
| **Server** | Executes agents, captures results |
|
||||
| **UI** | Renders run transcripts, provides config forms |
|
||||
| **CLI** | Formats terminal output for live watching |
|
||||
| Registry | What it does | Source |
|
||||
|----------|-------------|--------|
|
||||
| **Server** | Executes agents, captures results | `createServerAdapter()` from package root |
|
||||
| **UI** | Renders run transcripts, provides config forms | `ui-parser.js` (dynamic) or static import (built-in) |
|
||||
| **CLI** | Formats terminal output for live watching | Static import |
|
||||
|
||||
## Choosing an Adapter
|
||||
|
||||
- **Need a coding agent?** Use `claude_local`, `codex_local`, `opencode_local`, or `hermes_local`
|
||||
- **Need a coding agent?** Use `claude_local`, `codex_local`, `opencode_local`, `hermes_local`, or install `droid_local` as an external plugin
|
||||
- **Need to run a script or command?** Use `process`
|
||||
- **Need to call an external service?** Use `http`
|
||||
- **Need something custom?** [Create your own adapter](/adapters/creating-an-adapter)
|
||||
- **Need something custom?** [Create your own adapter](/adapters/creating-an-adapter) or [build an external adapter plugin](/adapters/external-adapters)
|
||||
|
||||
## UI Parser Contract
|
||||
|
||||
External adapters can ship a self-contained UI parser that tells the Paperclip web UI how to render their stdout. Without it, the UI uses a generic shell parser. See the [UI Parser Contract](/adapters/adapter-ui-parser) for details.
|
||||
|
||||
@@ -37,14 +37,18 @@ Built-in adapters:
|
||||
- `claude_local`: runs your local `claude` CLI
|
||||
- `codex_local`: runs your local `codex` CLI
|
||||
- `opencode_local`: runs your local `opencode` CLI
|
||||
- `hermes_local`: runs your local `hermes` CLI
|
||||
- `cursor`: runs Cursor in background mode
|
||||
- `pi_local`: runs an embedded Pi agent locally
|
||||
- `hermes_local`: runs your local `hermes` CLI (`hermes-paperclip-adapter`)
|
||||
- `openclaw_gateway`: connects to an OpenClaw gateway endpoint
|
||||
- `process`: generic shell command adapter
|
||||
- `http`: calls an external HTTP endpoint
|
||||
|
||||
For local CLI adapters (`claude_local`, `codex_local`, `opencode_local`, `hermes_local`), Paperclip assumes the CLI is already installed and authenticated on the host machine.
|
||||
External plugin adapters (install via the adapter manager or API):
|
||||
|
||||
- `droid_local`: runs your local Factory Droid CLI (`@henkey/droid-paperclip-adapter`)
|
||||
|
||||
For local CLI adapters (`claude_local`, `codex_local`, `opencode_local`, `hermes_local`, `droid_local`), Paperclip assumes the CLI is already installed and authenticated on the host machine.
|
||||
|
||||
## 3.2 Runtime behavior
|
||||
|
||||
@@ -173,7 +177,7 @@ Start with least privilege where possible, and avoid exposing secrets in broad r
|
||||
|
||||
## 10. Minimal setup checklist
|
||||
|
||||
1. Choose adapter (e.g. `claude_local`, `codex_local`, `opencode_local`, `hermes_local`, `cursor`, or `openclaw_gateway`).
|
||||
1. Choose adapter (e.g. `claude_local`, `codex_local`, `opencode_local`, `hermes_local`, `cursor`, or `openclaw_gateway`). External plugins like `droid_local` are also available via the adapter manager.
|
||||
2. Set `cwd` to the target workspace (for local adapters).
|
||||
3. Optionally add a prompt template (`promptTemplate`) or use the managed instructions bundle.
|
||||
4. Configure heartbeat policy (timer and/or assignment wakeups).
|
||||
|
||||
@@ -73,7 +73,7 @@ POST /api/issues/{issueId}/checkout
|
||||
Headers: X-Paperclip-Run-Id: {runId}
|
||||
{
|
||||
"agentId": "{yourAgentId}",
|
||||
"expectedStatuses": ["todo", "backlog", "blocked"]
|
||||
"expectedStatuses": ["todo", "backlog", "blocked", "in_review"]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -98,6 +98,8 @@
|
||||
"adapters/codex-local",
|
||||
"adapters/process",
|
||||
"adapters/http",
|
||||
"adapters/external-adapters",
|
||||
"adapters/adapter-ui-parser",
|
||||
"adapters/creating-an-adapter"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ Each vote creates two local records:
|
||||
|
||||
All data lives in your local Paperclip database. Nothing leaves your machine unless you explicitly choose to share.
|
||||
|
||||
When a vote is marked for sharing, Paperclip also queues the trace bundle for background export through the Telemetry Backend. The app server never uploads raw feedback trace bundles directly to object storage.
|
||||
When a vote is marked for sharing, Paperclip immediately tries to upload the trace bundle through the Telemetry Backend. The upload is compressed in transit so full trace bundles stay under gateway size limits. If that immediate push fails, the trace is left in a retriable failed state for later flush attempts. The app server never uploads raw feedback trace bundles directly to object storage.
|
||||
|
||||
## Viewing your votes
|
||||
|
||||
@@ -148,6 +148,8 @@ Open any file in `traces/` to see:
|
||||
|
||||
Open `full-traces/<issue>-<trace>/bundle.json` to see the expanded export metadata, including capture notes, adapter type, integrity metadata, and the inventory of raw files written alongside it.
|
||||
|
||||
Each entry in `bundle.json.files[]` includes the actual captured file payload under `contents`, not just a pathname. For text artifacts this is stored as UTF-8 text; binary artifacts use base64 plus an `encoding` marker.
|
||||
|
||||
Built-in local adapters now export their native session artifacts more directly:
|
||||
|
||||
- `codex_local`: `adapter/codex/session.jsonl`
|
||||
@@ -168,19 +170,21 @@ Your preference is saved per-company. You can change it any time via the feedbac
|
||||
| Status | Meaning |
|
||||
|--------|---------|
|
||||
| `local_only` | Vote stored locally, not marked for sharing |
|
||||
| `pending` | Marked for sharing, waiting to be sent |
|
||||
| `pending` | Marked for sharing, saved locally, and waiting for the immediate upload attempt |
|
||||
| `sent` | Successfully transmitted |
|
||||
| `failed` | Transmission attempted but failed (will retry) |
|
||||
| `failed` | Transmission attempted but failed (for example the backend is unreachable or not configured); later flushes retry once a backend is available |
|
||||
|
||||
Your local database always retains the full vote and trace data regardless of sharing status.
|
||||
|
||||
## Remote sync
|
||||
|
||||
Votes you choose to share are queued as `pending` traces and flushed by the server's background worker to the Telemetry Backend. The Telemetry Backend validates the request, then persists the bundle into its configured object storage.
|
||||
Votes you choose to share are sent to the Telemetry Backend immediately from the vote request. The server also keeps a background flush worker so failed traces can retry later. The Telemetry Backend validates the request, then persists the bundle into its configured object storage.
|
||||
|
||||
- App server responsibility: build the bundle, POST it to Telemetry Backend, update trace status
|
||||
- Telemetry Backend responsibility: authenticate the request, validate payload shape, compress/store the bundle, return the final object key
|
||||
- Retry behavior: failed uploads move to `failed` with an error message in `failureReason`, and the worker retries them on later ticks
|
||||
- Default endpoint: when no feedback export backend URL is configured, Paperclip falls back to `https://telemetry.paperclip.ing`
|
||||
- Important nuance: the uploaded object is a snapshot of the full bundle at vote time. If you fetch a local bundle later and the underlying adapter session file has continued to grow, the local regenerated bundle may be larger than the already-uploaded snapshot for that same trace.
|
||||
|
||||
Exported objects use a deterministic key pattern so they are easy to inspect:
|
||||
|
||||
|
||||
@@ -31,14 +31,14 @@ Close linked issues if the approval resolves them, or comment on why they remain
|
||||
### Step 3: Get Assignments
|
||||
|
||||
```
|
||||
GET /api/companies/{companyId}/issues?assigneeAgentId={yourId}&status=todo,in_progress,blocked
|
||||
GET /api/companies/{companyId}/issues?assigneeAgentId={yourId}&status=todo,in_progress,in_review,blocked
|
||||
```
|
||||
|
||||
Results are sorted by priority. This is your inbox.
|
||||
|
||||
### Step 4: Pick Work
|
||||
|
||||
- Work on `in_progress` tasks first, then `todo`
|
||||
- Work on `in_progress` tasks first, then `in_review` when you were woken by a comment on it, then `todo`
|
||||
- Skip `blocked` unless you can unblock it
|
||||
- If `PAPERCLIP_TASK_ID` is set and assigned to you, prioritize it
|
||||
- If woken by a comment mention, read that comment thread first
|
||||
@@ -50,7 +50,7 @@ Before doing any work, you must checkout the task:
|
||||
```
|
||||
POST /api/issues/{issueId}/checkout
|
||||
Headers: X-Paperclip-Run-Id: {runId}
|
||||
{ "agentId": "{yourId}", "expectedStatuses": ["todo", "backlog", "blocked"] }
|
||||
{ "agentId": "{yourId}", "expectedStatuses": ["todo", "backlog", "blocked", "in_review"] }
|
||||
```
|
||||
|
||||
If already checked out by you, this succeeds. If another agent owns it: `409 Conflict` — stop and pick a different task. **Never retry a 409.**
|
||||
|
||||
@@ -11,7 +11,7 @@ Before doing any work on a task, checkout is required:
|
||||
|
||||
```
|
||||
POST /api/issues/{issueId}/checkout
|
||||
{ "agentId": "{yourId}", "expectedStatuses": ["todo", "backlog", "blocked"] }
|
||||
{ "agentId": "{yourId}", "expectedStatuses": ["todo", "backlog", "blocked", "in_review"] }
|
||||
```
|
||||
|
||||
This is an atomic operation. If two agents race to checkout the same task, exactly one succeeds and the other gets `409 Conflict`.
|
||||
@@ -82,8 +82,8 @@ This releases your ownership. Leave a comment explaining why.
|
||||
|
||||
```
|
||||
GET /api/agents/me
|
||||
GET /api/companies/company-1/issues?assigneeAgentId=agent-42&status=todo,in_progress,blocked
|
||||
# -> [{ id: "issue-101", status: "in_progress" }, { id: "issue-99", status: "todo" }]
|
||||
GET /api/companies/company-1/issues?assigneeAgentId=agent-42&status=todo,in_progress,in_review,blocked
|
||||
# -> [{ id: "issue-101", status: "in_progress" }, { id: "issue-100", status: "in_review" }, { id: "issue-99", status: "todo" }]
|
||||
|
||||
# Continue in_progress work
|
||||
GET /api/issues/issue-101
|
||||
@@ -96,7 +96,7 @@ PATCH /api/issues/issue-101
|
||||
|
||||
# Pick up next task
|
||||
POST /api/issues/issue-99/checkout
|
||||
{ "agentId": "agent-42", "expectedStatuses": ["todo"] }
|
||||
{ "agentId": "agent-42", "expectedStatuses": ["todo", "backlog", "blocked", "in_review"] }
|
||||
|
||||
# Partial progress
|
||||
PATCH /api/issues/issue-99
|
||||
|
||||
@@ -22,6 +22,9 @@ export type {
|
||||
AdapterModel,
|
||||
HireApprovedPayload,
|
||||
HireApprovedHookResult,
|
||||
ConfigFieldOption,
|
||||
ConfigFieldSchema,
|
||||
AdapterConfigSchema,
|
||||
ServerAdapterModule,
|
||||
QuotaWindow,
|
||||
ProviderQuotaResult,
|
||||
|
||||
@@ -68,6 +68,7 @@ export function redactTranscriptEntryPaths(entry: TranscriptEntry, opts?: HomePa
|
||||
case "stderr":
|
||||
case "system":
|
||||
case "stdout":
|
||||
case "diff":
|
||||
return { ...entry, text: redactHomePathUserSegments(entry.text, opts) };
|
||||
case "tool_call":
|
||||
return {
|
||||
|
||||
@@ -193,6 +193,174 @@ export function joinPromptSections(
|
||||
.join(separator);
|
||||
}
|
||||
|
||||
type PaperclipWakeIssue = {
|
||||
id: string | null;
|
||||
identifier: string | null;
|
||||
title: string | null;
|
||||
status: string | null;
|
||||
priority: string | null;
|
||||
};
|
||||
|
||||
type PaperclipWakeComment = {
|
||||
id: string | null;
|
||||
issueId: string | null;
|
||||
body: string;
|
||||
bodyTruncated: boolean;
|
||||
createdAt: string | null;
|
||||
authorType: string | null;
|
||||
authorId: string | null;
|
||||
};
|
||||
|
||||
type PaperclipWakePayload = {
|
||||
reason: string | null;
|
||||
issue: PaperclipWakeIssue | null;
|
||||
commentIds: string[];
|
||||
latestCommentId: string | null;
|
||||
comments: PaperclipWakeComment[];
|
||||
requestedCount: number;
|
||||
includedCount: number;
|
||||
missingCount: number;
|
||||
truncated: boolean;
|
||||
fallbackFetchNeeded: boolean;
|
||||
};
|
||||
|
||||
function normalizePaperclipWakeIssue(value: unknown): PaperclipWakeIssue | null {
|
||||
const issue = parseObject(value);
|
||||
const id = asString(issue.id, "").trim() || null;
|
||||
const identifier = asString(issue.identifier, "").trim() || null;
|
||||
const title = asString(issue.title, "").trim() || null;
|
||||
const status = asString(issue.status, "").trim() || null;
|
||||
const priority = asString(issue.priority, "").trim() || null;
|
||||
if (!id && !identifier && !title) return null;
|
||||
return {
|
||||
id,
|
||||
identifier,
|
||||
title,
|
||||
status,
|
||||
priority,
|
||||
};
|
||||
}
|
||||
|
||||
function normalizePaperclipWakeComment(value: unknown): PaperclipWakeComment | null {
|
||||
const comment = parseObject(value);
|
||||
const author = parseObject(comment.author);
|
||||
const body = asString(comment.body, "");
|
||||
if (!body.trim()) return null;
|
||||
return {
|
||||
id: asString(comment.id, "").trim() || null,
|
||||
issueId: asString(comment.issueId, "").trim() || null,
|
||||
body,
|
||||
bodyTruncated: asBoolean(comment.bodyTruncated, false),
|
||||
createdAt: asString(comment.createdAt, "").trim() || null,
|
||||
authorType: asString(author.type, "").trim() || null,
|
||||
authorId: asString(author.id, "").trim() || null,
|
||||
};
|
||||
}
|
||||
|
||||
export function normalizePaperclipWakePayload(value: unknown): PaperclipWakePayload | null {
|
||||
const payload = parseObject(value);
|
||||
const comments = Array.isArray(payload.comments)
|
||||
? payload.comments
|
||||
.map((entry) => normalizePaperclipWakeComment(entry))
|
||||
.filter((entry): entry is PaperclipWakeComment => Boolean(entry))
|
||||
: [];
|
||||
const commentWindow = parseObject(payload.commentWindow);
|
||||
const commentIds = Array.isArray(payload.commentIds)
|
||||
? payload.commentIds
|
||||
.filter((entry): entry is string => typeof entry === "string" && entry.trim().length > 0)
|
||||
.map((entry) => entry.trim())
|
||||
: [];
|
||||
|
||||
if (comments.length === 0 && commentIds.length === 0) return null;
|
||||
|
||||
return {
|
||||
reason: asString(payload.reason, "").trim() || null,
|
||||
issue: normalizePaperclipWakeIssue(payload.issue),
|
||||
commentIds,
|
||||
latestCommentId: asString(payload.latestCommentId, "").trim() || null,
|
||||
comments,
|
||||
requestedCount: asNumber(commentWindow.requestedCount, comments.length || commentIds.length),
|
||||
includedCount: asNumber(commentWindow.includedCount, comments.length),
|
||||
missingCount: asNumber(commentWindow.missingCount, 0),
|
||||
truncated: asBoolean(payload.truncated, false),
|
||||
fallbackFetchNeeded: asBoolean(payload.fallbackFetchNeeded, false),
|
||||
};
|
||||
}
|
||||
|
||||
export function stringifyPaperclipWakePayload(value: unknown): string | null {
|
||||
const normalized = normalizePaperclipWakePayload(value);
|
||||
if (!normalized) return null;
|
||||
return JSON.stringify(normalized);
|
||||
}
|
||||
|
||||
export function renderPaperclipWakePrompt(
|
||||
value: unknown,
|
||||
options: { resumedSession?: boolean } = {},
|
||||
): string {
|
||||
const normalized = normalizePaperclipWakePayload(value);
|
||||
if (!normalized) return "";
|
||||
const resumedSession = options.resumedSession === true;
|
||||
|
||||
const lines = resumedSession
|
||||
? [
|
||||
"## Paperclip Resume Delta",
|
||||
"",
|
||||
"You are resuming an existing Paperclip session.",
|
||||
"This heartbeat is scoped to the issue below. Do not switch to another issue until you have handled this wake.",
|
||||
"Focus on the new wake delta below and continue the current task without restating the full heartbeat boilerplate.",
|
||||
"Fetch the API thread only when `fallbackFetchNeeded` is true or you need broader history than this batch.",
|
||||
"",
|
||||
`- reason: ${normalized.reason ?? "unknown"}`,
|
||||
`- issue: ${normalized.issue?.identifier ?? normalized.issue?.id ?? "unknown"}${normalized.issue?.title ? ` ${normalized.issue.title}` : ""}`,
|
||||
`- pending comments: ${normalized.includedCount}/${normalized.requestedCount}`,
|
||||
`- latest comment id: ${normalized.latestCommentId ?? "unknown"}`,
|
||||
`- fallback fetch needed: ${normalized.fallbackFetchNeeded ? "yes" : "no"}`,
|
||||
]
|
||||
: [
|
||||
"## Paperclip Wake Payload",
|
||||
"",
|
||||
"Treat this wake payload as the highest-priority change for the current heartbeat.",
|
||||
"This heartbeat is scoped to the issue below. Do not switch to another issue until you have handled this wake.",
|
||||
"Before generic repo exploration or boilerplate heartbeat updates, acknowledge the latest comment and explain how it changes your next action.",
|
||||
"Use this inline wake data first before refetching the issue thread.",
|
||||
"Only fetch the API thread when `fallbackFetchNeeded` is true or you need broader history than this batch.",
|
||||
"",
|
||||
`- reason: ${normalized.reason ?? "unknown"}`,
|
||||
`- issue: ${normalized.issue?.identifier ?? normalized.issue?.id ?? "unknown"}${normalized.issue?.title ? ` ${normalized.issue.title}` : ""}`,
|
||||
`- pending comments: ${normalized.includedCount}/${normalized.requestedCount}`,
|
||||
`- latest comment id: ${normalized.latestCommentId ?? "unknown"}`,
|
||||
`- fallback fetch needed: ${normalized.fallbackFetchNeeded ? "yes" : "no"}`,
|
||||
];
|
||||
|
||||
if (normalized.issue?.status) {
|
||||
lines.push(`- issue status: ${normalized.issue.status}`);
|
||||
}
|
||||
if (normalized.issue?.priority) {
|
||||
lines.push(`- issue priority: ${normalized.issue.priority}`);
|
||||
}
|
||||
if (normalized.missingCount > 0) {
|
||||
lines.push(`- omitted comments: ${normalized.missingCount}`);
|
||||
}
|
||||
|
||||
lines.push("", "New comments in order:");
|
||||
|
||||
for (const [index, comment] of normalized.comments.entries()) {
|
||||
const authorLabel = comment.authorId
|
||||
? `${comment.authorType ?? "unknown"} ${comment.authorId}`
|
||||
: comment.authorType ?? "unknown";
|
||||
lines.push(
|
||||
`${index + 1}. comment ${comment.id ?? "unknown"} at ${comment.createdAt ?? "unknown"} by ${authorLabel}`,
|
||||
comment.body,
|
||||
);
|
||||
if (comment.bodyTruncated) {
|
||||
lines.push("[comment body truncated]");
|
||||
}
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
return lines.join("\n").trim();
|
||||
}
|
||||
|
||||
export function redactEnvForLogs(env: Record<string, string>): Record<string, string> {
|
||||
const redacted: Record<string, string> = {};
|
||||
for (const [key, value] of Object.entries(env)) {
|
||||
|
||||
@@ -41,6 +41,7 @@ export const LEGACY_SESSIONED_ADAPTER_TYPES = new Set([
|
||||
"codex_local",
|
||||
"cursor",
|
||||
"gemini_local",
|
||||
"hermes_local",
|
||||
"opencode_local",
|
||||
"pi_local",
|
||||
]);
|
||||
@@ -76,6 +77,11 @@ export const ADAPTER_SESSION_MANAGEMENT: Record<string, AdapterSessionManagement
|
||||
nativeContextManagement: "unknown",
|
||||
defaultSessionCompaction: DEFAULT_SESSION_COMPACTION_POLICY,
|
||||
},
|
||||
hermes_local: {
|
||||
supportsSessionResume: true,
|
||||
nativeContextManagement: "confirmed",
|
||||
defaultSessionCompaction: ADAPTER_MANAGED_SESSION_POLICY,
|
||||
},
|
||||
};
|
||||
|
||||
function isRecord(value: unknown): value is Record<string, unknown> {
|
||||
|
||||
@@ -261,6 +261,34 @@ export interface ProviderQuotaResult {
|
||||
windows: QuotaWindow[];
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Adapter config schema — declarative UI config for external adapters
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * One selectable option for a "select" or "combobox" config field.
 */
export interface ConfigFieldOption {
  // Text shown to the user.
  label: string;
  // Value stored when this option is chosen.
  value: string;
  /** Optional group key for categorizing options (e.g. provider name) */
  group?: string;
}

/**
 * Declarative description of a single adapter config form field, rendered
 * generically by the UI without adapter-specific React components.
 */
export interface ConfigFieldSchema {
  // Key under which the field's value is stored (see CreateConfigValues.adapterSchemaValues).
  key: string;
  // Human-readable field label.
  label: string;
  // Widget type the UI should render for this field.
  type: "text" | "select" | "toggle" | "number" | "textarea" | "combobox";
  // Choices for "select"/"combobox" fields.
  options?: ConfigFieldOption[];
  // Initial value; intentionally untyped since it varies per field type.
  default?: unknown;
  // Short helper text shown beside the field.
  hint?: string;
  required?: boolean;
  // Optional grouping key for laying out related fields together.
  group?: string;
  /** Optional metadata — not rendered, but available to custom UI logic */
  meta?: Record<string, unknown>;
}

/**
 * Fully hydrated, ordered set of config fields returned by an adapter's
 * getConfigSchema().
 */
export interface AdapterConfigSchema {
  fields: ConfigFieldSchema[];
}
|
||||
|
||||
export interface ServerAdapterModule {
|
||||
type: string;
|
||||
execute(ctx: AdapterExecutionContext): Promise<AdapterExecutionResult>;
|
||||
@@ -292,7 +320,14 @@ export interface ServerAdapterModule {
|
||||
* Returns the detected model/provider and the config source, or null if
|
||||
* the adapter does not support detection or no config is found.
|
||||
*/
|
||||
detectModel?: () => Promise<{ model: string; provider: string; source: string } | null>;
|
||||
detectModel?: () => Promise<{ model: string; provider: string; source: string; candidates?: string[] } | null>;
|
||||
/**
|
||||
* Optional: return a declarative config schema so the UI can render
|
||||
* adapter-specific form fields without shipping React components.
|
||||
* Dynamic options (e.g. scanning a profiles directory) should be
|
||||
* resolved inside this method — the caller receives a fully hydrated schema.
|
||||
*/
|
||||
getConfigSchema?: () => Promise<AdapterConfigSchema> | AdapterConfigSchema;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -309,7 +344,8 @@ export type TranscriptEntry =
|
||||
| { kind: "result"; ts: string; text: string; inputTokens: number; outputTokens: number; cachedTokens: number; costUsd: number; subtype: string; isError: boolean; errors: string[] }
|
||||
| { kind: "stderr"; ts: string; text: string }
|
||||
| { kind: "system"; ts: string; text: string }
|
||||
| { kind: "stdout"; ts: string; text: string };
|
||||
| { kind: "stdout"; ts: string; text: string }
|
||||
| { kind: "diff"; ts: string; changeType: "add" | "remove" | "context" | "hunk" | "file_header" | "truncation"; text: string };
|
||||
|
||||
export type StdoutLineParser = (line: string, ts: string) => TranscriptEntry[];
|
||||
|
||||
@@ -353,4 +389,6 @@ export interface CreateConfigValues {
|
||||
maxTurnsPerRun: number;
|
||||
heartbeatEnabled: boolean;
|
||||
intervalSec: number;
|
||||
/** Arbitrary key-value pairs populated by schema-driven config fields. */
|
||||
adapterSchemaValues?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ Core fields:
|
||||
- chrome (boolean, optional): pass --chrome when running Claude
|
||||
- promptTemplate (string, optional): run prompt template
|
||||
- maxTurnsPerRun (number, optional): max turns for one run
|
||||
- dangerouslySkipPermissions (boolean, optional): pass --dangerously-skip-permissions to claude
|
||||
- dangerouslySkipPermissions (boolean, optional, default true): pass --dangerously-skip-permissions to claude; defaults to true because Paperclip runs Claude in headless --print mode where interactive permission prompts cannot be answered
|
||||
- command (string, optional): defaults to "claude"
|
||||
- extraArgs (string[], optional): additional CLI args
|
||||
- env (object, optional): KEY=VALUE environment variables
|
||||
|
||||
@@ -20,6 +20,8 @@ import {
|
||||
ensurePathInEnv,
|
||||
resolveCommandForLogs,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
stringifyPaperclipWakePayload,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
@@ -100,8 +102,16 @@ function hasNonEmptyEnvValue(env: Record<string, string>, key: string): boolean
|
||||
return typeof raw === "string" && raw.trim().length > 0;
|
||||
}
|
||||
|
||||
function resolveClaudeBillingType(env: Record<string, string>): "api" | "subscription" {
|
||||
// Claude uses API-key auth when ANTHROPIC_API_KEY is present; otherwise rely on local login/session auth.
|
||||
function isBedrockAuth(env: Record<string, string>): boolean {
|
||||
return (
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "1" ||
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "true" ||
|
||||
hasNonEmptyEnvValue(env, "ANTHROPIC_BEDROCK_BASE_URL")
|
||||
);
|
||||
}
|
||||
|
||||
function resolveClaudeBillingType(env: Record<string, string>): "api" | "subscription" | "metered_api" {
|
||||
if (isBedrockAuth(env)) return "metered_api";
|
||||
return hasNonEmptyEnvValue(env, "ANTHROPIC_API_KEY") ? "api" : "subscription";
|
||||
}
|
||||
|
||||
@@ -170,6 +180,7 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
const linkedIssueIds = Array.isArray(context.issueIds)
|
||||
? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
|
||||
: [];
|
||||
const wakePayloadJson = stringifyPaperclipWakePayload(context.paperclipWake);
|
||||
|
||||
if (wakeTaskId) {
|
||||
env.PAPERCLIP_TASK_ID = wakeTaskId;
|
||||
@@ -189,6 +200,9 @@ async function buildClaudeRuntimeConfig(input: ClaudeExecutionInput): Promise<Cl
|
||||
if (linkedIssueIds.length > 0) {
|
||||
env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
}
|
||||
if (wakePayloadJson) {
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
if (effectiveWorkspaceCwd) {
|
||||
env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
}
|
||||
@@ -317,7 +331,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const effort = asString(config.effort, "");
|
||||
const chrome = asBoolean(config.chrome, false);
|
||||
const maxTurns = asNumber(config.maxTurnsPerRun, 0);
|
||||
const dangerouslySkipPermissions = asBoolean(config.dangerouslySkipPermissions, false);
|
||||
const dangerouslySkipPermissions = asBoolean(config.dangerouslySkipPermissions, true);
|
||||
const instructionsFilePath = asString(config.instructionsFilePath, "").trim();
|
||||
const instructionsFileDir = instructionsFilePath ? `${path.dirname(instructionsFilePath)}/` : "";
|
||||
const commandNotes = instructionsFilePath
|
||||
@@ -398,20 +412,24 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
run: { id: runId, source: "on_demand" },
|
||||
context,
|
||||
};
|
||||
const renderedPrompt = renderTemplate(promptTemplate, templateData);
|
||||
const renderedBootstrapPrompt =
|
||||
!sessionId && bootstrapPromptTemplate.trim().length > 0
|
||||
? renderTemplate(bootstrapPromptTemplate, templateData).trim()
|
||||
: "";
|
||||
const wakePrompt = renderPaperclipWakePrompt(context.paperclipWake, { resumedSession: Boolean(sessionId) });
|
||||
const shouldUseResumeDeltaPrompt = Boolean(sessionId) && wakePrompt.length > 0;
|
||||
const renderedPrompt = shouldUseResumeDeltaPrompt ? "" : renderTemplate(promptTemplate, templateData);
|
||||
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
|
||||
const prompt = joinPromptSections([
|
||||
renderedBootstrapPrompt,
|
||||
wakePrompt,
|
||||
sessionHandoffNote,
|
||||
renderedPrompt,
|
||||
]);
|
||||
const promptMetrics = {
|
||||
promptChars: prompt.length,
|
||||
bootstrapPromptChars: renderedBootstrapPrompt.length,
|
||||
wakePromptChars: wakePrompt.length,
|
||||
sessionHandoffChars: sessionHandoffNote.length,
|
||||
heartbeatPromptChars: renderedPrompt.length,
|
||||
};
|
||||
@@ -421,7 +439,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (resumeSessionId) args.push("--resume", resumeSessionId);
|
||||
if (dangerouslySkipPermissions) args.push("--dangerously-skip-permissions");
|
||||
if (chrome) args.push("--chrome");
|
||||
if (model) args.push("--model", model);
|
||||
// Skip --model for Bedrock: Anthropic-style model IDs (e.g. "claude-opus-4-6") are not
|
||||
// valid Bedrock model identifiers. Let the CLI use its own configured model instead.
|
||||
if (model && !isBedrockAuth(effectiveEnv)) args.push("--model", model);
|
||||
if (effort) args.push("--effort", effort);
|
||||
if (maxTurns > 0) args.push("--max-turns", String(maxTurns));
|
||||
if (effectiveInstructionsFilePath) {
|
||||
@@ -568,7 +588,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
sessionParams: resolvedSessionParams,
|
||||
sessionDisplayId: resolvedSessionId,
|
||||
provider: "anthropic",
|
||||
biller: "anthropic",
|
||||
biller: isBedrockAuth(effectiveEnv) ? "aws_bedrock" : "anthropic",
|
||||
model: parsedStream.model || asString(parsed.model, model),
|
||||
billingType,
|
||||
costUsd: parsedStream.costUsd ?? asNumber(parsed.total_cost_usd, 0),
|
||||
|
||||
@@ -477,6 +477,14 @@ function formatProviderError(source: string, error: unknown): string {
|
||||
}
|
||||
|
||||
export async function getQuotaWindows(): Promise<ProviderQuotaResult> {
|
||||
if (
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK === "1" ||
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK === "true" ||
|
||||
hasNonEmptyProcessEnv("ANTHROPIC_BEDROCK_BASE_URL")
|
||||
) {
|
||||
return { provider: "anthropic", source: "bedrock", ok: true, windows: [] };
|
||||
}
|
||||
|
||||
const authStatus = await readClaudeAuthStatus();
|
||||
const authDescription = describeClaudeSubscriptionAuth(authStatus);
|
||||
const token = await readClaudeToken();
|
||||
|
||||
@@ -95,9 +95,31 @@ export async function testEnvironment(
|
||||
});
|
||||
}
|
||||
|
||||
const hasBedrock =
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "1" ||
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "true" ||
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK === "1" ||
|
||||
process.env.CLAUDE_CODE_USE_BEDROCK === "true" ||
|
||||
isNonEmpty(env.ANTHROPIC_BEDROCK_BASE_URL) ||
|
||||
isNonEmpty(process.env.ANTHROPIC_BEDROCK_BASE_URL);
|
||||
|
||||
const configApiKey = env.ANTHROPIC_API_KEY;
|
||||
const hostApiKey = process.env.ANTHROPIC_API_KEY;
|
||||
if (isNonEmpty(configApiKey) || isNonEmpty(hostApiKey)) {
|
||||
if (hasBedrock) {
|
||||
const source =
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "1" ||
|
||||
env.CLAUDE_CODE_USE_BEDROCK === "true" ||
|
||||
isNonEmpty(env.ANTHROPIC_BEDROCK_BASE_URL)
|
||||
? "adapter config env"
|
||||
: "server environment";
|
||||
checks.push({
|
||||
code: "claude_bedrock_auth",
|
||||
level: "info",
|
||||
message: "AWS Bedrock auth detected. Claude will use Bedrock for inference.",
|
||||
detail: `Detected in ${source}.`,
|
||||
hint: "Ensure AWS credentials (AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY or AWS_PROFILE) and AWS_REGION are configured.",
|
||||
});
|
||||
} else if (isNonEmpty(configApiKey) || isNonEmpty(hostApiKey)) {
|
||||
const source = isNonEmpty(configApiKey) ? "adapter config env" : "server environment";
|
||||
checks.push({
|
||||
code: "claude_anthropic_api_key_overrides_subscription",
|
||||
@@ -131,7 +153,7 @@ export async function testEnvironment(
|
||||
const effort = asString(config.effort, "").trim();
|
||||
const chrome = asBoolean(config.chrome, false);
|
||||
const maxTurns = asNumber(config.maxTurnsPerRun, 0);
|
||||
const dangerouslySkipPermissions = asBoolean(config.dangerouslySkipPermissions, false);
|
||||
const dangerouslySkipPermissions = asBoolean(config.dangerouslySkipPermissions, true);
|
||||
const extraArgs = (() => {
|
||||
const fromExtraArgs = asStringArray(config.extraArgs);
|
||||
if (fromExtraArgs.length > 0) return fromExtraArgs;
|
||||
@@ -141,7 +163,10 @@ export async function testEnvironment(
|
||||
const args = ["--print", "-", "--output-format", "stream-json", "--verbose"];
|
||||
if (dangerouslySkipPermissions) args.push("--dangerously-skip-permissions");
|
||||
if (chrome) args.push("--chrome");
|
||||
if (model) args.push("--model", model);
|
||||
// Skip --model for Bedrock: Anthropic-style model IDs (e.g. "claude-opus-4-6") are not
|
||||
// valid Bedrock model identifiers. Let the CLI use whatever model is configured in its
|
||||
// own settings when Bedrock auth is active.
|
||||
if (model && !hasBedrock) args.push("--model", model);
|
||||
if (effort) args.push("--effort", effort);
|
||||
if (maxTurns > 0) args.push("--max-turns", String(maxTurns));
|
||||
if (extraArgs.length > 0) args.push(...extraArgs);
|
||||
|
||||
@@ -18,6 +18,8 @@ import {
|
||||
resolveCommandForLogs,
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
stringifyPaperclipWakePayload,
|
||||
joinPromptSections,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
@@ -313,6 +315,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const linkedIssueIds = Array.isArray(context.issueIds)
|
||||
? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
|
||||
: [];
|
||||
const wakePayloadJson = stringifyPaperclipWakePayload(context.paperclipWake);
|
||||
if (wakeTaskId) {
|
||||
env.PAPERCLIP_TASK_ID = wakeTaskId;
|
||||
}
|
||||
@@ -331,6 +334,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (linkedIssueIds.length > 0) {
|
||||
env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
}
|
||||
if (wakePayloadJson) {
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
if (effectiveWorkspaceCwd) {
|
||||
env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
}
|
||||
@@ -434,11 +440,36 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
}
|
||||
const repoAgentsNote =
|
||||
"Codex exec automatically applies repo-scoped AGENTS.md instructions from the current workspace; Paperclip does not currently suppress that discovery.";
|
||||
const bootstrapPromptTemplate = asString(config.bootstrapPromptTemplate, "");
|
||||
const templateData = {
|
||||
agentId: agent.id,
|
||||
companyId: agent.companyId,
|
||||
runId,
|
||||
company: { id: agent.companyId },
|
||||
agent,
|
||||
run: { id: runId, source: "on_demand" },
|
||||
context,
|
||||
};
|
||||
const renderedBootstrapPrompt =
|
||||
!sessionId && bootstrapPromptTemplate.trim().length > 0
|
||||
? renderTemplate(bootstrapPromptTemplate, templateData).trim()
|
||||
: "";
|
||||
const wakePrompt = renderPaperclipWakePrompt(context.paperclipWake, { resumedSession: Boolean(sessionId) });
|
||||
const shouldUseResumeDeltaPrompt = Boolean(sessionId) && wakePrompt.length > 0;
|
||||
const promptInstructionsPrefix = shouldUseResumeDeltaPrompt ? "" : instructionsPrefix;
|
||||
instructionsChars = promptInstructionsPrefix.length;
|
||||
const commandNotes = (() => {
|
||||
if (!instructionsFilePath) {
|
||||
return [repoAgentsNote];
|
||||
}
|
||||
if (instructionsPrefix.length > 0) {
|
||||
if (shouldUseResumeDeltaPrompt) {
|
||||
return [
|
||||
`Loaded agent instructions from ${instructionsFilePath}`,
|
||||
"Skipped stdin instruction reinjection because an existing Codex session is being resumed with a wake delta.",
|
||||
repoAgentsNote,
|
||||
];
|
||||
}
|
||||
return [
|
||||
`Loaded agent instructions from ${instructionsFilePath}`,
|
||||
`Prepended instructions + path directive to stdin prompt (relative references from ${instructionsDir}).`,
|
||||
@@ -450,25 +481,12 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
repoAgentsNote,
|
||||
];
|
||||
})();
|
||||
const bootstrapPromptTemplate = asString(config.bootstrapPromptTemplate, "");
|
||||
const templateData = {
|
||||
agentId: agent.id,
|
||||
companyId: agent.companyId,
|
||||
runId,
|
||||
company: { id: agent.companyId },
|
||||
agent,
|
||||
run: { id: runId, source: "on_demand" },
|
||||
context,
|
||||
};
|
||||
const renderedPrompt = renderTemplate(promptTemplate, templateData);
|
||||
const renderedBootstrapPrompt =
|
||||
!sessionId && bootstrapPromptTemplate.trim().length > 0
|
||||
? renderTemplate(bootstrapPromptTemplate, templateData).trim()
|
||||
: "";
|
||||
const renderedPrompt = shouldUseResumeDeltaPrompt ? "" : renderTemplate(promptTemplate, templateData);
|
||||
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
|
||||
const prompt = joinPromptSections([
|
||||
instructionsPrefix,
|
||||
promptInstructionsPrefix,
|
||||
renderedBootstrapPrompt,
|
||||
wakePrompt,
|
||||
sessionHandoffNote,
|
||||
renderedPrompt,
|
||||
]);
|
||||
@@ -476,6 +494,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
promptChars: prompt.length,
|
||||
instructionsChars,
|
||||
bootstrapPromptChars: renderedBootstrapPrompt.length,
|
||||
wakePromptChars: wakePrompt.length,
|
||||
sessionHandoffChars: sessionHandoffNote.length,
|
||||
heartbeatPromptChars: renderedPrompt.length,
|
||||
};
|
||||
|
||||
50
packages/adapters/codex-local/src/server/parse.test.ts
Normal file
50
packages/adapters/codex-local/src/server/parse.test.ts
Normal file
@@ -0,0 +1,50 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { isCodexUnknownSessionError, parseCodexJsonl } from "./parse.js";
|
||||
|
||||
describe("parseCodexJsonl", () => {
  // Feeds a representative Codex stream-json transcript (one JSON event per
  // line) and checks every extracted field in a single pass: thread id,
  // final assistant message, token usage, and the turn-failure message.
  it("captures session id, assistant summary, usage, and error message", () => {
    const stdout = [
      // Session start — source of sessionId.
      JSON.stringify({ type: "thread.started", thread_id: "thread_123" }),
      // Completed assistant message — source of summary.
      JSON.stringify({
        type: "item.completed",
        item: { type: "agent_message", text: "Recovered response" },
      }),
      // Turn completion — source of usage counters.
      JSON.stringify({
        type: "turn.completed",
        usage: { input_tokens: 10, cached_input_tokens: 2, output_tokens: 4 },
      }),
      // Turn failure — source of errorMessage.
      JSON.stringify({ type: "turn.failed", error: { message: "resume failed" } }),
    ].join("\n");

    expect(parseCodexJsonl(stdout)).toEqual({
      sessionId: "thread_123",
      summary: "Recovered response",
      usage: {
        inputTokens: 10,
        cachedInputTokens: 2,
        outputTokens: 4,
      },
      errorMessage: "resume failed",
    });
  });
});
|
||||
|
||||
describe("isCodexUnknownSessionError", () => {
  // Newer Codex CLI wording: "no rollout found for thread id <uuid>".
  it("detects the current missing-rollout thread error", () => {
    expect(
      isCodexUnknownSessionError(
        "",
        "Error: thread/resume: thread/resume failed: no rollout found for thread id d448e715-7607-4bcc-91fc-7a3c0c5a9632",
      ),
    ).toBe(true);
  });

  // Older wordings must keep matching — the detector checks both stdout
  // and stderr.
  it("still detects existing stale-session wordings", () => {
    expect(isCodexUnknownSessionError("unknown thread id", "")).toBe(true);
    expect(isCodexUnknownSessionError("", "state db missing rollout path for thread abc")).toBe(true);
  });

  // Guard against over-matching: generic failures are not stale sessions.
  it("does not classify unrelated Codex failures as stale sessions", () => {
    expect(isCodexUnknownSessionError("", "model overloaded")).toBe(false);
  });
});
|
||||
@@ -67,7 +67,7 @@ export function isCodexUnknownSessionError(stdout: string, stderr: string): bool
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean)
|
||||
.join("\n");
|
||||
return /unknown (session|thread)|session .* not found|thread .* not found|conversation .* not found|missing rollout path for thread|state db missing rollout path/i.test(
|
||||
return /unknown (session|thread)|session .* not found|thread .* not found|conversation .* not found|missing rollout path for thread|state db missing rollout path|no rollout found for thread id/i.test(
|
||||
haystack,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -19,6 +19,8 @@ import {
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
removeMaintainerOnlySkillSymlinks,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
stringifyPaperclipWakePayload,
|
||||
joinPromptSections,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
@@ -219,6 +221,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const linkedIssueIds = Array.isArray(context.issueIds)
|
||||
? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
|
||||
: [];
|
||||
const wakePayloadJson = stringifyPaperclipWakePayload(context.paperclipWake);
|
||||
if (wakeTaskId) {
|
||||
env.PAPERCLIP_TASK_ID = wakeTaskId;
|
||||
}
|
||||
@@ -237,6 +240,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (linkedIssueIds.length > 0) {
|
||||
env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
}
|
||||
if (wakePayloadJson) {
|
||||
env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
}
|
||||
if (effectiveWorkspaceCwd) {
|
||||
env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
}
|
||||
@@ -352,16 +358,19 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
run: { id: runId, source: "on_demand" },
|
||||
context,
|
||||
};
|
||||
const renderedPrompt = renderTemplate(promptTemplate, templateData);
|
||||
const renderedBootstrapPrompt =
|
||||
!sessionId && bootstrapPromptTemplate.trim().length > 0
|
||||
? renderTemplate(bootstrapPromptTemplate, templateData).trim()
|
||||
: "";
|
||||
const wakePrompt = renderPaperclipWakePrompt(context.paperclipWake, { resumedSession: Boolean(sessionId) });
|
||||
const shouldUseResumeDeltaPrompt = Boolean(sessionId) && wakePrompt.length > 0;
|
||||
const renderedPrompt = shouldUseResumeDeltaPrompt ? "" : renderTemplate(promptTemplate, templateData);
|
||||
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
|
||||
const paperclipEnvNote = renderPaperclipEnvNote(env);
|
||||
const prompt = joinPromptSections([
|
||||
instructionsPrefix,
|
||||
renderedBootstrapPrompt,
|
||||
wakePrompt,
|
||||
sessionHandoffNote,
|
||||
paperclipEnvNote,
|
||||
renderedPrompt,
|
||||
@@ -370,6 +379,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
promptChars: prompt.length,
|
||||
instructionsChars,
|
||||
bootstrapPromptChars: renderedBootstrapPrompt.length,
|
||||
wakePromptChars: wakePrompt.length,
|
||||
sessionHandoffChars: sessionHandoffNote.length,
|
||||
runtimeNoteChars: paperclipEnvNote.length,
|
||||
heartbeatPromptChars: renderedPrompt.length,
|
||||
|
||||
@@ -22,6 +22,8 @@ import {
|
||||
removeMaintainerOnlySkillSymlinks,
|
||||
parseObject,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
stringifyPaperclipWakePayload,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import { DEFAULT_GEMINI_LOCAL_MODEL } from "../index.js";
|
||||
@@ -193,12 +195,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const linkedIssueIds = Array.isArray(context.issueIds)
|
||||
? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
|
||||
: [];
|
||||
const wakePayloadJson = stringifyPaperclipWakePayload(context.paperclipWake);
|
||||
if (wakeTaskId) env.PAPERCLIP_TASK_ID = wakeTaskId;
|
||||
if (wakeReason) env.PAPERCLIP_WAKE_REASON = wakeReason;
|
||||
if (wakeCommentId) env.PAPERCLIP_WAKE_COMMENT_ID = wakeCommentId;
|
||||
if (approvalId) env.PAPERCLIP_APPROVAL_ID = approvalId;
|
||||
if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
if (effectiveWorkspaceCwd) env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
if (workspaceSource) env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
if (workspaceId) env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
@@ -295,17 +299,20 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
run: { id: runId, source: "on_demand" },
|
||||
context,
|
||||
};
|
||||
const renderedPrompt = renderTemplate(promptTemplate, templateData);
|
||||
const renderedBootstrapPrompt =
|
||||
!sessionId && bootstrapPromptTemplate.trim().length > 0
|
||||
? renderTemplate(bootstrapPromptTemplate, templateData).trim()
|
||||
: "";
|
||||
const wakePrompt = renderPaperclipWakePrompt(context.paperclipWake, { resumedSession: Boolean(sessionId) });
|
||||
const shouldUseResumeDeltaPrompt = Boolean(sessionId) && wakePrompt.length > 0;
|
||||
const renderedPrompt = shouldUseResumeDeltaPrompt ? "" : renderTemplate(promptTemplate, templateData);
|
||||
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
|
||||
const paperclipEnvNote = renderPaperclipEnvNote(env);
|
||||
const apiAccessNote = renderApiAccessNote(env);
|
||||
const prompt = joinPromptSections([
|
||||
instructionsPrefix,
|
||||
renderedBootstrapPrompt,
|
||||
wakePrompt,
|
||||
sessionHandoffNote,
|
||||
paperclipEnvNote,
|
||||
apiAccessNote,
|
||||
@@ -315,6 +322,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
promptChars: prompt.length,
|
||||
instructionsChars: instructionsPrefix.length,
|
||||
bootstrapPromptChars: renderedBootstrapPrompt.length,
|
||||
wakePromptChars: wakePrompt.length,
|
||||
sessionHandoffChars: sessionHandoffNote.length,
|
||||
runtimeNoteChars: paperclipEnvNote.length + apiAccessNote.length,
|
||||
heartbeatPromptChars: renderedPrompt.length,
|
||||
|
||||
@@ -36,6 +36,7 @@ Request behavior fields:
|
||||
- waitTimeoutMs (number, optional): agent.wait timeout override (default timeoutSec * 1000)
|
||||
- autoPairOnFirstConnect (boolean, optional): on first "pairing required", attempt device.pair.list/device.pair.approve via shared auth, then retry once (default true)
|
||||
- paperclipApiUrl (string, optional): absolute Paperclip base URL advertised in wake text
|
||||
- claimedApiKeyPath (string, optional): path to the claimed API key JSON file read by the agent at wake time (default ~/.openclaw/workspace/paperclip-claimed-api-key.json)
|
||||
|
||||
Session routing fields:
|
||||
- sessionKeyStrategy (string, optional): issue (default), fixed, or run
|
||||
|
||||
@@ -3,7 +3,14 @@ import type {
|
||||
AdapterExecutionResult,
|
||||
AdapterRuntimeServiceReport,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
import { asNumber, asString, buildPaperclipEnv, parseObject } from "@paperclipai/adapter-utils/server-utils";
|
||||
import {
|
||||
asNumber,
|
||||
asString,
|
||||
buildPaperclipEnv,
|
||||
parseObject,
|
||||
renderPaperclipWakePrompt,
|
||||
stringifyPaperclipWakePayload,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import crypto, { randomUUID } from "node:crypto";
|
||||
import { WebSocket } from "ws";
|
||||
|
||||
@@ -313,6 +320,12 @@ function resolvePaperclipApiUrlOverride(value: unknown): string | null {
|
||||
}
|
||||
}
|
||||
|
||||
const DEFAULT_CLAIMED_API_KEY_PATH = "~/.openclaw/workspace/paperclip-claimed-api-key.json";
|
||||
|
||||
function resolveClaimedApiKeyPath(value: unknown): string {
|
||||
return nonEmpty(value) ?? DEFAULT_CLAIMED_API_KEY_PATH;
|
||||
}
|
||||
|
||||
function buildPaperclipEnvForWake(ctx: AdapterExecutionContext, wakePayload: WakePayload): Record<string, string> {
|
||||
const paperclipApiUrlOverride = resolvePaperclipApiUrlOverride(ctx.config.paperclipApiUrl);
|
||||
const paperclipEnv: Record<string, string> = {
|
||||
@@ -335,7 +348,11 @@ function buildPaperclipEnvForWake(ctx: AdapterExecutionContext, wakePayload: Wak
|
||||
return paperclipEnv;
|
||||
}
|
||||
|
||||
function buildWakeText(payload: WakePayload, paperclipEnv: Record<string, string>): string {
|
||||
function buildWakeText(
|
||||
payload: WakePayload,
|
||||
paperclipEnv: Record<string, string>,
|
||||
structuredWakePrompt: string,
|
||||
): string {
|
||||
const claimedApiKeyPath = "~/.openclaw/workspace/paperclip-claimed-api-key.json";
|
||||
const orderedKeys = [
|
||||
"PAPERCLIP_RUN_ID",
|
||||
@@ -390,20 +407,26 @@ function buildWakeText(payload: WakePayload, paperclipEnv: Record<string, string
|
||||
"1) GET /api/agents/me",
|
||||
`2) Determine issueId: PAPERCLIP_TASK_ID if present, otherwise issue_id (${issueIdHint}).`,
|
||||
"3) If issueId exists:",
|
||||
" - POST /api/issues/{issueId}/checkout with {\"agentId\":\"$PAPERCLIP_AGENT_ID\",\"expectedStatuses\":[\"todo\",\"backlog\",\"blocked\"]}",
|
||||
" - POST /api/issues/{issueId}/checkout with {\"agentId\":\"$PAPERCLIP_AGENT_ID\",\"expectedStatuses\":[\"todo\",\"backlog\",\"blocked\",\"in_review\"]}",
|
||||
" - GET /api/issues/{issueId}",
|
||||
" - GET /api/issues/{issueId}/comments",
|
||||
" - Execute the issue instructions exactly.",
|
||||
" - If instructions require a comment, POST /api/issues/{issueId}/comments with {\"body\":\"...\"}.",
|
||||
" - PATCH /api/issues/{issueId} with {\"status\":\"done\",\"comment\":\"what changed and why\"}.",
|
||||
"4) If issueId does not exist:",
|
||||
" - GET /api/companies/$PAPERCLIP_COMPANY_ID/issues?assigneeAgentId=$PAPERCLIP_AGENT_ID&status=todo,in_progress,blocked",
|
||||
" - Pick in_progress first, then todo, then blocked, then execute step 3.",
|
||||
" - GET /api/companies/$PAPERCLIP_COMPANY_ID/issues?assigneeAgentId=$PAPERCLIP_AGENT_ID&status=todo,in_progress,in_review,blocked",
|
||||
" - Pick in_progress first, then in_review when you were woken by a comment, then todo, then blocked, then execute step 3.",
|
||||
"",
|
||||
"Useful endpoints for issue work:",
|
||||
"- POST /api/issues/{issueId}/comments",
|
||||
"- PATCH /api/issues/{issueId}",
|
||||
"- POST /api/companies/{companyId}/issues (when asked to create a new issue)",
|
||||
...(structuredWakePrompt
|
||||
? [
|
||||
"",
|
||||
structuredWakePrompt,
|
||||
]
|
||||
: []),
|
||||
"",
|
||||
"Complete the workflow in this run.",
|
||||
];
|
||||
@@ -415,6 +438,17 @@ function appendWakeText(baseText: string, wakeText: string): string {
|
||||
return trimmedBase.length > 0 ? `${trimmedBase}\n\n${wakeText}` : wakeText;
|
||||
}
|
||||
|
||||
function joinWakePayloadSections(structuredWakePrompt: string, structuredWakeJson: string): string {
|
||||
const sections = [
|
||||
structuredWakePrompt.trim(),
|
||||
"Structured wake payload JSON:",
|
||||
"```json",
|
||||
structuredWakeJson,
|
||||
"```",
|
||||
].filter((entry) => entry.trim().length > 0);
|
||||
return sections.join("\n");
|
||||
}
|
||||
|
||||
function buildStandardPaperclipPayload(
|
||||
ctx: AdapterExecutionContext,
|
||||
wakePayload: WakePayload,
|
||||
@@ -447,6 +481,10 @@ function buildStandardPaperclipPayload(
|
||||
approvalStatus: wakePayload.approvalStatus,
|
||||
apiUrl: paperclipEnv.PAPERCLIP_API_URL ?? null,
|
||||
};
|
||||
const structuredWake = parseObject(ctx.context.paperclipWake);
|
||||
if (Object.keys(structuredWake).length > 0) {
|
||||
standardPaperclip.wake = structuredWake;
|
||||
}
|
||||
|
||||
if (workspace) {
|
||||
standardPaperclip.workspace = workspace;
|
||||
@@ -1053,7 +1091,15 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
|
||||
const wakePayload = buildWakePayload(ctx);
|
||||
const paperclipEnv = buildPaperclipEnvForWake(ctx, wakePayload);
|
||||
const wakeText = buildWakeText(wakePayload, paperclipEnv);
|
||||
const structuredWakePrompt = renderPaperclipWakePrompt(ctx.context.paperclipWake);
|
||||
const structuredWakeJson = stringifyPaperclipWakePayload(ctx.context.paperclipWake);
|
||||
const wakeText = buildWakeText(
|
||||
wakePayload,
|
||||
paperclipEnv,
|
||||
structuredWakeJson
|
||||
? joinWakePayloadSections(structuredWakePrompt, structuredWakeJson)
|
||||
: structuredWakePrompt,
|
||||
);
|
||||
|
||||
const sessionKeyStrategy = normalizeSessionKeyStrategy(ctx.config.sessionKeyStrategy);
|
||||
const configuredSessionKey = nonEmpty(ctx.config.sessionKey);
|
||||
@@ -1075,6 +1121,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
idempotencyKey: ctx.runId,
|
||||
};
|
||||
delete agentParams.text;
|
||||
agentParams.paperclip = paperclipPayload;
|
||||
|
||||
const configuredAgentId = nonEmpty(ctx.config.agentId);
|
||||
if (configuredAgentId && !nonEmpty(agentParams.agentId)) {
|
||||
|
||||
@@ -17,6 +17,8 @@ import {
|
||||
ensurePathInEnv,
|
||||
resolveCommandForLogs,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
stringifyPaperclipWakePayload,
|
||||
runChildProcess,
|
||||
readPaperclipRuntimeSkillEntries,
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
@@ -154,12 +156,14 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const linkedIssueIds = Array.isArray(context.issueIds)
|
||||
? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
|
||||
: [];
|
||||
const wakePayloadJson = stringifyPaperclipWakePayload(context.paperclipWake);
|
||||
if (wakeTaskId) env.PAPERCLIP_TASK_ID = wakeTaskId;
|
||||
if (wakeReason) env.PAPERCLIP_WAKE_REASON = wakeReason;
|
||||
if (wakeCommentId) env.PAPERCLIP_WAKE_COMMENT_ID = wakeCommentId;
|
||||
if (approvalId) env.PAPERCLIP_APPROVAL_ID = approvalId;
|
||||
if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
if (effectiveWorkspaceCwd) env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
|
||||
if (workspaceSource) env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
if (workspaceId) env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
@@ -222,7 +226,6 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
`[paperclip] OpenCode session "${runtimeSessionId}" was saved for cwd "${runtimeSessionCwd}" and will not be resumed in "${cwd}".\n`,
|
||||
);
|
||||
}
|
||||
|
||||
const instructionsFilePath = asString(config.instructionsFilePath, "").trim();
|
||||
const resolvedInstructionsFilePath = instructionsFilePath
|
||||
? path.resolve(cwd, instructionsFilePath)
|
||||
@@ -271,15 +274,18 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
run: { id: runId, source: "on_demand" },
|
||||
context,
|
||||
};
|
||||
const renderedPrompt = renderTemplate(promptTemplate, templateData);
|
||||
const renderedBootstrapPrompt =
|
||||
!sessionId && bootstrapPromptTemplate.trim().length > 0
|
||||
? renderTemplate(bootstrapPromptTemplate, templateData).trim()
|
||||
: "";
|
||||
const wakePrompt = renderPaperclipWakePrompt(context.paperclipWake, { resumedSession: Boolean(sessionId) });
|
||||
const shouldUseResumeDeltaPrompt = Boolean(sessionId) && wakePrompt.length > 0;
|
||||
const renderedPrompt = shouldUseResumeDeltaPrompt ? "" : renderTemplate(promptTemplate, templateData);
|
||||
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
|
||||
const prompt = joinPromptSections([
|
||||
instructionsPrefix,
|
||||
renderedBootstrapPrompt,
|
||||
wakePrompt,
|
||||
sessionHandoffNote,
|
||||
renderedPrompt,
|
||||
]);
|
||||
@@ -287,6 +293,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
promptChars: prompt.length,
|
||||
instructionsChars: instructionsPrefix.length,
|
||||
bootstrapPromptChars: renderedBootstrapPrompt.length,
|
||||
wakePromptChars: wakePrompt.length,
|
||||
sessionHandoffChars: sessionHandoffNote.length,
|
||||
heartbeatPromptChars: renderedPrompt.length,
|
||||
};
|
||||
|
||||
@@ -20,6 +20,8 @@ import {
|
||||
resolvePaperclipDesiredSkillNames,
|
||||
removeMaintainerOnlySkillSymlinks,
|
||||
renderTemplate,
|
||||
renderPaperclipWakePrompt,
|
||||
stringifyPaperclipWakePayload,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import { isPiUnknownSessionError, parsePiJsonl } from "./parse.js";
|
||||
@@ -177,6 +179,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
const linkedIssueIds = Array.isArray(context.issueIds)
|
||||
? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
|
||||
: [];
|
||||
const wakePayloadJson = stringifyPaperclipWakePayload(context.paperclipWake);
|
||||
|
||||
if (wakeTaskId) env.PAPERCLIP_TASK_ID = wakeTaskId;
|
||||
if (wakeReason) env.PAPERCLIP_WAKE_REASON = wakeReason;
|
||||
@@ -184,6 +187,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
if (approvalId) env.PAPERCLIP_APPROVAL_ID = approvalId;
|
||||
if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
|
||||
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
|
||||
if (wakePayloadJson) env.PAPERCLIP_WAKE_PAYLOAD_JSON = wakePayloadJson;
|
||||
if (workspaceCwd) env.PAPERCLIP_WORKSPACE_CWD = workspaceCwd;
|
||||
if (workspaceSource) env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
|
||||
if (workspaceId) env.PAPERCLIP_WORKSPACE_ID = workspaceId;
|
||||
@@ -298,14 +302,17 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
context,
|
||||
};
|
||||
const renderedSystemPromptExtension = renderTemplate(systemPromptExtension, templateData);
|
||||
const renderedHeartbeatPrompt = renderTemplate(promptTemplate, templateData);
|
||||
const renderedBootstrapPrompt =
|
||||
!canResumeSession && bootstrapPromptTemplate.trim().length > 0
|
||||
? renderTemplate(bootstrapPromptTemplate, templateData).trim()
|
||||
: "";
|
||||
const wakePrompt = renderPaperclipWakePrompt(context.paperclipWake, { resumedSession: canResumeSession });
|
||||
const shouldUseResumeDeltaPrompt = canResumeSession && wakePrompt.length > 0;
|
||||
const renderedHeartbeatPrompt = shouldUseResumeDeltaPrompt ? "" : renderTemplate(promptTemplate, templateData);
|
||||
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
|
||||
const userPrompt = joinPromptSections([
|
||||
renderedBootstrapPrompt,
|
||||
wakePrompt,
|
||||
sessionHandoffNote,
|
||||
renderedHeartbeatPrompt,
|
||||
]);
|
||||
@@ -313,6 +320,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
systemPromptChars: renderedSystemPromptExtension.length,
|
||||
promptChars: userPrompt.length,
|
||||
bootstrapPromptChars: renderedBootstrapPrompt.length,
|
||||
wakePromptChars: wakePrompt.length,
|
||||
sessionHandoffChars: sessionHandoffNote.length,
|
||||
heartbeatPromptChars: renderedHeartbeatPrompt.length,
|
||||
};
|
||||
@@ -443,13 +451,15 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
|
||||
|
||||
const stderrLine = firstNonEmptyLine(attempt.proc.stderr);
|
||||
const rawExitCode = attempt.proc.exitCode;
|
||||
const fallbackErrorMessage = stderrLine || `Pi exited with code ${rawExitCode ?? -1}`;
|
||||
const parsedError = attempt.parsed.errors.find((error) => error.trim().length > 0) ?? "";
|
||||
const effectiveExitCode = (rawExitCode ?? 0) === 0 && parsedError ? 1 : rawExitCode;
|
||||
const fallbackErrorMessage = parsedError || stderrLine || `Pi exited with code ${rawExitCode ?? -1}`;
|
||||
|
||||
return {
|
||||
exitCode: rawExitCode,
|
||||
exitCode: effectiveExitCode,
|
||||
signal: attempt.proc.signal,
|
||||
timedOut: false,
|
||||
errorMessage: (rawExitCode ?? 0) === 0 ? null : fallbackErrorMessage,
|
||||
errorMessage: (effectiveExitCode ?? 0) === 0 ? null : fallbackErrorMessage,
|
||||
usage: {
|
||||
inputTokens: attempt.parsed.usage.inputTokens,
|
||||
outputTokens: attempt.parsed.usage.outputTokens,
|
||||
|
||||
@@ -209,6 +209,57 @@ describe("parsePiJsonl", () => {
|
||||
expect(parsed.usage.cachedInputTokens).toBe(25);
|
||||
expect(parsed.usage.costUsd).toBe(0.003);
|
||||
});
|
||||
|
||||
it("surfaces failed auto-retry exhaustion as an error", () => {
|
||||
const stdout = [
|
||||
JSON.stringify({
|
||||
type: "auto_retry_end",
|
||||
success: false,
|
||||
attempt: 3,
|
||||
finalError: "Cloud Code Assist API error (429): RESOURCE_EXHAUSTED",
|
||||
}),
|
||||
].join("\n");
|
||||
|
||||
const parsed = parsePiJsonl(stdout);
|
||||
expect(parsed.errors).toEqual(["Cloud Code Assist API error (429): RESOURCE_EXHAUSTED"]);
|
||||
});
|
||||
|
||||
it("does not treat successful auto-retry as an error", () => {
|
||||
const stdout = [
|
||||
JSON.stringify({
|
||||
type: "auto_retry_end",
|
||||
success: true,
|
||||
attempt: 2,
|
||||
}),
|
||||
].join("\n");
|
||||
|
||||
const parsed = parsePiJsonl(stdout);
|
||||
expect(parsed.errors).toEqual([]);
|
||||
});
|
||||
|
||||
it("surfaces standalone error events", () => {
|
||||
const stdout = [
|
||||
JSON.stringify({
|
||||
type: "error",
|
||||
message: "Connection to model provider lost",
|
||||
}),
|
||||
].join("\n");
|
||||
|
||||
const parsed = parsePiJsonl(stdout);
|
||||
expect(parsed.errors).toEqual(["Connection to model provider lost"]);
|
||||
});
|
||||
|
||||
it("ignores error events with empty messages", () => {
|
||||
const stdout = [
|
||||
JSON.stringify({
|
||||
type: "error",
|
||||
message: "",
|
||||
}),
|
||||
].join("\n");
|
||||
|
||||
const parsed = parsePiJsonl(stdout);
|
||||
expect(parsed.errors).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isPiUnknownSessionError", () => {
|
||||
|
||||
@@ -76,6 +76,15 @@ export function parsePiJsonl(stdout: string): ParsedPiOutput {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (eventType === "auto_retry_end") {
|
||||
const succeeded = event.success === true;
|
||||
if (!succeeded) {
|
||||
const finalError = asString(event.finalError, "").trim();
|
||||
result.errors.push(finalError || "Pi exhausted automatic retries without producing a response.");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Turn lifecycle
|
||||
if (eventType === "turn_start") {
|
||||
continue;
|
||||
@@ -145,6 +154,14 @@ export function parsePiJsonl(stdout: string): ParsedPiOutput {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (eventType === "error") {
|
||||
const message = asString(event.message, "").trim();
|
||||
if (message) {
|
||||
result.errors.push(message);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Tool execution
|
||||
if (eventType === "tool_execution_start") {
|
||||
const toolCallId = asString(event.toolCallId, "");
|
||||
|
||||
@@ -176,4 +176,49 @@ describeEmbeddedPostgres("runDatabaseBackup", () => {
|
||||
},
|
||||
60_000,
|
||||
);
|
||||
|
||||
it(
|
||||
"restores statements incrementally when backup comments precede the first breakpoint",
|
||||
async () => {
|
||||
const restoreConnectionString = await createTempDatabase();
|
||||
const restoreSql = postgres(restoreConnectionString, { max: 1, onnotice: () => {} });
|
||||
const backupDir = createTempDir("paperclip-db-restore-manual-");
|
||||
const backupFile = path.join(backupDir, "manual.sql");
|
||||
|
||||
try {
|
||||
await fs.promises.writeFile(
|
||||
backupFile,
|
||||
[
|
||||
"-- Paperclip database backup",
|
||||
"-- Created: 2026-04-06T00:00:00.000Z",
|
||||
"",
|
||||
"BEGIN;",
|
||||
"-- paperclip statement breakpoint 69f6f3f1-42fd-46a6-bf17-d1d85f8f3900",
|
||||
"CREATE TABLE public.restore_stream_test (id integer primary key, payload text not null);",
|
||||
"-- paperclip statement breakpoint 69f6f3f1-42fd-46a6-bf17-d1d85f8f3900",
|
||||
"INSERT INTO public.restore_stream_test (id, payload)",
|
||||
"VALUES (1, 'hello');",
|
||||
"-- paperclip statement breakpoint 69f6f3f1-42fd-46a6-bf17-d1d85f8f3900",
|
||||
"COMMIT;",
|
||||
"-- paperclip statement breakpoint 69f6f3f1-42fd-46a6-bf17-d1d85f8f3900",
|
||||
].join("\n"),
|
||||
"utf8",
|
||||
);
|
||||
|
||||
await runDatabaseRestore({
|
||||
connectionString: restoreConnectionString,
|
||||
backupFile,
|
||||
});
|
||||
|
||||
const rows = await restoreSql.unsafe<{ payload: string }[]>(`
|
||||
SELECT payload
|
||||
FROM public.restore_stream_test
|
||||
`);
|
||||
expect(rows).toEqual([{ payload: "hello" }]);
|
||||
} finally {
|
||||
await restoreSql.end();
|
||||
}
|
||||
},
|
||||
20_000,
|
||||
);
|
||||
});
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { createWriteStream, existsSync, mkdirSync, readdirSync, statSync, unlinkSync } from "node:fs";
|
||||
import { readFile } from "node:fs/promises";
|
||||
import { createReadStream, createWriteStream, existsSync, mkdirSync, readdirSync, statSync, unlinkSync } from "node:fs";
|
||||
import { basename, resolve } from "node:path";
|
||||
import { createInterface } from "node:readline";
|
||||
import postgres from "postgres";
|
||||
|
||||
export type RunDatabaseBackupOptions = {
|
||||
@@ -45,6 +45,11 @@ type TableDefinition = {
|
||||
tablename: string;
|
||||
};
|
||||
|
||||
type ExtensionDefinition = {
|
||||
extension_name: string;
|
||||
schema_name: string;
|
||||
};
|
||||
|
||||
const DRIZZLE_SCHEMA = "drizzle";
|
||||
const DRIZZLE_MIGRATIONS_TABLE = "__drizzle_migrations";
|
||||
const DEFAULT_BACKUP_WRITE_BUFFER_BYTES = 1024 * 1024;
|
||||
@@ -142,6 +147,42 @@ function tableKey(schemaName: string, tableName: string): string {
|
||||
return `${schemaName}.${tableName}`;
|
||||
}
|
||||
|
||||
async function* readRestoreStatements(backupFile: string): AsyncGenerator<string> {
|
||||
const stream = createReadStream(backupFile, { encoding: "utf8" });
|
||||
const reader = createInterface({
|
||||
input: stream,
|
||||
crlfDelay: Infinity,
|
||||
});
|
||||
let statementLines: string[] = [];
|
||||
|
||||
const flushStatement = () => {
|
||||
const statement = statementLines.join("\n").trim();
|
||||
statementLines = [];
|
||||
return statement;
|
||||
};
|
||||
|
||||
try {
|
||||
for await (const line of reader) {
|
||||
if (line === STATEMENT_BREAKPOINT) {
|
||||
const statement = flushStatement();
|
||||
if (statement.length > 0) {
|
||||
yield statement;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
statementLines.push(line);
|
||||
}
|
||||
|
||||
const trailingStatement = flushStatement();
|
||||
if (trailingStatement.length > 0) {
|
||||
yield trailingStatement;
|
||||
}
|
||||
} finally {
|
||||
reader.close();
|
||||
stream.destroy();
|
||||
}
|
||||
}
|
||||
|
||||
export function createBufferedTextFileWriter(filePath: string, maxBufferedBytes = DEFAULT_BACKUP_WRITE_BUFFER_BYTES) {
|
||||
const stream = createWriteStream(filePath, { encoding: "utf8" });
|
||||
const flushThreshold = Math.max(1, Math.trunc(maxBufferedBytes));
|
||||
@@ -340,6 +381,25 @@ export async function runDatabaseBackup(opts: RunDatabaseBackupOptions): Promise
|
||||
emit("");
|
||||
}
|
||||
|
||||
const extensions = await sql<ExtensionDefinition[]>`
|
||||
SELECT
|
||||
e.extname AS extension_name,
|
||||
n.nspname AS schema_name
|
||||
FROM pg_extension e
|
||||
JOIN pg_namespace n ON n.oid = e.extnamespace
|
||||
WHERE e.extname <> 'plpgsql'
|
||||
ORDER BY e.extname
|
||||
`;
|
||||
if (extensions.length > 0) {
|
||||
emit("-- Extensions");
|
||||
for (const extension of extensions) {
|
||||
emitStatement(
|
||||
`CREATE EXTENSION IF NOT EXISTS ${quoteIdentifier(extension.extension_name)} WITH SCHEMA ${quoteIdentifier(extension.schema_name)};`,
|
||||
);
|
||||
}
|
||||
emit("");
|
||||
}
|
||||
|
||||
if (sequences.length > 0) {
|
||||
emit("-- Sequences");
|
||||
for (const seq of sequences) {
|
||||
@@ -626,13 +686,7 @@ export async function runDatabaseRestore(opts: RunDatabaseRestoreOptions): Promi
|
||||
|
||||
try {
|
||||
await sql`SELECT 1`;
|
||||
const contents = await readFile(opts.backupFile, "utf8");
|
||||
const statements = contents
|
||||
.split(STATEMENT_BREAKPOINT)
|
||||
.map((statement) => statement.trim())
|
||||
.filter((statement) => statement.length > 0);
|
||||
|
||||
for (const statement of statements) {
|
||||
for await (const statement of readRestoreStatements(opts.backupFile)) {
|
||||
await sql.unsafe(statement).execute();
|
||||
}
|
||||
} catch (error) {
|
||||
|
||||
@@ -401,4 +401,70 @@ describeEmbeddedPostgres("applyPendingMigrations", () => {
|
||||
},
|
||||
20_000,
|
||||
);
|
||||
|
||||
it(
|
||||
"replays migration 0050 safely when projects.env already exists",
|
||||
async () => {
|
||||
const connectionString = await createTempDatabase();
|
||||
|
||||
await applyPendingMigrations(connectionString);
|
||||
|
||||
const sql = postgres(connectionString, { max: 1, onnotice: () => {} });
|
||||
try {
|
||||
const stiffLuckmanHash = await migrationHash("0050_stiff_luckman.sql");
|
||||
|
||||
await sql.unsafe(
|
||||
`DELETE FROM "drizzle"."__drizzle_migrations" WHERE hash = '${stiffLuckmanHash}'`,
|
||||
);
|
||||
|
||||
const columns = await sql.unsafe<{ column_name: string }[]>(
|
||||
`
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_schema = 'public'
|
||||
AND table_name = 'projects'
|
||||
AND column_name = 'env'
|
||||
`,
|
||||
);
|
||||
expect(columns).toHaveLength(1);
|
||||
} finally {
|
||||
await sql.end();
|
||||
}
|
||||
|
||||
const pendingState = await inspectMigrations(connectionString);
|
||||
expect(pendingState).toMatchObject({
|
||||
status: "needsMigrations",
|
||||
pendingMigrations: ["0050_stiff_luckman.sql"],
|
||||
reason: "pending-migrations",
|
||||
});
|
||||
|
||||
await applyPendingMigrations(connectionString);
|
||||
|
||||
const finalState = await inspectMigrations(connectionString);
|
||||
expect(finalState.status).toBe("upToDate");
|
||||
|
||||
const verifySql = postgres(connectionString, { max: 1, onnotice: () => {} });
|
||||
try {
|
||||
const columns = await verifySql.unsafe<{ column_name: string; is_nullable: string; data_type: string }[]>(
|
||||
`
|
||||
SELECT column_name, is_nullable, data_type
|
||||
FROM information_schema.columns
|
||||
WHERE table_schema = 'public'
|
||||
AND table_name = 'projects'
|
||||
AND column_name = 'env'
|
||||
`,
|
||||
);
|
||||
expect(columns).toEqual([
|
||||
expect.objectContaining({
|
||||
column_name: "env",
|
||||
is_nullable: "YES",
|
||||
data_type: "jsonb",
|
||||
}),
|
||||
]);
|
||||
} finally {
|
||||
await verifySql.end();
|
||||
}
|
||||
},
|
||||
20_000,
|
||||
);
|
||||
});
|
||||
|
||||
@@ -29,4 +29,5 @@ export {
|
||||
createEmbeddedPostgresLogBuffer,
|
||||
formatEmbeddedPostgresError,
|
||||
} from "./embedded-postgres-error.js";
|
||||
export { issueRelations } from "./schema/issue_relations.js";
|
||||
export * from "./schema/index.js";
|
||||
|
||||
21
packages/db/src/migrations/0049_flawless_abomination.sql
Normal file
21
packages/db/src/migrations/0049_flawless_abomination.sql
Normal file
@@ -0,0 +1,21 @@
|
||||
CREATE TABLE "issue_relations" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"company_id" uuid NOT NULL,
|
||||
"issue_id" uuid NOT NULL,
|
||||
"related_issue_id" uuid NOT NULL,
|
||||
"type" text NOT NULL,
|
||||
"created_by_agent_id" uuid,
|
||||
"created_by_user_id" text,
|
||||
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
|
||||
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "issue_relations" ADD CONSTRAINT "issue_relations_type_check" CHECK ("type" IN ('blocks'));--> statement-breakpoint
|
||||
ALTER TABLE "issue_relations" ADD CONSTRAINT "issue_relations_company_id_companies_id_fk" FOREIGN KEY ("company_id") REFERENCES "public"."companies"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "issue_relations" ADD CONSTRAINT "issue_relations_issue_id_issues_id_fk" FOREIGN KEY ("issue_id") REFERENCES "public"."issues"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "issue_relations" ADD CONSTRAINT "issue_relations_related_issue_id_issues_id_fk" FOREIGN KEY ("related_issue_id") REFERENCES "public"."issues"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "issue_relations" ADD CONSTRAINT "issue_relations_created_by_agent_id_agents_id_fk" FOREIGN KEY ("created_by_agent_id") REFERENCES "public"."agents"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
|
||||
CREATE INDEX "issue_relations_company_issue_idx" ON "issue_relations" USING btree ("company_id","issue_id");--> statement-breakpoint
|
||||
CREATE INDEX "issue_relations_company_related_issue_idx" ON "issue_relations" USING btree ("company_id","related_issue_id");--> statement-breakpoint
|
||||
CREATE INDEX "issue_relations_company_type_idx" ON "issue_relations" USING btree ("company_id","type");--> statement-breakpoint
|
||||
CREATE UNIQUE INDEX "issue_relations_company_edge_uq" ON "issue_relations" USING btree ("company_id","issue_id","related_issue_id","type");
|
||||
1
packages/db/src/migrations/0050_stiff_luckman.sql
Normal file
1
packages/db/src/migrations/0050_stiff_luckman.sql
Normal file
@@ -0,0 +1 @@
|
||||
ALTER TABLE "projects" ADD COLUMN IF NOT EXISTS "env" jsonb;
|
||||
5
packages/db/src/migrations/0051_young_korg.sql
Normal file
5
packages/db/src/migrations/0051_young_korg.sql
Normal file
@@ -0,0 +1,5 @@
|
||||
CREATE EXTENSION IF NOT EXISTS pg_trgm;--> statement-breakpoint
|
||||
CREATE INDEX "issue_comments_body_search_idx" ON "issue_comments" USING gin ("body" gin_trgm_ops);--> statement-breakpoint
|
||||
CREATE INDEX "issues_title_search_idx" ON "issues" USING gin ("title" gin_trgm_ops);--> statement-breakpoint
|
||||
CREATE INDEX "issues_identifier_search_idx" ON "issues" USING gin ("identifier" gin_trgm_ops);--> statement-breakpoint
|
||||
CREATE INDEX "issues_description_search_idx" ON "issues" USING gin ("description" gin_trgm_ops);
|
||||
12766
packages/db/src/migrations/meta/0049_snapshot.json
Normal file
12766
packages/db/src/migrations/meta/0049_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
12772
packages/db/src/migrations/meta/0050_snapshot.json
Normal file
12772
packages/db/src/migrations/meta/0050_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
12836
packages/db/src/migrations/meta/0051_snapshot.json
Normal file
12836
packages/db/src/migrations/meta/0051_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -344,6 +344,27 @@
|
||||
"when": 1775145655557,
|
||||
"tag": "0048_flashy_marrow",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 49,
|
||||
"version": "7",
|
||||
"when": 1775349863293,
|
||||
"tag": "0049_flawless_abomination",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 50,
|
||||
"version": "7",
|
||||
"when": 1775487782768,
|
||||
"tag": "0050_stiff_luckman",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 51,
|
||||
"version": "7",
|
||||
"when": 1775524651831,
|
||||
"tag": "0051_young_korg",
|
||||
"breakpoints": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ export { workspaceRuntimeServices } from "./workspace_runtime_services.js";
|
||||
export { projectGoals } from "./project_goals.js";
|
||||
export { goals } from "./goals.js";
|
||||
export { issues } from "./issues.js";
|
||||
export { issueRelations } from "./issue_relations.js";
|
||||
export { routines, routineTriggers, routineRuns } from "./routines.js";
|
||||
export { issueWorkProducts } from "./issue_work_products.js";
|
||||
export { labels } from "./labels.js";
|
||||
|
||||
@@ -31,5 +31,6 @@ export const issueComments = pgTable(
|
||||
table.issueId,
|
||||
table.createdAt,
|
||||
),
|
||||
bodySearchIdx: index("issue_comments_body_search_idx").using("gin", table.body.op("gin_trgm_ops")),
|
||||
}),
|
||||
);
|
||||
|
||||
30
packages/db/src/schema/issue_relations.ts
Normal file
30
packages/db/src/schema/issue_relations.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import { index, pgTable, text, timestamp, uniqueIndex, uuid } from "drizzle-orm/pg-core";
|
||||
import { agents } from "./agents.js";
|
||||
import { companies } from "./companies.js";
|
||||
import { issues } from "./issues.js";
|
||||
|
||||
export const issueRelations = pgTable(
|
||||
"issue_relations",
|
||||
{
|
||||
id: uuid("id").primaryKey().defaultRandom(),
|
||||
companyId: uuid("company_id").notNull().references(() => companies.id),
|
||||
issueId: uuid("issue_id").notNull().references(() => issues.id, { onDelete: "cascade" }),
|
||||
relatedIssueId: uuid("related_issue_id").notNull().references(() => issues.id, { onDelete: "cascade" }),
|
||||
type: text("type").$type<"blocks">().notNull(),
|
||||
createdByAgentId: uuid("created_by_agent_id").references(() => agents.id, { onDelete: "set null" }),
|
||||
createdByUserId: text("created_by_user_id"),
|
||||
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
|
||||
updatedAt: timestamp("updated_at", { withTimezone: true }).notNull().defaultNow(),
|
||||
},
|
||||
(table) => ({
|
||||
companyIssueIdx: index("issue_relations_company_issue_idx").on(table.companyId, table.issueId),
|
||||
companyRelatedIssueIdx: index("issue_relations_company_related_issue_idx").on(table.companyId, table.relatedIssueId),
|
||||
companyTypeIdx: index("issue_relations_company_type_idx").on(table.companyId, table.type),
|
||||
companyEdgeUq: uniqueIndex("issue_relations_company_edge_uq").on(
|
||||
table.companyId,
|
||||
table.issueId,
|
||||
table.relatedIssueId,
|
||||
table.type,
|
||||
),
|
||||
}),
|
||||
);
|
||||
@@ -76,6 +76,9 @@ export const issues = pgTable(
|
||||
projectWorkspaceIdx: index("issues_company_project_workspace_idx").on(table.companyId, table.projectWorkspaceId),
|
||||
executionWorkspaceIdx: index("issues_company_execution_workspace_idx").on(table.companyId, table.executionWorkspaceId),
|
||||
identifierIdx: uniqueIndex("issues_identifier_idx").on(table.identifier),
|
||||
titleSearchIdx: index("issues_title_search_idx").using("gin", table.title.op("gin_trgm_ops")),
|
||||
identifierSearchIdx: index("issues_identifier_search_idx").using("gin", table.identifier.op("gin_trgm_ops")),
|
||||
descriptionSearchIdx: index("issues_description_search_idx").using("gin", table.description.op("gin_trgm_ops")),
|
||||
openRoutineExecutionIdx: uniqueIndex("issues_open_routine_execution_uq")
|
||||
.on(table.companyId, table.originKind, table.originId)
|
||||
.where(
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { pgTable, uuid, text, timestamp, date, index, jsonb } from "drizzle-orm/pg-core";
|
||||
import type { AgentEnvConfig } from "@paperclipai/shared";
|
||||
import { companies } from "./companies.js";
|
||||
import { goals } from "./goals.js";
|
||||
import { agents } from "./agents.js";
|
||||
@@ -15,6 +16,7 @@ export const projects = pgTable(
|
||||
leadAgentId: uuid("lead_agent_id").references(() => agents.id),
|
||||
targetDate: date("target_date"),
|
||||
color: text("color"),
|
||||
env: jsonb("env").$type<AgentEnvConfig>(),
|
||||
pauseReason: text("pause_reason"),
|
||||
pausedAt: timestamp("paused_at", { withTimezone: true }),
|
||||
executionWorkspacePolicy: jsonb("execution_workspace_policy").$type<Record<string, unknown>>(),
|
||||
|
||||
77
packages/mcp-server/README.md
Normal file
77
packages/mcp-server/README.md
Normal file
@@ -0,0 +1,77 @@
|
||||
# Paperclip MCP Server
|
||||
|
||||
Model Context Protocol server for Paperclip.
|
||||
|
||||
This package is a thin MCP wrapper over the existing Paperclip REST API. It does
|
||||
not talk to the database directly and it does not reimplement business logic.
|
||||
|
||||
## Authentication
|
||||
|
||||
The server reads its configuration from environment variables:
|
||||
|
||||
- `PAPERCLIP_API_URL` - Paperclip base URL, for example `http://localhost:3100`
|
||||
- `PAPERCLIP_API_KEY` - bearer token used for `/api` requests
|
||||
- `PAPERCLIP_COMPANY_ID` - optional default company for company-scoped tools
|
||||
- `PAPERCLIP_AGENT_ID` - optional default agent for checkout helpers
|
||||
- `PAPERCLIP_RUN_ID` - optional run id forwarded on mutating requests
|
||||
|
||||
## Usage
|
||||
|
||||
```sh
|
||||
npx -y @paperclipai/mcp-server
|
||||
```
|
||||
|
||||
Or locally in this repo:
|
||||
|
||||
```sh
|
||||
pnpm --filter @paperclipai/mcp-server build
|
||||
node packages/mcp-server/dist/stdio.js
|
||||
```
|
||||
|
||||
## Tool Surface
|
||||
|
||||
Read tools:
|
||||
|
||||
- `paperclipMe`
|
||||
- `paperclipInboxLite`
|
||||
- `paperclipListAgents`
|
||||
- `paperclipGetAgent`
|
||||
- `paperclipListIssues`
|
||||
- `paperclipGetIssue`
|
||||
- `paperclipGetHeartbeatContext`
|
||||
- `paperclipListComments`
|
||||
- `paperclipGetComment`
|
||||
- `paperclipListIssueApprovals`
|
||||
- `paperclipListDocuments`
|
||||
- `paperclipGetDocument`
|
||||
- `paperclipListDocumentRevisions`
|
||||
- `paperclipListProjects`
|
||||
- `paperclipGetProject`
|
||||
- `paperclipListGoals`
|
||||
- `paperclipGetGoal`
|
||||
- `paperclipListApprovals`
|
||||
- `paperclipGetApproval`
|
||||
- `paperclipGetApprovalIssues`
|
||||
- `paperclipListApprovalComments`
|
||||
|
||||
Write tools:
|
||||
|
||||
- `paperclipCreateIssue`
|
||||
- `paperclipUpdateIssue`
|
||||
- `paperclipCheckoutIssue`
|
||||
- `paperclipReleaseIssue`
|
||||
- `paperclipAddComment`
|
||||
- `paperclipUpsertIssueDocument`
|
||||
- `paperclipRestoreIssueDocumentRevision`
|
||||
- `paperclipCreateApproval`
|
||||
- `paperclipLinkIssueApproval`
|
||||
- `paperclipUnlinkIssueApproval`
|
||||
- `paperclipApprovalDecision`
|
||||
- `paperclipAddApprovalComment`
|
||||
|
||||
Escape hatch:
|
||||
|
||||
- `paperclipApiRequest`
|
||||
|
||||
`paperclipApiRequest` is limited to paths under `/api` and JSON bodies. It is
|
||||
meant for endpoints that do not yet have a dedicated MCP tool.
|
||||
55
packages/mcp-server/package.json
Normal file
55
packages/mcp-server/package.json
Normal file
@@ -0,0 +1,55 @@
|
||||
{
|
||||
"name": "@paperclipai/mcp-server",
|
||||
"version": "0.1.0",
|
||||
"license": "MIT",
|
||||
"homepage": "https://github.com/paperclipai/paperclip",
|
||||
"bugs": {
|
||||
"url": "https://github.com/paperclipai/paperclip/issues"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/paperclipai/paperclip",
|
||||
"directory": "packages/mcp-server"
|
||||
},
|
||||
"type": "module",
|
||||
"bin": {
|
||||
"paperclip-mcp-server": "./dist/stdio.js"
|
||||
},
|
||||
"exports": {
|
||||
".": "./src/index.ts"
|
||||
},
|
||||
"publishConfig": {
|
||||
"access": "public",
|
||||
"bin": {
|
||||
"paperclip-mcp-server": "./dist/stdio.js"
|
||||
},
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.js"
|
||||
}
|
||||
},
|
||||
"main": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"README.md"
|
||||
],
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rm -rf dist",
|
||||
"typecheck": "tsc --noEmit",
|
||||
"test": "vitest run"
|
||||
},
|
||||
"dependencies": {
|
||||
"@modelcontextprotocol/sdk": "^1.29.0",
|
||||
"@paperclipai/shared": "workspace:*",
|
||||
"zod": "^3.24.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.6.0",
|
||||
"typescript": "^5.7.3",
|
||||
"vitest": "^3.0.5"
|
||||
}
|
||||
}
|
||||
114
packages/mcp-server/src/client.ts
Normal file
114
packages/mcp-server/src/client.ts
Normal file
@@ -0,0 +1,114 @@
|
||||
import type { PaperclipMcpConfig } from "./config.js";
|
||||
|
||||
export class PaperclipApiError extends Error {
|
||||
readonly status: number;
|
||||
readonly method: string;
|
||||
readonly path: string;
|
||||
readonly body: unknown;
|
||||
|
||||
constructor(input: {
|
||||
status: number;
|
||||
method: string;
|
||||
path: string;
|
||||
body: unknown;
|
||||
message: string;
|
||||
}) {
|
||||
super(input.message);
|
||||
this.name = "PaperclipApiError";
|
||||
this.status = input.status;
|
||||
this.method = input.method;
|
||||
this.path = input.path;
|
||||
this.body = input.body;
|
||||
}
|
||||
}
|
||||
|
||||
export interface JsonRequestOptions {
|
||||
body?: unknown;
|
||||
includeRunId?: boolean;
|
||||
}
|
||||
|
||||
function isWriteMethod(method: string): boolean {
|
||||
return !["GET", "HEAD"].includes(method.toUpperCase());
|
||||
}
|
||||
|
||||
function buildErrorMessage(method: string, path: string, status: number, body: unknown): string {
|
||||
if (body && typeof body === "object" && "error" in body && typeof body.error === "string") {
|
||||
return `${method} ${path} failed with ${status}: ${body.error}`;
|
||||
}
|
||||
return `${method} ${path} failed with ${status}`;
|
||||
}
|
||||
|
||||
async function parseResponseBody(response: Response): Promise<unknown> {
|
||||
const text = await response.text();
|
||||
if (!text) return null;
|
||||
try {
|
||||
return JSON.parse(text) as unknown;
|
||||
} catch {
|
||||
return text;
|
||||
}
|
||||
}
|
||||
|
||||
export class PaperclipApiClient {
|
||||
constructor(private readonly config: PaperclipMcpConfig) {}
|
||||
|
||||
get defaults() {
|
||||
return {
|
||||
companyId: this.config.companyId,
|
||||
agentId: this.config.agentId,
|
||||
runId: this.config.runId,
|
||||
};
|
||||
}
|
||||
|
||||
resolveCompanyId(companyId?: string | null): string {
|
||||
const resolved = companyId?.trim() || this.config.companyId;
|
||||
if (!resolved) {
|
||||
throw new Error("companyId is required because PAPERCLIP_COMPANY_ID is not set");
|
||||
}
|
||||
return resolved;
|
||||
}
|
||||
|
||||
resolveAgentId(agentId?: string | null): string {
|
||||
const resolved = agentId?.trim() || this.config.agentId;
|
||||
if (!resolved) {
|
||||
throw new Error("agentId is required because PAPERCLIP_AGENT_ID is not set");
|
||||
}
|
||||
return resolved;
|
||||
}
|
||||
|
||||
async requestJson<T>(method: string, path: string, options: JsonRequestOptions = {}): Promise<T> {
|
||||
if (!path.startsWith("/")) {
|
||||
throw new Error(`API path must start with "/": ${path}`);
|
||||
}
|
||||
|
||||
const url = new URL(path.slice(1), `${this.config.apiUrl}/`);
|
||||
const headers: Record<string, string> = {
|
||||
Authorization: `Bearer ${this.config.apiKey}`,
|
||||
Accept: "application/json",
|
||||
};
|
||||
if (options.body !== undefined) {
|
||||
headers["Content-Type"] = "application/json";
|
||||
}
|
||||
if ((options.includeRunId ?? isWriteMethod(method)) && this.config.runId) {
|
||||
headers["X-Paperclip-Run-Id"] = this.config.runId;
|
||||
}
|
||||
|
||||
const response = await fetch(url, {
|
||||
method,
|
||||
headers,
|
||||
body: options.body === undefined ? undefined : JSON.stringify(options.body),
|
||||
});
|
||||
const parsedBody = await parseResponseBody(response);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new PaperclipApiError({
|
||||
status: response.status,
|
||||
method: method.toUpperCase(),
|
||||
path,
|
||||
body: parsedBody,
|
||||
message: buildErrorMessage(method.toUpperCase(), path, response.status, parsedBody),
|
||||
});
|
||||
}
|
||||
|
||||
return parsedBody as T;
|
||||
}
|
||||
}
|
||||
39
packages/mcp-server/src/config.ts
Normal file
39
packages/mcp-server/src/config.ts
Normal file
@@ -0,0 +1,39 @@
|
||||
export interface PaperclipMcpConfig {
|
||||
apiUrl: string;
|
||||
apiKey: string;
|
||||
companyId: string | null;
|
||||
agentId: string | null;
|
||||
runId: string | null;
|
||||
}
|
||||
|
||||
function nonEmpty(value: string | undefined): string | null {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
function stripTrailingSlash(value: string): string {
|
||||
return value.replace(/\/+$/, "");
|
||||
}
|
||||
|
||||
export function normalizeApiUrl(apiUrl: string): string {
|
||||
const trimmed = stripTrailingSlash(apiUrl.trim());
|
||||
return trimmed.endsWith("/api") ? trimmed : `${trimmed}/api`;
|
||||
}
|
||||
|
||||
export function readConfigFromEnv(env: NodeJS.ProcessEnv = process.env): PaperclipMcpConfig {
|
||||
const apiUrl = nonEmpty(env.PAPERCLIP_API_URL);
|
||||
if (!apiUrl) {
|
||||
throw new Error("Missing PAPERCLIP_API_URL");
|
||||
}
|
||||
const apiKey = nonEmpty(env.PAPERCLIP_API_KEY);
|
||||
if (!apiKey) {
|
||||
throw new Error("Missing PAPERCLIP_API_KEY");
|
||||
}
|
||||
|
||||
return {
|
||||
apiUrl: normalizeApiUrl(apiUrl),
|
||||
apiKey,
|
||||
companyId: nonEmpty(env.PAPERCLIP_COMPANY_ID),
|
||||
agentId: nonEmpty(env.PAPERCLIP_AGENT_ID),
|
||||
runId: nonEmpty(env.PAPERCLIP_RUN_ID),
|
||||
};
|
||||
}
|
||||
31
packages/mcp-server/src/format.ts
Normal file
31
packages/mcp-server/src/format.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
import { PaperclipApiError } from "./client.js";
|
||||
|
||||
type McpTextResponse = {
|
||||
content: Array<{ type: "text"; text: string }>;
|
||||
};
|
||||
|
||||
export function formatTextResponse(value: unknown): McpTextResponse {
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: "text",
|
||||
text: typeof value === "string" ? value : JSON.stringify(value, null, 2),
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function formatErrorResponse(error: unknown): McpTextResponse {
|
||||
if (error instanceof PaperclipApiError) {
|
||||
return formatTextResponse({
|
||||
error: error.message,
|
||||
status: error.status,
|
||||
method: error.method,
|
||||
path: error.path,
|
||||
body: error.body,
|
||||
});
|
||||
}
|
||||
return formatTextResponse({
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
}
|
||||
30
packages/mcp-server/src/index.ts
Normal file
30
packages/mcp-server/src/index.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
||||
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
||||
import { PaperclipApiClient } from "./client.js";
|
||||
import { readConfigFromEnv, type PaperclipMcpConfig } from "./config.js";
|
||||
import { createToolDefinitions } from "./tools.js";
|
||||
|
||||
export function createPaperclipMcpServer(config: PaperclipMcpConfig = readConfigFromEnv()) {
|
||||
const server = new McpServer({
|
||||
name: "paperclip",
|
||||
version: "0.1.0",
|
||||
});
|
||||
|
||||
const client = new PaperclipApiClient(config);
|
||||
const tools = createToolDefinitions(client);
|
||||
for (const tool of tools) {
|
||||
server.tool(tool.name, tool.description, tool.schema.shape, tool.execute);
|
||||
}
|
||||
|
||||
return {
|
||||
server,
|
||||
tools,
|
||||
client,
|
||||
};
|
||||
}
|
||||
|
||||
export async function runServer(config: PaperclipMcpConfig = readConfigFromEnv()) {
|
||||
const { server } = createPaperclipMcpServer(config);
|
||||
const transport = new StdioServerTransport();
|
||||
await server.connect(transport);
|
||||
}
|
||||
7
packages/mcp-server/src/stdio.ts
Normal file
7
packages/mcp-server/src/stdio.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env node
|
||||
import { runServer } from "./index.js";
|
||||
|
||||
void runServer().catch((error) => {
|
||||
console.error("Failed to start Paperclip MCP server:", error);
|
||||
process.exit(1);
|
||||
});
|
||||
159
packages/mcp-server/src/tools.test.ts
Normal file
159
packages/mcp-server/src/tools.test.ts
Normal file
@@ -0,0 +1,159 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { PaperclipApiClient } from "./client.js";
|
||||
import { createToolDefinitions } from "./tools.js";
|
||||
|
||||
function makeClient() {
|
||||
return new PaperclipApiClient({
|
||||
apiUrl: "http://localhost:3100/api",
|
||||
apiKey: "token-123",
|
||||
companyId: "11111111-1111-1111-1111-111111111111",
|
||||
agentId: "22222222-2222-2222-2222-222222222222",
|
||||
runId: "33333333-3333-3333-3333-333333333333",
|
||||
});
|
||||
}
|
||||
|
||||
function getTool(name: string) {
|
||||
const tool = createToolDefinitions(makeClient()).find((candidate) => candidate.name === name);
|
||||
if (!tool) throw new Error(`Missing tool ${name}`);
|
||||
return tool;
|
||||
}
|
||||
|
||||
function mockJsonResponse(body: unknown, status = 200) {
|
||||
return new Response(JSON.stringify(body), {
|
||||
status,
|
||||
headers: { "Content-Type": "application/json" },
|
||||
});
|
||||
}
|
||||
|
||||
describe("paperclip MCP tools", () => {
|
||||
beforeEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it("adds auth headers and run id to mutating requests", async () => {
|
||||
const fetchMock = vi.fn().mockResolvedValue(
|
||||
mockJsonResponse({ ok: true }),
|
||||
);
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const tool = getTool("paperclipUpdateIssue");
|
||||
await tool.execute({
|
||||
issueId: "PAP-1135",
|
||||
status: "done",
|
||||
});
|
||||
|
||||
expect(fetchMock).toHaveBeenCalledTimes(1);
|
||||
const [url, init] = fetchMock.mock.calls[0] as [string, RequestInit];
|
||||
expect(String(url)).toBe("http://localhost:3100/api/issues/PAP-1135");
|
||||
expect(init.method).toBe("PATCH");
|
||||
expect((init.headers as Record<string, string>)["Authorization"]).toBe("Bearer token-123");
|
||||
expect((init.headers as Record<string, string>)["X-Paperclip-Run-Id"]).toBe(
|
||||
"33333333-3333-3333-3333-333333333333",
|
||||
);
|
||||
});
|
||||
|
||||
it("uses default company id for company-scoped list tools", async () => {
|
||||
const fetchMock = vi.fn().mockResolvedValue(
|
||||
mockJsonResponse([{ id: "issue-1" }]),
|
||||
);
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const tool = getTool("paperclipListIssues");
|
||||
const response = await tool.execute({});
|
||||
|
||||
expect(fetchMock).toHaveBeenCalledTimes(1);
|
||||
const [url] = fetchMock.mock.calls[0] as [string];
|
||||
expect(String(url)).toBe(
|
||||
"http://localhost:3100/api/companies/11111111-1111-1111-1111-111111111111/issues",
|
||||
);
|
||||
expect(response.content[0]?.text).toContain("issue-1");
|
||||
});
|
||||
|
||||
it("uses default agent id for checkout requests", async () => {
|
||||
const fetchMock = vi.fn().mockResolvedValue(
|
||||
mockJsonResponse({ id: "PAP-1135", status: "in_progress" }),
|
||||
);
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const tool = getTool("paperclipCheckoutIssue");
|
||||
await tool.execute({
|
||||
issueId: "PAP-1135",
|
||||
});
|
||||
|
||||
const [, init] = fetchMock.mock.calls[0] as [string, RequestInit];
|
||||
expect(JSON.parse(String(init.body))).toEqual({
|
||||
agentId: "22222222-2222-2222-2222-222222222222",
|
||||
expectedStatuses: ["todo", "backlog", "blocked"],
|
||||
});
|
||||
});
|
||||
|
||||
it("defaults issue document format to markdown", async () => {
|
||||
const fetchMock = vi.fn().mockResolvedValue(
|
||||
mockJsonResponse({ key: "plan", latestRevisionNumber: 2 }),
|
||||
);
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const tool = getTool("paperclipUpsertIssueDocument");
|
||||
await tool.execute({
|
||||
issueId: "PAP-1135",
|
||||
key: "plan",
|
||||
body: "# Updated",
|
||||
});
|
||||
|
||||
const [, init] = fetchMock.mock.calls[0] as [string, RequestInit];
|
||||
expect(JSON.parse(String(init.body))).toEqual({
|
||||
format: "markdown",
|
||||
body: "# Updated",
|
||||
});
|
||||
});
|
||||
|
||||
it("creates approvals with the expected company-scoped payload", async () => {
|
||||
const fetchMock = vi.fn().mockResolvedValue(
|
||||
mockJsonResponse({ id: "approval-1" }),
|
||||
);
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const tool = getTool("paperclipCreateApproval");
|
||||
await tool.execute({
|
||||
type: "hire_agent",
|
||||
payload: { branch: "pap-1167" },
|
||||
issueIds: ["44444444-4444-4444-4444-444444444444"],
|
||||
});
|
||||
|
||||
expect(fetchMock).toHaveBeenCalledTimes(1);
|
||||
const [url, init] = fetchMock.mock.calls[0] as [string, RequestInit];
|
||||
expect(String(url)).toBe(
|
||||
"http://localhost:3100/api/companies/11111111-1111-1111-1111-111111111111/approvals",
|
||||
);
|
||||
expect(init.method).toBe("POST");
|
||||
expect(JSON.parse(String(init.body))).toEqual({
|
||||
type: "hire_agent",
|
||||
payload: { branch: "pap-1167" },
|
||||
issueIds: ["44444444-4444-4444-4444-444444444444"],
|
||||
});
|
||||
});
|
||||
|
||||
it("rejects invalid generic request paths", async () => {
|
||||
vi.stubGlobal("fetch", vi.fn());
|
||||
|
||||
const tool = getTool("paperclipApiRequest");
|
||||
const response = await tool.execute({
|
||||
method: "GET",
|
||||
path: "issues",
|
||||
});
|
||||
|
||||
expect(response.content[0]?.text).toContain("path must start with /");
|
||||
});
|
||||
|
||||
it("rejects generic request paths that escape /api", async () => {
|
||||
vi.stubGlobal("fetch", vi.fn());
|
||||
|
||||
const tool = getTool("paperclipApiRequest");
|
||||
const response = await tool.execute({
|
||||
method: "GET",
|
||||
path: "/../../secret",
|
||||
});
|
||||
|
||||
expect(response.content[0]?.text).toContain("must not contain '..'");
|
||||
});
|
||||
});
|
||||
427
packages/mcp-server/src/tools.ts
Normal file
427
packages/mcp-server/src/tools.ts
Normal file
@@ -0,0 +1,427 @@
|
||||
import { z } from "zod";
|
||||
import {
|
||||
addIssueCommentSchema,
|
||||
checkoutIssueSchema,
|
||||
createApprovalSchema,
|
||||
createIssueSchema,
|
||||
updateIssueSchema,
|
||||
upsertIssueDocumentSchema,
|
||||
linkIssueApprovalSchema,
|
||||
} from "@paperclipai/shared";
|
||||
import { PaperclipApiClient } from "./client.js";
|
||||
import { formatErrorResponse, formatTextResponse } from "./format.js";
|
||||
|
||||
export interface ToolDefinition {
|
||||
name: string;
|
||||
description: string;
|
||||
schema: z.AnyZodObject;
|
||||
execute: (input: Record<string, unknown>) => Promise<{
|
||||
content: Array<{ type: "text"; text: string }>;
|
||||
}>;
|
||||
}
|
||||
|
||||
function makeTool<TSchema extends z.ZodRawShape>(
|
||||
name: string,
|
||||
description: string,
|
||||
schema: z.ZodObject<TSchema>,
|
||||
execute: (input: z.infer<typeof schema>) => Promise<unknown>,
|
||||
): ToolDefinition {
|
||||
return {
|
||||
name,
|
||||
description,
|
||||
schema,
|
||||
execute: async (input) => {
|
||||
try {
|
||||
const parsed = schema.parse(input);
|
||||
return formatTextResponse(await execute(parsed));
|
||||
} catch (error) {
|
||||
return formatErrorResponse(error);
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function parseOptionalJson(raw: string | undefined | null): unknown {
|
||||
if (!raw || raw.trim().length === 0) return undefined;
|
||||
return JSON.parse(raw);
|
||||
}
|
||||
|
||||
const companyIdOptional = z.string().uuid().optional().nullable();
|
||||
const agentIdOptional = z.string().uuid().optional().nullable();
|
||||
const issueIdSchema = z.string().min(1);
|
||||
const projectIdSchema = z.string().min(1);
|
||||
const goalIdSchema = z.string().uuid();
|
||||
const approvalIdSchema = z.string().uuid();
|
||||
const documentKeySchema = z.string().trim().min(1).max(64);
|
||||
|
||||
const listIssuesSchema = z.object({
|
||||
companyId: companyIdOptional,
|
||||
status: z.string().optional(),
|
||||
projectId: z.string().uuid().optional(),
|
||||
assigneeAgentId: z.string().uuid().optional(),
|
||||
participantAgentId: z.string().uuid().optional(),
|
||||
assigneeUserId: z.string().optional(),
|
||||
touchedByUserId: z.string().optional(),
|
||||
inboxArchivedByUserId: z.string().optional(),
|
||||
unreadForUserId: z.string().optional(),
|
||||
labelId: z.string().uuid().optional(),
|
||||
executionWorkspaceId: z.string().uuid().optional(),
|
||||
originKind: z.string().optional(),
|
||||
originId: z.string().optional(),
|
||||
includeRoutineExecutions: z.boolean().optional(),
|
||||
q: z.string().optional(),
|
||||
});
|
||||
|
||||
const listCommentsSchema = z.object({
|
||||
issueId: issueIdSchema,
|
||||
after: z.string().uuid().optional(),
|
||||
order: z.enum(["asc", "desc"]).optional(),
|
||||
limit: z.number().int().positive().max(500).optional(),
|
||||
});
|
||||
|
||||
const upsertDocumentToolSchema = z.object({
|
||||
issueId: issueIdSchema,
|
||||
key: documentKeySchema,
|
||||
title: z.string().trim().max(200).nullable().optional(),
|
||||
format: z.enum(["markdown"]).default("markdown"),
|
||||
body: z.string().max(524288),
|
||||
changeSummary: z.string().trim().max(500).nullable().optional(),
|
||||
baseRevisionId: z.string().uuid().nullable().optional(),
|
||||
});
|
||||
|
||||
const createIssueToolSchema = z.object({
|
||||
companyId: companyIdOptional,
|
||||
}).merge(createIssueSchema);
|
||||
|
||||
const updateIssueToolSchema = z.object({
|
||||
issueId: issueIdSchema,
|
||||
}).merge(updateIssueSchema);
|
||||
|
||||
const checkoutIssueToolSchema = z.object({
|
||||
issueId: issueIdSchema,
|
||||
agentId: agentIdOptional,
|
||||
expectedStatuses: checkoutIssueSchema.shape.expectedStatuses.optional(),
|
||||
});
|
||||
|
||||
const addCommentToolSchema = z.object({
|
||||
issueId: issueIdSchema,
|
||||
}).merge(addIssueCommentSchema);
|
||||
|
||||
const approvalDecisionSchema = z.object({
|
||||
approvalId: approvalIdSchema,
|
||||
action: z.enum(["approve", "reject", "requestRevision", "resubmit"]),
|
||||
decisionNote: z.string().optional(),
|
||||
payloadJson: z.string().optional(),
|
||||
});
|
||||
|
||||
const createApprovalToolSchema = z.object({
|
||||
companyId: companyIdOptional,
|
||||
}).merge(createApprovalSchema);
|
||||
|
||||
const apiRequestSchema = z.object({
|
||||
method: z.enum(["GET", "POST", "PUT", "PATCH", "DELETE"]),
|
||||
path: z.string().min(1),
|
||||
jsonBody: z.string().optional(),
|
||||
});
|
||||
|
||||
export function createToolDefinitions(client: PaperclipApiClient): ToolDefinition[] {
|
||||
return [
|
||||
makeTool(
|
||||
"paperclipMe",
|
||||
"Get the current authenticated Paperclip actor details",
|
||||
z.object({}),
|
||||
async () => client.requestJson("GET", "/agents/me"),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipInboxLite",
|
||||
"Get the current authenticated agent inbox-lite assignment list",
|
||||
z.object({}),
|
||||
async () => client.requestJson("GET", "/agents/me/inbox-lite"),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListAgents",
|
||||
"List agents in a company",
|
||||
z.object({ companyId: companyIdOptional }),
|
||||
async ({ companyId }) => client.requestJson("GET", `/companies/${client.resolveCompanyId(companyId)}/agents`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetAgent",
|
||||
"Get a single agent by id",
|
||||
z.object({ agentId: z.string().min(1), companyId: companyIdOptional }),
|
||||
async ({ agentId, companyId }) => {
|
||||
const qs = companyId ? `?companyId=${encodeURIComponent(companyId)}` : "";
|
||||
return client.requestJson("GET", `/agents/${encodeURIComponent(agentId)}${qs}`);
|
||||
},
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListIssues",
|
||||
"List issues for a company with optional filters",
|
||||
listIssuesSchema,
|
||||
async (input) => {
|
||||
const companyId = client.resolveCompanyId(input.companyId);
|
||||
const params = new URLSearchParams();
|
||||
for (const [key, value] of Object.entries(input)) {
|
||||
if (key === "companyId" || value === undefined || value === null) continue;
|
||||
params.set(key, String(value));
|
||||
}
|
||||
const qs = params.toString();
|
||||
return client.requestJson("GET", `/companies/${companyId}/issues${qs ? `?${qs}` : ""}`);
|
||||
},
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetIssue",
|
||||
"Get a single issue by UUID or identifier",
|
||||
z.object({ issueId: issueIdSchema }),
|
||||
async ({ issueId }) => client.requestJson("GET", `/issues/${encodeURIComponent(issueId)}`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetHeartbeatContext",
|
||||
"Get compact heartbeat context for an issue",
|
||||
z.object({ issueId: issueIdSchema, wakeCommentId: z.string().uuid().optional() }),
|
||||
async ({ issueId, wakeCommentId }) => {
|
||||
const qs = wakeCommentId ? `?wakeCommentId=${encodeURIComponent(wakeCommentId)}` : "";
|
||||
return client.requestJson("GET", `/issues/${encodeURIComponent(issueId)}/heartbeat-context${qs}`);
|
||||
},
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListComments",
|
||||
"List issue comments with incremental options",
|
||||
listCommentsSchema,
|
||||
async ({ issueId, after, order, limit }) => {
|
||||
const params = new URLSearchParams();
|
||||
if (after) params.set("after", after);
|
||||
if (order) params.set("order", order);
|
||||
if (limit) params.set("limit", String(limit));
|
||||
const qs = params.toString();
|
||||
return client.requestJson("GET", `/issues/${encodeURIComponent(issueId)}/comments${qs ? `?${qs}` : ""}`);
|
||||
},
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetComment",
|
||||
"Get a specific issue comment by id",
|
||||
z.object({ issueId: issueIdSchema, commentId: z.string().uuid() }),
|
||||
async ({ issueId, commentId }) =>
|
||||
client.requestJson("GET", `/issues/${encodeURIComponent(issueId)}/comments/${encodeURIComponent(commentId)}`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListIssueApprovals",
|
||||
"List approvals linked to an issue",
|
||||
z.object({ issueId: issueIdSchema }),
|
||||
async ({ issueId }) => client.requestJson("GET", `/issues/${encodeURIComponent(issueId)}/approvals`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListDocuments",
|
||||
"List issue documents",
|
||||
z.object({ issueId: issueIdSchema }),
|
||||
async ({ issueId }) => client.requestJson("GET", `/issues/${encodeURIComponent(issueId)}/documents`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetDocument",
|
||||
"Get one issue document by key",
|
||||
z.object({ issueId: issueIdSchema, key: documentKeySchema }),
|
||||
async ({ issueId, key }) =>
|
||||
client.requestJson("GET", `/issues/${encodeURIComponent(issueId)}/documents/${encodeURIComponent(key)}`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListDocumentRevisions",
|
||||
"List revisions for an issue document",
|
||||
z.object({ issueId: issueIdSchema, key: documentKeySchema }),
|
||||
async ({ issueId, key }) =>
|
||||
client.requestJson(
|
||||
"GET",
|
||||
`/issues/${encodeURIComponent(issueId)}/documents/${encodeURIComponent(key)}/revisions`,
|
||||
),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListProjects",
|
||||
"List projects in a company",
|
||||
z.object({ companyId: companyIdOptional }),
|
||||
async ({ companyId }) => client.requestJson("GET", `/companies/${client.resolveCompanyId(companyId)}/projects`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetProject",
|
||||
"Get a project by id or company-scoped short reference",
|
||||
z.object({ projectId: projectIdSchema, companyId: companyIdOptional }),
|
||||
async ({ projectId, companyId }) => {
|
||||
const qs = companyId ? `?companyId=${encodeURIComponent(companyId)}` : "";
|
||||
return client.requestJson("GET", `/projects/${encodeURIComponent(projectId)}${qs}`);
|
||||
},
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListGoals",
|
||||
"List goals in a company",
|
||||
z.object({ companyId: companyIdOptional }),
|
||||
async ({ companyId }) => client.requestJson("GET", `/companies/${client.resolveCompanyId(companyId)}/goals`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetGoal",
|
||||
"Get a goal by id",
|
||||
z.object({ goalId: goalIdSchema }),
|
||||
async ({ goalId }) => client.requestJson("GET", `/goals/${encodeURIComponent(goalId)}`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListApprovals",
|
||||
"List approvals in a company",
|
||||
z.object({ companyId: companyIdOptional, status: z.string().optional() }),
|
||||
async ({ companyId, status }) => {
|
||||
const qs = status ? `?status=${encodeURIComponent(status)}` : "";
|
||||
return client.requestJson("GET", `/companies/${client.resolveCompanyId(companyId)}/approvals${qs}`);
|
||||
},
|
||||
),
|
||||
makeTool(
|
||||
"paperclipCreateApproval",
|
||||
"Create a board approval request, optionally linked to one or more issues",
|
||||
createApprovalToolSchema,
|
||||
async ({ companyId, ...body }) =>
|
||||
client.requestJson("POST", `/companies/${client.resolveCompanyId(companyId)}/approvals`, {
|
||||
body,
|
||||
}),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetApproval",
|
||||
"Get an approval by id",
|
||||
z.object({ approvalId: approvalIdSchema }),
|
||||
async ({ approvalId }) => client.requestJson("GET", `/approvals/${encodeURIComponent(approvalId)}`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipGetApprovalIssues",
|
||||
"List issues linked to an approval",
|
||||
z.object({ approvalId: approvalIdSchema }),
|
||||
async ({ approvalId }) => client.requestJson("GET", `/approvals/${encodeURIComponent(approvalId)}/issues`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipListApprovalComments",
|
||||
"List comments for an approval",
|
||||
z.object({ approvalId: approvalIdSchema }),
|
||||
async ({ approvalId }) => client.requestJson("GET", `/approvals/${encodeURIComponent(approvalId)}/comments`),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipCreateIssue",
|
||||
"Create a new issue",
|
||||
createIssueToolSchema,
|
||||
async ({ companyId, ...body }) =>
|
||||
client.requestJson("POST", `/companies/${client.resolveCompanyId(companyId)}/issues`, { body }),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipUpdateIssue",
|
||||
"Patch an issue, optionally including a comment",
|
||||
updateIssueToolSchema,
|
||||
async ({ issueId, ...body }) =>
|
||||
client.requestJson("PATCH", `/issues/${encodeURIComponent(issueId)}`, { body }),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipCheckoutIssue",
|
||||
"Checkout an issue for an agent",
|
||||
checkoutIssueToolSchema,
|
||||
async ({ issueId, agentId, expectedStatuses }) =>
|
||||
client.requestJson("POST", `/issues/${encodeURIComponent(issueId)}/checkout`, {
|
||||
body: {
|
||||
agentId: client.resolveAgentId(agentId),
|
||||
expectedStatuses: expectedStatuses ?? ["todo", "backlog", "blocked"],
|
||||
},
|
||||
}),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipReleaseIssue",
|
||||
"Release an issue checkout",
|
||||
z.object({ issueId: issueIdSchema }),
|
||||
async ({ issueId }) => client.requestJson("POST", `/issues/${encodeURIComponent(issueId)}/release`, { body: {} }),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipAddComment",
|
||||
"Add a comment to an issue",
|
||||
addCommentToolSchema,
|
||||
async ({ issueId, ...body }) =>
|
||||
client.requestJson("POST", `/issues/${encodeURIComponent(issueId)}/comments`, { body }),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipUpsertIssueDocument",
|
||||
"Create or update an issue document",
|
||||
upsertDocumentToolSchema,
|
||||
async ({ issueId, key, ...body }) =>
|
||||
client.requestJson(
|
||||
"PUT",
|
||||
`/issues/${encodeURIComponent(issueId)}/documents/${encodeURIComponent(key)}`,
|
||||
{ body },
|
||||
),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipRestoreIssueDocumentRevision",
|
||||
"Restore a prior revision of an issue document",
|
||||
z.object({
|
||||
issueId: issueIdSchema,
|
||||
key: documentKeySchema,
|
||||
revisionId: z.string().uuid(),
|
||||
}),
|
||||
async ({ issueId, key, revisionId }) =>
|
||||
client.requestJson(
|
||||
"POST",
|
||||
`/issues/${encodeURIComponent(issueId)}/documents/${encodeURIComponent(key)}/revisions/${encodeURIComponent(revisionId)}/restore`,
|
||||
{ body: {} },
|
||||
),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipLinkIssueApproval",
|
||||
"Link an approval to an issue",
|
||||
z.object({ issueId: issueIdSchema }).merge(linkIssueApprovalSchema),
|
||||
async ({ issueId, approvalId }) =>
|
||||
client.requestJson("POST", `/issues/${encodeURIComponent(issueId)}/approvals`, {
|
||||
body: { approvalId },
|
||||
}),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipUnlinkIssueApproval",
|
||||
"Unlink an approval from an issue",
|
||||
z.object({ issueId: issueIdSchema, approvalId: approvalIdSchema }),
|
||||
async ({ issueId, approvalId }) =>
|
||||
client.requestJson(
|
||||
"DELETE",
|
||||
`/issues/${encodeURIComponent(issueId)}/approvals/${encodeURIComponent(approvalId)}`,
|
||||
),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipApprovalDecision",
|
||||
"Approve, reject, request revision, or resubmit an approval",
|
||||
approvalDecisionSchema,
|
||||
async ({ approvalId, action, decisionNote, payloadJson }) => {
|
||||
const path =
|
||||
action === "approve"
|
||||
? `/approvals/${encodeURIComponent(approvalId)}/approve`
|
||||
: action === "reject"
|
||||
? `/approvals/${encodeURIComponent(approvalId)}/reject`
|
||||
: action === "requestRevision"
|
||||
? `/approvals/${encodeURIComponent(approvalId)}/request-revision`
|
||||
: `/approvals/${encodeURIComponent(approvalId)}/resubmit`;
|
||||
|
||||
const body =
|
||||
action === "resubmit"
|
||||
? { payload: parseOptionalJson(payloadJson) ?? {} }
|
||||
: { decisionNote };
|
||||
|
||||
return client.requestJson("POST", path, { body });
|
||||
},
|
||||
),
|
||||
makeTool(
|
||||
"paperclipAddApprovalComment",
|
||||
"Add a comment to an approval",
|
||||
z.object({ approvalId: approvalIdSchema, body: z.string().min(1) }),
|
||||
async ({ approvalId, body }) =>
|
||||
client.requestJson("POST", `/approvals/${encodeURIComponent(approvalId)}/comments`, {
|
||||
body: { body },
|
||||
}),
|
||||
),
|
||||
makeTool(
|
||||
"paperclipApiRequest",
|
||||
"Make a JSON request to an existing Paperclip /api endpoint for unsupported operations",
|
||||
apiRequestSchema,
|
||||
async ({ method, path, jsonBody }) => {
|
||||
if (!path.startsWith("/") || path.includes("..")) {
|
||||
throw new Error("path must start with / and be relative to /api, and must not contain '..'");
|
||||
}
|
||||
return client.requestJson(method, path, {
|
||||
body: parseOptionalJson(jsonBody),
|
||||
});
|
||||
},
|
||||
),
|
||||
];
|
||||
}
|
||||
8
packages/mcp-server/tsconfig.json
Normal file
8
packages/mcp-server/tsconfig.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"extends": "../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src"
|
||||
},
|
||||
"include": ["src"]
|
||||
}
|
||||
7
packages/mcp-server/vitest.config.ts
Normal file
7
packages/mcp-server/vitest.config.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
import { defineConfig } from "vitest/config";
|
||||
|
||||
export default defineConfig({
|
||||
test: {
|
||||
environment: "node",
|
||||
},
|
||||
});
|
||||
@@ -606,7 +606,7 @@ export interface WorkerToHostMethods {
|
||||
result: IssueComment[],
|
||||
];
|
||||
"issues.createComment": [
|
||||
params: { issueId: string; body: string; companyId: string },
|
||||
params: { issueId: string; body: string; companyId: string; authorAgentId?: string },
|
||||
result: IssueComment,
|
||||
];
|
||||
|
||||
|
||||
@@ -405,7 +405,7 @@ export function createTestHarness(options: TestHarnessOptions): TestHarness {
|
||||
if (!isInCompany(issues.get(issueId), companyId)) return [];
|
||||
return issueComments.get(issueId) ?? [];
|
||||
},
|
||||
async createComment(issueId, body, companyId) {
|
||||
async createComment(issueId, body, companyId, options) {
|
||||
requireCapability(manifest, capabilitySet, "issue.comments.create");
|
||||
const parentIssue = issues.get(issueId);
|
||||
if (!isInCompany(parentIssue, companyId)) {
|
||||
@@ -416,7 +416,7 @@ export function createTestHarness(options: TestHarnessOptions): TestHarness {
|
||||
id: randomUUID(),
|
||||
companyId: parentIssue.companyId,
|
||||
issueId,
|
||||
authorAgentId: null,
|
||||
authorAgentId: options?.authorAgentId ?? null,
|
||||
authorUserId: null,
|
||||
body,
|
||||
createdAt: now,
|
||||
|
||||
@@ -909,7 +909,12 @@ export interface PluginIssuesClient {
|
||||
companyId: string,
|
||||
): Promise<Issue>;
|
||||
listComments(issueId: string, companyId: string): Promise<IssueComment[]>;
|
||||
createComment(issueId: string, body: string, companyId: string): Promise<IssueComment>;
|
||||
createComment(
|
||||
issueId: string,
|
||||
body: string,
|
||||
companyId: string,
|
||||
options?: { authorAgentId?: string },
|
||||
): Promise<IssueComment>;
|
||||
/** Read and write issue documents. Requires `issue.documents.read` / `issue.documents.write`. */
|
||||
documents: PluginIssueDocumentsClient;
|
||||
}
|
||||
|
||||
@@ -610,8 +610,8 @@ export function startWorkerRpcHost(options: WorkerRpcHostOptions): WorkerRpcHost
|
||||
return callHost("issues.listComments", { issueId, companyId });
|
||||
},
|
||||
|
||||
async createComment(issueId: string, body: string, companyId: string) {
|
||||
return callHost("issues.createComment", { issueId, body, companyId });
|
||||
async createComment(issueId: string, body: string, companyId: string, options?: { authorAgentId?: string }) {
|
||||
return callHost("issues.createComment", { issueId, body, companyId, authorAgentId: options?.authorAgentId });
|
||||
},
|
||||
|
||||
documents: {
|
||||
|
||||
15
packages/shared/src/adapter-type.ts
Normal file
15
packages/shared/src/adapter-type.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
import { z } from "zod";
|
||||
import { AGENT_ADAPTER_TYPES } from "./constants.js";
|
||||
|
||||
export const agentAdapterTypeSchema = z
|
||||
.string()
|
||||
.trim()
|
||||
.min(1)
|
||||
.default("process")
|
||||
.describe(`Known built-in adapters: ${AGENT_ADAPTER_TYPES.join(", ")}. External adapters may register additional non-empty string types at runtime.`);
|
||||
|
||||
export const optionalAgentAdapterTypeSchema = z
|
||||
.string()
|
||||
.trim()
|
||||
.min(1)
|
||||
.optional();
|
||||
38
packages/shared/src/adapter-types.test.ts
Normal file
38
packages/shared/src/adapter-types.test.ts
Normal file
@@ -0,0 +1,38 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { acceptInviteSchema, createAgentSchema, updateAgentSchema } from "./index.js";
|
||||
|
||||
describe("dynamic adapter type validation schemas", () => {
|
||||
it("accepts external adapter types in create/update agent schemas", () => {
|
||||
expect(
|
||||
createAgentSchema.parse({
|
||||
name: "External Agent",
|
||||
adapterType: "external_adapter",
|
||||
}).adapterType,
|
||||
).toBe("external_adapter");
|
||||
|
||||
expect(
|
||||
updateAgentSchema.parse({
|
||||
adapterType: "external_adapter",
|
||||
}).adapterType,
|
||||
).toBe("external_adapter");
|
||||
});
|
||||
|
||||
it("still rejects blank adapter types", () => {
|
||||
expect(() =>
|
||||
createAgentSchema.parse({
|
||||
name: "Blank Adapter",
|
||||
adapterType: " ",
|
||||
}),
|
||||
).toThrow();
|
||||
});
|
||||
|
||||
it("accepts external adapter types in invite acceptance schema", () => {
|
||||
expect(
|
||||
acceptInviteSchema.parse({
|
||||
requestType: "agent",
|
||||
agentName: "External Joiner",
|
||||
adapterType: "external_adapter",
|
||||
}).adapterType,
|
||||
).toBe("external_adapter");
|
||||
});
|
||||
});
|
||||
@@ -31,9 +31,8 @@ export const AGENT_ADAPTER_TYPES = [
|
||||
"pi_local",
|
||||
"cursor",
|
||||
"openclaw_gateway",
|
||||
"hermes_local",
|
||||
] as const;
|
||||
export type AgentAdapterType = (typeof AGENT_ADAPTER_TYPES)[number];
|
||||
export type AgentAdapterType = (typeof AGENT_ADAPTER_TYPES)[number] | (string & {});
|
||||
|
||||
export const AGENT_ROLES = [
|
||||
"ceo",
|
||||
@@ -136,6 +135,9 @@ export type IssuePriority = (typeof ISSUE_PRIORITIES)[number];
|
||||
export const ISSUE_ORIGIN_KINDS = ["manual", "routine_execution"] as const;
|
||||
export type IssueOriginKind = (typeof ISSUE_ORIGIN_KINDS)[number];
|
||||
|
||||
export const ISSUE_RELATION_TYPES = ["blocks"] as const;
|
||||
export type IssueRelationType = (typeof ISSUE_RELATION_TYPES)[number];
|
||||
|
||||
export const GOAL_LEVELS = ["company", "team", "agent", "task"] as const;
|
||||
export type GoalLevel = (typeof GOAL_LEVELS)[number];
|
||||
|
||||
@@ -198,7 +200,12 @@ export const PROJECT_COLORS = [
|
||||
"#3b82f6", // blue
|
||||
] as const;
|
||||
|
||||
export const APPROVAL_TYPES = ["hire_agent", "approve_ceo_strategy", "budget_override_required"] as const;
|
||||
export const APPROVAL_TYPES = [
|
||||
"hire_agent",
|
||||
"approve_ceo_strategy",
|
||||
"budget_override_required",
|
||||
"request_board_approval",
|
||||
] as const;
|
||||
export type ApprovalType = (typeof APPROVAL_TYPES)[number];
|
||||
|
||||
export const APPROVAL_STATUSES = [
|
||||
|
||||
19
packages/shared/src/execution-workspace-guards.ts
Normal file
19
packages/shared/src/execution-workspace-guards.ts
Normal file
@@ -0,0 +1,19 @@
|
||||
import type { ExecutionWorkspace } from "./types/workspace-runtime.js";
|
||||
|
||||
type ExecutionWorkspaceGuardTarget = Pick<ExecutionWorkspace, "closedAt" | "mode" | "name" | "status">;
|
||||
|
||||
const CLOSED_EXECUTION_WORKSPACE_STATUSES = new Set<ExecutionWorkspace["status"]>(["archived", "cleanup_failed"]);
|
||||
|
||||
export function isClosedIsolatedExecutionWorkspace(
|
||||
workspace: Pick<ExecutionWorkspaceGuardTarget, "closedAt" | "mode" | "status"> | null | undefined,
|
||||
): boolean {
|
||||
if (!workspace) return false;
|
||||
if (workspace.mode !== "isolated_workspace") return false;
|
||||
return workspace.closedAt != null || CLOSED_EXECUTION_WORKSPACE_STATUSES.has(workspace.status);
|
||||
}
|
||||
|
||||
export function getClosedIsolatedExecutionWorkspaceMessage(
|
||||
workspace: Pick<ExecutionWorkspaceGuardTarget, "name">,
|
||||
): string {
|
||||
return `This issue is linked to the closed workspace "${workspace.name}". Move it to an open workspace before adding comments or resuming work.`;
|
||||
}
|
||||
@@ -1,3 +1,4 @@
|
||||
export { agentAdapterTypeSchema, optionalAgentAdapterTypeSchema } from "./adapter-type.js";
|
||||
export {
|
||||
COMPANY_STATUSES,
|
||||
DEPLOYMENT_MODES,
|
||||
@@ -13,6 +14,7 @@ export {
|
||||
INBOX_MINE_ISSUE_STATUS_FILTER,
|
||||
ISSUE_PRIORITIES,
|
||||
ISSUE_ORIGIN_KINDS,
|
||||
ISSUE_RELATION_TYPES,
|
||||
GOAL_LEVELS,
|
||||
GOAL_STATUSES,
|
||||
PROJECT_STATUSES,
|
||||
@@ -81,6 +83,7 @@ export {
|
||||
type IssueStatus,
|
||||
type IssuePriority,
|
||||
type IssueOriginKind,
|
||||
type IssueRelationType,
|
||||
type GoalLevel,
|
||||
type GoalStatus,
|
||||
type ProjectStatus,
|
||||
@@ -228,6 +231,8 @@ export type {
|
||||
IssueWorkProductReviewState,
|
||||
Issue,
|
||||
IssueAssigneeAdapterOverrides,
|
||||
IssueRelation,
|
||||
IssueRelationIssueSummary,
|
||||
IssueComment,
|
||||
IssueDocument,
|
||||
IssueDocumentSummary,
|
||||
@@ -350,6 +355,11 @@ export {
|
||||
DEFAULT_FEEDBACK_DATA_SHARING_TERMS_VERSION,
|
||||
} from "./types/feedback.js";
|
||||
|
||||
export {
|
||||
getClosedIsolatedExecutionWorkspaceMessage,
|
||||
isClosedIsolatedExecutionWorkspace,
|
||||
} from "./execution-workspace-guards.js";
|
||||
|
||||
export {
|
||||
instanceGeneralSettingsSchema,
|
||||
patchInstanceGeneralSettingsSchema,
|
||||
@@ -594,14 +604,19 @@ export { deriveProjectUrlKey, normalizeProjectUrlKey, hasNonAsciiContent } from
|
||||
export {
|
||||
AGENT_MENTION_SCHEME,
|
||||
PROJECT_MENTION_SCHEME,
|
||||
SKILL_MENTION_SCHEME,
|
||||
buildAgentMentionHref,
|
||||
buildProjectMentionHref,
|
||||
buildSkillMentionHref,
|
||||
extractAgentMentionIds,
|
||||
extractSkillMentionIds,
|
||||
parseAgentMentionHref,
|
||||
parseProjectMentionHref,
|
||||
parseSkillMentionHref,
|
||||
extractProjectMentionIds,
|
||||
type ParsedAgentMention,
|
||||
type ParsedProjectMention,
|
||||
type ParsedSkillMention,
|
||||
} from "./project-mentions.js";
|
||||
|
||||
export {
|
||||
|
||||
@@ -2,10 +2,13 @@ import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
buildAgentMentionHref,
|
||||
buildProjectMentionHref,
|
||||
buildSkillMentionHref,
|
||||
extractAgentMentionIds,
|
||||
extractProjectMentionIds,
|
||||
extractSkillMentionIds,
|
||||
parseAgentMentionHref,
|
||||
parseProjectMentionHref,
|
||||
parseSkillMentionHref,
|
||||
} from "./project-mentions.js";
|
||||
|
||||
describe("project-mentions", () => {
|
||||
@@ -26,4 +29,13 @@ describe("project-mentions", () => {
|
||||
});
|
||||
expect(extractAgentMentionIds(`[@CodexCoder](${href})`)).toEqual(["agent-123"]);
|
||||
});
|
||||
|
||||
it("round-trips skill mentions with slug metadata", () => {
|
||||
const href = buildSkillMentionHref("skill-123", "release-changelog");
|
||||
expect(parseSkillMentionHref(href)).toEqual({
|
||||
skillId: "skill-123",
|
||||
slug: "release-changelog",
|
||||
});
|
||||
expect(extractSkillMentionIds(`[/release-changelog](${href})`)).toEqual(["skill-123"]);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
export const PROJECT_MENTION_SCHEME = "project://";
|
||||
export const AGENT_MENTION_SCHEME = "agent://";
|
||||
export const SKILL_MENTION_SCHEME = "skill://";
|
||||
|
||||
const HEX_COLOR_RE = /^[0-9a-f]{6}$/i;
|
||||
const HEX_COLOR_SHORT_RE = /^[0-9a-f]{3}$/i;
|
||||
@@ -7,7 +8,9 @@ const HEX_COLOR_WITH_HASH_RE = /^#[0-9a-f]{6}$/i;
|
||||
const HEX_COLOR_SHORT_WITH_HASH_RE = /^#[0-9a-f]{3}$/i;
|
||||
const PROJECT_MENTION_LINK_RE = /\[[^\]]*]\((project:\/\/[^)\s]+)\)/gi;
|
||||
const AGENT_MENTION_LINK_RE = /\[[^\]]*]\((agent:\/\/[^)\s]+)\)/gi;
|
||||
const SKILL_MENTION_LINK_RE = /\[[^\]]*]\((skill:\/\/[^)\s]+)\)/gi;
|
||||
const AGENT_ICON_NAME_RE = /^[a-z0-9-]+$/i;
|
||||
const SKILL_SLUG_RE = /^[a-z0-9][a-z0-9-]*$/i;
|
||||
|
||||
export interface ParsedProjectMention {
|
||||
projectId: string;
|
||||
@@ -19,6 +22,11 @@ export interface ParsedAgentMention {
|
||||
icon: string | null;
|
||||
}
|
||||
|
||||
export interface ParsedSkillMention {
|
||||
skillId: string;
|
||||
slug: string | null;
|
||||
}
|
||||
|
||||
function normalizeHexColor(input: string | null | undefined): string | null {
|
||||
if (!input) return null;
|
||||
const trimmed = input.trim();
|
||||
@@ -103,6 +111,36 @@ export function parseAgentMentionHref(href: string): ParsedAgentMention | null {
|
||||
};
|
||||
}
|
||||
|
||||
export function buildSkillMentionHref(skillId: string, slug?: string | null): string {
|
||||
const trimmedSkillId = skillId.trim();
|
||||
const normalizedSlug = normalizeSkillSlug(slug ?? null);
|
||||
if (!normalizedSlug) {
|
||||
return `${SKILL_MENTION_SCHEME}${trimmedSkillId}`;
|
||||
}
|
||||
return `${SKILL_MENTION_SCHEME}${trimmedSkillId}?s=${encodeURIComponent(normalizedSlug)}`;
|
||||
}
|
||||
|
||||
export function parseSkillMentionHref(href: string): ParsedSkillMention | null {
|
||||
if (!href.startsWith(SKILL_MENTION_SCHEME)) return null;
|
||||
|
||||
let url: URL;
|
||||
try {
|
||||
url = new URL(href);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (url.protocol !== "skill:") return null;
|
||||
|
||||
const skillId = `${url.hostname}${url.pathname}`.replace(/^\/+/, "").trim();
|
||||
if (!skillId) return null;
|
||||
|
||||
return {
|
||||
skillId,
|
||||
slug: normalizeSkillSlug(url.searchParams.get("s") ?? url.searchParams.get("slug")),
|
||||
};
|
||||
}
|
||||
|
||||
export function extractProjectMentionIds(markdown: string): string[] {
|
||||
if (!markdown) return [];
|
||||
const ids = new Set<string>();
|
||||
@@ -127,9 +165,28 @@ export function extractAgentMentionIds(markdown: string): string[] {
|
||||
return [...ids];
|
||||
}
|
||||
|
||||
export function extractSkillMentionIds(markdown: string): string[] {
|
||||
if (!markdown) return [];
|
||||
const ids = new Set<string>();
|
||||
const re = new RegExp(SKILL_MENTION_LINK_RE);
|
||||
let match: RegExpExecArray | null;
|
||||
while ((match = re.exec(markdown)) !== null) {
|
||||
const parsed = parseSkillMentionHref(match[1]);
|
||||
if (parsed) ids.add(parsed.skillId);
|
||||
}
|
||||
return [...ids];
|
||||
}
|
||||
|
||||
function normalizeAgentIcon(input: string | null | undefined): string | null {
|
||||
if (!input) return null;
|
||||
const trimmed = input.trim().toLowerCase();
|
||||
if (!trimmed || !AGENT_ICON_NAME_RE.test(trimmed)) return null;
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function normalizeSkillSlug(input: string | null | undefined): string | null {
|
||||
if (!input) return null;
|
||||
const trimmed = input.trim().toLowerCase();
|
||||
if (!trimmed || !SKILL_SLUG_RE.test(trimmed)) return null;
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
@@ -58,6 +58,7 @@ export class TelemetryClient {
|
||||
app,
|
||||
schemaVersion,
|
||||
installId: state.installId,
|
||||
version: this.version,
|
||||
events,
|
||||
}),
|
||||
signal: controller.signal,
|
||||
|
||||
@@ -23,6 +23,48 @@ export function trackCompanyImported(
|
||||
});
|
||||
}
|
||||
|
||||
export function trackProjectCreated(client: TelemetryClient): void {
|
||||
client.track("project.created");
|
||||
}
|
||||
|
||||
export function trackRoutineCreated(client: TelemetryClient): void {
|
||||
client.track("routine.created");
|
||||
}
|
||||
|
||||
export function trackRoutineRun(
|
||||
client: TelemetryClient,
|
||||
dims: { source: string; status: string },
|
||||
): void {
|
||||
client.track("routine.run", {
|
||||
source: dims.source,
|
||||
status: dims.status,
|
||||
});
|
||||
}
|
||||
|
||||
export function trackGoalCreated(
|
||||
client: TelemetryClient,
|
||||
dims?: { goalLevel?: string | null },
|
||||
): void {
|
||||
client.track("goal.created", dims?.goalLevel ? { goal_level: dims.goalLevel } : undefined);
|
||||
}
|
||||
|
||||
export function trackAgentCreated(
|
||||
client: TelemetryClient,
|
||||
dims: { agentRole: string },
|
||||
): void {
|
||||
client.track("agent.created", { agent_role: dims.agentRole });
|
||||
}
|
||||
|
||||
export function trackSkillImported(
|
||||
client: TelemetryClient,
|
||||
dims: { sourceType: string; skillRef?: string | null },
|
||||
): void {
|
||||
client.track("skill.imported", {
|
||||
source_type: dims.sourceType,
|
||||
...(dims.skillRef ? { skill_ref: dims.skillRef } : {}),
|
||||
});
|
||||
}
|
||||
|
||||
export function trackAgentFirstHeartbeat(
|
||||
client: TelemetryClient,
|
||||
dims: { agentRole: string },
|
||||
|
||||
@@ -5,6 +5,12 @@ export {
|
||||
trackInstallStarted,
|
||||
trackInstallCompleted,
|
||||
trackCompanyImported,
|
||||
trackProjectCreated,
|
||||
trackRoutineCreated,
|
||||
trackRoutineRun,
|
||||
trackGoalCreated,
|
||||
trackAgentCreated,
|
||||
trackSkillImported,
|
||||
trackAgentFirstHeartbeat,
|
||||
trackAgentTaskCompleted,
|
||||
trackErrorHandlerCrash,
|
||||
|
||||
@@ -24,6 +24,7 @@ export interface TelemetryEventEnvelope {
|
||||
app: string;
|
||||
schemaVersion: string;
|
||||
installId: string;
|
||||
version: string;
|
||||
events: TelemetryEvent[];
|
||||
}
|
||||
|
||||
@@ -31,6 +32,12 @@ export type TelemetryEventName =
|
||||
| "install.started"
|
||||
| "install.completed"
|
||||
| "company.imported"
|
||||
| "project.created"
|
||||
| "routine.created"
|
||||
| "routine.run"
|
||||
| "goal.created"
|
||||
| "agent.created"
|
||||
| "skill.imported"
|
||||
| "agent.first_heartbeat"
|
||||
| "agent.task_completed"
|
||||
| "error.handler_crash"
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
import type { AgentEnvConfig } from "./secrets.js";
|
||||
import type { RoutineVariable } from "./routine.js";
|
||||
|
||||
export interface CompanyPortabilityInclude {
|
||||
company: boolean;
|
||||
agents: boolean;
|
||||
@@ -10,6 +13,7 @@ export interface CompanyPortabilityEnvInput {
|
||||
key: string;
|
||||
description: string | null;
|
||||
agentSlug: string | null;
|
||||
projectSlug: string | null;
|
||||
kind: "secret" | "plain";
|
||||
requirement: "required" | "optional";
|
||||
defaultValue: string | null;
|
||||
@@ -52,13 +56,12 @@ export interface CompanyPortabilityProjectManifestEntry {
|
||||
targetDate: string | null;
|
||||
color: string | null;
|
||||
status: string | null;
|
||||
env: AgentEnvConfig | null;
|
||||
executionWorkspacePolicy: Record<string, unknown> | null;
|
||||
workspaces: CompanyPortabilityProjectWorkspaceManifestEntry[];
|
||||
metadata: Record<string, unknown> | null;
|
||||
}
|
||||
|
||||
import type { RoutineVariable } from "./routine.js";
|
||||
|
||||
export interface CompanyPortabilityProjectWorkspaceManifestEntry {
|
||||
key: string;
|
||||
name: string;
|
||||
|
||||
@@ -96,6 +96,8 @@ export type {
|
||||
export type {
|
||||
Issue,
|
||||
IssueAssigneeAdapterOverrides,
|
||||
IssueRelation,
|
||||
IssueRelationIssueSummary,
|
||||
IssueComment,
|
||||
IssueDocument,
|
||||
IssueDocumentSummary,
|
||||
|
||||
@@ -96,6 +96,25 @@ export interface LegacyPlanDocument {
|
||||
source: "issue_description";
|
||||
}
|
||||
|
||||
export interface IssueRelationIssueSummary {
|
||||
id: string;
|
||||
identifier: string | null;
|
||||
title: string;
|
||||
status: IssueStatus;
|
||||
priority: IssuePriority;
|
||||
assigneeAgentId: string | null;
|
||||
assigneeUserId: string | null;
|
||||
}
|
||||
|
||||
export interface IssueRelation {
|
||||
id: string;
|
||||
companyId: string;
|
||||
issueId: string;
|
||||
relatedIssueId: string;
|
||||
type: "blocks";
|
||||
relatedIssue: IssueRelationIssueSummary;
|
||||
}
|
||||
|
||||
export interface Issue {
|
||||
id: string;
|
||||
companyId: string;
|
||||
@@ -133,6 +152,8 @@ export interface Issue {
|
||||
hiddenAt: Date | null;
|
||||
labelIds?: string[];
|
||||
labels?: IssueLabel[];
|
||||
blockedBy?: IssueRelationIssueSummary[];
|
||||
blocks?: IssueRelationIssueSummary[];
|
||||
planDocument?: IssueDocument | null;
|
||||
documentSummaries?: IssueDocumentSummary[];
|
||||
legacyPlanDocument?: LegacyPlanDocument | null;
|
||||
@@ -143,6 +164,7 @@ export interface Issue {
|
||||
mentionedProjects?: Project[];
|
||||
myLastTouchAt?: Date | null;
|
||||
lastExternalCommentAt?: Date | null;
|
||||
lastActivityAt?: Date | null;
|
||||
isUnreadForMe?: boolean;
|
||||
createdAt: Date;
|
||||
updatedAt: Date;
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import type { PauseReason, ProjectStatus } from "../constants.js";
|
||||
import type { AgentEnvConfig } from "./secrets.js";
|
||||
import type {
|
||||
ProjectExecutionWorkspacePolicy,
|
||||
ProjectWorkspaceRuntimeConfig,
|
||||
@@ -65,6 +66,7 @@ export interface Project {
|
||||
leadAgentId: string | null;
|
||||
targetDate: string | null;
|
||||
color: string | null;
|
||||
env: AgentEnvConfig | null;
|
||||
pauseReason: PauseReason | null;
|
||||
pausedAt: Date | null;
|
||||
executionWorkspacePolicy: ProjectExecutionWorkspacePolicy | null;
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { z } from "zod";
|
||||
import {
|
||||
AGENT_ADAPTER_TYPES,
|
||||
INVITE_JOIN_TYPES,
|
||||
JOIN_REQUEST_STATUSES,
|
||||
JOIN_REQUEST_TYPES,
|
||||
PERMISSION_KEYS,
|
||||
} from "../constants.js";
|
||||
import { optionalAgentAdapterTypeSchema } from "../adapter-type.js";
|
||||
|
||||
export const createCompanyInviteSchema = z.object({
|
||||
allowedJoinTypes: z.enum(INVITE_JOIN_TYPES).default("both"),
|
||||
@@ -26,7 +26,7 @@ export type CreateOpenClawInvitePrompt = z.infer<
|
||||
export const acceptInviteSchema = z.object({
|
||||
requestType: z.enum(JOIN_REQUEST_TYPES),
|
||||
agentName: z.string().min(1).max(120).optional(),
|
||||
adapterType: z.enum(AGENT_ADAPTER_TYPES).optional(),
|
||||
adapterType: optionalAgentAdapterTypeSchema,
|
||||
capabilities: z.string().max(4000).optional().nullable(),
|
||||
agentDefaultsPayload: z.record(z.string(), z.unknown()).optional().nullable(),
|
||||
// OpenClaw join compatibility fields accepted at top level.
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { z } from "zod";
|
||||
import {
|
||||
AGENT_ADAPTER_TYPES,
|
||||
AGENT_ICON_NAMES,
|
||||
AGENT_ROLES,
|
||||
AGENT_STATUSES,
|
||||
INBOX_MINE_ISSUE_STATUS_FILTER,
|
||||
} from "../constants.js";
|
||||
import { agentAdapterTypeSchema } from "../adapter-type.js";
|
||||
import { envConfigSchema } from "./secret.js";
|
||||
|
||||
export const agentPermissionsSchema = z.object({
|
||||
@@ -52,7 +52,7 @@ export const createAgentSchema = z.object({
|
||||
reportsTo: z.string().uuid().optional().nullable(),
|
||||
capabilities: z.string().optional().nullable(),
|
||||
desiredSkills: z.array(z.string().min(1)).optional(),
|
||||
adapterType: z.enum(AGENT_ADAPTER_TYPES).optional().default("process"),
|
||||
adapterType: agentAdapterTypeSchema,
|
||||
adapterConfig: adapterConfigSchema.optional().default({}),
|
||||
runtimeConfig: z.record(z.unknown()).optional().default({}),
|
||||
budgetMonthlyCents: z.number().int().nonnegative().optional().default(0),
|
||||
|
||||
@@ -15,6 +15,7 @@ export const portabilityEnvInputSchema = z.object({
|
||||
key: z.string().min(1),
|
||||
description: z.string().nullable(),
|
||||
agentSlug: z.string().min(1).nullable(),
|
||||
projectSlug: z.string().min(1).nullable(),
|
||||
kind: z.enum(["secret", "plain"]),
|
||||
requirement: z.enum(["required", "optional"]),
|
||||
defaultValue: z.string().nullable(),
|
||||
|
||||
@@ -41,6 +41,7 @@ export const createIssueSchema = z.object({
|
||||
projectWorkspaceId: z.string().uuid().optional().nullable(),
|
||||
goalId: z.string().uuid().optional().nullable(),
|
||||
parentId: z.string().uuid().optional().nullable(),
|
||||
blockedByIssueIds: z.array(z.string().uuid()).optional(),
|
||||
inheritExecutionWorkspaceFromIssueId: z.string().uuid().optional().nullable(),
|
||||
title: z.string().min(1),
|
||||
description: z.string().optional().nullable(),
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { z } from "zod";
|
||||
import { PROJECT_STATUSES } from "../constants.js";
|
||||
import { envConfigSchema } from "./secret.js";
|
||||
|
||||
const executionWorkspaceStrategySchema = z
|
||||
.object({
|
||||
@@ -102,6 +103,7 @@ const projectFields = {
|
||||
leadAgentId: z.string().uuid().optional().nullable(),
|
||||
targetDate: z.string().optional().nullable(),
|
||||
color: z.string().optional().nullable(),
|
||||
env: envConfigSchema.optional().nullable(),
|
||||
executionWorkspacePolicy: projectExecutionWorkspacePolicySchema.optional().nullable(),
|
||||
archivedAt: z.string().datetime().optional().nullable(),
|
||||
};
|
||||
|
||||
68
releases/v2026.403.0.md
Normal file
68
releases/v2026.403.0.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# v2026.403.0
|
||||
|
||||
> Released: 2026-04-03
|
||||
|
||||
## Highlights
|
||||
|
||||
- **Inbox overhaul** — New "Mine" inbox tab with mail-client keyboard shortcuts (j/k navigation, a/y archive, o open), swipe-to-archive, "Mark all as read" button, operator search with keyboard controls, and a "Today" divider. Read/dismissed state now extends to all inbox item types. ([#2072](https://github.com/paperclipai/paperclip/pull/2072), [#2540](https://github.com/paperclipai/paperclip/pull/2540))
|
||||
- **Feedback and evals** — Thumbs-up/down feedback capture flow with voting UI, feedback modal styling, and run link placement in the feedback row. ([#2529](https://github.com/paperclipai/paperclip/pull/2529))
|
||||
- **Document revisions** — Issue document revision history with a restore flow, replay-safe migrations, and revision tracking API. ([#2317](https://github.com/paperclipai/paperclip/pull/2317))
|
||||
- **Telemetry** — Anonymized App-side telemetry. Disable with `DO_NOT_TRACK=1` or `PAPERCLIP_TELEMETRY_DISABLED=1` ([#2527](https://github.com/paperclipai/paperclip/pull/2527))
|
||||
- **Execution workspaces (EXPERIMENTAL)** — Full workspace lifecycle management for agent runs: workspace-aware routine runs, execution workspace detail pages with linked issues, runtime controls (start/stop), close readiness checks, and follow-up issue workspace inheritance. Project workspaces get their own detail pages and a dedicated tab on the project view. ([#2074](https://github.com/paperclipai/paperclip/pull/2074), [#2203](https://github.com/paperclipai/paperclip/pull/2203))
|
||||
|
||||
## Improvements
|
||||
|
||||
- **Comment interrupts** — New interrupt support for issue comments with queued comment thread UX.
|
||||
- **Docker improvements** — Improved base image organization, host UID/GID mapping for volume mounts, and Docker file structure. ([#2407](https://github.com/paperclipai/paperclip/pull/2407), [#1923](https://github.com/paperclipai/paperclip/pull/1923), @radiusred)
|
||||
- **Optimistic comments** — Comments render instantly with optimistic IDs while the server confirms; draft clearing is fixed for a smoother composing experience.
|
||||
- **GitHub Enterprise URL support** — Skill and company imports now accept GitHub Enterprise URLs with hardened GHE URL detection and shared GitHub helpers. ([#2449](https://github.com/paperclipai/paperclip/pull/2449), @statxc)
|
||||
- **Gemini local adapter** — Added `gemini_local` to the adapter types validation enum so Gemini agents no longer fail validation. ([#2430](https://github.com/paperclipai/paperclip/pull/2430), @bittoby)
|
||||
- **Routines skill** — New `paperclip-routines` skill with documentation moved into Paperclip references. Routine runs now support workspace awareness and variables. ([#2414](https://github.com/paperclipai/paperclip/pull/2414), @aronprins)
|
||||
- **GPT-5.4 and xhigh effort** — Added GPT-5.4 model fallback and xhigh effort options for OpenAI-based adapters. ([#112](https://github.com/paperclipai/paperclip/pull/112), @kevmok)
|
||||
- **Commit metrics** — New Paperclip commit metrics script with filtered exports and edge case handling.
|
||||
- **CLI onboarding** — Onboarding reruns now preserve existing config; exported tsx CLI entrypoint for cleaner startup. ([#2071](https://github.com/paperclipai/paperclip/pull/2071))
|
||||
- **Board delegation guide** — New documentation for board-operator delegation patterns. ([#1889](https://github.com/paperclipai/paperclip/pull/1889))
|
||||
- **Agent capabilities in org chart** — Agent capabilities field now renders on org chart cards. ([#2349](https://github.com/paperclipai/paperclip/pull/2349))
|
||||
- **PR template updates** — Added Model Used section to PR template; CONTRIBUTING.md now requires PR template, Greptile 5/5, and tests. ([#2552](https://github.com/paperclipai/paperclip/pull/2552), [#2618](https://github.com/paperclipai/paperclip/pull/2618))
|
||||
- **Hermes adapter upgrade** — Upgraded hermes-paperclip-adapter with UI adapter and skills support, plus detectModel improvements.
|
||||
- **Markdown editor monospace** — Agent instruction file editors now use monospace font. ([#2620](https://github.com/paperclipai/paperclip/pull/2620))
|
||||
- **Markdown link styling** — Links in markdown now render with underline and pointer cursor.
|
||||
- **@-mention autocomplete** — Mention autocomplete in project descriptions now renders via portal to prevent overflow clipping.
|
||||
- **Skipped wakeup messages** — Agent detail view now surfaces skipped wakeup messages for better observability.
|
||||
|
||||
## Fixes
|
||||
|
||||
- **Inbox ordering** — Self-touched issues no longer sink to the bottom of the inbox. ([#2144](https://github.com/paperclipai/paperclip/pull/2144))
|
||||
- **Env var type switching** — Switching an env var from Plain to Secret no longer loses the value; dropdown snap-back when switching is fixed. ([#2327](https://github.com/paperclipai/paperclip/pull/2327), @radiusred)
|
||||
- **Adapter type switching** — Adapter-agnostic keys are now preserved when changing adapter type.
|
||||
- **Project slug collisions** — Non-ASCII project names no longer produce duplicate slugs; a short UUID suffix is appended. ([#2328](https://github.com/paperclipai/paperclip/pull/2328), @bittoby)
|
||||
- **Codex RPC spawn error** — Fixed CodexRpcClient crash on ENOENT when spawning Codex. ([#2048](https://github.com/paperclipai/paperclip/pull/2048), @remdev)
|
||||
- **Heartbeat session reuse** — Fixed stale session reuse across heartbeat runs. ([#2065](https://github.com/paperclipai/paperclip/pull/2065), @edimuj)
|
||||
- **Vite HMR with reverse proxy** — Fixed WebSocket HMR connections behind reverse proxies and added StrictMode guard. ([#2171](https://github.com/paperclipai/paperclip/pull/2171))
|
||||
- **Copy button fallback** — Copy-to-clipboard now works in non-secure (HTTP) contexts. ([#2472](https://github.com/paperclipai/paperclip/pull/2472))
|
||||
- **Worktree default branch** — Worktree creation auto-detects the default branch when baseRef is not configured. ([#2463](https://github.com/paperclipai/paperclip/pull/2463))
|
||||
- **Session continuity** — Timer and heartbeat wakes now preserve session continuity.
|
||||
- **Worktree isolation** — Fixed worktree provision isolation, runtime recovery, and sibling port collisions.
|
||||
- **Cursor adapter auth** — Cursor adapter now checks native auth before warning about missing API key.
|
||||
- **Codex skill injection** — Fixed skill injection to use effective `$CODEX_HOME/skills/` instead of cwd.
|
||||
- **OpenCode config pollution** — Prevented `opencode.json` config pollution in workspace directories.
|
||||
- **Pi adapter** — Fixed Pi local adapter execution, transcript parsing, and model detection from stderr.
|
||||
- **x-forwarded-host origin check** — Board mutation origin check now includes x-forwarded-host header.
|
||||
- **Health DB probe** — Fixed database connectivity health check probe.
|
||||
- **Issue breadcrumb routing** — Hardened issue breadcrumb source routing.
|
||||
- **Instructions tab width** — Removed max-w-6xl constraint from instructions tab for full-width content. ([#2621](https://github.com/paperclipai/paperclip/pull/2621))
|
||||
- **Shell fallback on Windows** — Uses `sh` instead of `/bin/sh` as shell fallback on Windows. ([#891](https://github.com/paperclipai/paperclip/pull/891))
|
||||
- **Feedback migration** — Made feedback migration replay-safe after rebase.
|
||||
- **Issue detail polish** — Polished issue detail timelines and attachments display.
|
||||
|
||||
## Upgrade Guide
|
||||
|
||||
Four new database migrations (`0045`–`0048`) will run automatically on startup. These migrations add workspace lifecycle columns, routine variables, feedback tables, and document revision tracking. All migrations are additive — no existing data is modified.
|
||||
|
||||
If you use execution workspaces, note that follow-up issues now automatically inherit workspace linkage from their parent. For non-child follow-ups tied to the same workspace, set `inheritExecutionWorkspaceFromIssueId` explicitly when creating the issue.
|
||||
|
||||
## Contributors
|
||||
|
||||
Thank you to everyone who contributed to this release!
|
||||
|
||||
@aronprins, @bittoby, @cryppadotta, @edimuj, @HenkDz, @kevmok, @mvanhorn, @radiusred, @remdev, @statxc, @vanductai
|
||||
93
scripts/dev-runner-output.mjs
Normal file
93
scripts/dev-runner-output.mjs
Normal file
@@ -0,0 +1,93 @@
|
||||
const DEFAULT_CAPTURED_OUTPUT_BYTES = 256 * 1024;
|
||||
const DEFAULT_JSON_RESPONSE_BYTES = 64 * 1024;
|
||||
|
||||
function normalizeByteLimit(maxBytes) {
|
||||
return Math.max(1, Math.trunc(maxBytes));
|
||||
}
|
||||
|
||||
export function createCapturedOutputBuffer(maxBytes = DEFAULT_CAPTURED_OUTPUT_BYTES) {
|
||||
const limit = normalizeByteLimit(maxBytes);
|
||||
const chunks = [];
|
||||
let bufferedBytes = 0;
|
||||
let totalBytes = 0;
|
||||
let truncated = false;
|
||||
|
||||
return {
|
||||
append(chunk) {
|
||||
if (chunk === null || chunk === undefined) return;
|
||||
const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
|
||||
if (buffer.length === 0) return;
|
||||
|
||||
chunks.push(buffer);
|
||||
bufferedBytes += buffer.length;
|
||||
totalBytes += buffer.length;
|
||||
|
||||
while (bufferedBytes > limit && chunks.length > 0) {
|
||||
const overflow = bufferedBytes - limit;
|
||||
const head = chunks[0];
|
||||
if (head.length <= overflow) {
|
||||
chunks.shift();
|
||||
bufferedBytes -= head.length;
|
||||
truncated = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
chunks[0] = head.subarray(overflow);
|
||||
bufferedBytes -= overflow;
|
||||
truncated = true;
|
||||
}
|
||||
},
|
||||
|
||||
finish() {
|
||||
const body = Buffer.concat(chunks).toString("utf8");
|
||||
if (!truncated) {
|
||||
return {
|
||||
text: body,
|
||||
truncated,
|
||||
totalBytes,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
text: `[output truncated to last ${limit} bytes; total ${totalBytes} bytes]\n${body}`,
|
||||
truncated,
|
||||
totalBytes,
|
||||
};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export async function parseJsonResponseWithLimit(response, maxBytes = DEFAULT_JSON_RESPONSE_BYTES) {
|
||||
const limit = normalizeByteLimit(maxBytes);
|
||||
const contentLength = Number.parseInt(response.headers.get("content-length") ?? "", 10);
|
||||
if (Number.isFinite(contentLength) && contentLength > limit) {
|
||||
throw new Error(`Response exceeds ${limit} bytes`);
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
return JSON.parse("");
|
||||
}
|
||||
|
||||
const reader = response.body.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
let text = "";
|
||||
let totalBytes = 0;
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
totalBytes += value.byteLength;
|
||||
if (totalBytes > limit) {
|
||||
await reader.cancel("response too large");
|
||||
throw new Error(`Response exceeds ${limit} bytes`);
|
||||
}
|
||||
text += decoder.decode(value, { stream: true });
|
||||
}
|
||||
text += decoder.decode();
|
||||
} finally {
|
||||
reader.releaseLock();
|
||||
}
|
||||
|
||||
return JSON.parse(text);
|
||||
}
|
||||
102
scripts/dev-runner-output.ts
Normal file
102
scripts/dev-runner-output.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
const DEFAULT_CAPTURED_OUTPUT_BYTES = 256 * 1024;
|
||||
const DEFAULT_JSON_RESPONSE_BYTES = 64 * 1024;
|
||||
|
||||
export type CapturedOutput = {
|
||||
text: string;
|
||||
truncated: boolean;
|
||||
totalBytes: number;
|
||||
};
|
||||
|
||||
function normalizeByteLimit(maxBytes: number) {
|
||||
return Math.max(1, Math.trunc(maxBytes));
|
||||
}
|
||||
|
||||
export function createCapturedOutputBuffer(maxBytes = DEFAULT_CAPTURED_OUTPUT_BYTES) {
|
||||
const limit = normalizeByteLimit(maxBytes);
|
||||
const chunks: Buffer[] = [];
|
||||
let bufferedBytes = 0;
|
||||
let totalBytes = 0;
|
||||
let truncated = false;
|
||||
|
||||
return {
|
||||
append(chunk: Buffer | string | null | undefined) {
|
||||
if (chunk === null || chunk === undefined) return;
|
||||
const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
|
||||
if (buffer.length === 0) return;
|
||||
|
||||
chunks.push(buffer);
|
||||
bufferedBytes += buffer.length;
|
||||
totalBytes += buffer.length;
|
||||
|
||||
while (bufferedBytes > limit && chunks.length > 0) {
|
||||
const overflow = bufferedBytes - limit;
|
||||
const head = chunks[0]!;
|
||||
if (head.length <= overflow) {
|
||||
chunks.shift();
|
||||
bufferedBytes -= head.length;
|
||||
truncated = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
chunks[0] = head.subarray(overflow);
|
||||
bufferedBytes -= overflow;
|
||||
truncated = true;
|
||||
}
|
||||
},
|
||||
|
||||
finish(): CapturedOutput {
|
||||
const body = Buffer.concat(chunks).toString("utf8");
|
||||
if (!truncated) {
|
||||
return {
|
||||
text: body,
|
||||
truncated,
|
||||
totalBytes,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
text: `[output truncated to last ${limit} bytes; total ${totalBytes} bytes]\n${body}`,
|
||||
truncated,
|
||||
totalBytes,
|
||||
};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export async function parseJsonResponseWithLimit<T>(
|
||||
response: Response,
|
||||
maxBytes = DEFAULT_JSON_RESPONSE_BYTES,
|
||||
): Promise<T> {
|
||||
const limit = normalizeByteLimit(maxBytes);
|
||||
const contentLength = Number.parseInt(response.headers.get("content-length") ?? "", 10);
|
||||
if (Number.isFinite(contentLength) && contentLength > limit) {
|
||||
throw new Error(`Response exceeds ${limit} bytes`);
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
throw new Error("Response has no body");
|
||||
}
|
||||
|
||||
const reader = response.body.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
let text = "";
|
||||
let totalBytes = 0;
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
totalBytes += value.byteLength;
|
||||
if (totalBytes > limit) {
|
||||
await reader.cancel("response too large");
|
||||
throw new Error(`Response exceeds ${limit} bytes`);
|
||||
}
|
||||
text += decoder.decode(value, { stream: true });
|
||||
}
|
||||
text += decoder.decode();
|
||||
} finally {
|
||||
reader.releaseLock();
|
||||
}
|
||||
|
||||
return JSON.parse(text) as T;
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import path from "node:path";
|
||||
import { createInterface } from "node:readline/promises";
|
||||
import { stdin, stdout } from "node:process";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import { createCapturedOutputBuffer, parseJsonResponseWithLimit } from "./dev-runner-output.mjs";
|
||||
import { shouldTrackDevServerPath } from "./dev-runner-paths.mjs";
|
||||
|
||||
const mode = process.argv[2] === "watch" ? "watch" : "dev";
|
||||
@@ -250,30 +251,33 @@ async function runPnpm(args, options = {}) {
|
||||
const spawned = spawn(pnpmBin, args, {
|
||||
stdio: options.stdio ?? ["ignore", "pipe", "pipe"],
|
||||
env: options.env ?? process.env,
|
||||
cwd: options.cwd,
|
||||
shell: process.platform === "win32",
|
||||
});
|
||||
|
||||
let stdoutBuffer = "";
|
||||
let stderrBuffer = "";
|
||||
const stdoutBuffer = createCapturedOutputBuffer();
|
||||
const stderrBuffer = createCapturedOutputBuffer();
|
||||
|
||||
if (spawned.stdout) {
|
||||
spawned.stdout.on("data", (chunk) => {
|
||||
stdoutBuffer += String(chunk);
|
||||
stdoutBuffer.append(chunk);
|
||||
});
|
||||
}
|
||||
if (spawned.stderr) {
|
||||
spawned.stderr.on("data", (chunk) => {
|
||||
stderrBuffer += String(chunk);
|
||||
stderrBuffer.append(chunk);
|
||||
});
|
||||
}
|
||||
|
||||
spawned.on("error", reject);
|
||||
spawned.on("exit", (code, signal) => {
|
||||
const stdout = stdoutBuffer.finish();
|
||||
const stderr = stderrBuffer.finish();
|
||||
resolve({
|
||||
code: code ?? 0,
|
||||
signal,
|
||||
stdout: stdoutBuffer,
|
||||
stderr: stderrBuffer,
|
||||
stdout: stdout.text,
|
||||
stderr: stderr.text,
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -426,7 +430,7 @@ async function getDevHealthPayload() {
|
||||
if (!response.ok) {
|
||||
throw new Error(`Health request failed (${response.status})`);
|
||||
}
|
||||
return await response.json();
|
||||
return await parseJsonResponseWithLimit(response);
|
||||
}
|
||||
|
||||
async function waitForChildExit() {
|
||||
|
||||
@@ -4,6 +4,7 @@ import { existsSync, mkdirSync, readdirSync, rmSync, statSync, writeFileSync } f
|
||||
import path from "node:path";
|
||||
import { createInterface } from "node:readline/promises";
|
||||
import { stdin, stdout } from "node:process";
|
||||
import { createCapturedOutputBuffer, parseJsonResponseWithLimit } from "./dev-runner-output.mjs";
|
||||
import { shouldTrackDevServerPath } from "./dev-runner-paths.mjs";
|
||||
import { createDevServiceIdentity, repoRoot } from "./dev-service-profile.ts";
|
||||
import {
|
||||
@@ -315,27 +316,29 @@ async function runPnpm(args: string[], options: {
|
||||
shell: process.platform === "win32",
|
||||
});
|
||||
|
||||
let stdoutBuffer = "";
|
||||
let stderrBuffer = "";
|
||||
const stdoutBuffer = createCapturedOutputBuffer();
|
||||
const stderrBuffer = createCapturedOutputBuffer();
|
||||
|
||||
if (spawned.stdout) {
|
||||
spawned.stdout.on("data", (chunk) => {
|
||||
stdoutBuffer += String(chunk);
|
||||
stdoutBuffer.append(chunk);
|
||||
});
|
||||
}
|
||||
if (spawned.stderr) {
|
||||
spawned.stderr.on("data", (chunk) => {
|
||||
stderrBuffer += String(chunk);
|
||||
stderrBuffer.append(chunk);
|
||||
});
|
||||
}
|
||||
|
||||
spawned.on("error", reject);
|
||||
spawned.on("exit", (code, signal) => {
|
||||
const stdout = stdoutBuffer.finish();
|
||||
const stderr = stderrBuffer.finish();
|
||||
resolve({
|
||||
code: code ?? 0,
|
||||
signal,
|
||||
stdout: stdoutBuffer,
|
||||
stderr: stderrBuffer,
|
||||
stdout: stdout.text,
|
||||
stderr: stderr.text,
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -484,7 +487,7 @@ async function getDevHealthPayload() {
|
||||
if (!response.ok) {
|
||||
throw new Error(`Health request failed (${response.status})`);
|
||||
}
|
||||
return await response.json();
|
||||
return await parseJsonResponseWithLimit<{ devServer?: { enabled?: boolean; autoRestartEnabled?: boolean; activeRunCount?: number } }>(response);
|
||||
}
|
||||
|
||||
async function waitForChildExit() {
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
#!/usr/bin/env -S node --import tsx
|
||||
import { spawn } from "node:child_process";
|
||||
import fs from "node:fs/promises";
|
||||
import { existsSync, readdirSync, readFileSync, realpathSync } from "node:fs";
|
||||
import path from "node:path";
|
||||
import { repoRoot } from "./dev-service-profile.ts";
|
||||
|
||||
type WorkspaceLinkMismatch = {
|
||||
workspaceDir: string;
|
||||
packageName: string;
|
||||
expectedPath: string;
|
||||
actualPath: string | null;
|
||||
@@ -44,11 +45,11 @@ function discoverWorkspacePackagePaths(rootDir: string): Map<string, string> {
|
||||
|
||||
const workspacePackagePaths = discoverWorkspacePackagePaths(repoRoot);
|
||||
|
||||
function findServerWorkspaceLinkMismatches(): WorkspaceLinkMismatch[] {
|
||||
const serverPackageJson = readJsonFile(path.join(repoRoot, "server", "package.json"));
|
||||
function findWorkspaceLinkMismatches(workspaceDir: string): WorkspaceLinkMismatch[] {
|
||||
const packageJson = readJsonFile(path.join(repoRoot, workspaceDir, "package.json"));
|
||||
const dependencies = {
|
||||
...(serverPackageJson.dependencies as Record<string, unknown> | undefined),
|
||||
...(serverPackageJson.devDependencies as Record<string, unknown> | undefined),
|
||||
...(packageJson.dependencies as Record<string, unknown> | undefined),
|
||||
...(packageJson.devDependencies as Record<string, unknown> | undefined),
|
||||
};
|
||||
const mismatches: WorkspaceLinkMismatch[] = [];
|
||||
|
||||
@@ -58,11 +59,12 @@ function findServerWorkspaceLinkMismatches(): WorkspaceLinkMismatch[] {
|
||||
const expectedPath = workspacePackagePaths.get(packageName);
|
||||
if (!expectedPath) continue;
|
||||
|
||||
const linkPath = path.join(repoRoot, "server", "node_modules", ...packageName.split("/"));
|
||||
const linkPath = path.join(repoRoot, workspaceDir, "node_modules", ...packageName.split("/"));
|
||||
const actualPath = existsSync(linkPath) ? path.resolve(realpathSync(linkPath)) : null;
|
||||
if (actualPath === path.resolve(expectedPath)) continue;
|
||||
|
||||
mismatches.push({
|
||||
workspaceDir,
|
||||
packageName,
|
||||
expectedPath: path.resolve(expectedPath),
|
||||
actualPath,
|
||||
@@ -72,53 +74,32 @@ function findServerWorkspaceLinkMismatches(): WorkspaceLinkMismatch[] {
|
||||
return mismatches;
|
||||
}
|
||||
|
||||
function runCommand(command: string, args: string[], cwd: string) {
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
const child = spawn(command, args, {
|
||||
cwd,
|
||||
env: process.env,
|
||||
stdio: "inherit",
|
||||
});
|
||||
|
||||
child.on("error", reject);
|
||||
child.on("exit", (code, signal) => {
|
||||
if (code === 0) {
|
||||
resolve();
|
||||
return;
|
||||
}
|
||||
reject(
|
||||
new Error(
|
||||
`${command} ${args.join(" ")} failed with ${signal ? `signal ${signal}` : `exit code ${code ?? "unknown"}`}`,
|
||||
),
|
||||
);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async function ensureServerWorkspaceLinksCurrent() {
|
||||
const mismatches = findServerWorkspaceLinkMismatches();
|
||||
async function ensureWorkspaceLinksCurrent(workspaceDir: string) {
|
||||
const mismatches = findWorkspaceLinkMismatches(workspaceDir);
|
||||
if (mismatches.length === 0) return;
|
||||
|
||||
console.log("[paperclip] detected stale workspace package links for server; relinking dependencies...");
|
||||
console.log(`[paperclip] detected stale workspace package links for ${workspaceDir}; relinking dependencies...`);
|
||||
for (const mismatch of mismatches) {
|
||||
console.log(
|
||||
`[paperclip] ${mismatch.packageName}: ${mismatch.actualPath ?? "missing"} -> ${mismatch.expectedPath}`,
|
||||
);
|
||||
}
|
||||
|
||||
const pnpmBin = process.platform === "win32" ? "pnpm.cmd" : "pnpm";
|
||||
await runCommand(
|
||||
pnpmBin,
|
||||
["install", "--force", "--config.confirmModulesPurge=false"],
|
||||
repoRoot,
|
||||
);
|
||||
for (const mismatch of mismatches) {
|
||||
const linkPath = path.join(repoRoot, mismatch.workspaceDir, "node_modules", ...mismatch.packageName.split("/"));
|
||||
await fs.mkdir(path.dirname(linkPath), { recursive: true });
|
||||
await fs.rm(linkPath, { recursive: true, force: true });
|
||||
await fs.symlink(mismatch.expectedPath, linkPath);
|
||||
}
|
||||
|
||||
const remainingMismatches = findServerWorkspaceLinkMismatches();
|
||||
const remainingMismatches = findWorkspaceLinkMismatches(workspaceDir);
|
||||
if (remainingMismatches.length === 0) return;
|
||||
|
||||
throw new Error(
|
||||
`Workspace relink did not repair all server package links: ${remainingMismatches.map((item) => item.packageName).join(", ")}`,
|
||||
`Workspace relink did not repair all ${workspaceDir} package links: ${remainingMismatches.map((item) => item.packageName).join(", ")}`,
|
||||
);
|
||||
}
|
||||
|
||||
await ensureServerWorkspaceLinksCurrent();
|
||||
for (const workspaceDir of ["server", "ui"]) {
|
||||
await ensureWorkspaceLinksCurrent(workspaceDir);
|
||||
}
|
||||
|
||||
@@ -335,6 +335,82 @@ disable_seeded_routines() {
|
||||
|
||||
disable_seeded_routines
|
||||
|
||||
list_base_node_modules_paths() {
|
||||
cd "$base_cwd" &&
|
||||
find . \
|
||||
-mindepth 1 \
|
||||
-maxdepth 4 \
|
||||
-type d \
|
||||
-name node_modules \
|
||||
! -path './.git/*' \
|
||||
! -path './.paperclip/*' \
|
||||
| sed 's#^\./##'
|
||||
}
|
||||
|
||||
if [[ -f "$worktree_cwd/package.json" && -f "$worktree_cwd/pnpm-lock.yaml" ]]; then
|
||||
needs_install=0
|
||||
|
||||
while IFS= read -r relative_path; do
|
||||
[[ -n "$relative_path" ]] || continue
|
||||
target_path="$worktree_cwd/$relative_path"
|
||||
|
||||
if [[ -L "$target_path" || ! -e "$target_path" ]]; then
|
||||
needs_install=1
|
||||
break
|
||||
fi
|
||||
done < <(list_base_node_modules_paths)
|
||||
|
||||
if [[ "$needs_install" -eq 1 ]]; then
|
||||
backup_suffix=".paperclip-backup-${BASHPID:-$$}"
|
||||
moved_symlink_paths=()
|
||||
|
||||
while IFS= read -r relative_path; do
|
||||
[[ -n "$relative_path" ]] || continue
|
||||
target_path="$worktree_cwd/$relative_path"
|
||||
if [[ -L "$target_path" ]]; then
|
||||
backup_path="${target_path}${backup_suffix}"
|
||||
rm -rf "$backup_path"
|
||||
mv "$target_path" "$backup_path"
|
||||
moved_symlink_paths+=("$relative_path")
|
||||
fi
|
||||
done < <(list_base_node_modules_paths)
|
||||
|
||||
restore_moved_symlinks() {
|
||||
local relative_path target_path backup_path
|
||||
[[ ${#moved_symlink_paths[@]} -gt 0 ]] || return 0
|
||||
for relative_path in "${moved_symlink_paths[@]}"; do
|
||||
target_path="$worktree_cwd/$relative_path"
|
||||
backup_path="${target_path}${backup_suffix}"
|
||||
[[ -L "$backup_path" ]] || continue
|
||||
rm -rf "$target_path"
|
||||
mv "$backup_path" "$target_path"
|
||||
done
|
||||
}
|
||||
|
||||
cleanup_moved_symlinks() {
|
||||
local relative_path target_path backup_path
|
||||
[[ ${#moved_symlink_paths[@]} -gt 0 ]] || return 0
|
||||
for relative_path in "${moved_symlink_paths[@]}"; do
|
||||
target_path="$worktree_cwd/$relative_path"
|
||||
backup_path="${target_path}${backup_suffix}"
|
||||
[[ -L "$backup_path" ]] && rm "$backup_path"
|
||||
done
|
||||
}
|
||||
|
||||
(
|
||||
cd "$worktree_cwd"
|
||||
pnpm install --frozen-lockfile
|
||||
) || {
|
||||
restore_moved_symlinks
|
||||
exit 1
|
||||
}
|
||||
|
||||
cleanup_moved_symlinks
|
||||
fi
|
||||
|
||||
exit 0
|
||||
fi
|
||||
|
||||
while IFS= read -r relative_path; do
|
||||
[[ -n "$relative_path" ]] || continue
|
||||
source_path="$base_cwd/$relative_path"
|
||||
@@ -346,13 +422,5 @@ while IFS= read -r relative_path; do
|
||||
mkdir -p "$(dirname "$target_path")"
|
||||
ln -s "$source_path" "$target_path"
|
||||
done < <(
|
||||
cd "$base_cwd" &&
|
||||
find . \
|
||||
-mindepth 1 \
|
||||
-maxdepth 3 \
|
||||
-type d \
|
||||
-name node_modules \
|
||||
! -path './.git/*' \
|
||||
! -path './.paperclip/*' \
|
||||
| sed 's#^\./##'
|
||||
list_base_node_modules_paths
|
||||
)
|
||||
|
||||
92
scripts/screenshot.cjs
Normal file
92
scripts/screenshot.cjs
Normal file
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Screenshot utility for Paperclip UI.
|
||||
*
|
||||
* Reads the board token from ~/.paperclip/auth.json and injects it as a
|
||||
* Bearer header so Playwright can access authenticated pages.
|
||||
*
|
||||
* Usage:
|
||||
* node scripts/screenshot.cjs <url-or-path> [output.png] [--width 1280] [--height 800] [--wait 2000]
|
||||
*
|
||||
* Examples:
|
||||
* node scripts/screenshot.cjs /PAPA/agents/cto/instructions /tmp/shot.png
|
||||
* node scripts/screenshot.cjs http://localhost:5173/PAPA/agents/cto/instructions
|
||||
*/
|
||||
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
const os = require("os");
|
||||
|
||||
// --- CLI args -----------------------------------------------------------
|
||||
const args = process.argv.slice(2);
|
||||
function flag(name, fallback) {
|
||||
const i = args.indexOf(`--${name}`);
|
||||
if (i === -1) return fallback;
|
||||
const val = args.splice(i, 2)[1];
|
||||
return Number.isNaN(Number(val)) ? fallback : Number(val);
|
||||
}
|
||||
const width = flag("width", 1280);
|
||||
const height = flag("height", 800);
|
||||
const waitMs = flag("wait", 2000);
|
||||
|
||||
const rawUrl = args[0];
|
||||
const outPath = args[1] || "/tmp/paperclip-screenshot.png";
|
||||
|
||||
if (!rawUrl) {
|
||||
console.error("Usage: node scripts/screenshot.cjs <url-or-path> [output.png]");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// --- Auth ----------------------------------------------------------------
|
||||
function loadBoardToken() {
|
||||
const authPath = path.resolve(os.homedir(), ".paperclip/auth.json");
|
||||
try {
|
||||
const auth = JSON.parse(fs.readFileSync(authPath, "utf-8"));
|
||||
const creds = auth.credentials || {};
|
||||
const entry = Object.values(creds)[0];
|
||||
if (entry && entry.token && entry.apiBase) return { token: entry.token, apiBase: entry.apiBase };
|
||||
} catch (_) {
|
||||
// ignore
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
const cred = loadBoardToken();
|
||||
if (!cred) {
|
||||
console.error("No board token found in ~/.paperclip/auth.json");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Resolve URL — if it starts with / treat as path relative to apiBase
|
||||
const url = rawUrl.startsWith("http") ? rawUrl : `${cred.apiBase}${rawUrl}`;
|
||||
|
||||
// Validate URL before launching browser
|
||||
const origin = new URL(url).origin;
|
||||
|
||||
// --- Screenshot ----------------------------------------------------------
|
||||
(async () => {
|
||||
const { chromium } = require("playwright");
|
||||
const browser = await chromium.launch();
|
||||
try {
|
||||
const context = await browser.newContext({
|
||||
viewport: { width, height },
|
||||
});
|
||||
|
||||
const page = await context.newPage();
|
||||
// Scope the auth header to the Paperclip origin only
|
||||
await page.route(`${origin}/**`, async (route) => {
|
||||
await route.continue({
|
||||
headers: { ...route.request().headers(), Authorization: `Bearer ${cred.token}` },
|
||||
});
|
||||
});
|
||||
await page.goto(url, { waitUntil: "networkidle", timeout: 20000 });
|
||||
await page.waitForTimeout(waitMs);
|
||||
await page.screenshot({ path: outPath, fullPage: false });
|
||||
console.log(`Saved: ${outPath}`);
|
||||
} catch (err) {
|
||||
console.error(`Screenshot failed: ${err.message}`);
|
||||
process.exitCode = 1;
|
||||
} finally {
|
||||
await browser.close();
|
||||
}
|
||||
})();
|
||||
143
server/src/__tests__/adapter-registry.test.ts
Normal file
143
server/src/__tests__/adapter-registry.test.ts
Normal file
@@ -0,0 +1,143 @@
|
||||
import { describe, expect, it, beforeEach, afterEach, vi } from "vitest";
|
||||
import type { ServerAdapterModule } from "../adapters/index.js";
|
||||
import {
|
||||
detectAdapterModel,
|
||||
findActiveServerAdapter,
|
||||
findServerAdapter,
|
||||
listAdapterModels,
|
||||
registerServerAdapter,
|
||||
requireServerAdapter,
|
||||
unregisterServerAdapter,
|
||||
} from "../adapters/index.js";
|
||||
import { setOverridePaused } from "../adapters/registry.js";
|
||||
|
||||
const externalAdapter: ServerAdapterModule = {
|
||||
type: "external_test",
|
||||
execute: async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
}),
|
||||
testEnvironment: async () => ({
|
||||
adapterType: "external_test",
|
||||
status: "pass",
|
||||
checks: [],
|
||||
testedAt: new Date(0).toISOString(),
|
||||
}),
|
||||
models: [{ id: "external-model", label: "External Model" }],
|
||||
supportsLocalAgentJwt: false,
|
||||
};
|
||||
|
||||
describe("server adapter registry", () => {
|
||||
beforeEach(() => {
|
||||
unregisterServerAdapter("external_test");
|
||||
unregisterServerAdapter("claude_local");
|
||||
setOverridePaused("claude_local", false);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
unregisterServerAdapter("external_test");
|
||||
unregisterServerAdapter("claude_local");
|
||||
setOverridePaused("claude_local", false);
|
||||
});
|
||||
|
||||
it("registers external adapters and exposes them through lookup helpers", async () => {
|
||||
expect(findServerAdapter("external_test")).toBeNull();
|
||||
|
||||
registerServerAdapter(externalAdapter);
|
||||
|
||||
expect(requireServerAdapter("external_test")).toBe(externalAdapter);
|
||||
expect(await listAdapterModels("external_test")).toEqual([
|
||||
{ id: "external-model", label: "External Model" },
|
||||
]);
|
||||
});
|
||||
|
||||
it("removes external adapters when unregistered", () => {
|
||||
registerServerAdapter(externalAdapter);
|
||||
|
||||
unregisterServerAdapter("external_test");
|
||||
|
||||
expect(findServerAdapter("external_test")).toBeNull();
|
||||
expect(() => requireServerAdapter("external_test")).toThrow(
|
||||
"Unknown adapter type: external_test",
|
||||
);
|
||||
});
|
||||
|
||||
it("allows external plugin to override a built-in adapter type", () => {
|
||||
// claude_local is always built-in
|
||||
const builtIn = findServerAdapter("claude_local");
|
||||
expect(builtIn).not.toBeNull();
|
||||
|
||||
const plugin: ServerAdapterModule = {
|
||||
type: "claude_local",
|
||||
execute: async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
}),
|
||||
testEnvironment: async () => ({
|
||||
adapterType: "claude_local",
|
||||
status: "pass",
|
||||
checks: [],
|
||||
testedAt: new Date(0).toISOString(),
|
||||
}),
|
||||
models: [{ id: "plugin-model", label: "Plugin Override" }],
|
||||
supportsLocalAgentJwt: false,
|
||||
};
|
||||
|
||||
registerServerAdapter(plugin);
|
||||
|
||||
// Plugin wins
|
||||
const resolved = requireServerAdapter("claude_local");
|
||||
expect(resolved).toBe(plugin);
|
||||
expect(resolved.models).toEqual([
|
||||
{ id: "plugin-model", label: "Plugin Override" },
|
||||
]);
|
||||
});
|
||||
|
||||
it("switches active adapter behavior back to the builtin when an override is paused", async () => {
|
||||
const builtIn = findServerAdapter("claude_local");
|
||||
expect(builtIn).not.toBeNull();
|
||||
|
||||
const detectModel = vi.fn(async () => ({
|
||||
model: "plugin-model",
|
||||
provider: "plugin-provider",
|
||||
source: "plugin-source",
|
||||
}));
|
||||
const plugin: ServerAdapterModule = {
|
||||
type: "claude_local",
|
||||
execute: async () => ({
|
||||
exitCode: 0,
|
||||
signal: null,
|
||||
timedOut: false,
|
||||
}),
|
||||
testEnvironment: async () => ({
|
||||
adapterType: "claude_local",
|
||||
status: "pass",
|
||||
checks: [],
|
||||
testedAt: new Date(0).toISOString(),
|
||||
}),
|
||||
models: [{ id: "plugin-model", label: "Plugin Override" }],
|
||||
detectModel,
|
||||
supportsLocalAgentJwt: false,
|
||||
};
|
||||
|
||||
registerServerAdapter(plugin);
|
||||
|
||||
expect(findActiveServerAdapter("claude_local")).toBe(plugin);
|
||||
expect(await listAdapterModels("claude_local")).toEqual([
|
||||
{ id: "plugin-model", label: "Plugin Override" },
|
||||
]);
|
||||
expect(await detectAdapterModel("claude_local")).toMatchObject({
|
||||
model: "plugin-model",
|
||||
provider: "plugin-provider",
|
||||
});
|
||||
|
||||
expect(setOverridePaused("claude_local", true)).toBe(true);
|
||||
|
||||
expect(findActiveServerAdapter("claude_local")).not.toBe(plugin);
|
||||
expect(await listAdapterModels("claude_local")).toEqual(builtIn?.models ?? []);
|
||||
expect(await detectAdapterModel("claude_local")).toBeNull();
|
||||
expect(detectModel).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user