Compare commits


12 Commits

Author SHA1 Message Date
Dotta
83efe2a274 feat(costs): add billing, quota, and budget control plane 2026-03-14 22:06:02 -05:00
Sai Shankar
9973f86fb4 refactor(quota): move provider quota logic into adapter layer, add unit tests
- Extract all Anthropic credential/API logic into claude-local/src/server/quota.ts
- Extract all OpenAI/WHAM credential/API logic into codex-local/src/server/quota.ts
- Add optional getQuotaWindows() to ServerAdapterModule in adapter-utils
- Rewrite quota-windows.ts as a 29-line thin aggregator with zero provider knowledge
- Wire getQuotaWindows into adapter registry for claude-local and codex-local
- Add 47 unit tests covering toPercent, secondsToWindowLabel, WHAM normalization,
  readClaudeToken, readCodexToken, fetchClaudeQuota, fetchCodexQuota, fetchWithTimeout
- Add 8 unit tests covering parseDateRange validation and byProvider pro-rata math

Adding a third provider now requires only touching that provider's adapter.
2026-03-14 22:00:43 -05:00
Sai Shankar
9f8a75b5ee fix(costs): replace non-null map assertions with nullish coalescing, clarify weekData guard 2026-03-14 22:00:43 -05:00
Sai Shankar
565ceddf09 fix(costs): guard byProject against duplicate null keys, memoize ProviderQuotaCard row aggregations 2026-03-14 22:00:43 -05:00
Sai Shankar
fc3b6a80a6 fix(costs): align byAgent run filter to startedAt, tighten providerTabItems memo deps, stabilize byProject row keys 2026-03-14 22:00:43 -05:00
Sai Shankar
ca7912d8b4 feat(costs): add agent model breakdown, harden date validation, sync CostByProject type, fix quota threshold and tab-gated queries
- add byAgentModel endpoint and expandable per-agent model sub-rows in the spend tab
- validate date range inputs with isNaN + badRequest to return HTTP 400 on bad input
- move CostByProject from a local api/costs.ts definition into packages/shared types
- gate providerData query on mainTab === providers, consistent with weekData/windowData/quotaData
- fix byProject range filter from finishedAt to startedAt, consistent with byProvider runs query
- fix WHAM used_percent threshold from <= 1 to < 1 to avoid misclassifying 1% usage as 100%
- replace inline opacity style with tailwind bg-primary/85 class in ProviderQuotaCard
- reset expandedAgents set when company or date range changes
- sort agent model sub-rows by cost descending in ui memo
2026-03-14 22:00:43 -05:00
Sai Shankar
b5e5abaef7 fix(costs): harden company auth check, fix frozen date memo, hide empty quota rows
- add company existence check on quota-windows route to guard against
  sentinel and forged company IDs (was a no-op assertCompanyAccess)
- fix useDateRange minuteTick memo frozen at mount; realign interval to
  next calendar minute boundary via setTimeout + intervalRef pattern
- fix midnight timer in Costs.tsx to use stable [] dep and
  self-scheduling todayTimerRef to avoid StrictMode double-invoke
- return null for rolling window rows with no DB data instead of
  rendering $0.00 / 0 tok false zeros
- fix secondsToWindowLabel to handle windows >168h with actual day count
  instead of silently falling back to 7d
- fix byProvider.get(p) non-null assertion to use ?? [] fallback
2026-03-14 22:00:43 -05:00
Sai Shankar
43ac7d06a9 fix(costs): guard routes, fix DST ranges, sync provider state, wire live updates
- add companyAccess guard to costs route
- fix effectiveProvider/activeProvider desync via sync-back useEffect
- move ROLLING_WINDOWS to module level; replace IIFE with useMemo in ProviderQuotaCard
- add NO_COMPANY sentinel to eliminate non-null assertions before enabled guard
- fix DST-unsafe 7d/30d ranges in useDateRange (use Date constructor)
- remove providerData from providerTabItems memo deps (use byProvider)
- normalize used_percent 0-1 vs 0-100 ambiguity in quota-windows service
- rename secondsToWindowLabel index param to fallback; pass explicit labels
- add 4.33 magic number comment; fix quota window key collision
- remove rounded-md from date inputs (violates --radius: 0 theme)
- wire cost_event invalidation in LiveUpdatesProvider
2026-03-14 22:00:43 -05:00
Sai Shankar
cf8ad112be feat(costs): consolidate /usage into /costs with Spend + Providers tabs
merge Usage page into Costs as two tabs ('Spend' and 'Providers'),
extract shared date-range logic to useDateRange() hook, delete /usage
route and sidebar entry, fix quota-windows bugs from prior review
2026-03-14 22:00:43 -05:00
Sai Shankar
8757033f74 feat(usage): add subscription quota windows per provider on /usage page
reads local claude and codex auth files server-side, calls provider
quota apis (anthropic oauth usage, chatgpt wham/usage), and surfaces
live usedPercent per window in ProviderQuotaCard with threshold fill colors
2026-03-14 22:00:43 -05:00
Sai Shankar
2977697902 address greptile review: per-provider deficit notch, startedAt filter, weekRange refresh, deduplicate providerDisplayName 2026-03-14 22:00:22 -05:00
Sai Shankar
c53ab402d3 feat(ui): add resource and usage dashboard (/usage route)
adds a new /usage page that lets board operators see how much each ai
provider is consuming across any date window, with per-model breakdowns,
rolling 5h/24h/7d burn windows, weekly budget bars, and a deficit notch
when projected spend is on track to exceed the monthly budget.

- new GET /companies/:id/costs/by-provider endpoint aggregates cost events
  by provider + model with pro-rated billing type splits from heartbeat runs
- new GET /companies/:id/costs/window-spend endpoint returns rolling window
  spend (5h, 24h, 7d) per provider with no schema changes
- QuotaBar: reusable boxed-border progress bar with green/yellow/red
  threshold fill colors and optional deficit notch
- ProviderQuotaCard: per-provider card showing budget allocation bars,
  rolling windows, subscription usage, and model breakdown with token/cost
  share overlays
- Usage page: date preset toggles (mtd, 7d, 30d, ytd, all, custom),
  provider tabs, 30s polling plus ws invalidation on cost_event
- custom date range blocks queries until both dates are selected and
  treats boundaries as local-time (not utc midnight) so full days are
  included regardless of timezone
- query-key timestamps are floored to the nearest minute to prevent
  cache churn on every 30s refetch tick
2026-03-14 22:00:22 -05:00
216 changed files with 4527 additions and 67296 deletions

View File

@@ -1,201 +0,0 @@
---
name: doc-maintenance
description: >
  Audit top-level documentation (README, SPEC, PRODUCT) against recent git
  history to find drift — shipped features missing from docs or features
  listed as upcoming that already landed. Proposes minimal edits, creates
  a branch, and opens a PR. Use when asked to review docs for accuracy,
  after major feature merges, or on a periodic schedule.
---
# Doc Maintenance Skill
Detect documentation drift and fix it via PR — no rewrites, no churn.
## When to Use
- Periodic doc review (e.g. weekly or after releases)
- After major feature merges
- When asked "are our docs up to date?"
- When asked to audit README / SPEC / PRODUCT accuracy
## Target Documents
| Document | Path | What matters |
|----------|------|-------------|
| README | `README.md` | Features table, roadmap, quickstart, "what is" accuracy, "works with" table |
| SPEC | `doc/SPEC.md` | No false "not supported" claims, major model/schema accuracy |
| PRODUCT | `doc/PRODUCT.md` | Core concepts, feature list, principles accuracy |
Out of scope: DEVELOPING.md, DATABASE.md, CLI.md, doc/plans/, skill files,
release notes. These are dev-facing or ephemeral — lower risk of user-facing
confusion.
## Workflow
### Step 1 — Detect what changed
Find the last review cursor:
```bash
# Read the last-reviewed commit SHA
CURSOR_FILE=".doc-review-cursor"
if [ -f "$CURSOR_FILE" ]; then
LAST_SHA=$(cat "$CURSOR_FILE" | head -1)
else
# First run: look back 60 days
LAST_SHA=$(git log --format="%H" --after="60 days ago" --reverse | head -1)
fi
```
Then gather commits since the cursor:
```bash
git log "$LAST_SHA"..HEAD --oneline --no-merges
```
### Step 2 — Classify changes
Scan commit messages and changed files. Categorize into:
- **Feature** — new capabilities (keywords: `feat`, `add`, `implement`, `support`)
- **Breaking** — removed/renamed things (keywords: `remove`, `breaking`, `drop`, `rename`)
- **Structural** — new directories, config changes, new adapters, new CLI commands
**Ignore:** refactors, test-only changes, CI config, dependency bumps, doc-only
changes, style/formatting commits. These don't affect doc accuracy.
For borderline cases, check the actual diff — a commit titled "refactor: X"
that adds a new public API is a feature.
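A minimal triage sketch, assuming conventional-commit subjects and the `$LAST_SHA` cursor from Step 1 (borderline subjects still need the manual diff check above):
```bash
git log "$LAST_SHA"..HEAD --no-merges --format="%s" |
while read -r subject; do
  case "$subject" in
    *BREAKING*|remove*|drop*|rename*)       echo "BREAKING:  $subject" ;;
    feat*|add*|implement*|support*)         echo "FEATURE:   $subject" ;;
    fix*|refactor*|chore*|ci*|test*|docs*)  echo "IGNORE:    $subject" ;;
    *)                                      echo "REVIEW:    $subject" ;;
  esac
done
```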
### Step 3 — Build a change summary
Produce a concise list like:
```
Since last review (<sha>, <date>):
- FEATURE: Plugin system merged (runtime, SDK, CLI, slots, event bridge)
- FEATURE: Project archiving added
- BREAKING: Removed legacy webhook adapter
- STRUCTURAL: New .agents/skills/ directory convention
```
If there are no notable changes, skip to Step 7 (update cursor and exit).
### Step 4 — Audit each target doc
For each target document, read it fully and cross-reference against the change
summary. Check for:
1. **False negatives** — major shipped features not mentioned at all
2. **False positives** — features listed as "coming soon" / "roadmap" / "planned"
/ "not supported" / "TBD" that already shipped
3. **Quickstart accuracy** — install commands, prereqs, and startup instructions
still correct (README only)
4. **Feature table accuracy** — does the features section reflect current
capabilities? (README only)
5. **Works-with accuracy** — are supported adapters/integrations listed correctly?
Use `references/audit-checklist.md` as the structured checklist.
Use `references/section-map.md` to know where to look for each feature area.
### Step 5 — Create branch and apply minimal edits
```bash
# Create a branch for the doc updates
BRANCH="docs/maintenance-$(date +%Y%m%d)"
git checkout -b "$BRANCH"
```
Apply **only** the edits needed to fix drift. Rules:
- **Minimal patches only.** Fix inaccuracies, don't rewrite sections.
- **Preserve voice and style.** Match the existing tone of each document.
- **No cosmetic changes.** Don't fix typos, reformat tables, or reorganize
sections unless they're part of a factual fix.
- **No new sections.** If a feature needs a whole new section, note it in the
PR description as a follow-up — don't add it in a maintenance pass.
- **Roadmap items:** Move shipped features out of Roadmap. Add a brief mention
in the appropriate existing section if there isn't one already. Don't add
long descriptions.
### Step 6 — Open a PR
Commit the changes and open a PR:
```bash
git add README.md doc/SPEC.md doc/PRODUCT.md .doc-review-cursor
git commit -m "docs: update documentation for accuracy
- [list each fix briefly]
Co-Authored-By: Paperclip <noreply@paperclip.ing>"
git push -u origin "$BRANCH"
gh pr create \
  --title "docs: periodic documentation accuracy update" \
  --body "$(cat <<'EOF'
## Summary
Automated doc maintenance pass. Fixes documentation drift detected since
last review.
### Changes
- [list each fix]
### Change summary (since last review)
- [list notable code changes that triggered doc updates]
## Review notes
- Only factual accuracy fixes — no style/cosmetic changes
- Preserves existing voice and structure
- Larger doc additions (new sections, tutorials) noted as follow-ups
🤖 Generated by doc-maintenance skill
EOF
)"
```
### Step 7 — Update the cursor
After a successful audit (whether or not edits were needed), update the cursor:
```bash
git rev-parse HEAD > .doc-review-cursor
```
If edits were made, this is already committed in the PR branch. If no edits
were needed, commit the cursor update to the current branch.
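For the no-edit path, a minimal sketch (the commit message wording here is illustrative, not prescribed):
```bash
git rev-parse HEAD > .doc-review-cursor
git add .doc-review-cursor
git commit -m "chore: advance doc-review cursor"  # hypothetical message
```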
## Change Classification Rules
| Signal | Category | Doc update needed? |
|--------|----------|-------------------|
| `feat:`, `add`, `implement`, `support` in message | Feature | Yes if user-facing |
| `remove`, `drop`, `breaking`, `!:` in message | Breaking | Yes |
| New top-level directory or config file | Structural | Maybe |
| `fix:`, `bugfix` | Fix | No (unless it changes behavior described in docs) |
| `refactor:`, `chore:`, `ci:`, `test:` | Maintenance | No |
| `docs:` | Doc change | No (already handled) |
| Dependency bumps only | Maintenance | No |
## Patch Style Guide
- Fix the fact, not the prose
- If removing a roadmap item, don't leave a gap — remove the bullet cleanly
- If adding a feature mention, match the format of surrounding entries
(e.g. if features are in a table, add a table row)
- Keep README changes especially minimal — it shouldn't churn often
- For SPEC/PRODUCT, prefer updating existing statements over adding new ones
(e.g. change "not supported in V1" to "supported via X" rather than adding
a new section)
## Output
When the skill completes, report:
- How many commits were scanned
- How many notable changes were found
- How many doc edits were made (and to which files)
- PR link (if edits were made)
- Any follow-up items that need larger doc work

View File

@@ -1,85 +0,0 @@
# Doc Maintenance Audit Checklist
Use this checklist when auditing each target document. For each item, compare
against the change summary from git history.
## README.md
### Features table
- [ ] Each feature card reflects a shipped capability
- [ ] No feature cards for things that don't exist yet
- [ ] No major shipped features missing from the table
### Roadmap
- [ ] Nothing listed as "planned" or "coming soon" that already shipped
- [ ] No removed/cancelled items still listed
- [ ] Items reflect current priorities (cross-check with recent PRs)
### Quickstart
- [ ] `npx paperclipai onboard` command is correct
- [ ] Manual install steps are accurate (clone URL, commands)
- [ ] Prerequisites (Node version, pnpm version) are current
- [ ] Server URL and port are correct
### "What is Paperclip" section
- [ ] High-level description is accurate
- [ ] Step table (Define goal / Hire team / Approve and run) is correct
### "Works with" table
- [ ] All supported adapters/runtimes are listed
- [ ] No removed adapters still listed
- [ ] Logos and labels match current adapter names
### "Paperclip is right for you if"
- [ ] Use cases are still accurate
- [ ] No claims about capabilities that don't exist
### "Why Paperclip is special"
- [ ] Technical claims are accurate (atomic execution, governance, etc.)
- [ ] No features listed that were removed or significantly changed
### FAQ
- [ ] Answers are still correct
- [ ] No references to removed features or outdated behavior
### Development section
- [ ] Commands are accurate (`pnpm dev`, `pnpm build`, etc.)
- [ ] Link to DEVELOPING.md is correct
## doc/SPEC.md
### Company Model
- [ ] Fields match current schema
- [ ] Governance model description is accurate
### Agent Model
- [ ] Adapter types match what's actually supported
- [ ] Agent configuration description is accurate
- [ ] No features described as "not supported" or "not V1" that shipped
### Task Model
- [ ] Task hierarchy description is accurate
- [ ] Status values match current implementation
### Extensions / Plugins
- [ ] If plugins are shipped, no "not in V1" or "future" language
- [ ] Plugin model description matches implementation
### Open Questions
- [ ] Resolved questions removed or updated
- [ ] No "TBD" items that have been decided
## doc/PRODUCT.md
### Core Concepts
- [ ] Company, Employees, Task Management descriptions accurate
- [ ] Agent Execution modes described correctly
- [ ] No missing major concepts
### Principles
- [ ] Principles haven't been contradicted by shipped features
- [ ] No principles referencing removed capabilities
### User Flow
- [ ] Dream scenario still reflects actual onboarding
- [ ] Steps are achievable with current features

View File

@@ -1,22 +0,0 @@
# Section Map
Maps feature areas to specific document sections so the skill knows where to
look when a feature ships or changes.
| Feature Area | README Section | SPEC Section | PRODUCT Section |
|-------------|---------------|-------------|----------------|
| Plugins / Extensions | Features table, Roadmap | Extensions, Agent Model | Core Concepts |
| Adapters (new runtimes) | "Works with" table, FAQ | Agent Model, Agent Configuration | Employees & Agents, Agent Execution |
| Governance / Approvals | Features table, "Why special" | Board Governance, Board Approval Gates | Principles |
| Budget / Cost Control | Features table, "Why special" | Budget Delegation | Company (revenue & expenses) |
| Task Management | Features table | Task Model | Task Management |
| Org Chart / Hierarchy | Features table | Agent Model (reporting) | Employees & Agents |
| Multi-Company | Features table, FAQ | Company Model | Company |
| Heartbeats | Features table, FAQ | Agent Execution | Agent Execution |
| CLI Commands | Development section | — | — |
| Onboarding / Quickstart | Quickstart, FAQ | — | User Flow |
| Skills / Skill Injection | "Why special" | — | — |
| Company Templates | "Why special", Roadmap (ClipMart) | — | — |
| Mobile / UI | Features table | — | — |
| Project Archiving | — | — | — |
| OpenClaw Integration | "Works with" table, FAQ | Agent Model | Agent Execution |

View File

@@ -1,7 +1,7 @@
---
name: release-changelog
description: >
Generate the stable Paperclip release changelog at releases/vYYYY.MDD.P.md by
Generate the stable Paperclip release changelog at releases/v{version}.md by
reading commits, changesets, and merged PR context since the last stable tag.
---
@@ -9,33 +9,20 @@ description: >
Generate the user-facing changelog for the **stable** Paperclip release.
## Versioning Model
Paperclip uses **calendar versioning (calver)**:
- Stable releases: `YYYY.MDD.P` (e.g. `2026.318.0`)
- Canary releases: `YYYY.MDD.P-canary.N` (e.g. `2026.318.1-canary.0`)
- Git tags: `vYYYY.MDD.P` for stable, `canary/vYYYY.MDD.P-canary.N` for canary
There are no major/minor/patch bumps. The stable version is derived from the
intended release date (UTC) plus the next same-day stable patch slot.
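For illustration only, GNU `date` can derive the middle slot (`%-m` is a GNU extension; this is not part of the release tooling):
```bash
# 2026-03-18 UTC -> month 3, day 18 -> middle slot 318 -> first stable is 2026.318.0
date -u -d "2026-03-18" +"%Y.%-m%d.0"
```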
Output:
- `releases/vYYYY.MDD.P.md`
- `releases/v{version}.md`
Important rules:
Important rule:
- even if there are canary releases such as `2026.318.1-canary.0`, the changelog file stays `releases/v2026.318.1.md`
- do not derive versions from semver bump types
- do not create canary changelog files
- even if there are canary releases such as `1.2.3-canary.0`, the changelog file stays `releases/v1.2.3.md`
## Step 0 — Idempotency Check
Before generating anything, check whether the file already exists:
```bash
ls releases/vYYYY.MDD.P.md 2>/dev/null
ls releases/v{version}.md 2>/dev/null
```
If it exists:
@@ -54,14 +41,13 @@ git tag --list 'v*' --sort=-version:refname | head -1
git log v{last}..HEAD --oneline --no-merges
```
The stable version comes from one of:
The planned stable version comes from one of:
- an explicit maintainer request
- `./scripts/release.sh stable --date YYYY-MM-DD --print-version`
- the chosen bump type applied to the last stable tag
- the release plan already agreed in `doc/RELEASING.md`
Do not derive the changelog version from a canary tag or prerelease suffix.
Do not derive major/minor/patch bumps from API intent — calver uses the date and same-day stable slot.
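Assuming the script behaves as documented above, resolving a date looks like:
```bash
./scripts/release.sh stable --date 2026-03-18 --print-version
# prints 2026.318.0, or 2026.318.1 if a stable already shipped that UTC day
```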
## Step 2 — Gather the Raw Inputs
@@ -87,6 +73,7 @@ Look for:
- destructive migrations
- removed or changed API fields/endpoints
- renamed or removed config keys
- `major` changesets
- `BREAKING:` or `BREAKING CHANGE:` commit signals
Key commands:
@@ -98,8 +85,7 @@ git diff v{last}..HEAD -- server/src/routes/ server/src/api/
git log v{last}..HEAD --format="%s" | rg -n 'BREAKING CHANGE|BREAKING:|^[a-z]+!:' || true
```
If breaking changes are detected, flag them prominently — they must appear in the
Breaking Changes section with an upgrade path.
If the requested bump is lower than the minimum required bump, flag that before the release proceeds.
## Step 4 — Categorize for Users
@@ -144,9 +130,9 @@ Rules:
Template:
```markdown
# vYYYY.MDD.P
# v{version}
> Released: YYYY-MM-DD
> Released: {YYYY-MM-DD}
## Breaking Changes

View File

@@ -2,21 +2,23 @@
name: release
description: >
Coordinate a full Paperclip release across engineering verification, npm,
GitHub, smoke testing, and announcement follow-up. Use when leadership asks
to ship a release, not merely to discuss versioning.
GitHub, website publishing, and announcement follow-up. Use when leadership
asks to ship a release, not merely to discuss version bumps.
---
# Release Coordination Skill
Run the full Paperclip maintainer release workflow, not just an npm publish.
Run the full Paperclip release as a maintainer workflow, not just an npm publish.
This skill coordinates:
- stable changelog drafting via `release-changelog`
- canary verification and publish status from `master`
- release-train setup via `scripts/release-start.sh`
- prerelease canary publishing via `scripts/release.sh --canary`
- Docker smoke testing via `scripts/docker-onboard-smoke.sh`
- manual stable promotion from a chosen source ref
- GitHub Release creation
- stable publishing via `scripts/release.sh`
- pushing the stable branch commit and tag
- GitHub Release creation via `scripts/create-github-release.sh`
- website / announcement follow-up tasks
## Trigger
@@ -24,9 +26,8 @@ This skill coordinates:
Use this skill when leadership asks for:
- "do a release"
- "ship the release"
- "promote this canary to stable"
- "cut the stable release"
- "ship the next patch/minor/major"
- "release vX.Y.Z"
## Preconditions
@@ -34,10 +35,10 @@ Before proceeding, verify all of the following:
1. `.agents/skills/release-changelog/SKILL.md` exists and is usable.
2. The repo working tree is clean, including untracked files.
3. There is at least one canary or candidate commit since the last stable tag.
4. The candidate SHA has passed the verification gate or is about to.
5. If manifests changed, the CI-owned `pnpm-lock.yaml` refresh is already merged on `master`.
6. npm publish rights are available through GitHub trusted publishing, or through local npm auth for emergency/manual use.
3. There are commits since the last stable tag.
4. The release SHA has passed the verification gate or is about to.
5. If package manifests changed, the CI-owned `pnpm-lock.yaml` refresh is already merged on `master` before the release branch is cut.
6. npm publish rights are available locally, or the GitHub release workflow is being used with trusted publishing.
7. If running through Paperclip, you have issue context for status updates and follow-up task creation.
If any precondition fails, stop and report the blocker.
@@ -46,67 +47,78 @@ If any precondition fails, stop and report the blocker.
Collect these inputs up front:
- whether the target is a canary check or a stable promotion
- the candidate `source_ref` for stable
- whether the stable run is dry-run or live
- requested bump: `patch`, `minor`, or `major`
- whether this run is a dry run or live release
- whether the release is being run locally or from GitHub Actions
- release issue / company context for website and announcement follow-up
## Step 0 — Release Model
Paperclip now uses a commit-driven release model:
Paperclip now uses this release model:
1. every push to `master` publishes a canary automatically
2. canaries use `YYYY.MDD.P-canary.N`
3. stable releases use `YYYY.MDD.P`
4. the middle slot is `MDD`, where `M` is the UTC month and `DD` is the zero-padded UTC day
5. the stable patch slot increments when more than one stable ships on the same UTC date
6. stable releases are manually promoted from a chosen tested commit or canary source commit
7. only stable releases get `releases/vYYYY.MDD.P.md`, git tag `vYYYY.MDD.P`, and a GitHub Release
1. Start or resume `release/X.Y.Z`
2. Draft the **stable** changelog as `releases/vX.Y.Z.md`
3. Publish one or more **prerelease canaries** such as `X.Y.Z-canary.0`
4. Smoke test the canary via Docker
5. Publish the stable version `X.Y.Z`
6. Push the stable branch commit and tag
7. Create the GitHub Release
8. Merge `release/X.Y.Z` back to `master` without squash or rebase
9. Complete website and announcement surfaces
Critical consequences:
Critical consequence:
- do not use release branches as the default path
- do not derive major/minor/patch bumps
- do not create canary changelog files
- do not create canary GitHub Releases
- Canaries do **not** use promote-by-dist-tag anymore.
- The changelog remains stable-only. Do not create `releases/vX.Y.Z-canary.N.md`.
## Step 1 — Choose the Candidate
## Step 1 — Decide the Stable Version
For canary validation:
- inspect the latest successful canary run on `master`
- record the canary version and source SHA
For stable promotion:
1. choose the tested source ref
2. confirm it is the exact SHA you want to promote
3. resolve the target stable version with `./scripts/release.sh stable --date YYYY-MM-DD --print-version`
Useful commands:
Start the release train first:
```bash
git tag --list 'v*' --sort=-version:refname | head -1
git log --oneline --no-merges
npm view paperclipai@canary version
./scripts/release-start.sh {patch|minor|major}
```
Then run release preflight:
```bash
./scripts/release-preflight.sh canary {patch|minor|major}
# or
./scripts/release-preflight.sh stable {patch|minor|major}
```
Then use the last stable tag as the base:
```bash
LAST_TAG=$(git tag --list 'v*' --sort=-version:refname | head -1)
git log "${LAST_TAG}..HEAD" --oneline --no-merges
git diff --name-only "${LAST_TAG}..HEAD" -- packages/db/src/migrations/
git diff "${LAST_TAG}..HEAD" -- packages/db/src/schema/
git log "${LAST_TAG}..HEAD" --format="%s" | rg -n 'BREAKING CHANGE|BREAKING:|^[a-z]+!:' || true
```
Bump policy:
- destructive migrations, removed APIs, breaking config changes -> `major`
- additive migrations or clearly user-visible features -> at least `minor`
- fixes only -> `patch`
If the requested bump is too low, escalate it and explain why.
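A rough probe for the minimum bump from commit-message signals alone (migration and API diffs still need the checks above):
```bash
LAST_TAG=$(git tag --list 'v*' --sort=-version:refname | head -1)
SUBJECTS=$(git log "${LAST_TAG}..HEAD" --format="%s")
if echo "$SUBJECTS" | grep -Eq 'BREAKING CHANGE|BREAKING:|^[a-z]+!:'; then
  echo "minimum bump: major"
elif echo "$SUBJECTS" | grep -Eq '^feat'; then
  echo "minimum bump: minor"
else
  echo "minimum bump: patch"
fi
```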
## Step 2 — Draft the Stable Changelog
Stable changelog files live at:
Invoke `release-changelog` and generate:
- `releases/vYYYY.MDD.P.md`
Invoke `release-changelog` and generate or update the stable notes only.
- `releases/vX.Y.Z.md`
Rules:
- review the draft with a human before publish
- preserve manual edits if the file already exists
- keep the filename stable-only
- do not create a canary changelog file
- keep the heading and filename stable-only, for example `v1.2.3`
- do not create a separate canary changelog file
## Step 3 — Verify the Candidate SHA
## Step 3 — Verify the Release SHA
Run the standard gate:
@@ -116,27 +128,41 @@ pnpm test:run
pnpm build
```
If the GitHub release workflow will run the publish, it can rerun this gate. Still report local status if you checked it.
If the release will be run through GitHub Actions, the workflow can rerun this gate. Still report whether the local tree currently passes.
For PRs that touch release logic, the repo also runs a canary release dry-run in CI. That is a release-specific guard, not a substitute for the standard gate.
The GitHub Actions release workflow installs with `pnpm install --frozen-lockfile`. Treat that as a release invariant, not a nuisance: if manifests changed and the lockfile refresh PR has not landed yet, stop and wait for `master` to contain the committed lockfile before shipping.
## Step 4 — Validate the Canary
## Step 4 — Publish a Canary
The normal canary path is automatic from `master` via:
Run from the `release/X.Y.Z` branch:
- `.github/workflows/release.yml`
```bash
./scripts/release.sh {patch|minor|major} --canary --dry-run
./scripts/release.sh {patch|minor|major} --canary
```
Confirm:
What this means:
1. verification passed
2. npm canary publish succeeded
3. git tag `canary/vYYYY.MDD.P-canary.N` exists
- npm receives `X.Y.Z-canary.N` under dist-tag `canary`
- `latest` remains unchanged
- no git tag is created
- the script cleans the working tree afterward
Useful checks:
Guard:
- if the current stable is `0.2.7`, the next patch canary is `0.2.8-canary.0`
- the tooling must never publish `0.2.7-canary.N` after `0.2.7` is already stable
After publish, verify:
```bash
npm view paperclipai@canary version
git tag --list 'canary/v*' --sort=-version:refname | head -5
```
The user install path is:
```bash
npx paperclipai@canary onboard
```
## Step 5 — Smoke Test the Canary
@@ -147,70 +173,60 @@ Run:
PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
```
Useful isolated variant:
```bash
HOST_PORT=3232 DATA_DIR=./data/release-smoke-canary PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
```
Confirm:
1. install succeeds
2. onboarding completes without crashes
3. the server boots
4. the UI loads
5. basic company creation and dashboard load work
2. onboarding completes
3. server boots
4. UI loads
5. basic company/dashboard flow works
If smoke testing fails:
- stop the stable release
- fix the issue on `master`
- wait for the next automatic canary
- rerun smoke testing
- fix the issue
- publish another canary
- repeat the smoke test
## Step 6 — Preview or Publish Stable
Each retry should create a higher canary ordinal, while the stable target version can stay the same.
The normal stable path is manual `workflow_dispatch` on:
## Step 6 — Publish Stable
- `.github/workflows/release.yml`
Inputs:
- `source_ref`
- `stable_date`
- `dry_run`
Before live stable:
1. resolve the target stable version with `./scripts/release.sh stable --date YYYY-MM-DD --print-version`
2. ensure `releases/vYYYY.MDD.P.md` exists on the source ref
3. run the stable workflow in dry-run mode first when practical
4. then run the real stable publish
The stable workflow:
- re-verifies the exact source ref
- computes the next stable patch slot for the chosen UTC date
- publishes `YYYY.MDD.P` under dist-tag `latest`
- creates git tag `vYYYY.MDD.P`
- creates or updates the GitHub Release from `releases/vYYYY.MDD.P.md`
Local emergency/manual commands:
Once the SHA is vetted, run:
```bash
./scripts/release.sh stable --dry-run
./scripts/release.sh stable
git push public-gh refs/tags/vYYYY.MDD.P
./scripts/create-github-release.sh YYYY.MDD.P
./scripts/release.sh {patch|minor|major} --dry-run
./scripts/release.sh {patch|minor|major}
```
## Step 7 — Finish the Other Surfaces
Stable publish does this:
- publishes `X.Y.Z` to npm under `latest`
- creates the local release commit
- creates the local git tag `vX.Y.Z`
Stable publish does **not** push the release for you.
## Step 7 — Push and Create GitHub Release
After stable publish succeeds:
```bash
git push public-gh HEAD --follow-tags
./scripts/create-github-release.sh X.Y.Z
```
Use the stable changelog file as the GitHub Release notes source.
Then open the PR from `release/X.Y.Z` back to `master` and merge without squash or rebase.
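A sketch of that merge-back with `gh`, substituting the real version for `X.Y.Z`; `--merge` creates a merge commit instead of squashing or rebasing:
```bash
gh pr create --base master --head "release/X.Y.Z" \
  --title "release: merge X.Y.Z back to master"
gh pr merge "release/X.Y.Z" --merge
```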
## Step 8 — Finish the Other Surfaces
Create or verify follow-up work for:
- website changelog publishing
- launch post / social announcement
- release summary in Paperclip issue context
- any release summary in Paperclip issue context
These should reference the stable release, not the canary.
@@ -220,9 +236,9 @@ If the canary is bad:
- publish another canary, do not ship stable
If stable npm publish succeeds but tag push or GitHub release creation fails:
If stable npm publish succeeds but push or GitHub release creation fails:
- fix the git/GitHub issue immediately from the same release result
- fix the git/GitHub issue immediately from the same checkout
- do not republish the same version
If `latest` is bad after stable publish:
@@ -231,17 +247,15 @@ If `latest` is bad after stable publish:
./scripts/rollback-latest.sh <last-good-version>
```
Then fix forward with a new stable release.
Then fix forward with a new patch release.
## Output
When the skill completes, provide:
- candidate SHA and tested canary version, if relevant
- stable version, if promoted
- stable version and, if relevant, the final canary version tested
- verification status
- npm status
- smoke-test status
- git tag / GitHub Release status
- website / announcement follow-up status
- rollback recommendation if anything is still partially complete

.changeset/README.md Normal file
View File

@@ -0,0 +1,8 @@
# Changesets
Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works
with multi-package repos, or single-package repos to help you version and publish your code. You can
find the full documentation for it [in our repository](https://github.com/changesets/changesets).
We have a quick list of common questions to get you started engaging with this project in
[our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md).

.changeset/config.json Normal file
View File

@@ -0,0 +1,11 @@
{
  "$schema": "https://unpkg.com/@changesets/config@3.1.3/schema.json",
  "changelog": "@changesets/cli/changelog",
  "commit": false,
  "fixed": [["@paperclipai/*", "paperclipai"]],
  "linked": [],
  "access": "public",
  "baseBranch": "master",
  "updateInternalDependencies": "patch",
  "ignore": ["@paperclipai/ui"]
}
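With this config in place, the standard Changesets flow applies; the `fixed` group keeps `@paperclipai/*` and `paperclipai` on one shared version:
```bash
pnpm changeset           # record a pending release with a bump type
pnpm changeset version   # consume pending changesets; bump the fixed group together
pnpm changeset publish   # publish the bumped packages to npm
```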

.github/CODEOWNERS vendored
View File

@@ -1,10 +0,0 @@
# Replace @cryppadotta if a different maintainer or team should own release infrastructure.
.github/** @cryppadotta @devinfoley
scripts/release*.sh @cryppadotta @devinfoley
scripts/release-*.mjs @cryppadotta @devinfoley
scripts/create-github-release.sh @cryppadotta @devinfoley
scripts/rollback-latest.sh @cryppadotta @devinfoley
doc/RELEASING.md @cryppadotta @devinfoley
doc/PUBLISHING.md @cryppadotta @devinfoley
doc/RELEASE-AUTOMATION-SETUP.md @cryppadotta @devinfoley

View File

@@ -26,7 +26,7 @@ jobs:
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 24
node-version: 20
cache: pnpm
- name: Install dependencies
@@ -40,9 +40,3 @@ jobs:
- name: Build
run: pnpm build
- name: Release canary dry run
run: |
git checkout -B master HEAD
git checkout -- pnpm-lock.yaml
./scripts/release.sh canary --skip-verify --dry-run

View File

@@ -79,15 +79,3 @@ jobs:
else
echo "PR #$existing already exists, branch updated via force push."
fi
- name: Enable auto-merge for lockfile PR
env:
GH_TOKEN: ${{ github.token }}
run: |
pr_url="$(gh pr list --head chore/refresh-lockfile --json url --jq '.[0].url')"
if [ -z "$pr_url" ]; then
echo "Error: lockfile PR was not found." >&2
exit 1
fi
gh pr merge --auto --squash --delete-branch "$pr_url"

View File

@@ -1,118 +0,0 @@
name: Release Smoke
on:
  workflow_dispatch:
    inputs:
      paperclip_version:
        description: Published Paperclip dist-tag to test
        required: true
        default: canary
        type: choice
        options:
          - canary
          - latest
      host_port:
        description: Host port for the Docker smoke container
        required: false
        default: "3232"
        type: string
      artifact_name:
        description: Artifact name for uploaded diagnostics
        required: false
        default: release-smoke
        type: string
  workflow_call:
    inputs:
      paperclip_version:
        required: true
        type: string
      host_port:
        required: false
        default: "3232"
        type: string
      artifact_name:
        required: false
        default: release-smoke
        type: string
jobs:
  smoke:
    runs-on: ubuntu-latest
    timeout-minutes: 45
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 9.15.4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 24
          cache: pnpm
      - name: Install dependencies
        run: pnpm install --no-frozen-lockfile
      - name: Install Playwright browser
        run: npx playwright install --with-deps chromium
      - name: Launch Docker smoke harness
        run: |
          metadata_file="$RUNNER_TEMP/release-smoke.env"
          HOST_PORT="${{ inputs.host_port }}" \
          DATA_DIR="$RUNNER_TEMP/release-smoke-data" \
          PAPERCLIPAI_VERSION="${{ inputs.paperclip_version }}" \
          SMOKE_DETACH=true \
          SMOKE_METADATA_FILE="$metadata_file" \
          ./scripts/docker-onboard-smoke.sh
          set -a
          source "$metadata_file"
          set +a
          {
            echo "SMOKE_BASE_URL=$SMOKE_BASE_URL"
            echo "SMOKE_ADMIN_EMAIL=$SMOKE_ADMIN_EMAIL"
            echo "SMOKE_ADMIN_PASSWORD=$SMOKE_ADMIN_PASSWORD"
            echo "SMOKE_CONTAINER_NAME=$SMOKE_CONTAINER_NAME"
            echo "SMOKE_DATA_DIR=$SMOKE_DATA_DIR"
            echo "SMOKE_IMAGE_NAME=$SMOKE_IMAGE_NAME"
            echo "SMOKE_PAPERCLIPAI_VERSION=$SMOKE_PAPERCLIPAI_VERSION"
            echo "SMOKE_METADATA_FILE=$metadata_file"
          } >> "$GITHUB_ENV"
      - name: Run release smoke Playwright suite
        env:
          PAPERCLIP_RELEASE_SMOKE_BASE_URL: ${{ env.SMOKE_BASE_URL }}
          PAPERCLIP_RELEASE_SMOKE_EMAIL: ${{ env.SMOKE_ADMIN_EMAIL }}
          PAPERCLIP_RELEASE_SMOKE_PASSWORD: ${{ env.SMOKE_ADMIN_PASSWORD }}
        run: pnpm run test:release-smoke
      - name: Capture Docker logs
        if: always()
        run: |
          if [[ -n "${SMOKE_CONTAINER_NAME:-}" ]]; then
            docker logs "$SMOKE_CONTAINER_NAME" >"$RUNNER_TEMP/docker-onboard-smoke.log" 2>&1 || true
          fi
      - name: Upload diagnostics
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.artifact_name }}
          path: |
            ${{ runner.temp }}/docker-onboard-smoke.log
            ${{ env.SMOKE_METADATA_FILE }}
            tests/release-smoke/playwright-report/
            tests/release-smoke/test-results/
          retention-days: 14
      - name: Stop Docker smoke container
        if: always()
        run: |
          if [[ -n "${SMOKE_CONTAINER_NAME:-}" ]]; then
            docker rm -f "$SMOKE_CONTAINER_NAME" >/dev/null 2>&1 || true
          fi

View File

@@ -1,33 +1,38 @@
name: Release
on:
push:
branches:
- master
workflow_dispatch:
inputs:
source_ref:
description: Commit SHA, branch, or tag to publish as stable
channel:
description: Release channel
required: true
type: string
default: master
stable_date:
description: Enter a UTC date in YYYY-MM-DD format, for example 2026-03-18. Do not enter a version string. The workflow will resolve that date to a stable version such as 2026.318.0, then 2026.318.1 for the next same-day stable.
required: false
type: string
type: choice
default: canary
options:
- canary
- stable
bump:
description: Semantic version bump
required: true
type: choice
default: patch
options:
- patch
- minor
- major
dry_run:
description: Preview the stable release without publishing
description: Preview the release without publishing
required: true
type: boolean
default: false
default: true
concurrency:
group: release-${{ github.event_name }}-${{ github.ref }}
group: release-${{ github.ref }}
cancel-in-progress: false
jobs:
verify_canary:
if: github.event_name == 'push'
verify:
if: startsWith(github.ref, 'refs/heads/release/')
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
@@ -51,7 +56,7 @@ jobs:
cache: pnpm
- name: Install dependencies
run: pnpm install --no-frozen-lockfile
run: pnpm install --frozen-lockfile
- name: Typecheck
run: pnpm -r typecheck
@@ -62,12 +67,12 @@ jobs:
- name: Build
run: pnpm build
publish_canary:
if: github.event_name == 'push'
needs: verify_canary
publish:
if: startsWith(github.ref, 'refs/heads/release/')
needs: verify
runs-on: ubuntu-latest
timeout-minutes: 45
environment: npm-canary
environment: npm-release
permissions:
contents: write
id-token: write
@@ -90,168 +95,34 @@ jobs:
cache: pnpm
- name: Install dependencies
run: pnpm install --no-frozen-lockfile
- name: Restore tracked install-time changes
run: git checkout -- pnpm-lock.yaml
run: pnpm install --frozen-lockfile
- name: Configure git author
run: |
git config user.name "github-actions[bot]"
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
- name: Publish canary
- name: Run release script
env:
GITHUB_ACTIONS: "true"
run: ./scripts/release.sh canary --skip-verify
- name: Push canary tag
run: |
tag="$(git tag --points-at HEAD | grep '^canary/v' | head -1)"
if [ -z "$tag" ]; then
echo "Error: no canary tag points at HEAD after release." >&2
exit 1
args=("${{ inputs.bump }}")
if [ "${{ inputs.channel }}" = "canary" ]; then
args+=("--canary")
fi
git push origin "refs/tags/${tag}"
verify_stable:
if: github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ inputs.source_ref }}
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9.15.4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 24
cache: pnpm
- name: Install dependencies
run: pnpm install --no-frozen-lockfile
- name: Typecheck
run: pnpm -r typecheck
- name: Run tests
run: pnpm test:run
- name: Build
run: pnpm build
preview_stable:
if: github.event_name == 'workflow_dispatch' && inputs.dry_run
needs: verify_stable
runs-on: ubuntu-latest
timeout-minutes: 45
permissions:
contents: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ inputs.source_ref }}
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9.15.4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 24
cache: pnpm
- name: Install dependencies
run: pnpm install --no-frozen-lockfile
- name: Dry-run stable release
env:
GITHUB_ACTIONS: "true"
run: |
args=(stable --skip-verify --dry-run)
if [ -n "${{ inputs.stable_date }}" ]; then
args+=(--date "${{ inputs.stable_date }}")
if [ "${{ inputs.dry_run }}" = "true" ]; then
args+=("--dry-run")
fi
./scripts/release.sh "${args[@]}"
publish_stable:
if: github.event_name == 'workflow_dispatch' && !inputs.dry_run
needs: verify_stable
runs-on: ubuntu-latest
timeout-minutes: 45
environment: npm-stable
permissions:
contents: write
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ inputs.source_ref }}
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9.15.4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 24
cache: pnpm
- name: Install dependencies
run: pnpm install --no-frozen-lockfile
- name: Restore tracked install-time changes
run: git checkout -- pnpm-lock.yaml
- name: Configure git author
run: |
git config user.name "github-actions[bot]"
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
- name: Publish stable
env:
GITHUB_ACTIONS: "true"
run: |
args=(stable --skip-verify)
if [ -n "${{ inputs.stable_date }}" ]; then
args+=(--date "${{ inputs.stable_date }}")
fi
./scripts/release.sh "${args[@]}"
- name: Push stable tag
run: |
tag="$(git tag --points-at HEAD | grep '^v' | head -1)"
if [ -z "$tag" ]; then
echo "Error: no stable tag points at HEAD after release." >&2
exit 1
fi
git push origin "refs/tags/${tag}"
- name: Push stable release branch commit and tag
if: inputs.channel == 'stable' && !inputs.dry_run
run: git push origin "HEAD:${GITHUB_REF_NAME}" --follow-tags
- name: Create GitHub Release
if: inputs.channel == 'stable' && !inputs.dry_run
env:
GH_TOKEN: ${{ github.token }}
PUBLISH_REMOTE: origin
run: |
version="$(git tag --points-at HEAD | grep '^v' | head -1 | sed 's/^v//')"
if [ -z "$version" ]; then

.gitignore vendored
View File

@@ -37,16 +37,7 @@ tmp/
.vscode/
.claude/settings.local.json
.paperclip-local/
/.idea/
/.agents/
# Doc maintenance cursor
.doc-review-cursor
# Playwright
tests/e2e/test-results/
tests/e2e/playwright-report/
tests/release-smoke/test-results/
tests/release-smoke/playwright-report/
.superset/
.claude/worktrees/
tests/e2e/playwright-report/

View File

@@ -7,7 +7,6 @@ We really appreciate both small fixes and thoughtful larger changes.
## Two Paths to Get Your Pull Request Accepted
### Path 1: Small, Focused Changes (Fastest way to get merged)
- Pick **one** clear thing to fix/improve
- Touch the **smallest possible number of files**
- Make sure the change is very targeted and easy to review
@@ -17,7 +16,6 @@ We really appreciate both small fixes and thoughtful larger changes.
These almost always get merged quickly when they're clean.
### Path 2: Bigger or Impactful Changes
- **First** talk about it in Discord → #dev channel
→ Describe what you're trying to solve
→ Share rough ideas / approach
@@ -32,43 +30,12 @@ These almost always get merged quickly when they're clean.
PRs that follow this path are **much** more likely to be accepted, even when they're large.
## General Rules (both paths)
- Write clear commit messages
- Keep PR title + description meaningful
- One PR = one logical change (unless it's a small related group)
- Run tests locally first
- Be kind in discussions 😄
## Writing a Good PR message
Please include a "thinking path" at the top of your PR message that walks from the top of the project down to the thing you fixed. E.g.:
### Thinking Path Example 1:
> - Paperclip orchestrates ai-agents for zero-human companies
> - There are many types of adapters for each LLM model provider
> - But LLMs have a context limit and not all agents can automatically compact their context
> - So we need to have an adapter-specific configuration for which adapters can and cannot automatically compact their context
> - This pull request adds per-adapter configuration of compaction, either auto or paperclip managed
> - That way we can get optimal performance from any adapter/provider in Paperclip
### Thinking Path Example 2:
> - Paperclip orchestrates ai-agents for zero-human companies
> - But humans want to watch the agents and oversee their work
> - Human users also operate in teams and so they need their own logins, profiles, views etc.
> - So we have a multi-user system for humans
> - But humans want to be able to update their own profile picture and avatar
> - But the avatar upload form wasn't saving the avatar to the file storage system
> - So this PR fixes the avatar upload form to use the file storage service
> - The benefit is we don't have a one-off file storage for just one aspect of the system, which would cause confusion and extra configuration
Then have the rest of your normal PR message after the Thinking Path.
This should include details about what you did, why you did it, why it matters & the benefits, how we can verify it works, and any risks.
If your change is visible, please include screenshots (use something like the [agent-browser skill](https://github.com/vercel-labs/agent-browser/blob/main/skills/agent-browser/SKILL.md) or similar to take them). Ideally, include before and after screenshots.
Questions? Just ask in #dev — we're happy to help.
Happy hacking!

View File

@@ -239,7 +239,7 @@ See [doc/DEVELOPING.md](doc/DEVELOPING.md) for the full development guide.
- ⚪ ClipMart - buy and sell entire agent companies
- ⚪ Easy agent configurations / easier to understand
- ⚪ Better support for harness engineering
- 🟢 Plugin system (e.g. if you want to add a knowledgebase, custom tracing, queues, etc)
- Plugin system (e.g. if you want to add a knowledgebase, custom tracing, queues, etc)
- ⚪ Better docs
<br/>

View File

@@ -16,13 +16,10 @@
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"url": "https://github.com/paperclipai/paperclip.git",
"directory": "cli"
},
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"files": [
"dist"
],

View File

@@ -8,16 +8,12 @@ function makeCompany(overrides: Partial<Company>): Company {
name: "Alpha",
description: null,
status: "active",
pauseReason: null,
pausedAt: null,
issuePrefix: "ALP",
issueCounter: 1,
budgetMonthlyCents: 0,
spentMonthlyCents: 0,
requireBoardApprovalForNewAgents: false,
brandColor: null,
logoAssetId: null,
logoUrl: null,
createdAt: new Date(),
updatedAt: new Date(),
...overrides,

View File

@@ -89,10 +89,6 @@ docker compose -f docker-compose.quickstart.yml up --build
See `doc/DOCKER.md` for API key wiring (`OPENAI_API_KEY` / `ANTHROPIC_API_KEY`) and persistence details.
## Docker For Untrusted PR Review
For a separate review-oriented container that keeps `codex`/`claude` login state in Docker volumes and checks out PRs into an isolated scratch workspace, see `doc/UNTRUSTED-PR-REVIEW.md`.
## Database in Dev (Auto-Handled)
For local development, leave `DATABASE_URL` unset.

View File

@@ -93,12 +93,6 @@ Notes:
- Without API keys, the app still runs normally.
- Adapter environment checks in Paperclip will surface missing auth/CLI prerequisites.
## Untrusted PR Review Container
If you want a separate Docker environment for reviewing untrusted pull requests with `codex` or `claude`, use the dedicated review workflow in `doc/UNTRUSTED-PR-REVIEW.md`.
That setup keeps CLI auth state in Docker volumes instead of your host home directory and uses a separate scratch workspace for PR checkouts and preview runs.
## Onboard Smoke Test (Ubuntu + npm only)
Use this when you want to mimic a fresh machine that only has Ubuntu + npm and verify:
@@ -120,7 +114,6 @@ Useful overrides:
```sh
HOST_PORT=3200 PAPERCLIPAI_VERSION=latest ./scripts/docker-onboard-smoke.sh
PAPERCLIP_DEPLOYMENT_MODE=authenticated PAPERCLIP_DEPLOYMENT_EXPOSURE=private ./scripts/docker-onboard-smoke.sh
SMOKE_DETACH=true SMOKE_METADATA_FILE=/tmp/paperclip-smoke.env PAPERCLIPAI_VERSION=latest ./scripts/docker-onboard-smoke.sh
```
Notes:
@@ -132,5 +125,4 @@ Notes:
- Smoke script also defaults `PAPERCLIP_PUBLIC_URL` to `http://localhost:<HOST_PORT>` so bootstrap invite URLs and auth callbacks use the reachable host port instead of the container's internal `3100`.
- In authenticated mode, the smoke script defaults `SMOKE_AUTO_BOOTSTRAP=true` and drives the real bootstrap path automatically: it signs up a real user, runs `paperclipai auth bootstrap-ceo` inside the container to mint a real bootstrap invite, accepts that invite over HTTP, and verifies board session access.
- Run the script in the foreground to watch the onboarding flow; stop with `Ctrl+C` after validation.
- Set `SMOKE_DETACH=true` to leave the container running for automation and optionally write shell-ready metadata to `SMOKE_METADATA_FILE`.
- The image definition is in `Dockerfile.onboard-smoke`.

View File

@@ -1,19 +1,18 @@
# Publishing to npm
Low-level reference for how Paperclip packages are prepared and published to npm.
Low-level reference for how Paperclip packages are built for npm.
For the maintainer workflow, use [doc/RELEASING.md](RELEASING.md). This document focuses on packaging internals.
For the maintainer release workflow, use [doc/RELEASING.md](RELEASING.md). This document is only about packaging internals and the scripts that produce publishable artifacts.
## Current Release Entry Points
Use these scripts:
Use these scripts instead of older one-off publish commands:
- [`scripts/release.sh`](../scripts/release.sh) for canary and stable publish flows
- [`scripts/create-github-release.sh`](../scripts/create-github-release.sh) after pushing a stable tag
- [`scripts/rollback-latest.sh`](../scripts/rollback-latest.sh) to repoint `latest`
- [`scripts/build-npm.sh`](../scripts/build-npm.sh) for the CLI packaging build
Paperclip no longer uses release branches or Changesets for publishing.
- [`scripts/release-start.sh`](../scripts/release-start.sh) to create or resume `release/X.Y.Z`
- [`scripts/release-preflight.sh`](../scripts/release-preflight.sh) before any canary or stable release
- [`scripts/release.sh`](../scripts/release.sh) for canary and stable npm publishes
- [`scripts/rollback-latest.sh`](../scripts/rollback-latest.sh) to repoint `latest` during rollback
- [`scripts/create-github-release.sh`](../scripts/create-github-release.sh) after pushing the stable branch tag
## Why the CLI needs special packaging
@@ -24,7 +23,7 @@ The CLI package, `paperclipai`, imports code from workspace packages such as:
- `@paperclipai/shared`
- adapter packages under `packages/adapters/`
Those workspace references are valid in development but not in a publishable npm package. The release flow rewrites versions temporarily, then builds a publishable CLI bundle.
Those workspace references use `workspace:*` during development. npm cannot install those references directly for end users, so the release build has to transform the CLI into a publishable standalone package.
## `build-npm.sh`
@@ -34,107 +33,89 @@ Run:
./scripts/build-npm.sh
```
This script:
This script does six things:
1. runs the forbidden token check unless `--skip-checks` is supplied
2. runs `pnpm -r typecheck`
3. bundles the CLI entrypoint with esbuild into `cli/dist/index.js`
4. verifies the bundled entrypoint with `node --check`
5. rewrites `cli/package.json` into a publishable npm manifest and stores the dev copy as `cli/package.dev.json`
6. copies the repo `README.md` into `cli/README.md` for npm metadata
1. Runs the forbidden token check unless `--skip-checks` is supplied
2. Runs `pnpm -r typecheck`
3. Bundles the CLI entrypoint with esbuild into `cli/dist/index.js`
4. Verifies the bundled entrypoint with `node --check`
5. Rewrites `cli/package.json` into a publishable npm manifest and stores the dev copy as `cli/package.dev.json`
6. Copies the repo `README.md` into `cli/README.md` for npm package metadata
After the release script exits, the dev manifest and temporary files are restored automatically.
`build-npm.sh` is used by the release script so that npm users install a real package rather than unresolved workspace dependencies.
## Package discovery and versioning
## Publishable CLI layout
Public packages are discovered from:
During development, [`cli/package.json`](../cli/package.json) contains workspace references.
During release preparation:
- `cli/package.json` becomes a publishable manifest with external npm dependency ranges
- `cli/package.dev.json` stores the development manifest temporarily
- `cli/dist/index.js` contains the bundled CLI entrypoint
- `cli/README.md` is copied in for npm metadata
After release finalization, the release script restores the development manifest and removes the temporary README copy.
## Package discovery
The release tooling scans the workspace for public packages under:
- `packages/`
- `server/`
- `cli/`
`ui/` is ignored because it is private.
`ui/` remains ignored for npm publishing because it is private.
The version rewrite step now uses [`scripts/release-package-map.mjs`](../scripts/release-package-map.mjs), which:
This matters because all public packages are versioned and published together as one release unit.
- finds all public packages
- sorts them topologically by internal dependencies
- rewrites each package version to the target release version
- rewrites internal `workspace:*` dependency references to the exact target version
- updates the CLI's displayed version string
## Canary packaging model
Those rewrites are temporary. The working tree is restored after publish or dry-run.
Canaries are published as semver prereleases such as:
## Version formats
- `1.2.3-canary.0`
- `1.2.3-canary.1`
Paperclip uses calendar versions:
They are published under the npm dist-tag `canary`.
- stable: `YYYY.MDD.P`
- canary: `YYYY.MDD.P-canary.N`
This means:
Examples:
- `npx paperclipai@canary onboard` can install them explicitly
- `npx paperclipai onboard` continues to resolve `latest`
- the stable changelog can stay at `releases/v1.2.3.md`
- stable: `2026.318.0`
- canary: `2026.318.1-canary.2`
## Stable packaging model
## Publish model
Stable releases publish normal semver versions such as `1.2.3` under the npm dist-tag `latest`.
### Canary
Canaries publish under the npm dist-tag `canary`.
Example:
- `paperclipai@2026.318.1-canary.2`
This keeps the default install path unchanged while allowing explicit installs with:
```bash
npx paperclipai@canary onboard
```
### Stable
Stable publishes use the npm dist-tag `latest`.
Example:
- `paperclipai@2026.318.0`
Stable publishes do not create a release commit. Instead:
- package versions are rewritten temporarily
- packages are published from the chosen source commit
- git tag `vYYYY.MDD.P` points at that original commit
## Trusted publishing
The intended CI model is npm trusted publishing through GitHub OIDC.
That means:
- no long-lived `NPM_TOKEN` in repository secrets
- GitHub Actions obtains short-lived publish credentials
- trusted publisher rules are configured per workflow file
See [doc/RELEASE-AUTOMATION-SETUP.md](RELEASE-AUTOMATION-SETUP.md) for the GitHub/npm setup steps.
The stable publish flow also creates the local release commit and git tag on `release/X.Y.Z`. Pushing that branch commit/tag, creating the GitHub Release, and merging the release branch back to `master` happen afterward as separate maintainer steps.
## Rollback model
Rollback does not unpublish anything.
Rollback does not unpublish packages.
It repoints the `latest` dist-tag to a prior stable version:
Instead, the maintainer should move the `latest` dist-tag back to the previous good stable version with:
```bash
./scripts/rollback-latest.sh 2026.318.0
./scripts/rollback-latest.sh <stable-version>
```
This is the fastest way to restore the default install path if a stable release is bad.
That keeps history intact while restoring the default install path quickly.
## Notes for CI
The repo includes a manual GitHub Actions release workflow at [`.github/workflows/release.yml`](../.github/workflows/release.yml).
Recommended CI release setup:
- use npm trusted publishing via GitHub OIDC
- require approval through the `npm-release` environment
- run releases from `release/X.Y.Z`
- use canary first, then stable
## Related Files
- [`scripts/build-npm.sh`](../scripts/build-npm.sh)
- [`scripts/generate-npm-package-json.mjs`](../scripts/generate-npm-package-json.mjs)
- [`scripts/release-package-map.mjs`](../scripts/release-package-map.mjs)
- [`cli/esbuild.config.mjs`](../cli/esbuild.config.mjs)
- [`doc/RELEASING.md`](RELEASING.md)

View File

@@ -1,281 +0,0 @@
# Release Automation Setup
This document covers the GitHub and npm setup required for the current Paperclip release model:
- automatic canaries from `master`
- manual stable promotion from a chosen source ref
- npm trusted publishing via GitHub OIDC
- protected release infrastructure in a public repository
Repo-side files that depend on this setup:
- `.github/workflows/release.yml`
- `.github/CODEOWNERS`
Note:
- the release workflows intentionally use `pnpm install --no-frozen-lockfile`
- this matches the repo's current policy where `pnpm-lock.yaml` is refreshed by GitHub automation after manifest changes land on `master`
- the publish jobs then restore `pnpm-lock.yaml` before running `scripts/release.sh`, so the release script still sees a clean worktree
## 1. Merge the Repo Changes First
Before touching GitHub or npm settings, merge the release automation code so the referenced workflow filenames already exist on the default branch.
Required files:
- `.github/workflows/release.yml`
- `.github/CODEOWNERS`
## 2. Configure npm Trusted Publishing
Do this for every public package that Paperclip publishes.
At minimum that includes:
- `paperclipai`
- `@paperclipai/server`
- public packages under `packages/`
### 2.1. In npm, open each package settings page
For each package:
1. open npm as an owner of the package
2. go to the package settings / publishing access area
3. add a trusted publisher for the GitHub repository `paperclipai/paperclip`
### 2.2. Add one trusted publisher entry per package
npm currently allows one trusted publisher configuration per package.
Configure:
- workflow: `.github/workflows/release.yml`
Repository:
- `paperclipai/paperclip`
Environment name:
- leave the npm trusted-publisher environment field blank
Why:
- the single `release.yml` workflow handles both canary and stable publishing
- GitHub environments `npm-canary` and `npm-stable` still enforce different approval rules on the GitHub side
### 2.3. Verify trusted publishing before removing old auth
After the workflows are live:
1. run a canary publish
2. confirm npm publish succeeds without any `NPM_TOKEN`
3. run a stable dry-run
4. run one real stable publish
Only after that should you remove old token-based access.
## 3. Remove Legacy npm Tokens
After trusted publishing works:
1. revoke any repository or organization `NPM_TOKEN` secrets used for publish
2. revoke any personal automation token that used to publish Paperclip
3. if npm offers a package-level setting to restrict publishing to trusted publishers, enable it
Goal:
- no long-lived npm publishing token should remain in GitHub Actions
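For classic automation tokens, the audit can be done from the npm CLI as the publishing user; granular access tokens are managed in the npm web UI:
```bash
# List tokens for the logged-in user:
npm token list
# Revoke one by its id:
npm token revoke <token-id>
```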
## 4. Create GitHub Environments
Create two environments in the GitHub repository:
- `npm-canary`
- `npm-stable`
Path:
1. GitHub repository
2. `Settings`
3. `Environments`
4. `New environment`
## 5. Configure `npm-canary`
Recommended settings for `npm-canary`:
- environment name: `npm-canary`
- required reviewers: none
- wait timer: none
- deployment branches and tags:
- selected branches only
- allow `master`
Reasoning:
- every push to `master` should be able to publish a canary automatically
- no human approval should be required for canaries
## 6. Configure `npm-stable`
Recommended settings for `npm-stable`:
- environment name: `npm-stable`
- required reviewers: at least one maintainer other than the person triggering the workflow when possible
- prevent self-review: enabled
- admin bypass: disabled if your team can tolerate it
- wait timer: optional
- deployment branches and tags:
- selected branches only
- allow `master`
Reasoning:
- stable publishes should require an explicit human approval gate
- the workflow is manual, but the environment should still be the real control point
## 7. Protect `master`
Open the branch protection settings for `master`.
Recommended rules:
1. require pull requests before merging
2. require status checks to pass before merging
3. require review from code owners
4. dismiss stale approvals when new commits are pushed
5. restrict who can push directly to `master`
At minimum, make sure workflow and release script changes cannot land without review.
## 8. Enforce CODEOWNERS Review
This repo now includes `.github/CODEOWNERS`, but GitHub only enforces it if branch protection requires code owner reviews.
In branch protection for `master`, enable:
- `Require review from Code Owners`
Then verify the owner entries are correct for your actual maintainer set.
Current file:
- `.github/CODEOWNERS`
If `@cryppadotta` is not the right reviewer identity in the public repo, change it before enabling enforcement.
## 9. Protect Release Infrastructure Specifically
These files should always trigger code owner review:
- `.github/workflows/release.yml`
- `scripts/release.sh`
- `scripts/release-lib.sh`
- `scripts/release-package-map.mjs`
- `scripts/create-github-release.sh`
- `scripts/rollback-latest.sh`
- `doc/RELEASING.md`
- `doc/PUBLISHING.md`
If you want stronger controls, add a repository ruleset that explicitly blocks direct pushes to:
- `.github/workflows/**`
- `scripts/release*`
## 10. Do Not Store a Claude Token in GitHub Actions
Do not add a personal Claude or Anthropic token for automatic changelog generation.
Recommended policy:
- stable changelog generation happens locally from a trusted maintainer machine
- canaries never generate changelogs
This keeps LLM spending intentional and avoids a high-value token sitting in Actions.
## 11. Verify the Canary Workflow
After setup:
1. merge a harmless commit to `master`
2. open the `Release` workflow run triggered by that push
3. confirm it passes verification
4. confirm publish succeeds under the `npm-canary` environment
5. confirm npm now shows a new `canary` release
6. confirm a git tag named `canary/vYYYY.MDD.P-canary.N` was pushed
Install-path check:
```bash
npx paperclipai@canary onboard
```
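You can also confirm the published state directly with standard npm queries:
```bash
npm view paperclipai dist-tags
npm view paperclipai@canary version
```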
## 12. Verify the Stable Workflow
After at least one good canary exists:
1. resolve the target stable version with `./scripts/release.sh stable --date YYYY-MM-DD --print-version`
2. prepare `releases/vYYYY.MDD.P.md` on the source commit you want to promote
3. open `Actions` -> `Release`
4. run it with:
- `source_ref`: the tested commit SHA or canary tag source commit
- `stable_date`: leave blank or set the intended UTC date like `2026-03-18`
do not enter a version like `2026.318.0`; the workflow computes that from the date
- `dry_run`: `true`
5. confirm the dry-run succeeds
6. rerun with `dry_run: false`
7. approve the `npm-stable` environment when prompted
8. confirm npm `latest` points to the new stable version
9. confirm git tag `vYYYY.MDD.P` exists
10. confirm the GitHub Release was created
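Steps 8 and 9 can be checked from any machine with standard commands; the remote name `origin` is assumed here:
```bash
npm view paperclipai@latest version
git ls-remote --tags origin "v*"
```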
Implementation note:
- the GitHub Actions stable workflow calls `create-github-release.sh` with `PUBLISH_REMOTE=origin`
- local maintainer usage can still pass `PUBLISH_REMOTE=public-gh` explicitly when needed
## 13. Suggested Maintainer Policy
Use this policy going forward:
- canaries are automatic and cheap
- stables are manual and approved
- only stables get public notes and announcements
- release notes are committed before stable publish
- rollback uses `npm dist-tag`, not unpublish
## 14. Troubleshooting
### Trusted publishing fails with an auth error
Check:
1. the workflow filename on GitHub exactly matches the filename configured in npm
2. the package has the trusted publisher entry for the correct repository
3. the job has `id-token: write`
4. the job is running from the expected repository, not a fork
### Stable workflow runs but never asks for approval
Check:
1. the `publish` job uses environment `npm-stable`
2. the environment actually has required reviewers configured
3. the workflow is running in the canonical repository, not a fork
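To confirm the environment exists and is named exactly as the job expects, one option with an authenticated `gh` CLI:
```bash
gh api repos/paperclipai/paperclip/environments --jq '.environments[].name'
```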
### CODEOWNERS does not trigger
Check:
1. `.github/CODEOWNERS` is on the default branch
2. branch protection on `master` requires code owner review
3. the owner identities in the file are valid reviewers with repository access
## Related Docs
- [doc/RELEASING.md](RELEASING.md)
- [doc/PUBLISHING.md](PUBLISHING.md)
- [doc/plans/2026-03-17-release-automation-and-versioning.md](plans/2026-03-17-release-automation-and-versioning.md)

View File

@@ -1,174 +1,220 @@
# Releasing Paperclip
Maintainer runbook for shipping a full Paperclip release across npm, GitHub, and the website-facing changelog surface.
The release model is branch-driven:
1. Start a release train on `release/X.Y.Z`
2. Draft the stable changelog on that branch
3. Publish one or more canaries from that branch
4. Publish stable from that same branch head
5. Push the branch commit and tag
6. Create the GitHub Release
7. Merge `release/X.Y.Z` back to `master` without squash or rebase
## Release Surfaces
Every release has four separate surfaces:
1. **Verification** — the exact git SHA passes typecheck, tests, and build
2. **npm** — `paperclipai` and public workspace packages are published
3. **GitHub** — the stable release gets a git tag and GitHub Release
4. **Website / announcements** — the stable changelog is published externally and announced
A release is done only when all four surfaces are handled.
## Core Invariants
- Canary and stable for `X.Y.Z` must come from the same `release/X.Y.Z` branch.
- The release scripts must run from the matching `release/X.Y.Z` branch.
- Once `vX.Y.Z` exists locally, on GitHub, or on npm, that release train is frozen.
- Do not squash-merge or rebase-merge a release branch PR back to `master`.
- The stable changelog is always `releases/vX.Y.Z.md`. Never create canary changelog files.
The reason for the merge rule is simple: the tag must keep pointing at the exact published commit. Squash or rebase breaks that property.
## TL;DR
### 1. Start the release train
Use this to compute the next version, create or resume the branch, create or resume a dedicated worktree, and push the branch to GitHub.
```bash
./scripts/release-start.sh patch
```
That script:
- fetches the release remote and tags
- computes the next stable version from the latest `v*` tag
- creates or resumes `release/X.Y.Z`
- creates or resumes a dedicated worktree
- pushes the branch to the remote by default
- refuses to reuse a frozen release train
### 2. Draft the stable changelog
From the release worktree:
```bash
VERSION=X.Y.Z
claude --print --output-format stream-json --verbose --dangerously-skip-permissions --model claude-opus-4-6 "Use the release-changelog skill to draft or update releases/v${VERSION}.md for Paperclip. Read doc/RELEASING.md and .agents/skills/release-changelog/SKILL.md, then generate the stable changelog for v${VERSION} from commits since the last stable tag. Do not create a canary changelog."
```
### 3. Verify and publish a canary
```bash
./scripts/release-preflight.sh canary patch
./scripts/release.sh patch --canary --dry-run
./scripts/release.sh patch --canary
PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
```
Users install canaries with:
```bash
npx paperclipai@canary onboard
```
### 4. Publish stable
```bash
./scripts/release-preflight.sh stable patch
./scripts/release.sh patch --dry-run
./scripts/release.sh patch
git push public-gh HEAD --follow-tags
./scripts/create-github-release.sh X.Y.Z
```
Then open a PR from `release/X.Y.Z` to `master` and merge without squash or rebase.
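A quick post-publish sanity check with standard tooling:
```bash
npm view paperclipai@latest version
gh release view vX.Y.Z
```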
## Release Branches
Paperclip uses one release branch per target stable version:
- `release/0.3.0`
- `release/0.3.1`
- `release/1.0.0`
Do not create separate per-canary branches like `canary/0.3.0-1`. A canary is just a prerelease snapshot of the same stable train.
## Script Entry Points
- [`scripts/release-start.sh`](../scripts/release-start.sh) — create or resume the release train branch/worktree
- [`scripts/release-preflight.sh`](../scripts/release-preflight.sh) — validate branch, version plan, git/npm state, and verification gate
- [`scripts/release.sh`](../scripts/release.sh) — publish canary or stable from the release branch
- [`scripts/create-github-release.sh`](../scripts/create-github-release.sh) — create or update the GitHub Release after pushing the tag
- [`scripts/rollback-latest.sh`](../scripts/rollback-latest.sh) — repoint `latest` to the last good stable version
## Detailed Workflow
### 1. Start or resume the release train
Run:
```bash
./scripts/release-start.sh <patch|minor|major>
```
Useful options:
```bash
./scripts/release-start.sh patch --dry-run
./scripts/release-start.sh minor --worktree-dir ../paperclip-release-0.4.0
./scripts/release-start.sh patch --no-push
```
The script is intentionally idempotent:
- if `release/X.Y.Z` already exists locally, it reuses it
- if the branch already exists on the remote, it resumes it locally
- if the branch is already checked out in another worktree, it points you there
- if `vX.Y.Z` already exists locally, remotely, or on npm, it refuses to reuse that train
### 2. Write the stable changelog early
Create or update:
- `releases/vX.Y.Z.md`
That file is for the eventual stable release. It should not include `-canary` in the filename or heading.
Recommended structure:
- `Breaking Changes` when needed
- `Highlights`
- `Improvements`
- `Fixes`
- `Upgrade Guide` when needed
- `Contributors` — @-mention every contributor by GitHub username (no emails)
Package-level `CHANGELOG.md` files are generated as part of the release mechanics. They are not the main release narrative.
### 3. Run release preflight
From the `release/X.Y.Z` worktree:
```bash
./scripts/release-preflight.sh canary <patch|minor|major>
# or
./scripts/release-preflight.sh stable <patch|minor|major>
```
The preflight script now checks all of the following before it runs the verification gate:
- the worktree is clean, including untracked files
- the current branch matches the computed `release/X.Y.Z`
- the release train is not frozen
- the target version is still free on npm
- the target tag does not already exist locally or remotely
- whether the remote release branch already exists
- whether `releases/vX.Y.Z.md` is present
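If you ever need to reproduce one of these checks by hand, the raw equivalents look roughly like this; the remote and package names are assumptions:
```bash
git status --porcelain                # must print nothing
git rev-parse --abbrev-ref HEAD       # must print release/X.Y.Z
git ls-remote --tags origin "vX.Y.Z"  # must print nothing for a free tag
npm view paperclipai@X.Y.Z version    # must fail for an unpublished version
```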
Then it runs:
```bash
pnpm -r typecheck
pnpm test:run
pnpm build
```
### 4. Publish one or more canaries
Run:
```bash
./scripts/release.sh <patch|minor|major> --canary --dry-run
./scripts/release.sh <patch|minor|major> --canary
```
Result:
- npm gets a prerelease such as `1.2.3-canary.0` under dist-tag `canary`
- `latest` is unchanged
- no git tag is created
- no GitHub Release is created
- the worktree returns to clean after the script finishes
Guardrails:
- the script refuses to run from the wrong branch
- the script refuses to publish from a frozen train
- the canary is always derived from the next stable version
- if the stable notes file is missing, the script warns before you forget it
Concrete example:
- if the latest stable is `0.2.7`, a patch canary targets `0.2.8-canary.0`
- `0.2.7-canary.N` is invalid because `0.2.7` is already stable
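To see what the `canary` dist-tag currently resolves to, a standard npm query works:
```bash
npm view paperclipai@canary version
```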
### 5. Smoke test the canary
Run the actual install path in Docker:
```bash
PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
```
For the current stable:
```bash
PAPERCLIPAI_VERSION=latest ./scripts/docker-onboard-smoke.sh
```
Useful isolated variants:
```bash
HOST_PORT=3232 DATA_DIR=./data/release-smoke-canary PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
HOST_PORT=3233 DATA_DIR=./data/release-smoke-stable PAPERCLIPAI_VERSION=latest ./scripts/docker-onboard-smoke.sh
```
If you want to exercise onboarding from the current committed ref instead of npm, use:
```bash
./scripts/clean-onboard-ref.sh
PAPERCLIP_PORT=3234 ./scripts/clean-onboard-ref.sh
./scripts/clean-onboard-ref.sh HEAD
```
Minimum checks:
- the server boots
- the UI loads
- basic company creation and dashboard load work
If smoke testing fails:
1. stop the stable release
2. fix the issue on the same `release/X.Y.Z` branch
3. publish another canary
4. rerun smoke testing
### 6. Publish stable from the same release branch
Once the branch head is vetted, run:
```bash
./scripts/release.sh <patch|minor|major> --dry-run
./scripts/release.sh <patch|minor|major>
```
Stable publish:
- publishes `X.Y.Z` to npm under `latest`
- creates the local release commit
- creates the local tag `vX.Y.Z`
Stable publish refuses to proceed if:
- the current branch is not `release/X.Y.Z`
- the remote release branch does not exist yet
- the stable notes file is missing
- the target tag already exists locally or remotely
- the stable version already exists on npm
Those checks intentionally freeze the train after stable publish.
### 7. Push the stable branch commit and tag
After stable publish succeeds:
```bash
git push public-gh HEAD --follow-tags
./scripts/create-github-release.sh X.Y.Z
```
The GitHub Release notes come from:
- `releases/vX.Y.Z.md`
### 8. Merge the release branch back to `master`
Open a PR:
- base: `master`
- head: `release/X.Y.Z`
Merge rule:
- allowed: merge commit or fast-forward
- forbidden: squash merge
- forbidden: rebase merge
Post-merge verification:
```bash
git fetch public-gh --tags
git merge-base --is-ancestor "vX.Y.Z" "public-gh/master"
```
That command must succeed. If it fails, the published tagged commit is not reachable from `master`, which means the merge strategy was wrong.
### 9. Finish the external surfaces
After GitHub is correct:
- publish the changelog on the website
- write and send the announcement copy
- ensure public docs and install guidance point to the stable version
## GitHub Actions Release
There is also a manual workflow at [`.github/workflows/release.yml`](../.github/workflows/release.yml).
Use it from the Actions tab on the relevant `release/X.Y.Z` branch:
1. Choose `Release`
2. Choose `channel`: `canary` or `stable`
3. Choose `bump`: `patch`, `minor`, or `major`
4. Choose whether this is a `dry_run`
5. Run it from the release branch, not from `master`
The workflow:
- reruns `typecheck`, `test:run`, and `build`
- gates publish behind the `npm-release` environment
- can publish canaries without touching `latest`
- can publish stable, push the stable branch commit and tag, and create the GitHub Release
It does not merge the release branch back to `master` for you.
## Release Checklist
### Before any publish
- [ ] The release train exists on `release/X.Y.Z`
- [ ] The working tree is clean, including untracked files
- [ ] If package manifests changed, the CI-owned `pnpm-lock.yaml` refresh is already merged on `master` before the train is cut
- [ ] The required verification gate passed on the exact branch head you want to publish
- [ ] The bump type is correct for the user-visible impact
- [ ] The stable changelog file exists or is ready at `releases/vX.Y.Z.md`
- [ ] You know which previous stable version you would roll back to if needed
### Before a stable
- [ ] The candidate has already passed smoke testing
- [ ] The remote `release/X.Y.Z` branch exists
- [ ] You are ready to push the stable branch commit and tag immediately after npm publish
- [ ] You are ready to create the GitHub Release immediately after the push
- [ ] You are ready to open the PR back to `master`
### After a stable
- [ ] `npm view paperclipai@latest version` matches the new stable version
- [ ] The git tag exists on GitHub
- [ ] The GitHub Release exists and uses `releases/vX.Y.Z.md`
- [ ] `vX.Y.Z` is reachable from `master`
- [ ] The website changelog is updated
- [ ] Announcement copy matches the stable release, not the canary
## Failure Playbooks
### If the canary publishes but the smoke test fails
Do not publish stable.
Instead:
1. fix the issue on `release/X.Y.Z`
2. publish another canary
3. rerun smoke testing
### If stable npm publish succeeds but push or GitHub release creation fails
This is a partial release. npm is already live.
Do this immediately:
1. fix the git or GitHub issue from the same checkout
2. push the stable branch commit and tag
3. create the GitHub Release
Do not republish the same version.
### If `latest` is broken after stable publish
Preview:
```bash
./scripts/rollback-latest.sh X.Y.Z --dry-run
```
Roll back:
```bash
./scripts/rollback-latest.sh X.Y.Z
```
This does not unpublish anything. It only moves the `latest` dist-tag back to the last good stable release.
Then fix forward with a new patch release.
### If the GitHub Release notes are wrong
Re-run:
```bash
./scripts/create-github-release.sh X.Y.Z
```
If the release already exists, the script updates it.
## Related Docs
- [doc/PUBLISHING.md](PUBLISHING.md) — low-level npm build and packaging internals
- [.agents/skills/release/SKILL.md](../.agents/skills/release/SKILL.md) — maintainer release coordination workflow
- [.agents/skills/release-changelog/SKILL.md](../.agents/skills/release-changelog/SKILL.md) — stable changelog drafting workflow

View File

@@ -188,15 +188,12 @@ The heartbeat is a protocol, not a runtime.
Agent configuration includes an **adapter** that defines how Paperclip invokes the agent. Initial adapters:
| Adapter | Mechanism | Example |
| --------- | ----------------------- | --------------------------------------------- |
| `process` | Execute a child process | `python run_agent.py --agent-id {id}` |
| `http` | Send an HTTP request | `POST https://openclaw.example.com/hook/{id}` |
The `process` and `http` adapters ship as defaults. Additional adapters can be added via the plugin system (see Plugin / Extension Architecture).
### Adapter Interface
@@ -432,7 +429,7 @@ The core Paperclip system must be extensible.
- **Agent Adapter plugins** — new Adapter types can be registered via the plugin system
- Plugin-registrable UI components (future)
This isn't a V1 deliverable (we're not building a plugin framework upfront), but the architecture should not paint us into a corner. Keep boundaries clean so extensions are possible.
---

View File

@@ -1,135 +0,0 @@
# Untrusted PR Review In Docker
Use this workflow when you want Codex or Claude to inspect a pull request that you do not want touching your host machine directly.
This is intentionally separate from the normal Paperclip dev image.
## What this container isolates
- `codex` auth/session state in a Docker volume, not your host `~/.codex`
- `claude` auth/session state in a Docker volume, not your host `~/.claude`
- `gh` auth state in the same container-local home volume
- review clones, worktrees, dependency installs, and local databases in a writable scratch volume under `/work`
By default this workflow does **not** mount your host repo checkout, your host home directory, or your SSH agent.
## Files
- `docker/untrusted-review/Dockerfile`
- `docker-compose.untrusted-review.yml`
- `review-checkout-pr` inside the container
## Build and start a shell
```sh
docker compose -f docker-compose.untrusted-review.yml build
docker compose -f docker-compose.untrusted-review.yml run --rm --service-ports review
```
That opens an interactive shell in the review container with:
- Node + Corepack/pnpm
- `codex`
- `claude`
- `gh`
- `git`, `rg`, `fd`, `jq`
## First-time login inside the container
Run these once. The resulting login state persists in the `review-home` Docker volume.
```sh
gh auth login
codex login
claude login
```
If you prefer API-key auth instead of CLI login, pass keys through Compose env:
```sh
OPENAI_API_KEY=... ANTHROPIC_API_KEY=... docker compose -f docker-compose.untrusted-review.yml run --rm review
```
## Check out a PR safely
Inside the container:
```sh
review-checkout-pr paperclipai/paperclip 432
cd /work/checkouts/paperclipai-paperclip/pr-432
```
What this does:
1. Creates or reuses a repo clone under `/work/repos/...`
2. Fetches `pull/<pr>/head` from GitHub
3. Creates a detached git worktree under `/work/checkouts/...`
The checkout lives entirely inside the container volume.
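Under the hood this is roughly equivalent to the raw git commands below; the clone and worktree paths are illustrative:
```sh
git -C /work/repos/paperclipai-paperclip fetch origin pull/432/head
git -C /work/repos/paperclipai-paperclip worktree add --detach \
  /work/checkouts/paperclipai-paperclip/pr-432 FETCH_HEAD
```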
## Ask Codex or Claude to review it
Inside the PR checkout:
```sh
codex
```
Then give it a prompt like:
```text
Review this PR as hostile input. Focus on security issues, data exfiltration paths, sandbox escapes, dangerous install/runtime scripts, auth changes, and subtle behavioral regressions. Do not modify files. Produce findings ordered by severity with file references.
```
Or with Claude:
```sh
claude
```
## Preview the Paperclip app from the PR
Only do this when you intentionally want to execute the PR's code inside the container.
Inside the PR checkout:
```sh
pnpm install
HOST=0.0.0.0 pnpm dev
```
Open from the host:
- `http://localhost:3100`
The Compose file also exposes Vite's default port:
- `http://localhost:5173`
Notes:
- `pnpm install` can run untrusted lifecycle scripts from the PR. That is why this happens inside the isolated container instead of on your host.
- If you only want static inspection, do not run install/dev commands.
- Paperclip's embedded PostgreSQL and local storage stay inside the container home volume via `PAPERCLIP_HOME=/home/reviewer/.paperclip-review`.
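If you want dependencies on disk for type-checking or static analysis without executing lifecycle scripts, pnpm's standard flag avoids the riskiest part of install:
```sh
pnpm install --ignore-scripts
```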
## Reset state
Remove the review container volumes when you want a clean environment:
```sh
docker compose -f docker-compose.untrusted-review.yml down -v
```
That deletes:
- Codex/Claude/GitHub login state stored in `review-home`
- cloned repos, worktrees, installs, and scratch data stored in `review-work`
## Security limits
This is a useful isolation boundary, but it is still Docker, not a full VM.
- A reviewed PR can still access the container's network unless you disable it.
- Any secrets you pass into the container are available to code you execute inside it.
- Do not mount your host repo, host home, `.ssh`, or Docker socket unless you are intentionally weakening the boundary.
- If you need a stronger boundary than this, use a disposable VM instead of Docker.
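For pure static review you can also drop networking entirely with a plain Docker run; the image name here is a placeholder for whatever your Compose build produced:
```sh
docker run --rm -it --network none <review-image> bash
```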

View File

@@ -1,172 +0,0 @@
# Memory Landscape
Date: 2026-03-17
This document summarizes the memory systems referenced in task `PAP-530` and extracts the design patterns that matter for Paperclip.
## What Paperclip Needs From This Survey
Paperclip is not trying to become a single opinionated memory engine. The more useful target is a control-plane memory surface that:
- stays company-scoped
- lets each company choose a default memory provider
- lets specific agents override that default
- keeps provenance back to Paperclip runs, issues, comments, and documents
- records memory-related cost and latency the same way the rest of the control plane records work
- works with plugin-provided providers, not only built-ins
The question is not "which memory project wins?" The question is "what is the smallest Paperclip contract that can sit above several very different memory systems without flattening away the useful differences?"
## Quick Grouping
### Hosted memory APIs
- `mem0`
- `supermemory`
- `Memori`
These optimize for a simple application integration story: send conversation/content plus an identity, then query for relevant memory or user context later.
### Agent-centric memory frameworks / memory OSes
- `MemOS`
- `memU`
- `EverMemOS`
- `OpenViking`
These treat memory as an agent runtime subsystem, not only as a search index. They usually add task memory, profiles, filesystem-style organization, async ingestion, or skill/resource management.
### Local-first memory stores / indexes
- `nuggets`
- `memsearch`
These emphasize local persistence, inspectability, and low operational overhead. They are useful because Paperclip is local-first today and needs at least one zero-config path.
## Per-Project Notes
| Project | Shape | Notable API / model | Strong fit for Paperclip | Main mismatch |
|---|---|---|---|---|
| [nuggets](https://github.com/NeoVertex1/nuggets) | local memory engine + messaging gateway | topic-scoped HRR memory with `remember`, `recall`, `forget`, fact promotion into `MEMORY.md` | good example of lightweight local memory and automatic promotion | very specific architecture; not a general multi-tenant service |
| [mem0](https://github.com/mem0ai/mem0) | hosted + OSS SDK | `add`, `search`, `getAll`, `get`, `update`, `delete`, `deleteAll`; entity partitioning via `user_id`, `agent_id`, `run_id`, `app_id` | closest to a clean provider API with identities and metadata filters | provider owns extraction heavily; Paperclip should not assume every backend behaves like mem0 |
| [MemOS](https://github.com/MemTensor/MemOS) | memory OS / framework | unified add-retrieve-edit-delete, memory cubes, multimodal memory, tool memory, async scheduler, feedback/correction | strong source for optional capabilities beyond plain search | much broader than the minimal contract Paperclip should standardize first |
| [supermemory](https://github.com/supermemoryai/supermemory) | hosted memory + context API | `add`, `profile`, `search.memories`, `search.documents`, document upload, settings; automatic profile building and forgetting | strong example of "context bundle" rather than raw search results | heavily productized around its own ontology and hosted flow |
| [memU](https://github.com/NevaMind-AI/memU) | proactive agent memory framework | file-system metaphor, proactive loop, intent prediction, always-on companion model | good source for when memory should trigger agent behavior, not just retrieval | proactive assistant framing is broader than Paperclip's task-centric control plane |
| [Memori](https://github.com/MemoriLabs/Memori) | hosted memory fabric + SDK wrappers | registers against LLM SDKs, attribution via `entity_id` + `process_id`, sessions, cloud + BYODB | strong example of automatic capture around model clients | wrapper-centric design does not map 1:1 to Paperclip's run / issue / comment lifecycle |
| [EverMemOS](https://github.com/EverMind-AI/EverMemOS) | conversational long-term memory system | MemCell extraction, structured narratives, user profiles, hybrid retrieval / reranking | useful model for provenance-rich structured memories and evolving profiles | focused on conversational memory rather than generalized control-plane events |
| [memsearch](https://github.com/zilliztech/memsearch) | markdown-first local memory index | markdown as source of truth, `index`, `search`, `watch`, transcript parsing, plugin hooks | excellent baseline for a local built-in provider and inspectable provenance | intentionally simple; no hosted service semantics or rich correction workflow |
| [OpenViking](https://github.com/volcengine/OpenViking) | context database | filesystem-style organization of memories/resources/skills, tiered loading, visualized retrieval trajectories | strong source for browse/inspect UX and context provenance | treats "context database" as a larger product surface than Paperclip should own |
## Common Primitives Across The Landscape
Even though the systems disagree on architecture, they converge on a few primitives:
- `ingest`: add memory from text, messages, documents, or transcripts
- `query`: search or retrieve memory given a task, question, or scope
- `scope`: partition memory by user, agent, project, process, or session
- `provenance`: carry enough metadata to explain where a memory came from
- `maintenance`: update, forget, dedupe, compact, or correct memories over time
- `context assembly`: turn raw memories into a prompt-ready bundle for the agent
If Paperclip does not expose these, it will not adapt well to the systems above.
## Where The Systems Differ
These differences are exactly why Paperclip needs a layered contract instead of a single hard-coded engine.
### 1. Who owns extraction?
- `mem0`, `supermemory`, and `Memori` expect the provider to infer memories from conversations.
- `memsearch` expects the host to decide what markdown to write, then indexes it.
- `MemOS`, `memU`, `EverMemOS`, and `OpenViking` sit somewhere in between and often expose richer memory construction pipelines.
Paperclip should support both:
- provider-managed extraction
- Paperclip-managed extraction with provider-managed storage / retrieval
### 2. What is the source of truth?
- `memsearch` and `nuggets` make the source inspectable on disk.
- hosted APIs often make the provider store canonical.
- filesystem-style systems like `OpenViking` and `memU` treat hierarchy itself as part of the memory model.
Paperclip should not require a single storage shape. It should require normalized references back to Paperclip entities.
### 3. Is memory just search, or also profile and planning state?
- `mem0` and `memsearch` center search and CRUD.
- `supermemory` adds user profiles as a first-class output.
- `MemOS`, `memU`, `EverMemOS`, and `OpenViking` expand into tool traces, task memory, resources, and skills.
Paperclip should make plain search the minimum contract and richer outputs optional capabilities.
### 4. Is memory synchronous or asynchronous?
- local tools often work synchronously in-process.
- larger systems add schedulers, background indexing, compaction, or sync jobs.
Paperclip needs both direct request/response operations and background maintenance hooks.
## Paperclip-Specific Takeaways
### Paperclip should own these concerns
- binding a provider to a company and optionally overriding it per agent
- mapping Paperclip entities into provider scopes
- provenance back to issue comments, documents, runs, and activity
- cost / token / latency reporting for memory work
- browse and inspect surfaces in the Paperclip UI
- governance on destructive operations
### Providers should own these concerns
- extraction heuristics
- embedding / indexing strategy
- ranking and reranking
- profile synthesis
- contradiction resolution and forgetting logic
- storage engine details
### The control-plane contract should stay small
Paperclip does not need to standardize every feature from every provider. It needs:
- a required portable core
- optional capability flags for richer providers
- a way to record provider-native ids and metadata without pretending all providers are equivalent internally
## Recommended Direction
Paperclip should adopt a two-layer memory model:
1. `Memory binding + control plane layer`
Paperclip decides which provider key is in effect for a company, agent, or project, and it logs every memory operation with provenance and usage.
2. `Provider adapter layer`
A built-in or plugin-supplied adapter turns Paperclip memory requests into provider-specific calls.
The portable core should cover:
- ingest / write
- search / recall
- browse / inspect
- get by provider record handle
- forget / correction
- usage reporting
Optional capabilities can cover:
- profile synthesis
- async ingestion
- multimodal content
- tool / resource / skill memory
- provider-native graph browsing
That is enough to support:
- a local markdown-first baseline similar to `memsearch`
- hosted services similar to `mem0`, `supermemory`, or `Memori`
- richer agent-memory systems like `MemOS` or `OpenViking`
without forcing Paperclip itself to become a monolithic memory engine.

View File

@@ -1,424 +0,0 @@
# Docker Release Browser E2E Plan
## Context
Today release smoke testing for published Paperclip packages is manual and shell-driven:
```sh
HOST_PORT=3232 DATA_DIR=./data/release-smoke-canary PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
HOST_PORT=3233 DATA_DIR=./data/release-smoke-stable PAPERCLIPAI_VERSION=latest ./scripts/docker-onboard-smoke.sh
```
That is useful because it exercises the same public install surface users hit:
- Docker
- `npx paperclipai@canary`
- `npx paperclipai@latest`
- authenticated bootstrap flow
But it still leaves the most important release questions to a human with a browser:
- can I sign in with the smoke credentials?
- do I land in onboarding?
- can I complete onboarding?
- does the initial CEO agent actually get created and run?
The repo already has two adjacent pieces:
- `tests/e2e/onboarding.spec.ts` covers the onboarding wizard against the local source tree
- `scripts/docker-onboard-smoke.sh` boots a published Docker install and auto-bootstraps authenticated mode, but only verifies the API/session layer
What is missing is one deterministic browser test that joins those two paths.
## Goal
Add a release-grade Docker-backed browser E2E that validates the published `canary` and `latest` installs end to end:
1. boot the published package in Docker
2. sign in with known smoke credentials
3. verify the user is routed into onboarding
4. complete onboarding in the browser
5. verify the first CEO agent exists
6. verify the initial CEO run was triggered and reached a terminal or active state
Then wire that test into GitHub Actions so release validation is no longer manual-only.
## Recommendation In One Sentence
Turn the current Docker smoke script into a machine-friendly test harness, add a dedicated Playwright release-smoke spec that drives the authenticated browser flow against published Docker installs, and run it in GitHub Actions for both `canary` and `latest`.
## What We Have Today
### Existing local browser coverage
`tests/e2e/onboarding.spec.ts` already proves the onboarding wizard can:
- create a company
- create a CEO agent
- create an initial issue
- optionally observe task progress
That is a good base, but it does not validate the public npm package, Docker path, authenticated login flow, or release dist-tags.
### Existing Docker smoke coverage
`scripts/docker-onboard-smoke.sh` already does useful setup work:
- builds `Dockerfile.onboard-smoke`
- runs `paperclipai@${PAPERCLIPAI_VERSION}` inside Docker
- waits for health
- signs up or signs in a smoke admin user
- generates and accepts the bootstrap CEO invite in authenticated mode
- verifies a board session and `/api/companies`
That means the hard bootstrap problem is mostly solved already. The main gap is that the script is human-oriented and never hands control to a browser test.
### Existing CI shape
The repo already has:
- `.github/workflows/e2e.yml` for manual Playwright runs against local source
- `.github/workflows/release.yml` for canary publish on `master` and manual stable promotion
So the right move is to extend the current test/release system, not create a parallel one.
## Product Decision
### 1. The release smoke should stay deterministic and token-free
The first version should not require OpenAI, Anthropic, or external agent credentials.
Use the onboarding flow with a deterministic adapter that can run on a stock GitHub runner and inside the published Docker install. The existing `process` adapter with a trivial command is the right base path for this release gate.
That keeps this test focused on:
- release packaging
- auth/bootstrap
- UI routing
- onboarding contract
- agent creation
- heartbeat invocation plumbing
Later we can add a second credentialed smoke lane for real model-backed agents.
### 2. Smoke credentials become an explicit test contract
The current defaults in `scripts/docker-onboard-smoke.sh` should be treated as stable test fixtures:
- email: `smoke-admin@paperclip.local`
- password: `paperclip-smoke-password`
The browser test should log in with those exact values unless overridden by env vars.
### 3. Published-package smoke and source-tree E2E stay separate
Keep two lanes:
- source-tree E2E for feature development
- published Docker release smoke for release confidence
They overlap on onboarding assertions, but they guard different failure classes.
## Proposed Design
## 1. Add a CI-friendly Docker smoke harness
Refactor `scripts/docker-onboard-smoke.sh` so it can run in two modes:
- interactive mode
- current behavior
- streams logs and waits in foreground for manual inspection
- CI mode
- starts the container
- waits for health and authenticated bootstrap
- prints machine-readable metadata
- exits while leaving the container running for Playwright
Recommended shape:
- keep `scripts/docker-onboard-smoke.sh` as the public entry point
- add a `SMOKE_DETACH=true` or `--detach` mode
- emit a JSON blob or `.env` file containing:
- `SMOKE_BASE_URL`
- `SMOKE_ADMIN_EMAIL`
- `SMOKE_ADMIN_PASSWORD`
- `SMOKE_CONTAINER_NAME`
- `SMOKE_DATA_DIR`
The workflow and Playwright tests can then consume the emitted metadata instead of scraping logs.
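A sketch of how that handoff could look; the `--detach`-style flag and the `smoke.env` file are the proposal above, not existing behavior:
```sh
# Proposed, not yet implemented:
SMOKE_DETACH=true ./scripts/docker-onboard-smoke.sh
# Consume the emitted metadata and run the suite against the live container:
source ./smoke.env
npx playwright test --config tests/release-smoke/playwright.config.ts
```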
### Why this matters
The current script always tails logs and then blocks on `wait "$LOG_PID"`. That is convenient for manual smoke testing, but it is the wrong shape for CI orchestration.
## 2. Add a dedicated Playwright release-smoke spec
Create a second Playwright entry point specifically for published Docker installs, for example:
- `tests/release-smoke/playwright.config.ts`
- `tests/release-smoke/docker-auth-onboarding.spec.ts`
This suite should not use Playwright `webServer`, because the app server will already be running inside Docker.
### Browser scenario
The first release-smoke scenario should validate:
1. open `/`
2. unauthenticated user is redirected to `/auth`
3. sign in using the smoke credentials
4. authenticated user lands on onboarding when no companies exist
5. onboarding wizard appears with the expected step labels
6. create a company
7. create the first agent using `process`
8. create the initial issue
9. finish onboarding and open the created issue
10. verify via API:
- company exists
- CEO agent exists
- issue exists and is assigned to the CEO
11. verify the first heartbeat run was triggered:
- either by checking issue status changed from initial state, or
- by checking agent/runs API shows a run for the CEO, or
- both
The test should tolerate the run completing quickly. For this reason, the assertion should accept:
- `queued`
- `running`
- `succeeded`
and similarly for issue progression if the issue status changes before the assertion runs.
### Why a separate spec instead of reusing `tests/e2e/onboarding.spec.ts`
The local-source test and release-smoke test have different assumptions:
- different server lifecycle
- different auth path
- different deployment mode
- published npm package instead of local workspace code
Trying to force both through one spec will make both worse.
## 3. Add a release-smoke workflow in GitHub Actions
Add a workflow dedicated to this surface, ideally reusable:
- `.github/workflows/release-smoke.yml`
Recommended triggers:
- `workflow_dispatch`
- `workflow_call`
Recommended inputs:
- `paperclip_version`
- `canary` or `latest`
- `host_port`
- optional, default runner-safe port
- `artifact_name`
- optional for clearer uploads
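With those inputs, manual dispatch would look like this:
```sh
gh workflow run release-smoke.yml -f paperclip_version=canary
gh workflow run release-smoke.yml -f paperclip_version=latest
```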
### Job outline
1. checkout repo
2. install Node/pnpm
3. install Playwright browser dependencies
4. launch Docker smoke harness in detached mode with the chosen dist-tag
5. run the release-smoke Playwright suite against the returned base URL
6. always collect diagnostics:
- Playwright report
- screenshots
- trace
- `docker logs`
- harness metadata file
7. stop and remove container
### Why a reusable workflow
This lets us:
- run the smoke manually on demand
- call it from `release.yml`
- reuse the same job for both `canary` and `latest`
## 4. Integrate it into release automation incrementally
### Phase A: Manual workflow only
First ship the workflow as manual-only so the harness and test can be stabilized without blocking releases.
### Phase B: Run automatically after canary publish
After `publish_canary` succeeds in `.github/workflows/release.yml`, call the reusable release-smoke workflow with:
- `paperclip_version=canary`
This proves the just-published public canary really boots and onboards.
### Phase C: Run automatically after stable publish
After `publish_stable` succeeds, call the same workflow with:
- `paperclip_version=latest`
This gives us post-publish confirmation that the stable dist-tag is healthy.
### Important nuance
Testing `latest` from npm cannot happen before stable publish, because the package under test does not exist under `latest` yet. So the `latest` smoke is a post-publish verification, not a pre-publish gate.
If we later want a true pre-publish stable gate, that should be a separate source-ref or locally built package smoke job.
## 5. Make diagnostics first-class
This workflow is only valuable if failures are fast to debug.
Always capture:
- Playwright HTML report
- Playwright trace on failure
- final screenshot on failure
- full `docker logs` output
- emitted smoke metadata
- optional `curl /api/health` snapshot
Without that, the test will become a flaky black box and people will stop trusting it.
## Implementation Plan
## Phase 1: Harness refactor
Files:
- `scripts/docker-onboard-smoke.sh`
- optionally `scripts/lib/docker-onboard-smoke.sh` or similar helper
- `doc/DOCKER.md`
- `doc/RELEASING.md`
Tasks:
1. Add detached/CI mode to the Docker smoke script.
2. Make the script emit machine-readable connection metadata.
3. Keep the current interactive manual mode intact.
4. Add reliable cleanup commands for CI.
Acceptance:
- a script invocation can start the published Docker app, auto-bootstrap it, and return control to the caller with enough metadata for browser automation
## Phase 2: Browser release-smoke suite
Files:
- `tests/release-smoke/playwright.config.ts`
- `tests/release-smoke/docker-auth-onboarding.spec.ts`
- root `package.json`
Tasks:
1. Add a dedicated Playwright config for external server testing.
2. Implement login + onboarding + CEO creation flow.
3. Assert a CEO run was created or completed.
4. Add a root script such as:
- `test:release-smoke`
Acceptance:
- the suite passes locally against both:
- `PAPERCLIPAI_VERSION=canary`
- `PAPERCLIPAI_VERSION=latest`
## Phase 3: GitHub Actions workflow
Files:
- `.github/workflows/release-smoke.yml`
Tasks:
1. Add manual and reusable workflow entry points.
2. Install Chromium and runner dependencies.
3. Start Docker smoke in detached mode.
4. Run the release-smoke Playwright suite.
5. Upload diagnostics artifacts.
Acceptance:
- a maintainer can run the workflow manually for either `canary` or `latest`
## Phase 4: Release workflow integration
Files:
- `.github/workflows/release.yml`
- `doc/RELEASING.md`
Tasks:
1. Trigger release smoke automatically after canary publish.
2. Trigger release smoke automatically after stable publish.
3. Document expected behavior and failure handling.
Acceptance:
- canary releases automatically produce a published-package browser smoke result
- stable releases automatically produce a `latest` browser smoke result
## Phase 5: Future extension for real model-backed agent validation
Not part of the first implementation, but this should be the next layer after the deterministic lane is stable.
Possible additions:
- a second Playwright project gated on repo secrets
- real `claude_local` or `codex_local` adapter validation in Docker-capable environments
- assertion that the CEO posts a real task/comment artifact
- stable release holdback until the credentialed lane passes
This should stay optional until the token-free lane is trustworthy.
## Acceptance Criteria
The plan is complete when the implemented system can demonstrate all of the following:
1. A published `paperclipai@canary` Docker install can be smoke-tested by Playwright in CI.
2. A published `paperclipai@latest` Docker install can be smoke-tested by Playwright in CI.
3. The test logs into authenticated mode with the smoke credentials.
4. The test sees onboarding for a fresh instance.
5. The test completes onboarding in the browser.
6. The test verifies the initial CEO agent was created.
7. The test verifies at least one CEO heartbeat run was triggered.
8. Failures produce actionable artifacts rather than just a red job.
## Risks And Decisions To Make
### 1. Fast process runs may finish before the UI visibly updates
That is expected. The assertions should prefer API polling for run existence/status rather than only visual indicators.
### 2. `latest` smoke is post-publish, not preventive
This is a real limitation of testing the published dist-tag itself. It is still valuable, but it should not be confused with a pre-publish gate.
### 3. We should not overcouple the test to cosmetic onboarding text
The important contract is flow success, created entities, and run creation. Use visible labels sparingly and prefer stable semantic selectors where possible.
### 4. Keep the smoke adapter path boring
For release safety, the first test should use the most boring runnable adapter possible. This is not the place to validate every adapter.
## Recommended First Slice
If we want the fastest path to value, ship this in order:
1. add detached mode to `scripts/docker-onboard-smoke.sh`
2. add one Playwright spec for authenticated login + onboarding + CEO run verification
3. add manual `release-smoke.yml`
4. once stable, wire canary into `release.yml`
5. after that, wire stable `latest` smoke into `release.yml`
That gives release confidence quickly without turning the first version into a large CI redesign.

View File

@@ -1,426 +0,0 @@
# Paperclip Memory Service Plan
## Goal
Define a Paperclip memory service and surface API that can sit above multiple memory backends, while preserving Paperclip's control-plane requirements:
- company scoping
- auditability
- provenance back to Paperclip work objects
- budget / cost visibility
- plugin-first extensibility
This plan is based on the external landscape summarized in `doc/memory-landscape.md` and on the current Paperclip architecture in:
- `doc/SPEC-implementation.md`
- `doc/plugins/PLUGIN_SPEC.md`
- `doc/plugins/PLUGIN_AUTHORING_GUIDE.md`
- `packages/plugins/sdk/src/types.ts`
## Recommendation In One Sentence
Paperclip should not embed one opinionated memory engine into core. It should add a company-scoped memory control plane with a small normalized adapter contract, then let built-ins and plugins implement the provider-specific behavior.
## Product Decisions
### 1. Memory is company-scoped by default
Every memory binding belongs to exactly one company.
That binding can then be:
- the company default
- an agent override
- a project override later if we need it
No cross-company memory sharing in the initial design.
### 2. Providers are selected by key
Each configured memory provider gets a stable key inside a company, for example:
- `default`
- `mem0-prod`
- `local-markdown`
- `research-kb`
Agents and services resolve the active provider by key, not by hard-coded vendor logic.
### 3. Plugins are the primary provider path
Built-ins are useful for a zero-config local path, but most providers should arrive through the existing Paperclip plugin runtime.
That keeps the core small and matches the current direction that optional knowledge-like systems live at the edges.
### 4. Paperclip owns routing, provenance, and accounting
Providers should not decide how Paperclip entities map to governance.
Paperclip core should own:
- who is allowed to call a memory operation
- which company / agent / project scope is active
- what issue / run / comment / document the operation belongs to
- how usage gets recorded
### 5. Automatic memory should be narrow at first
Automatic capture is useful, but broad silent capture is dangerous.
Initial automatic hooks should be:
- post-run capture from agent runs
- issue comment / document capture when the binding enables it
- pre-run recall for agent context hydration
Everything else should start explicit.
## Proposed Concepts
### Memory provider
A built-in or plugin-supplied implementation that stores and retrieves memory.
Examples:
- local markdown + vector index
- mem0 adapter
- supermemory adapter
- MemOS adapter
### Memory binding
A company-scoped configuration record that points to a provider and carries provider-specific config.
This is the object selected by key.
### Memory scope
The normalized Paperclip scope passed into a provider request.
At minimum:
- `companyId`
- optional `agentId`
- optional `projectId`
- optional `issueId`
- optional `runId`
- optional `subjectId` for external/user identity
### Memory source reference
The provenance handle that explains where a memory came from.
Supported source kinds should include:
- `issue_comment`
- `issue_document`
- `issue`
- `run`
- `activity`
- `manual_note`
- `external_document`
### Memory operation
A normalized write, query, browse, or delete action performed through Paperclip.
Paperclip should log every operation, whether the provider is local or external.
## Required Adapter Contract
The required core should be small enough to fit `memsearch`, `mem0`, `Memori`, `MemOS`, or `OpenViking`.
```ts
export interface MemoryAdapterCapabilities {
profile?: boolean;
browse?: boolean;
correction?: boolean;
asyncIngestion?: boolean;
multimodal?: boolean;
providerManagedExtraction?: boolean;
}
export interface MemoryScope {
companyId: string;
agentId?: string;
projectId?: string;
issueId?: string;
runId?: string;
subjectId?: string;
}
export interface MemorySourceRef {
kind:
| "issue_comment"
| "issue_document"
| "issue"
| "run"
| "activity"
| "manual_note"
| "external_document";
companyId: string;
issueId?: string;
commentId?: string;
documentKey?: string;
runId?: string;
activityId?: string;
externalRef?: string;
}
export interface MemoryUsage {
provider: string;
model?: string;
inputTokens?: number;
outputTokens?: number;
embeddingTokens?: number;
costCents?: number;
latencyMs?: number;
details?: Record<string, unknown>;
}
export interface MemoryWriteRequest {
bindingKey: string;
scope: MemoryScope;
source: MemorySourceRef;
content: string;
metadata?: Record<string, unknown>;
mode?: "append" | "upsert" | "summarize";
}
export interface MemoryRecordHandle {
providerKey: string;
providerRecordId: string;
}
export interface MemoryQueryRequest {
bindingKey: string;
scope: MemoryScope;
query: string;
topK?: number;
intent?: "agent_preamble" | "answer" | "browse";
metadataFilter?: Record<string, unknown>;
}
export interface MemorySnippet {
handle: MemoryRecordHandle;
text: string;
score?: number;
summary?: string;
source?: MemorySourceRef;
metadata?: Record<string, unknown>;
}
export interface MemoryContextBundle {
snippets: MemorySnippet[];
profileSummary?: string;
usage?: MemoryUsage[];
}
export interface MemoryAdapter {
key: string;
capabilities: MemoryAdapterCapabilities;
write(req: MemoryWriteRequest): Promise<{
records?: MemoryRecordHandle[];
usage?: MemoryUsage[];
}>;
query(req: MemoryQueryRequest): Promise<MemoryContextBundle>;
get(handle: MemoryRecordHandle, scope: MemoryScope): Promise<MemorySnippet | null>;
forget(handles: MemoryRecordHandle[], scope: MemoryScope): Promise<{ usage?: MemoryUsage[] }>;
}
```
This contract intentionally does not force a provider to expose its internal graph, filesystem, or ontology.
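To make the contract concrete, here is a minimal in-memory adapter sketch against the interfaces above. The import path is hypothetical, substring matching stands in for real retrieval, and a real adapter must also enforce scope:
```ts
// Minimal in-memory adapter sketch against the contract above.
// Import path is hypothetical; this ignores scope, which a real adapter must not.
import type {
  MemoryAdapter,
  MemoryContextBundle,
  MemoryQueryRequest,
  MemoryRecordHandle,
  MemoryScope,
  MemorySnippet,
  MemoryWriteRequest,
} from "./memory-contract";

export function createInMemoryAdapter(key = "in-memory"): MemoryAdapter {
  const records = new Map<string, MemorySnippet>();
  let nextId = 0;

  return {
    key,
    capabilities: { browse: false },
    async write(req: MemoryWriteRequest) {
      const handle: MemoryRecordHandle = {
        providerKey: key,
        providerRecordId: String(nextId++),
      };
      records.set(handle.providerRecordId, {
        handle,
        text: req.content,
        source: req.source,
        metadata: req.metadata,
      });
      return { records: [handle] };
    },
    async query(req: MemoryQueryRequest): Promise<MemoryContextBundle> {
      // Naive substring match; real providers own ranking and retrieval.
      const snippets = [...records.values()]
        .filter((s) => s.text.includes(req.query))
        .slice(0, req.topK ?? 5);
      return { snippets };
    },
    async get(handle: MemoryRecordHandle, _scope: MemoryScope) {
      return records.get(handle.providerRecordId) ?? null;
    },
    async forget(handles: MemoryRecordHandle[], _scope: MemoryScope) {
      for (const h of handles) records.delete(h.providerRecordId);
      return {};
    },
  };
}
```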
## Optional Adapter Surfaces
These should be capability-gated, not required:
- `browse(scope, filters)` for file-system / graph / timeline inspection
- `correct(handle, patch)` for natural-language correction flows
- `profile(scope)` when the provider can synthesize stable preferences or summaries
- `sync(source)` for connectors or background ingestion
- `explain(queryResult)` for providers that can expose retrieval traces
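One way to type this, sketched under the assumption that optional surfaces become optional methods (shapes are not settled):
```ts
// Hedged sketch: optional surfaces as optional members, gated by both the
// capability flag and the method's actual presence.
export interface MemoryAdapterExtensions {
  browse?(scope: MemoryScope, filters?: Record<string, unknown>): Promise<MemorySnippet[]>;
  correct?(handle: MemoryRecordHandle, patch: string): Promise<void>;
  profile?(scope: MemoryScope): Promise<string | null>;
}

export function canBrowse(
  adapter: MemoryAdapter & MemoryAdapterExtensions,
): boolean {
  return adapter.capabilities.browse === true && typeof adapter.browse === "function";
}
```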
## What Paperclip Should Persist
Paperclip should not mirror the full provider memory corpus into Postgres unless the provider is a Paperclip-managed local provider.
Paperclip core should persist:
- memory bindings and overrides
- provider keys and capability metadata
- normalized memory operation logs
- provider record handles returned by operations when available
- source references back to issue comments, documents, runs, and activity
- usage and cost data
For external providers, the memory payload itself can remain in the provider.
## Hook Model
### Automatic hooks
These should be low-risk and easy to reason about:
1. `pre-run hydrate`
Before an agent run starts, Paperclip may call `query(... intent = "agent_preamble")` using the active binding.
2. `post-run capture`
After a run finishes, Paperclip may write a summary or transcript-derived note tied to the run.
3. `issue comment / document capture`
When enabled on the binding, Paperclip may capture selected issue comments or issue documents as memory sources.
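For hook 1, the hydrate step can reduce to a single capability-agnostic call. A minimal sketch, assuming a `taskSummary` input and simple snippet formatting:
```ts
// Hedged sketch of pre-run hydrate: query the active binding with the
// agent_preamble intent and fold snippets into the run preamble.
async function hydrateRunContext(
  adapter: MemoryAdapter,
  bindingKey: string,
  scope: MemoryScope,
  taskSummary: string,
): Promise<string> {
  const bundle = await adapter.query({
    bindingKey,
    scope,
    query: taskSummary,
    topK: 5,
    intent: "agent_preamble",
  });
  if (bundle.snippets.length === 0) return "";
  return ["Relevant memory:", ...bundle.snippets.map((s) => `- ${s.summary ?? s.text}`)].join("\n");
}
```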
### Explicit hooks
These should be tool- or UI-driven first:
- `memory.search`
- `memory.note`
- `memory.forget`
- `memory.correct`
- `memory.browse`
### Not automatic in the first version
- broad web crawling
- silent import of arbitrary repo files
- cross-company memory sharing
- automatic destructive deletion
- provider migration between bindings
## Agent UX Rules
Paperclip should give agents both automatic recall and explicit tools, with simple guidance:
- use `memory.search` when the task depends on prior decisions, people, projects, or long-running context that is not in the current issue thread
- use `memory.note` when a durable fact, preference, or decision should survive this run
- use `memory.correct` when the user explicitly says prior context is wrong
- rely on post-run auto-capture for ordinary session residue so agents do not have to write memory notes for every trivial exchange
This keeps memory available without forcing every agent prompt to become a memory-management protocol.
## Browse And Inspect Surface
Paperclip needs a first-class UI for memory; otherwise providers become black boxes.
The initial browse surface should support:
- active binding by company and agent
- recent memory operations
- recent write sources
- query results with source backlinks
- filters by agent, issue, run, source kind, and date
- provider usage / cost / latency summaries
When a provider supports richer browsing, the plugin can add deeper views through the existing plugin UI surfaces.
## Cost And Evaluation
Every adapter response should be able to return usage records.
Paperclip should roll up:
- memory inference tokens
- embedding tokens
- external provider cost
- latency
- query count
- write count
It should also record evaluation-oriented metrics where possible:
- recall hit rate
- empty query rate
- manual correction count
- per-binding success / failure counts
This is important because a memory system that "works" but silently burns budget is not acceptable in Paperclip.
## Suggested Data Model Additions
At the control-plane level, the likely new core tables are:
- `memory_bindings`
- company-scoped key
- provider id / plugin id
- config blob
- enabled status
- `memory_binding_targets`
- target type (`company`, `agent`, later `project`)
- target id
- binding id
- `memory_operations`
- company id
- binding id
- operation type (`write`, `query`, `forget`, `browse`, `correct`)
- scope fields
- source refs
- usage / latency / cost
- success / error
Provider-specific long-form state should stay in plugin state or the provider itself unless a built-in local provider needs its own schema.
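As a rough sketch, a normalized `memory_operations` row could reuse the adapter-contract types above (the shape is illustrative, not settled):
```ts
// Hedged sketch of a memory_operations row, reusing the contract types above.
export interface MemoryOperationRow {
  id: string;
  companyId: string;
  bindingId: string;
  operation: "write" | "query" | "forget" | "browse" | "correct";
  scope: MemoryScope;
  sources?: MemorySourceRef[];
  usage?: MemoryUsage[];
  success: boolean;
  error?: string;
  createdAt: string; // ISO timestamp
}
```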
## Recommended First Built-In
The best zero-config built-in is a local markdown-first provider with optional semantic indexing.
Why:
- it matches Paperclip's local-first posture
- it is inspectable
- it is easy to back up and debug
- it gives the system a baseline even without external API keys
The design should still treat that built-in as just another provider behind the same control-plane contract.
## Rollout Phases
### Phase 1: Control-plane contract
- add memory binding models and API types
- add plugin capability / registration surface for memory providers
- add operation logging and usage reporting
### Phase 2: One built-in + one plugin example
- ship a local markdown-first provider
- ship one hosted adapter example to validate the external-provider path
### Phase 3: UI inspection
- add company / agent memory settings
- add a memory operation explorer
- add source backlinks to issues and runs
### Phase 4: Automatic hooks
- pre-run hydrate
- post-run capture
- selected issue comment / document capture
### Phase 5: Rich capabilities
- correction flows
- provider-native browse / graph views
- project-level overrides if needed
- evaluation dashboards
## Open Questions
- Should project overrides exist in V1 of the memory service, or should we force company default + agent override first?
- Do we want Paperclip-managed extraction pipelines at all, or should built-ins be the only place where Paperclip owns extraction?
- Should memory usage extend the current `cost_events` model directly, or should memory operations keep a parallel usage log and roll up into `cost_events` secondarily?
- Do we want provider install / binding changes to require approvals for some companies?
## Bottom Line
The right abstraction is:
- Paperclip owns memory bindings, scopes, provenance, governance, and usage reporting.
- Providers own extraction, ranking, storage, and provider-native memory semantics.
That gives Paperclip a stable "memory service" without locking the product to one memory philosophy or one vendor.

View File

@@ -1,488 +0,0 @@
# Release Automation and Versioning Simplification Plan
## Context
Paperclip's current release flow is documented in `doc/RELEASING.md` and implemented through:
- `.github/workflows/release.yml`
- `scripts/release-lib.sh`
- `scripts/release-start.sh`
- `scripts/release-preflight.sh`
- `scripts/release.sh`
- `scripts/create-github-release.sh`
Today the model is:
1. pick `patch`, `minor`, or `major`
2. create `release/X.Y.Z`
3. draft `releases/vX.Y.Z.md`
4. publish one or more canaries from that release branch
5. publish stable from that same branch
6. push tag + create GitHub Release
7. merge the release branch back to `master`
That is workable, but it creates friction in exactly the places that should be cheap:
- deciding `patch` vs `minor` vs `major`
- cutting and carrying release branches
- manually publishing canaries
- thinking about changelog generation for canaries
- handling npm credentials safely in a public repo
The target state from this discussion is simpler:
- every push to `master` publishes a canary automatically
- stable releases are promoted deliberately from a vetted commit
- versioning is date-driven instead of semantics-driven
- stable publishing is secure even in a public open-source repository
- changelog generation happens only for real stable releases
## Recommendation In One Sentence
Move Paperclip to semver-compatible calendar versioning, auto-publish canaries from `master`, promote stable from a chosen tested commit, and use npm trusted publishing plus GitHub environments so no long-lived npm or LLM token needs to live in Actions.
## Core Decisions
### 1. Use calendar versions, but keep semver syntax
The repo and npm tooling still assume semver-shaped version strings in many places. That does not mean Paperclip must keep semver as a product policy. It does mean the version format should remain semver-valid.
Recommended format:
- stable: `YYYY.MDD.P`
- canary: `YYYY.MDD.P-canary.N`
Examples:
- first stable on March 17, 2026: `2026.317.0`
- third canary on the `2026.317.0` line (canary numbering starts at `-canary.0`): `2026.317.0-canary.2`
Why this shape:
- it removes `patch/minor/major` decisions
- it is valid semver syntax
- it stays compatible with npm, dist-tags, and existing semver validators
- it is close to the format you actually want
Important constraints:
- the middle numeric slot should be `MDD`, where `M` is the unpadded month and `DD` is the zero-padded day
- `2026.03.17` is not the format to use
- numeric semver identifiers do not allow leading zeroes
- `2026.3.17.1` is not the format to use
- semver has three numeric components, not four
- the practical semver-safe equivalent is `2026.317.0-canary.8`
This is effectively CalVer on semver rails.
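A minimal sketch of the version math, assuming UTC dates; the function names are illustrative:
```ts
// Hedged sketch of the calendar-version math described above. The month slot
// is unpadded and the day is zero-padded, so the middle identifier never
// carries a leading zero and stays valid semver.
function calverBase(date: Date): string {
  const year = date.getUTCFullYear();
  const mdd = `${date.getUTCMonth() + 1}${String(date.getUTCDate()).padStart(2, "0")}`;
  return `${year}.${mdd}`;
}

function stableVersion(date: Date, patch: number): string {
  return `${calverBase(date)}.${patch}`; // e.g. 2026.317.0
}

function canaryVersion(date: Date, patch: number, n: number): string {
  return `${stableVersion(date, patch)}-canary.${n}`; // e.g. 2026.317.0-canary.2
}

// stableVersion(new Date(Date.UTC(2026, 2, 17)), 0) === "2026.317.0"
```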
### 2. Accept that CalVer changes the compatibility contract
This is not semver in spirit anymore. It is semver in syntax only.
That tradeoff is probably acceptable for Paperclip, but it should be explicit:
- consumers no longer infer compatibility from `major/minor/patch`
- release notes become the compatibility signal
- downstream users should prefer exact pins or deliberate upgrades
This is especially relevant for public library packages like `@paperclipai/shared`, `@paperclipai/db`, and the adapter packages.
### 3. Drop release branches for normal publishing
If every merge to `master` publishes a canary, the current `release/X.Y.Z` train model becomes more ceremony than value.
Recommended replacement:
- `master` is the only canary train
- every push to `master` can publish a canary
- stable is published from a chosen commit or canary tag on `master`
This matches the workflow you actually want:
- merge continuously
- let npm always have a fresh canary
- choose a known-good canary later and promote that commit to stable
### 4. Promote by source ref, not by "renaming" a canary
This is the most important mechanical constraint.
npm can move dist-tags, but it does not let you rename an already-published version. That means:
- you can move `latest` to `paperclipai@1.2.3`
- you cannot turn `paperclipai@2026.317.0-canary.8` into `paperclipai@2026.317.0`
So "promote canary to stable" really means:
1. choose the commit or canary tag you trust
2. rebuild from that exact commit
3. publish it again with the stable version string
Because of that, the stable workflow should take a source ref, not just a bump type.
Recommended stable input:
- `source_ref`
- commit SHA, or
- a canary git tag such as `canary/v2026.317.1-canary.8`
### 5. Only stable releases get release notes, tags, and GitHub Releases
Canaries should stay lightweight:
- publish to npm under `canary`
- optionally create a lightweight or annotated git tag
- do not create GitHub Releases
- do not require `releases/v*.md`
- do not spend LLM tokens
Stable releases should remain the public narrative surface:
- git tag `v2026.317.0`
- GitHub Release `v2026.317.0`
- stable changelog file `releases/v2026.317.0.md`
## Security Model
### Recommendation
Use npm trusted publishing with GitHub Actions OIDC, then disable token-based publishing access for the packages.
Why:
- no long-lived `NPM_TOKEN` in repo or org secrets
- no personal npm token in Actions
- short-lived credentials minted only for the authorized workflow
- automatic npm provenance for public packages in public repos
This is the cleanest answer to the open-repo security concern.
### Concrete controls
#### 1. Use one release workflow file
Use one workflow filename for both canary and stable publishing:
- `.github/workflows/release.yml`
Why:
- npm trusted publishing is configured per workflow filename
- npm currently allows one trusted publisher configuration per package
- GitHub environments can still provide separate canary/stable approval rules inside the same workflow
#### 2. Use separate GitHub environments
Recommended environments:
- `npm-canary`
- `npm-stable`
Recommended policy:
- `npm-canary`
- allowed branch: `master`
- no human reviewer required
- `npm-stable`
- allowed branch: `master`
- required reviewer enabled
- prevent self-review enabled
- admin bypass disabled
Stable should require an explicit second human gate even if the workflow is manually dispatched.
#### 3. Lock down workflow edits
Add or tighten `CODEOWNERS` coverage for:
- `.github/workflows/*`
- `scripts/release*`
- `doc/RELEASING.md`
This matters because trusted publishing authorizes a workflow file. The biggest remaining risk is not secret exfiltration from forks. It is a maintainer-approved change to the release workflow itself.
#### 4. Remove traditional npm token access after OIDC works
After trusted publishing is verified:
- set package publishing access to require 2FA and disallow tokens
- revoke any legacy automation tokens
That eliminates the "someone stole the npm token" class of failure.
### What not to do
- do not put your personal Claude or npm token in GitHub Actions
- do not run release logic from `pull_request_target`
- do not make stable publishing depend on a repo secret if OIDC can handle it
- do not create canary GitHub Releases
## Changelog Strategy
### Recommendation
Generate stable changelogs only, and keep LLM-assisted changelog generation out of CI for now.
Reasoning:
- canaries happen too often
- canaries do not need polished public notes
- putting a personal Claude token into Actions is not worth the risk
- stable release cadence is low enough that a human-in-the-loop step is acceptable
Recommended stable path:
1. pick a canary commit or tag
2. run changelog generation locally from a trusted machine
3. commit `releases/vYYYY.MDD.P.md`
4. run stable promotion
If the notes are not ready yet, a fallback is acceptable:
- publish stable
- create a minimal GitHub Release
- update `releases/vYYYY.MDD.P.md` immediately afterward
But the better steady-state is to have the stable notes committed before stable publish.
### Future option
If you later want CI-assisted changelog drafting, do it with:
- a dedicated service account
- a token scoped only for changelog generation
- a manual workflow
- a dedicated environment with required reviewers
That is phase-two hardening work, not a phase-one requirement.
## Proposed Future Workflow
### Canary workflow
Trigger:
- `push` on `master`
Steps:
1. checkout the merged `master` commit
2. run verification on that exact commit
3. compute canary version for current UTC date
4. version public packages to `YYYY.MDD.P-canary.N`
5. publish to npm with dist-tag `canary`
6. create a canary git tag for traceability
Recommended canary tag format:
- `canary/v2026.317.1-canary.4`
Outputs:
- npm canary published
- git tag created
- no GitHub Release
- no changelog file required
### Stable workflow
Trigger:
- `workflow_dispatch`
Inputs:
- `source_ref`
- optional `stable_date`
- `dry_run`
Steps:
1. checkout `source_ref`
2. run verification on that exact commit
3. compute the next stable patch slot for the UTC date or provided override
4. fail if `vYYYY.MDD.P` already exists
5. require `releases/vYYYY.MDD.P.md`
6. version public packages to `YYYY.MDD.P`
7. publish to npm under `latest`
8. create git tag `vYYYY.MDD.P`
9. push tag
10. create GitHub Release from `releases/vYYYY.MDD.P.md`
Outputs:
- stable npm release
- stable git tag
- GitHub Release
- clean public changelog surface
## Implementation Guidance
### 1. Replace bump-type version math with explicit version computation
The current release scripts depend on:
- `patch`
- `minor`
- `major`
That logic should be replaced with:
- `compute_canary_version_for_date`
- `compute_stable_version_for_date`
For example:
- `next_stable_version(2026-03-17) -> 2026.317.0`
- `next_canary_for_utc_date(2026-03-17) -> 2026.317.0-canary.0`
### 2. Stop requiring `release/X.Y.Z`
These current invariants should be removed from the happy path:
- "must run from branch `release/X.Y.Z`"
- "stable and canary for `X.Y.Z` come from the same release branch"
- `release-start.sh`
Replace them with:
- canary must run from `master`
- stable may run from a pinned `source_ref`
### 3. Keep Changesets only if it stays helpful
The current system uses Changesets to:
- rewrite package versions
- maintain package-level `CHANGELOG.md` files
- publish packages
With CalVer, Changesets may still be useful for publish orchestration, but it should no longer own version selection.
Recommended implementation order:
1. keep `changeset publish` if it works with explicitly-set versions
2. replace version computation with a small explicit versioning script
3. if Changesets keeps fighting the model, remove it from release publishing entirely
Paperclip's release problem is now "publish the whole fixed package set at one explicit version", not "derive the next semantic bump from human intent".
### 4. Add a dedicated versioning script
Recommended new script:
- `scripts/set-release-version.mjs`
Responsibilities:
- set the version in all public publishable packages
- update any internal exact-version references needed for publishing
- update CLI version strings
- avoid broad string replacement across unrelated files
This is safer than keeping a bump-oriented changeset flow and then forcing it into a date-based scheme.
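A minimal sketch of what that script could look like; the package list is illustrative, not the real publish set:
```ts
// Hedged sketch of scripts/set-release-version.mjs: rewrite the version field
// in an explicit list of publishable package.json files, nothing else.
import { readFileSync, writeFileSync } from "node:fs";

const version = process.argv[2];
if (!/^\d{4}\.\d{3,4}\.\d+(-canary\.\d+)?$/.test(version ?? "")) {
  console.error(`Not a calendar version: ${version}`);
  process.exit(1);
}

const packages = [
  "package.json",
  "packages/shared/package.json",
  "packages/adapter-utils/package.json", // illustrative list, not the real set
];

for (const file of packages) {
  const pkg = JSON.parse(readFileSync(file, "utf8"));
  pkg.version = version;
  writeFileSync(file, JSON.stringify(pkg, null, 2) + "\n");
}
```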
### 5. Keep rollback based on dist-tags
`rollback-latest.sh` should stay, but it should stop assuming a semver meaning beyond syntax.
It should continue to:
- repoint `latest` to a prior stable version
- never unpublish
## Tradeoffs and Risks
### 1. The stable patch slot is now part of the version contract
With `YYYY.MDD.P`, same-day hotfixes are supported, but the stable patch slot is now part of the visible version format.
That is the right tradeoff because:
1. npm still gets semver-valid versions
2. same-day hotfixes stay possible
3. chronological ordering still works as long as the day is zero-padded inside `MDD`
### 2. Public package consumers lose semver intent signaling
This is the main downside of CalVer.
If that becomes a problem, one alternative is:
- use CalVer for the CLI package only
- keep semver for library packages
That is more complex operationally, so I would not start there unless package consumers actually need it.
### 3. Auto-canary means more publish traffic
Publishing on every `master` merge means:
- more npm versions
- more git tags
- more registry noise
That is acceptable if canaries stay clearly separate:
- npm dist-tag `canary`
- no GitHub Release
- no external announcement
## Rollout Plan
### Phase 1: Security foundation
1. Create `release.yml`
2. Configure npm trusted publishers for all public packages
3. Create `npm-canary` and `npm-stable` environments
4. Add `CODEOWNERS` protection for release files
5. Verify OIDC publishing works
6. Disable token-based publishing access and revoke old tokens
### Phase 2: Canary automation
1. Add canary workflow on `push` to `master`
2. Add explicit calendar-version computation
3. Add canary git tagging
4. Remove changelog requirement from canaries
5. Update `doc/RELEASING.md`
### Phase 3: Stable promotion
1. Add manual stable workflow with `source_ref`
2. Require stable notes file
3. Publish stable + tag + GitHub Release
4. Update rollback docs and scripts
5. Retire release-branch assumptions
### Phase 4: Cleanup
1. Remove `release-start.sh` from the primary path
2. Remove `patch/minor/major` from maintainer docs
3. Decide whether to keep or remove Changesets from publishing
4. Document the CalVer compatibility contract publicly
## Concrete Recommendation
Paperclip should adopt this model:
- stable versions: `YYYY.MDD.P`
- canary versions: `YYYY.MDD.P-canary.N`
- canaries auto-published on every push to `master`
- stables manually promoted from a chosen tested commit or canary tag
- no release branches in the default path
- no canary changelog files
- no canary GitHub Releases
- no Claude token in GitHub Actions
- no npm automation token in GitHub Actions
- npm trusted publishing plus GitHub environments for release security
That gets rid of the annoying part of semver without fighting npm, makes canaries cheap, keeps stables deliberate, and materially improves the security posture of the public repository.
## External References
- npm trusted publishing: https://docs.npmjs.com/trusted-publishers/
- npm dist-tags: https://docs.npmjs.com/adding-dist-tags-to-packages/
- npm semantic versioning guidance: https://docs.npmjs.com/about-semantic-versioning/
- GitHub environments and deployment protection rules: https://docs.github.com/en/actions/how-tos/deploy/configure-and-manage-deployments/manage-environments
- GitHub secrets behavior for forks: https://docs.github.com/en/actions/how-tos/write-workflows/choose-what-workflows-do/use-secrets

File diff suppressed because it is too large

View File

@@ -1,882 +0,0 @@
# Workspace Technical Implementation Spec
## Role of This Document
This document translates [workspace-product-model-and-work-product.md](/Users/dotta/paperclip-subissues/doc/plans/workspace-product-model-and-work-product.md) into an implementation-ready engineering plan.
It is intentionally concrete:
- schema and migration shape
- shared contract updates
- route and service changes
- UI changes
- rollout and compatibility rules
This is the implementation target for the first workspace-aware delivery slice.
## Locked Decisions
These decisions are treated as settled for this implementation:
1. Add a new durable `execution_workspaces` table now.
2. Each issue has at most one current execution workspace at a time.
3. `issues` get explicit `project_workspace_id` and `execution_workspace_id`.
4. Workspace reuse is in scope for V1.
5. The feature is gated in the UI by `/instance/settings > Experimental > Workspaces`.
6. The gate is UI-only. Backend model changes and migrations always ship.
7. Existing users upgrade into compatibility-preserving defaults.
8. `project_workspaces` evolves in place rather than being replaced.
9. Work product is issue-first, with optional links to execution workspaces and runtime services.
10. GitHub is the only PR provider in the first slice.
11. Both `adapter_managed` and `cloud_sandbox` execution modes are in scope.
12. Workspace controls ship first inside existing project properties, not in a new global navigation area.
13. Subissues are out of scope for this implementation slice.
## Non-Goals
- Building a full code review system
- Solving subissue UX in this slice
- Implementing reusable shared workspace definitions across projects in this slice
- Reworking all current runtime service behavior before introducing execution workspaces
## Existing Baseline
The repo already has:
- `project_workspaces`
- `projects.execution_workspace_policy`
- `issues.execution_workspace_settings`
- runtime service persistence in `workspace_runtime_services`
- local git-worktree realization in `workspace-runtime.ts`
This implementation should build on that baseline rather than fork it.
## Terminology
- `Project workspace`: durable configured codebase/root for a project
- `Execution workspace`: actual runtime workspace used for one or more issues
- `Work product`: user-facing output such as PR, preview, branch, commit, artifact, document
- `Runtime service`: process or service owned or tracked for a workspace
- `Compatibility mode`: existing behavior preserved for upgraded installs with no explicit workspace opt-in
## Architecture Summary
The first slice should introduce three explicit layers:
1. `Project workspace`
- existing durable project-scoped codebase record
- extended to support local, git, non-git, and remote-managed shapes
2. `Execution workspace`
- new durable runtime record
- represents shared, isolated, operator-branch, or remote-managed execution context
3. `Issue work product`
- new durable output record
- stores PRs, previews, branches, commits, artifacts, and documents
The issue remains the planning and ownership unit.
The execution workspace remains the runtime unit.
The work product remains the deliverable/output unit.
## Configuration and Deployment Topology
## Important correction
This repo already uses `PAPERCLIP_DEPLOYMENT_MODE` for auth/deployment behavior (`local_trusted | authenticated`).
Do not overload that variable for workspace execution topology.
## New env var
Add a separate execution-host hint:
- `PAPERCLIP_EXECUTION_TOPOLOGY=local|cloud|hybrid`
Default:
- if unset, treat as `local`
Purpose:
- influences defaults and validation for workspace configuration
- does not change current auth/deployment semantics
- does not break existing installs
### Semantics
- `local`
- Paperclip may create host-local worktrees, processes, and paths
- `cloud`
- Paperclip should assume no durable host-local execution workspace management
- adapter-managed and cloud-sandbox flows should be treated as first-class
- `hybrid`
- both local and remote execution strategies may exist
This is a guardrail and defaulting aid, not a hard policy engine in the first slice.
## Instance Settings
Add a new `Experimental` section under `/instance/settings`.
### New setting
- `experimental.workspaces: boolean`
Rules:
- default `false`
- UI-only gate
- stored in instance config or instance settings API response
- backend routes and migrations remain available even when false
### UI behavior when off
- hide workspace-specific issue controls
- hide workspace-specific project configuration
- hide issue `Work Product` tab if it would otherwise be empty
- do not remove or invalidate any stored workspace data
## Data Model
## 1. Extend `project_workspaces`
Current table exists and should evolve in place.
### New columns
- `source_type text not null default 'local_path'`
- `local_path | git_repo | non_git_path | remote_managed`
- `default_ref text null`
- `visibility text not null default 'default'`
- `default | advanced`
- `setup_command text null`
- `cleanup_command text null`
- `remote_provider text null`
- examples: `github`, `openai`, `anthropic`, `custom`
- `remote_workspace_ref text null`
- `shared_workspace_key text null`
- reserved for future cross-project shared workspace definitions
### Backfill rules
- if existing row has `repo_url`, backfill `source_type='git_repo'`
- else if existing row has `cwd`, backfill `source_type='local_path'`
- else backfill `source_type='remote_managed'`
- copy existing `repo_ref` into `default_ref`
### Indexes
- retain current indexes
- add `(project_id, source_type)`
- add `(company_id, shared_workspace_key)` non-unique for future support
## 2. Add `execution_workspaces`
Create a new durable table.
### Columns
- `id uuid pk`
- `company_id uuid not null`
- `project_id uuid not null`
- `project_workspace_id uuid null`
- `source_issue_id uuid null`
- `mode text not null`
- `shared_workspace | isolated_workspace | operator_branch | adapter_managed | cloud_sandbox`
- `strategy_type text not null`
- `project_primary | git_worktree | adapter_managed | cloud_sandbox`
- `name text not null`
- `status text not null default 'active'`
- `active | idle | in_review | archived | cleanup_failed`
- `cwd text null`
- `repo_url text null`
- `base_ref text null`
- `branch_name text null`
- `provider_type text not null default 'local_fs'`
- `local_fs | git_worktree | adapter_managed | cloud_sandbox`
- `provider_ref text null`
- `derived_from_execution_workspace_id uuid null`
- `last_used_at timestamptz not null default now()`
- `opened_at timestamptz not null default now()`
- `closed_at timestamptz null`
- `cleanup_eligible_at timestamptz null`
- `cleanup_reason text null`
- `metadata jsonb null`
- `created_at timestamptz not null default now()`
- `updated_at timestamptz not null default now()`
### Foreign keys
- `company_id -> companies.id`
- `project_id -> projects.id`
- `project_workspace_id -> project_workspaces.id on delete set null`
- `source_issue_id -> issues.id on delete set null`
- `derived_from_execution_workspace_id -> execution_workspaces.id on delete set null`
### Indexes
- `(company_id, project_id, status)`
- `(company_id, project_workspace_id, status)`
- `(company_id, source_issue_id)`
- `(company_id, last_used_at desc)`
- `(company_id, branch_name)` non-unique
## 3. Extend `issues`
Add explicit workspace linkage.
### New columns
- `project_workspace_id uuid null`
- `execution_workspace_id uuid null`
- `execution_workspace_preference text null`
- `inherit | shared_workspace | isolated_workspace | operator_branch | reuse_existing`
### Foreign keys
- `project_workspace_id -> project_workspaces.id on delete set null`
- `execution_workspace_id -> execution_workspaces.id on delete set null`
### Backfill rules
- all existing issues get null values
- null should be interpreted as compatibility/inherit behavior
### Invariants
- if `project_workspace_id` is set, it must belong to the issue's project and company
- if `execution_workspace_id` is set, it must belong to the issue's company
- if `execution_workspace_id` is set, the referenced workspace's `project_id` must match the issue's `project_id`
## 4. Add `issue_work_products`
Create a new durable table for outputs.
### Columns
- `id uuid pk`
- `company_id uuid not null`
- `project_id uuid null`
- `issue_id uuid not null`
- `execution_workspace_id uuid null`
- `runtime_service_id uuid null`
- `type text not null`
- `preview_url | runtime_service | pull_request | branch | commit | artifact | document`
- `provider text not null`
- `paperclip | github | vercel | s3 | custom`
- `external_id text null`
- `title text not null`
- `url text null`
- `status text not null`
- `active | ready_for_review | approved | changes_requested | merged | closed | failed | archived`
- `review_state text not null default 'none'`
- `none | needs_board_review | approved | changes_requested`
- `is_primary boolean not null default false`
- `health_status text not null default 'unknown'`
- `unknown | healthy | unhealthy`
- `summary text null`
- `metadata jsonb null`
- `created_by_run_id uuid null`
- `created_at timestamptz not null default now()`
- `updated_at timestamptz not null default now()`
### Foreign keys
- `company_id -> companies.id`
- `project_id -> projects.id on delete set null`
- `issue_id -> issues.id on delete cascade`
- `execution_workspace_id -> execution_workspaces.id on delete set null`
- `runtime_service_id -> workspace_runtime_services.id on delete set null`
- `created_by_run_id -> heartbeat_runs.id on delete set null`
### Indexes
- `(company_id, issue_id, type)`
- `(company_id, execution_workspace_id, type)`
- `(company_id, provider, external_id)`
- `(company_id, updated_at desc)`
## 5. Extend `workspace_runtime_services`
This table already exists and should remain the system of record for owned/tracked services.
### New column
- `execution_workspace_id uuid null`
### Foreign key
- `execution_workspace_id -> execution_workspaces.id on delete set null`
### Behavior
- runtime services remain workspace-first
- issue UIs should surface them through linked execution workspaces and work products
## Shared Contracts
## 1. `packages/shared`
### Update project workspace types and validators
Add fields:
- `sourceType`
- `defaultRef`
- `visibility`
- `setupCommand`
- `cleanupCommand`
- `remoteProvider`
- `remoteWorkspaceRef`
- `sharedWorkspaceKey`
### Add execution workspace types and validators
New shared types:
- `ExecutionWorkspace`
- `ExecutionWorkspaceMode`
- `ExecutionWorkspaceStatus`
- `ExecutionWorkspaceProviderType`
### Add work product types and validators
New shared types:
- `IssueWorkProduct`
- `IssueWorkProductType`
- `IssueWorkProductStatus`
- `IssueWorkProductReviewState`
### Update issue types and validators
Add:
- `projectWorkspaceId`
- `executionWorkspaceId`
- `executionWorkspacePreference`
- `workProducts?: IssueWorkProduct[]`
### Extend project execution policy contract
Replace the current narrow policy with a more explicit shape:
- `enabled`
- `defaultMode`
- `shared_workspace | isolated_workspace | operator_branch | adapter_default`
- `allowIssueOverride`
- `defaultProjectWorkspaceId`
- `workspaceStrategy`
- `branchPolicy`
- `pullRequestPolicy`
- `runtimePolicy`
- `cleanupPolicy`
Do not try to encode every possible provider-specific field in V1. Keep provider-specific extensibility in nested JSON where needed.
## Service Layer Changes
## 1. Project service
Update project workspace CRUD to handle the extended schema.
### Required rules
- when setting a primary workspace, clear `is_primary` on siblings
- `source_type=remote_managed` may have null `cwd`
- local/git-backed workspaces should still require one of `cwd` or `repo_url`
- preserve current behavior for existing callers that only send `cwd/repoUrl/repoRef`
## 2. Issue service
Update create/update flows to handle explicit workspace binding.
### Create behavior
Resolve defaults in this order:
1. explicit `projectWorkspaceId` from request
2. `project.executionWorkspacePolicy.defaultProjectWorkspaceId`
3. project's primary workspace
4. null
Resolve `executionWorkspacePreference`:
1. explicit request field
2. project policy default
3. compatibility fallback to `inherit`
Do not create an execution workspace at issue creation time unless:
- `reuse_existing` is explicitly chosen and `executionWorkspaceId` is provided
Otherwise, workspace realization happens when execution starts.
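A sketch of that resolution order; the input and policy shapes below are simplified stand-ins, not the real shared types:
```ts
// Hedged sketch of the create-time resolution order above.
interface CreateIssueInput {
  projectWorkspaceId?: string;
  executionWorkspacePreference?: string;
}

interface ProjectDefaults {
  policyDefaultWorkspaceId?: string; // executionWorkspacePolicy.defaultProjectWorkspaceId
  primaryWorkspaceId?: string;
  policyDefaultPreference?: string; // assumed to derive from the policy defaultMode
}

function resolveWorkspaceBinding(input: CreateIssueInput, project: ProjectDefaults) {
  const projectWorkspaceId =
    input.projectWorkspaceId ??
    project.policyDefaultWorkspaceId ??
    project.primaryWorkspaceId ??
    null;
  const preference =
    input.executionWorkspacePreference ??
    project.policyDefaultPreference ??
    "inherit"; // compatibility fallback
  return { projectWorkspaceId, preference };
}
```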
### Update behavior
- allow changing `projectWorkspaceId` only if the workspace belongs to the same project
- allow setting `executionWorkspaceId` only if it belongs to the same company and project
- do not automatically destroy or relink historical work products when workspace linkage changes
## 3. Workspace realization service
Refactor `workspace-runtime.ts` so realization produces or reuses an `execution_workspaces` row.
### New flow
Input:
- issue
- project workspace
- project execution policy
- execution topology hint
- adapter/runtime configuration
Output:
- realized execution workspace record
- runtime cwd/provider metadata
### Required modes
- `shared_workspace`
- reuse a stable execution workspace representing the project primary/shared workspace
- `isolated_workspace`
- create or reuse a derived isolated execution workspace
- `operator_branch`
- create or reuse a long-lived branch workspace
- `adapter_managed`
- create an execution workspace with provider references and optional null `cwd`
- `cloud_sandbox`
- same as adapter-managed, but explicit remote sandbox semantics
### Reuse rules
When `reuse_existing` is requested:
- only list active or recently used execution workspaces
- only for the same project
- only for the same project workspace if one is specified
- exclude archived and cleanup-failed workspaces
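A sketch of that candidate filter over the table shape above, with a simplified row type:
```ts
// Hedged sketch of the reuse_existing candidate filter.
interface ExecutionWorkspaceRow {
  projectId: string;
  projectWorkspaceId: string | null;
  status: "active" | "idle" | "in_review" | "archived" | "cleanup_failed";
  lastUsedAt: number; // epoch ms
}

function reuseCandidates(
  rows: ExecutionWorkspaceRow[],
  projectId: string,
  projectWorkspaceId: string | undefined,
  recentCutoffMs: number,
): ExecutionWorkspaceRow[] {
  return rows.filter(
    (w) =>
      w.projectId === projectId &&
      (projectWorkspaceId === undefined || w.projectWorkspaceId === projectWorkspaceId) &&
      w.status !== "archived" &&
      w.status !== "cleanup_failed" &&
      (w.status === "active" || w.lastUsedAt >= recentCutoffMs),
  );
}
```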
### Shared workspace realization
For compatibility mode and shared-workspace projects:
- create a stable execution workspace per project workspace when first needed
- reuse it for subsequent runs
This avoids a special-case branch in later work product linkage.
## 4. Runtime service integration
When runtime services are started or reused:
- populate `execution_workspace_id`
- continue populating `project_workspace_id`, `project_id`, and `issue_id`
When a runtime service yields a URL:
- optionally create or update a linked `issue_work_products` row of type `runtime_service` or `preview_url`
## 5. PR and preview reporting
Add a service for creating/updating `issue_work_products`.
### Supported V1 product types
- `pull_request`
- `preview_url`
- `runtime_service`
- `branch`
- `commit`
- `artifact`
- `document`
### GitHub PR reporting
For V1, GitHub is the only provider with richer semantics.
Supported statuses:
- `draft`
- `ready_for_review`
- `approved`
- `changes_requested`
- `merged`
- `closed`
Represent these in `status` and `review_state` rather than inventing a separate PR table in V1.
## Routes and API
## 1. Project workspace routes
Extend existing routes:
- `GET /projects/:id/workspaces`
- `POST /projects/:id/workspaces`
- `PATCH /projects/:id/workspaces/:workspaceId`
- `DELETE /projects/:id/workspaces/:workspaceId`
### New accepted/returned fields
- `sourceType`
- `defaultRef`
- `visibility`
- `setupCommand`
- `cleanupCommand`
- `remoteProvider`
- `remoteWorkspaceRef`
## 2. Execution workspace routes
Add:
- `GET /companies/:companyId/execution-workspaces`
- filters:
- `projectId`
- `projectWorkspaceId`
- `status`
- `issueId`
- `reuseEligible=true`
- `GET /execution-workspaces/:id`
- `PATCH /execution-workspaces/:id`
- update status/metadata/cleanup fields only in V1
Do not add top-level navigation for these routes yet.
## 3. Work product routes
Add:
- `GET /issues/:id/work-products`
- `POST /issues/:id/work-products`
- `PATCH /work-products/:id`
- `DELETE /work-products/:id`
### V1 mutation permissions
- board can create/update/delete all
- agents can create/update for issues they are assigned or currently executing
- deletion should generally archive rather than hard-delete once linked to historical output
## 4. Issue routes
Extend existing create/update payloads to accept:
- `projectWorkspaceId`
- `executionWorkspacePreference`
- `executionWorkspaceId`
Extend `GET /issues/:id` to return:
- `projectWorkspaceId`
- `executionWorkspaceId`
- `executionWorkspacePreference`
- `currentExecutionWorkspace`
- `workProducts[]`
## 5. Instance settings routes
Add support for:
- reading/writing `experimental.workspaces`
This is a UI gate only.
If there is no generic instance settings storage yet, the first slice can store this in the existing config/instance settings mechanism used by `/instance/settings`.
## UI Changes
## 1. `/instance/settings`
Add section:
- `Experimental`
- `Enable Workspaces`
When off:
- hide new workspace-specific affordances
- do not alter existing project or issue behavior
## 2. Project properties
Do not create a separate `Code` tab yet.
Ship inside existing project properties first.
### Add or re-enable sections
- `Project Workspaces`
- `Execution Defaults`
- `Provisioning`
- `Pull Requests`
- `Previews and Runtime`
- `Cleanup`
### Display rules
- only show when `experimental.workspaces=true`
- keep wording generic enough for local and remote setups
- only show git-specific fields when `sourceType=git_repo`
- only show local-path-specific fields when not `remote_managed`
## 3. Issue create dialog
When the workspace experimental flag is on and the selected project has workspace automation or workspaces:
### Basic fields
- `Codebase`
- select from project workspaces
- default to policy default or primary workspace
- `Execution mode`
- `Project default`
- `Shared workspace`
- `Isolated workspace`
- `Operator branch`
### Advanced section
- `Reuse existing execution workspace`
This control should query only:
- same project
- same codebase if selected
- active/recent workspaces
- compact labels with branch or workspace name
Do not expose all execution workspaces in a noisy unfiltered list.
## 4. Issue detail
Add a `Work Product` tab when:
- the experimental flag is on, or
- the issue already has work products
### Show
- current execution workspace summary
- PR cards
- preview cards
- branch/commit rows
- artifacts/documents
Add compact header chips:
- codebase
- workspace
- PR count/status
- preview status
## 5. Execution workspace detail page
Add a detail route but no nav item.
Linked from:
- issue work product tab
- project workspace/execution panels
### Show
- identity and status
- project workspace origin
- source issue
- linked issues
- branch/ref/provider info
- runtime services
- work products
- cleanup state
## Runtime and Adapter Behavior
## 1. Local adapters
For local adapters:
- continue to use existing cwd/worktree realization paths
- persist the result as execution workspaces
- attach runtime services and work product to the execution workspace and issue
## 2. Remote or cloud adapters
For remote adapters:
- allow execution workspaces with null `cwd`
- require provider metadata sufficient to identify the remote workspace/session
- allow work product creation without any host-local process ownership
Examples:
- cloud coding agent opens a branch and PR on GitHub
- Vercel preview URL is reported back as a preview work product
- remote sandbox emits artifact URLs
## 3. Approval-aware PR workflow
V1 should support richer PR state tracking, but not a full review engine.
### Required actions
- `open_pr`
- `mark_ready`
### Required review states
- `draft`
- `ready_for_review`
- `approved`
- `changes_requested`
- `merged`
- `closed`
### Storage approach
- represent these as `issue_work_products` with `type='pull_request'`
- use `status` and `review_state`
- store provider-specific details in `metadata`
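As a concrete illustration, a ready-for-review GitHub PR might be recorded as a work product like this; the IDs, URL, and branch name are invented for the example:
```ts
// Hedged example payload for an issue_work_products row representing a PR.
const prWorkProduct = {
  type: "pull_request",
  provider: "github",
  externalId: "paperclipai/paperclip#432", // illustrative
  title: "Add execution workspaces",
  url: "https://github.com/paperclipai/paperclip/pull/432", // illustrative
  status: "ready_for_review",
  reviewState: "none",
  isPrimary: true,
  metadata: { headRef: "feat/execution-workspaces", draft: false },
} as const;
```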
## Migration Plan
## 1. Existing installs
The migration posture is backward-compatible by default.
### Guarantees
- no existing project must be edited before it keeps working
- no existing issue flow should start requiring workspace input
- all new nullable columns must preserve current behavior when absent
## 2. Project workspace migration
Migrate `project_workspaces` in place.
### Backfill
- derive `source_type`
- copy `repo_ref` to `default_ref`
- leave new optional fields null
## 3. Issue migration
Do not backfill `project_workspace_id` or `execution_workspace_id` on all existing issues.
Reason:
- the safest migration is to preserve current runtime behavior and bind explicitly only when new workspace-aware flows are used
Interpret old issues as:
- `executionWorkspacePreference = inherit`
- compatibility/shared behavior
## 4. Runtime history migration
Do not attempt a perfect historical reconstruction of execution workspaces in the migration itself.
Instead:
- create execution workspace records forward from first new run
- optionally add a later backfill tool for recent runtime services if it proves valuable
## Rollout Order
## Phase 1: Schema and shared contracts
1. extend `project_workspaces`
2. add `execution_workspaces`
3. add `issue_work_products`
4. extend `issues`
5. extend `workspace_runtime_services`
6. update shared types and validators
## Phase 2: Service wiring
1. update project workspace CRUD
2. update issue create/update resolution
3. refactor workspace realization to persist execution workspaces
4. attach runtime services to execution workspaces
5. add work product service and persistence
## Phase 3: API and UI
1. add execution workspace routes
2. add work product routes
3. add instance experimental settings toggle
4. re-enable and revise project workspace UI behind the flag
5. add issue create/update controls behind the flag
6. add issue work product tab
7. add execution workspace detail page
## Phase 4: Provider integrations
1. GitHub PR reporting
2. preview URL reporting
3. runtime-service-to-work-product linking
4. remote/cloud provider references
## Acceptance Criteria
1. Existing installs continue to behave predictably with no required reconfiguration.
2. Projects can define local, git, non-git, and remote-managed project workspaces.
3. Issues can explicitly select a project workspace and execution preference.
4. Each issue can point to one current execution workspace.
5. Multiple issues can intentionally reuse the same execution workspace.
6. Execution workspaces are persisted for both local and remote execution flows.
7. Work products can be attached to issues with optional execution workspace linkage.
8. GitHub PRs can be represented with richer lifecycle states.
9. The main UI remains simple when the experimental flag is off.
10. No top-level workspace navigation is required for this first slice.
## Risks and Mitigations
## Risk: too many overlapping workspace concepts
Mitigation:
- keep issue UI to `Codebase` and `Execution mode`
- reserve execution workspace details for advanced pages
## Risk: breaking current projects on upgrade
Mitigation:
- nullable schema additions
- in-place `project_workspaces` migration
- compatibility defaults
## Risk: local-only assumptions leaking into cloud mode
Mitigation:
- make `cwd` optional for execution workspaces
- use `provider_type` and `provider_ref`
- use `PAPERCLIP_EXECUTION_TOPOLOGY` as a defaulting guardrail
## Risk: turning PRs into a bespoke subsystem too early
Mitigation:
- represent PRs as work products in V1
- keep provider-specific details in metadata
- defer a dedicated PR table unless usage proves it necessary
## Recommended First Engineering Slice
If we want the narrowest useful implementation:
1. extend `project_workspaces`
2. add `execution_workspaces`
3. extend `issues` with explicit workspace fields
4. persist execution workspaces from existing local workspace realization
5. add `issue_work_products`
6. show project workspace controls and issue workspace controls behind the experimental flag
7. add issue `Work Product` tab with PR/preview/runtime service display
This slice is enough to validate the model without yet building every provider integration or cleanup workflow.

View File

@@ -1,33 +0,0 @@
services:
review:
build:
context: .
dockerfile: docker/untrusted-review/Dockerfile
init: true
tty: true
stdin_open: true
working_dir: /work
environment:
HOME: "/home/reviewer"
CODEX_HOME: "/home/reviewer/.codex"
CLAUDE_HOME: "/home/reviewer/.claude"
PAPERCLIP_HOME: "/home/reviewer/.paperclip-review"
OPENAI_API_KEY: "${OPENAI_API_KEY:-}"
ANTHROPIC_API_KEY: "${ANTHROPIC_API_KEY:-}"
GITHUB_TOKEN: "${GITHUB_TOKEN:-}"
ports:
- "${REVIEW_PAPERCLIP_PORT:-3100}:3100"
- "${REVIEW_VITE_PORT:-5173}:5173"
volumes:
- review-home:/home/reviewer
- review-work:/work
cap_drop:
- ALL
security_opt:
- no-new-privileges:true
tmpfs:
- /tmp:mode=1777,size=1g
volumes:
review-home:
review-work:

View File

@@ -1,44 +0,0 @@
FROM node:lts-trixie-slim
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
bash \
ca-certificates \
curl \
fd-find \
gh \
git \
jq \
less \
openssh-client \
procps \
ripgrep \
&& rm -rf /var/lib/apt/lists/*
RUN ln -sf /usr/bin/fdfind /usr/local/bin/fd
RUN corepack enable \
&& npm install --global --omit=dev @anthropic-ai/claude-code@latest @openai/codex@latest
RUN useradd --create-home --shell /bin/bash reviewer
ENV HOME=/home/reviewer \
CODEX_HOME=/home/reviewer/.codex \
CLAUDE_HOME=/home/reviewer/.claude \
PAPERCLIP_HOME=/home/reviewer/.paperclip-review \
PNPM_HOME=/home/reviewer/.local/share/pnpm \
PATH=/home/reviewer/.local/share/pnpm:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
WORKDIR /work
COPY --chown=reviewer:reviewer docker/untrusted-review/bin/review-checkout-pr /usr/local/bin/review-checkout-pr
RUN chmod +x /usr/local/bin/review-checkout-pr \
&& mkdir -p /work \
&& chown -R reviewer:reviewer /work
USER reviewer
EXPOSE 3100 5173
CMD ["bash", "-l"]

View File

@@ -1,65 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
usage() {
cat <<'EOF'
Usage: review-checkout-pr <owner/repo|github-url> <pr-number> [checkout-dir]
Examples:
review-checkout-pr paperclipai/paperclip 432
review-checkout-pr https://github.com/paperclipai/paperclip.git 432
EOF
}
if [[ $# -lt 2 || $# -gt 3 ]]; then
usage >&2
exit 1
fi
normalize_repo_slug() {
local raw="$1"
raw="${raw#git@github.com:}"
raw="${raw#ssh://git@github.com/}"
raw="${raw#https://github.com/}"
raw="${raw#http://github.com/}"
raw="${raw%.git}"
printf '%s\n' "${raw#/}"
}
repo_slug="$(normalize_repo_slug "$1")"
pr_number="$2"
if [[ ! "$repo_slug" =~ ^[^/]+/[^/]+$ ]]; then
echo "Expected GitHub repo slug like owner/repo or a GitHub repo URL, got: $1" >&2
exit 1
fi
if [[ ! "$pr_number" =~ ^[0-9]+$ ]]; then
echo "PR number must be numeric, got: $pr_number" >&2
exit 1
fi
repo_key="${repo_slug//\//-}"
mirror_dir="/work/repos/${repo_key}"
checkout_dir="${3:-/work/checkouts/${repo_key}/pr-${pr_number}}"
pr_ref="refs/remotes/origin/pr/${pr_number}"
mkdir -p "$(dirname "$mirror_dir")" "$(dirname "$checkout_dir")"
if [[ ! -d "$mirror_dir/.git" ]]; then
if command -v gh >/dev/null 2>&1; then
gh repo clone "$repo_slug" "$mirror_dir" -- --filter=blob:none
else
git clone --filter=blob:none "https://github.com/${repo_slug}.git" "$mirror_dir"
fi
fi
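# Fetch the PR head into a stable per-PR ref so repeated runs update it in place.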
git -C "$mirror_dir" fetch --force origin "pull/${pr_number}/head:${pr_ref}"
if [[ -e "$checkout_dir" ]]; then
printf '%s\n' "$checkout_dir"
exit 0
fi
git -C "$mirror_dir" worktree add --detach "$checkout_dir" "$pr_ref" >/dev/null
printf '%s\n' "$checkout_dir"

View File

@@ -38,33 +38,10 @@ PATCH /api/companies/{companyId}
{
"name": "Updated Name",
"description": "Updated description",
"budgetMonthlyCents": 100000,
"logoAssetId": "b9f5e911-6de5-4cd0-8dc6-a55a13bc02f6"
"budgetMonthlyCents": 100000
}
```
## Upload Company Logo
Upload an image for a company icon and store it as that company's logo.
```
POST /api/companies/{companyId}/logo
Content-Type: multipart/form-data
```
Valid image content types:
- `image/png`
- `image/jpeg`
- `image/jpg`
- `image/webp`
- `image/gif`
- `image/svg+xml`
Company logo uploads use the normal Paperclip attachment size limit.
Then set the company logo by PATCHing the returned `assetId` into `logoAssetId`.
## Archive Company
```
@@ -81,8 +58,6 @@ Archives a company. Archived companies are hidden from default listings.
| `name` | string | Company name |
| `description` | string | Company description |
| `status` | string | `active`, `paused`, `archived` |
| `logoAssetId` | string | Optional asset id for the stored logo image |
| `logoUrl` | string | Optional Paperclip asset content path for the stored logo image |
| `budgetMonthlyCents` | number | Monthly budget limit |
| `createdAt` | string | ISO timestamp |
| `updatedAt` | string | ISO timestamp |

View File

@@ -18,22 +18,23 @@
"db:backup": "./scripts/backup-db.sh",
"paperclipai": "node cli/node_modules/tsx/dist/cli.mjs cli/src/index.ts",
"build:npm": "./scripts/build-npm.sh",
"release:start": "./scripts/release-start.sh",
"release": "./scripts/release.sh",
"release:canary": "./scripts/release.sh canary",
"release:stable": "./scripts/release.sh stable",
"release:preflight": "./scripts/release-preflight.sh",
"release:github": "./scripts/create-github-release.sh",
"release:rollback": "./scripts/rollback-latest.sh",
"changeset": "changeset",
"version-packages": "changeset version",
"check:tokens": "node scripts/check-forbidden-tokens.mjs",
"docs:dev": "cd docs && npx mintlify dev",
"smoke:openclaw-join": "./scripts/smoke/openclaw-join.sh",
"smoke:openclaw-docker-ui": "./scripts/smoke/openclaw-docker-ui.sh",
"smoke:openclaw-sse-standalone": "./scripts/smoke/openclaw-sse-standalone.sh",
"test:e2e": "npx playwright test --config tests/e2e/playwright.config.ts",
"test:e2e:headed": "npx playwright test --config tests/e2e/playwright.config.ts --headed",
"test:release-smoke": "npx playwright test --config tests/release-smoke/playwright.config.ts",
"test:release-smoke:headed": "npx playwright test --config tests/release-smoke/playwright.config.ts --headed"
"test:e2e:headed": "npx playwright test --config tests/e2e/playwright.config.ts --headed"
},
"devDependencies": {
"@changesets/cli": "^2.30.0",
"cross-env": "^10.1.0",
"@playwright/test": "^1.58.2",
"esbuild": "^0.27.3",

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/adapter-utils",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/adapter-utils"
},
"type": "module",
"exports": {
".": "./src/index.ts",

View File

@@ -24,20 +24,6 @@ export type {
CLIAdapterModule,
CreateConfigValues,
} from "./types.js";
export type {
SessionCompactionPolicy,
NativeContextManagement,
AdapterSessionManagement,
ResolvedSessionCompactionPolicy,
} from "./session-compaction.js";
export {
ADAPTER_SESSION_MANAGEMENT,
LEGACY_SESSIONED_ADAPTER_TYPES,
getAdapterSessionManagement,
readSessionCompactionOverride,
resolveSessionCompactionPolicy,
hasSessionCompactionThresholds,
} from "./session-compaction.js";
export {
REDACTED_HOME_PATH_USER,
redactHomePathUserSegments,

View File

@@ -1,175 +0,0 @@
export interface SessionCompactionPolicy {
enabled: boolean;
maxSessionRuns: number;
maxRawInputTokens: number;
maxSessionAgeHours: number;
}
export type NativeContextManagement = "confirmed" | "likely" | "unknown" | "none";
export interface AdapterSessionManagement {
supportsSessionResume: boolean;
nativeContextManagement: NativeContextManagement;
defaultSessionCompaction: SessionCompactionPolicy;
}
export interface ResolvedSessionCompactionPolicy {
policy: SessionCompactionPolicy;
adapterSessionManagement: AdapterSessionManagement | null;
explicitOverride: Partial<SessionCompactionPolicy>;
source: "adapter_default" | "agent_override" | "legacy_fallback";
}
const DEFAULT_SESSION_COMPACTION_POLICY: SessionCompactionPolicy = {
enabled: true,
maxSessionRuns: 200,
maxRawInputTokens: 2_000_000,
maxSessionAgeHours: 72,
};
// Adapters with native context management still participate in session resume,
// but Paperclip should not rotate them using threshold-based compaction.
const ADAPTER_MANAGED_SESSION_POLICY: SessionCompactionPolicy = {
enabled: true,
maxSessionRuns: 0,
maxRawInputTokens: 0,
maxSessionAgeHours: 0,
};
export const LEGACY_SESSIONED_ADAPTER_TYPES = new Set([
"claude_local",
"codex_local",
"cursor",
"gemini_local",
"opencode_local",
"pi_local",
]);
export const ADAPTER_SESSION_MANAGEMENT: Record<string, AdapterSessionManagement> = {
claude_local: {
supportsSessionResume: true,
nativeContextManagement: "confirmed",
defaultSessionCompaction: ADAPTER_MANAGED_SESSION_POLICY,
},
codex_local: {
supportsSessionResume: true,
nativeContextManagement: "confirmed",
defaultSessionCompaction: ADAPTER_MANAGED_SESSION_POLICY,
},
cursor: {
supportsSessionResume: true,
nativeContextManagement: "unknown",
defaultSessionCompaction: DEFAULT_SESSION_COMPACTION_POLICY,
},
gemini_local: {
supportsSessionResume: true,
nativeContextManagement: "unknown",
defaultSessionCompaction: DEFAULT_SESSION_COMPACTION_POLICY,
},
opencode_local: {
supportsSessionResume: true,
nativeContextManagement: "unknown",
defaultSessionCompaction: DEFAULT_SESSION_COMPACTION_POLICY,
},
pi_local: {
supportsSessionResume: true,
nativeContextManagement: "unknown",
defaultSessionCompaction: DEFAULT_SESSION_COMPACTION_POLICY,
},
};
function isRecord(value: unknown): value is Record<string, unknown> {
return typeof value === "object" && value !== null && !Array.isArray(value);
}
function readBoolean(value: unknown): boolean | undefined {
if (typeof value === "boolean") return value;
if (typeof value === "number") {
if (value === 1) return true;
if (value === 0) return false;
return undefined;
}
if (typeof value !== "string") return undefined;
const normalized = value.trim().toLowerCase();
if (normalized === "true" || normalized === "1" || normalized === "yes" || normalized === "on") {
return true;
}
if (normalized === "false" || normalized === "0" || normalized === "no" || normalized === "off") {
return false;
}
return undefined;
}
function readNumber(value: unknown): number | undefined {
if (typeof value === "number" && Number.isFinite(value)) {
return Math.max(0, Math.floor(value));
}
if (typeof value !== "string") return undefined;
const parsed = Number(value.trim());
return Number.isFinite(parsed) ? Math.max(0, Math.floor(parsed)) : undefined;
}
export function getAdapterSessionManagement(adapterType: string | null | undefined): AdapterSessionManagement | null {
if (!adapterType) return null;
return ADAPTER_SESSION_MANAGEMENT[adapterType] ?? null;
}
export function readSessionCompactionOverride(runtimeConfig: unknown): Partial<SessionCompactionPolicy> {
const runtime = isRecord(runtimeConfig) ? runtimeConfig : {};
const heartbeat = isRecord(runtime.heartbeat) ? runtime.heartbeat : {};
const compaction = isRecord(
heartbeat.sessionCompaction ?? heartbeat.sessionRotation ?? runtime.sessionCompaction,
)
? (heartbeat.sessionCompaction ?? heartbeat.sessionRotation ?? runtime.sessionCompaction) as Record<string, unknown>
: {};
const explicit: Partial<SessionCompactionPolicy> = {};
const enabled = readBoolean(compaction.enabled);
const maxSessionRuns = readNumber(compaction.maxSessionRuns);
const maxRawInputTokens = readNumber(compaction.maxRawInputTokens);
const maxSessionAgeHours = readNumber(compaction.maxSessionAgeHours);
if (enabled !== undefined) explicit.enabled = enabled;
if (maxSessionRuns !== undefined) explicit.maxSessionRuns = maxSessionRuns;
if (maxRawInputTokens !== undefined) explicit.maxRawInputTokens = maxRawInputTokens;
if (maxSessionAgeHours !== undefined) explicit.maxSessionAgeHours = maxSessionAgeHours;
return explicit;
}
export function resolveSessionCompactionPolicy(
adapterType: string | null | undefined,
runtimeConfig: unknown,
): ResolvedSessionCompactionPolicy {
const adapterSessionManagement = getAdapterSessionManagement(adapterType);
const explicitOverride = readSessionCompactionOverride(runtimeConfig);
const hasExplicitOverride = Object.keys(explicitOverride).length > 0;
const fallbackEnabled = Boolean(adapterType && LEGACY_SESSIONED_ADAPTER_TYPES.has(adapterType));
const basePolicy = adapterSessionManagement?.defaultSessionCompaction ?? {
...DEFAULT_SESSION_COMPACTION_POLICY,
enabled: fallbackEnabled,
};
return {
policy: {
enabled: explicitOverride.enabled ?? basePolicy.enabled,
maxSessionRuns: explicitOverride.maxSessionRuns ?? basePolicy.maxSessionRuns,
maxRawInputTokens: explicitOverride.maxRawInputTokens ?? basePolicy.maxRawInputTokens,
maxSessionAgeHours: explicitOverride.maxSessionAgeHours ?? basePolicy.maxSessionAgeHours,
},
adapterSessionManagement,
explicitOverride,
source: hasExplicitOverride
? "agent_override"
: adapterSessionManagement
? "adapter_default"
: "legacy_fallback",
};
}
export function hasSessionCompactionThresholds(policy: Pick<
SessionCompactionPolicy,
"maxSessionRuns" | "maxRawInputTokens" | "maxSessionAgeHours"
>) {
return policy.maxSessionRuns > 0 || policy.maxRawInputTokens > 0 || policy.maxSessionAgeHours > 0;
}

View File

@@ -216,7 +216,6 @@ export interface ServerAdapterModule {
execute(ctx: AdapterExecutionContext): Promise<AdapterExecutionResult>;
testEnvironment(ctx: AdapterEnvironmentTestContext): Promise<AdapterEnvironmentTestResult>;
sessionCodec?: AdapterSessionCodec;
sessionManagement?: import("./session-compaction.js").AdapterSessionManagement;
supportsLocalAgentJwt?: boolean;
models?: AdapterModel[];
listModels?: () => Promise<AdapterModel[]>;

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/adapter-claude-local",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/adapters/claude-local"
},
"type": "module",
"exports": {
".": "./src/index.ts",

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/adapter-codex-local",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/adapters/codex-local"
},
"type": "module",
"exports": {
".": "./src/index.ts",

View File

@@ -94,7 +94,7 @@ export async function prepareWorktreeCodexHome(
}
await onLog(
"stdout",
"stderr",
`[paperclip] Using worktree-isolated Codex home "${targetHome}" (seeded from "${sourceHome}").\n`,
);
return targetHome;

View File

@@ -116,7 +116,7 @@ export async function ensureCodexSkillsInjected(
);
for (const skillName of removedSkills) {
await onLog(
"stdout",
"stderr",
`[paperclip] Removed maintainer-only Codex skill "${skillName}" from ${skillsHome}\n`,
);
}
@@ -143,7 +143,7 @@ export async function ensureCodexSkillsInjected(
await fs.symlink(entry.source, target);
}
await onLog(
"stdout",
"stderr",
`[paperclip] Repaired Codex skill "${entry.name}" into ${skillsHome}\n`,
);
continue;
@@ -154,7 +154,7 @@ export async function ensureCodexSkillsInjected(
if (result === "skipped") continue;
await onLog(
"stdout",
"stderr",
`[paperclip] ${result === "repaired" ? "Repaired" : "Injected"} Codex skill "${entry.name}" into ${skillsHome}\n`,
);
} catch (err) {
@@ -364,7 +364,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
`Resolve any relative file references from ${instructionsDir}.\n\n`;
instructionsChars = instructionsPrefix.length;
await onLog(
"stdout",
"stderr",
`[paperclip] Loaded agent instructions file: ${instructionsFilePath}\n`,
);
} catch (err) {

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/adapter-cursor-local",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/adapters/cursor-local"
},
"type": "module",
"exports": {
".": "./src/index.ts",

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/adapter-gemini-local",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/adapters/gemini-local"
},
"type": "module",
"exports": {
".": "./src/index.ts",

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/adapter-openclaw-gateway",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/adapters/openclaw-gateway"
},
"type": "module",
"exports": {
".": "./src/index.ts",

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/adapter-opencode-local",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/adapters/opencode-local"
},
"type": "module",
"exports": {
".": "./src/index.ts",

View File

@@ -1,5 +1,4 @@
import { createHash } from "node:crypto";
import os from "node:os";
import type { AdapterModel } from "@paperclipai/adapter-utils";
import {
asString,
@@ -21,7 +20,7 @@ function resolveOpenCodeCommand(input: unknown): string {
const discoveryCache = new Map<string, { expiresAt: number; models: AdapterModel[] }>();
const VOLATILE_ENV_KEY_PREFIXES = ["PAPERCLIP_", "npm_", "NPM_"] as const;
const VOLATILE_ENV_KEY_EXACT = new Set(["PWD", "OLDPWD", "SHLVL", "_", "TERM_SESSION_ID", "HOME"]);
const VOLATILE_ENV_KEY_EXACT = new Set(["PWD", "OLDPWD", "SHLVL", "_", "TERM_SESSION_ID"]);
function dedupeModels(models: AdapterModel[]): AdapterModel[] {
const seen = new Set<string>();
@@ -108,19 +107,7 @@ export async function discoverOpenCodeModels(input: {
const command = resolveOpenCodeCommand(input.command);
const cwd = asString(input.cwd, process.cwd());
const env = normalizeEnv(input.env);
// Ensure HOME points to the actual running user's home directory.
// When the server is started via `runuser -u <user>`, HOME may still
// reflect the parent process (e.g. /root), causing OpenCode to miss
// provider auth credentials stored under the target user's home.
let resolvedHome: string | undefined;
try {
resolvedHome = os.userInfo().homedir || undefined;
} catch {
// os.userInfo() throws a SystemError when the current UID has no
// /etc/passwd entry (e.g. `docker run --user 1234` with a minimal
// image). Fall back to process.env.HOME.
}
const runtimeEnv = normalizeEnv(ensurePathInEnv({ ...process.env, ...env, ...(resolvedHome ? { HOME: resolvedHome } : {}) }));
const runtimeEnv = normalizeEnv(ensurePathInEnv({ ...process.env, ...env }));
const result = await runChildProcess(
`opencode-models-${Date.now()}-${Math.random().toString(16).slice(2)}`,
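
The volatile-key lists above exist so cached discovery results are keyed only by inputs that meaningfully change the outcome; a hedged sketch of how such a cache key could be derived (the actual key derivation sits outside this hunk, so `discoveryCacheKey` here is hypothetical):

```ts
import { createHash } from "node:crypto";

const VOLATILE_ENV_KEY_PREFIXES = ["PAPERCLIP_", "npm_", "NPM_"] as const;
const VOLATILE_ENV_KEY_EXACT = new Set(["PWD", "OLDPWD", "SHLVL", "_", "TERM_SESSION_ID"]);

// Hypothetical cache-key helper: drop env entries that change between
// invocations so identical configurations hash to the same key.
function discoveryCacheKey(command: string, env: Record<string, string>): string {
  const stable = Object.entries(env)
    .filter(([key]) =>
      !VOLATILE_ENV_KEY_EXACT.has(key) &&
      !VOLATILE_ENV_KEY_PREFIXES.some((prefix) => key.startsWith(prefix)),
    )
    .sort(([a], [b]) => a.localeCompare(b));
  return createHash("sha256")
    .update(JSON.stringify([command, stable]))
    .digest("hex");
}
```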

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/adapter-pi-local",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/adapters/pi-local"
},
"type": "module",
"exports": {
".": "./src/index.ts",

View File

@@ -1,16 +1,6 @@
{
"name": "@paperclipai/db",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/db"
},
"type": "module",
"exports": {
".": "./src/index.ts",
@@ -43,9 +33,9 @@
"seed": "tsx src/seed.ts"
},
"dependencies": {
"embedded-postgres": "^18.1.0-beta.16",
"@paperclipai/shared": "workspace:*",
"drizzle-orm": "^0.38.4",
"embedded-postgres": "^18.1.0-beta.16",
"postgres": "^3.4.5"
},
"devDependencies": {

View File

@@ -1,157 +0,0 @@
import { createHash } from "node:crypto";
import fs from "node:fs";
import net from "node:net";
import os from "node:os";
import path from "node:path";
import { afterEach, describe, expect, it } from "vitest";
import postgres from "postgres";
import {
applyPendingMigrations,
ensurePostgresDatabase,
inspectMigrations,
} from "./client.js";
type EmbeddedPostgresInstance = {
initialise(): Promise<void>;
start(): Promise<void>;
stop(): Promise<void>;
};
type EmbeddedPostgresCtor = new (opts: {
databaseDir: string;
user: string;
password: string;
port: number;
persistent: boolean;
initdbFlags?: string[];
onLog?: (message: unknown) => void;
onError?: (message: unknown) => void;
}) => EmbeddedPostgresInstance;
const tempPaths: string[] = [];
const runningInstances: EmbeddedPostgresInstance[] = [];
async function getEmbeddedPostgresCtor(): Promise<EmbeddedPostgresCtor> {
const mod = await import("embedded-postgres");
return mod.default as EmbeddedPostgresCtor;
}
async function getAvailablePort(): Promise<number> {
return await new Promise((resolve, reject) => {
const server = net.createServer();
server.unref();
server.on("error", reject);
server.listen(0, "127.0.0.1", () => {
const address = server.address();
if (!address || typeof address === "string") {
server.close(() => reject(new Error("Failed to allocate test port")));
return;
}
const { port } = address;
server.close((error) => {
if (error) reject(error);
else resolve(port);
});
});
});
}
async function createTempDatabase(): Promise<string> {
const dataDir = fs.mkdtempSync(path.join(os.tmpdir(), "paperclip-db-client-"));
tempPaths.push(dataDir);
const port = await getAvailablePort();
const EmbeddedPostgres = await getEmbeddedPostgresCtor();
const instance = new EmbeddedPostgres({
databaseDir: dataDir,
user: "paperclip",
password: "paperclip",
port,
persistent: true,
initdbFlags: ["--encoding=UTF8", "--locale=C"],
onLog: () => {},
onError: () => {},
});
await instance.initialise();
await instance.start();
runningInstances.push(instance);
const adminUrl = `postgres://paperclip:paperclip@127.0.0.1:${port}/postgres`;
await ensurePostgresDatabase(adminUrl, "paperclip");
return `postgres://paperclip:paperclip@127.0.0.1:${port}/paperclip`;
}
async function migrationHash(migrationFile: string): Promise<string> {
const content = await fs.promises.readFile(
new URL(`./migrations/${migrationFile}`, import.meta.url),
"utf8",
);
return createHash("sha256").update(content).digest("hex");
}
afterEach(async () => {
while (runningInstances.length > 0) {
const instance = runningInstances.pop();
if (!instance) continue;
await instance.stop();
}
while (tempPaths.length > 0) {
const tempPath = tempPaths.pop();
if (!tempPath) continue;
fs.rmSync(tempPath, { recursive: true, force: true });
}
});
describe("applyPendingMigrations", () => {
it(
"applies an inserted earlier migration without replaying later legacy migrations",
async () => {
const connectionString = await createTempDatabase();
await applyPendingMigrations(connectionString);
const sql = postgres(connectionString, { max: 1, onnotice: () => {} });
try {
const richMagnetoHash = await migrationHash("0030_rich_magneto.sql");
await sql.unsafe(
`DELETE FROM "drizzle"."__drizzle_migrations" WHERE hash = '${richMagnetoHash}'`,
);
await sql.unsafe(`DROP TABLE "company_logos"`);
} finally {
await sql.end();
}
const pendingState = await inspectMigrations(connectionString);
expect(pendingState).toMatchObject({
status: "needsMigrations",
pendingMigrations: ["0030_rich_magneto.sql"],
reason: "pending-migrations",
});
await applyPendingMigrations(connectionString);
const finalState = await inspectMigrations(connectionString);
expect(finalState.status).toBe("upToDate");
const verifySql = postgres(connectionString, { max: 1, onnotice: () => {} });
try {
const rows = await verifySql.unsafe<{ table_name: string }[]>(
`
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name IN ('company_logos', 'execution_workspaces')
ORDER BY table_name
`,
);
expect(rows.map((row) => row.table_name)).toEqual([
"company_logos",
"execution_workspaces",
]);
} finally {
await verifySql.end();
}
},
20_000,
);
});

View File

@@ -50,21 +50,6 @@ export function createDb(url: string) {
return drizzlePg(sql, { schema });
}
export async function getPostgresDataDirectory(url: string): Promise<string | null> {
const sql = createUtilitySql(url);
try {
const rows = await sql<{ data_directory: string | null }[]>`
SELECT current_setting('data_directory', true) AS data_directory
`;
const actual = rows[0]?.data_directory;
return typeof actual === "string" && actual.length > 0 ? actual : null;
} catch {
return null;
} finally {
await sql.end();
}
}
async function listMigrationFiles(): Promise<string[]> {
const entries = await readdir(MIGRATIONS_FOLDER, { withFileTypes: true });
return entries
@@ -661,26 +646,13 @@ export async function applyPendingMigrations(url: string): Promise<void> {
const initialState = await inspectMigrations(url);
if (initialState.status === "upToDate") return;
if (initialState.reason === "no-migration-journal-empty-db") {
const sql = createUtilitySql(url);
try {
const db = drizzlePg(sql);
await migratePg(db, { migrationsFolder: MIGRATIONS_FOLDER });
} finally {
await sql.end();
}
const sql = createUtilitySql(url);
const bootstrappedState = await inspectMigrations(url);
if (bootstrappedState.status === "upToDate") return;
throw new Error(
`Failed to bootstrap migrations: ${bootstrappedState.pendingMigrations.join(", ")}`,
);
}
if (initialState.reason === "no-migration-journal-non-empty-db") {
throw new Error(
"Database has tables but no migration journal; automatic migration is unsafe. Initialize migration history manually.",
);
try {
const db = drizzlePg(sql);
await migratePg(db, { migrationsFolder: MIGRATIONS_FOLDER });
} finally {
await sql.end();
}
let state = await inspectMigrations(url);
@@ -693,7 +665,7 @@ export async function applyPendingMigrations(url: string): Promise<void> {
}
if (state.status !== "needsMigrations" || state.reason !== "pending-migrations") {
throw new Error("Migrations are still pending after migration-history reconciliation; run inspectMigrations for details.");
throw new Error("Migrations are still pending after attempted apply; run inspectMigrations for details.");
}
await applyPendingMigrationsManually(url, state.pendingMigrations);
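
A minimal sketch of the intended call pattern at startup, using the `@paperclipai/db` exports shown in these hunks (the error handling is illustrative):

```ts
import { applyPendingMigrations, inspectMigrations } from "@paperclipai/db";

async function migrateOnBoot(connectionString: string): Promise<void> {
  // Bootstraps an empty database or applies any pending migration files.
  await applyPendingMigrations(connectionString);
  const state = await inspectMigrations(connectionString);
  if (state.status !== "upToDate") {
    // Fail loudly rather than serving requests against a stale schema.
    throw new Error(`Migrations still pending: ${state.pendingMigrations.join(", ")}`);
  }
}
```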

View File

@@ -1,6 +1,5 @@
export {
createDb,
getPostgresDataDirectory,
ensurePostgresDatabase,
inspectMigrations,
applyPendingMigrations,

View File

@@ -1,7 +1,9 @@
import { existsSync, readFileSync, rmSync } from "node:fs";
import { createRequire } from "node:module";
import { createServer } from "node:net";
import path from "node:path";
import { ensurePostgresDatabase, getPostgresDataDirectory } from "./client.js";
import { fileURLToPath, pathToFileURL } from "node:url";
import { ensurePostgresDatabase } from "./client.js";
import { resolveDatabaseTarget } from "./runtime-config.js";
type EmbeddedPostgresInstance = {
@@ -88,8 +90,17 @@ async function findAvailablePort(startPort: number): Promise<number> {
}
async function loadEmbeddedPostgresCtor(): Promise<EmbeddedPostgresCtor> {
const require = createRequire(import.meta.url);
const resolveCandidates = [
path.resolve(fileURLToPath(new URL("../..", import.meta.url))),
path.resolve(fileURLToPath(new URL("../../server", import.meta.url))),
path.resolve(fileURLToPath(new URL("../../cli", import.meta.url))),
process.cwd(),
];
try {
const mod = await import("embedded-postgres");
const resolvedModulePath = require.resolve("embedded-postgres", { paths: resolveCandidates });
const mod = await import(pathToFileURL(resolvedModulePath).href);
return mod.default as EmbeddedPostgresCtor;
} catch {
throw new Error(
@@ -105,33 +116,8 @@ async function ensureEmbeddedPostgresConnection(
const EmbeddedPostgres = await loadEmbeddedPostgresCtor();
const selectedPort = await findAvailablePort(preferredPort);
const postmasterPidFile = path.resolve(dataDir, "postmaster.pid");
const pgVersionFile = path.resolve(dataDir, "PG_VERSION");
const runningPid = readRunningPostmasterPid(postmasterPidFile);
const runningPort = readPidFilePort(postmasterPidFile);
const preferredAdminConnectionString = `postgres://paperclip:paperclip@127.0.0.1:${preferredPort}/postgres`;
if (!runningPid && existsSync(pgVersionFile)) {
try {
const actualDataDir = await getPostgresDataDirectory(preferredAdminConnectionString);
const matchesDataDir =
typeof actualDataDir === "string" &&
path.resolve(actualDataDir) === path.resolve(dataDir);
if (!matchesDataDir) {
throw new Error("reachable postgres does not use the expected embedded data directory");
}
await ensurePostgresDatabase(preferredAdminConnectionString, "paperclip");
process.emitWarning(
`Adopting an existing PostgreSQL instance on port ${preferredPort} for embedded data dir ${dataDir} because postmaster.pid is missing.`,
);
return {
connectionString: `postgres://paperclip:paperclip@127.0.0.1:${preferredPort}/paperclip`,
source: `embedded-postgres@${preferredPort}`,
stop: async () => {},
};
} catch {
// Fall through and attempt to start the configured embedded cluster.
}
}
if (runningPid) {
const port = runningPort ?? preferredPort;
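
`readRunningPostmasterPid` and `readPidFilePort` are referenced above but defined outside this hunk; a hedged sketch of the pid-file parsing, assuming PostgreSQL's documented postmaster.pid layout (postmaster PID on line 1, port on line 4):

```ts
import { existsSync, readFileSync } from "node:fs";

// Hypothetical helper: returns the port recorded in postmaster.pid,
// or null when the file is absent, unreadable, or malformed.
function readPidFilePort(pidFile: string): number | null {
  if (!existsSync(pidFile)) return null;
  try {
    const lines = readFileSync(pidFile, "utf8").split("\n");
    const port = Number.parseInt(lines[3] ?? "", 10); // line 4 holds the port
    return Number.isInteger(port) && port > 0 ? port : null;
  } catch {
    return null;
  }
}
```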

View File

@@ -1,12 +0,0 @@
CREATE TABLE "company_logos" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"company_id" uuid NOT NULL,
"asset_id" uuid NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
ALTER TABLE "company_logos" ADD CONSTRAINT "company_logos_company_id_companies_id_fk" FOREIGN KEY ("company_id") REFERENCES "public"."companies"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "company_logos" ADD CONSTRAINT "company_logos_asset_id_assets_id_fk" FOREIGN KEY ("asset_id") REFERENCES "public"."assets"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
CREATE UNIQUE INDEX "company_logos_company_uq" ON "company_logos" USING btree ("company_id");--> statement-breakpoint
CREATE UNIQUE INDEX "company_logos_asset_uq" ON "company_logos" USING btree ("asset_id");

View File

@@ -1,2 +0,0 @@
ALTER TABLE "companies" ADD COLUMN "pause_reason" text;--> statement-breakpoint
ALTER TABLE "companies" ADD COLUMN "paused_at" timestamp with time zone;

View File

@@ -1,2 +0,0 @@
DROP INDEX "budget_incidents_policy_window_threshold_idx";--> statement-breakpoint
CREATE UNIQUE INDEX "budget_incidents_policy_window_threshold_idx" ON "budget_incidents" USING btree ("policy_id","window_start","threshold_type") WHERE "budget_incidents"."status" <> 'dismissed';

View File

@@ -1,91 +0,0 @@
CREATE TABLE "execution_workspaces" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"company_id" uuid NOT NULL,
"project_id" uuid NOT NULL,
"project_workspace_id" uuid,
"source_issue_id" uuid,
"mode" text NOT NULL,
"strategy_type" text NOT NULL,
"name" text NOT NULL,
"status" text DEFAULT 'active' NOT NULL,
"cwd" text,
"repo_url" text,
"base_ref" text,
"branch_name" text,
"provider_type" text DEFAULT 'local_fs' NOT NULL,
"provider_ref" text,
"derived_from_execution_workspace_id" uuid,
"last_used_at" timestamp with time zone DEFAULT now() NOT NULL,
"opened_at" timestamp with time zone DEFAULT now() NOT NULL,
"closed_at" timestamp with time zone,
"cleanup_eligible_at" timestamp with time zone,
"cleanup_reason" text,
"metadata" jsonb,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
CREATE TABLE "issue_work_products" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"company_id" uuid NOT NULL,
"project_id" uuid,
"issue_id" uuid NOT NULL,
"execution_workspace_id" uuid,
"runtime_service_id" uuid,
"type" text NOT NULL,
"provider" text NOT NULL,
"external_id" text,
"title" text NOT NULL,
"url" text,
"status" text NOT NULL,
"review_state" text DEFAULT 'none' NOT NULL,
"is_primary" boolean DEFAULT false NOT NULL,
"health_status" text DEFAULT 'unknown' NOT NULL,
"summary" text,
"metadata" jsonb,
"created_by_run_id" uuid,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
ALTER TABLE "issues" ADD COLUMN "project_workspace_id" uuid;--> statement-breakpoint
ALTER TABLE "issues" ADD COLUMN "execution_workspace_id" uuid;--> statement-breakpoint
ALTER TABLE "issues" ADD COLUMN "execution_workspace_preference" text;--> statement-breakpoint
ALTER TABLE "project_workspaces" ADD COLUMN "source_type" text DEFAULT 'local_path' NOT NULL;--> statement-breakpoint
ALTER TABLE "project_workspaces" ADD COLUMN "default_ref" text;--> statement-breakpoint
ALTER TABLE "project_workspaces" ADD COLUMN "visibility" text DEFAULT 'default' NOT NULL;--> statement-breakpoint
ALTER TABLE "project_workspaces" ADD COLUMN "setup_command" text;--> statement-breakpoint
ALTER TABLE "project_workspaces" ADD COLUMN "cleanup_command" text;--> statement-breakpoint
ALTER TABLE "project_workspaces" ADD COLUMN "remote_provider" text;--> statement-breakpoint
ALTER TABLE "project_workspaces" ADD COLUMN "remote_workspace_ref" text;--> statement-breakpoint
ALTER TABLE "project_workspaces" ADD COLUMN "shared_workspace_key" text;--> statement-breakpoint
ALTER TABLE "workspace_runtime_services" ADD COLUMN "execution_workspace_id" uuid;--> statement-breakpoint
ALTER TABLE "execution_workspaces" ADD CONSTRAINT "execution_workspaces_company_id_companies_id_fk" FOREIGN KEY ("company_id") REFERENCES "public"."companies"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "execution_workspaces" ADD CONSTRAINT "execution_workspaces_project_id_projects_id_fk" FOREIGN KEY ("project_id") REFERENCES "public"."projects"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "execution_workspaces" ADD CONSTRAINT "execution_workspaces_project_workspace_id_project_workspaces_id_fk" FOREIGN KEY ("project_workspace_id") REFERENCES "public"."project_workspaces"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "execution_workspaces" ADD CONSTRAINT "execution_workspaces_source_issue_id_issues_id_fk" FOREIGN KEY ("source_issue_id") REFERENCES "public"."issues"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "execution_workspaces" ADD CONSTRAINT "execution_workspaces_derived_from_execution_workspace_id_execution_workspaces_id_fk" FOREIGN KEY ("derived_from_execution_workspace_id") REFERENCES "public"."execution_workspaces"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "issue_work_products" ADD CONSTRAINT "issue_work_products_company_id_companies_id_fk" FOREIGN KEY ("company_id") REFERENCES "public"."companies"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "issue_work_products" ADD CONSTRAINT "issue_work_products_project_id_projects_id_fk" FOREIGN KEY ("project_id") REFERENCES "public"."projects"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "issue_work_products" ADD CONSTRAINT "issue_work_products_issue_id_issues_id_fk" FOREIGN KEY ("issue_id") REFERENCES "public"."issues"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "issue_work_products" ADD CONSTRAINT "issue_work_products_execution_workspace_id_execution_workspaces_id_fk" FOREIGN KEY ("execution_workspace_id") REFERENCES "public"."execution_workspaces"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "issue_work_products" ADD CONSTRAINT "issue_work_products_runtime_service_id_workspace_runtime_services_id_fk" FOREIGN KEY ("runtime_service_id") REFERENCES "public"."workspace_runtime_services"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "issue_work_products" ADD CONSTRAINT "issue_work_products_created_by_run_id_heartbeat_runs_id_fk" FOREIGN KEY ("created_by_run_id") REFERENCES "public"."heartbeat_runs"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "execution_workspaces_company_project_status_idx" ON "execution_workspaces" USING btree ("company_id","project_id","status");--> statement-breakpoint
CREATE INDEX "execution_workspaces_company_project_workspace_status_idx" ON "execution_workspaces" USING btree ("company_id","project_workspace_id","status");--> statement-breakpoint
CREATE INDEX "execution_workspaces_company_source_issue_idx" ON "execution_workspaces" USING btree ("company_id","source_issue_id");--> statement-breakpoint
CREATE INDEX "execution_workspaces_company_last_used_idx" ON "execution_workspaces" USING btree ("company_id","last_used_at");--> statement-breakpoint
CREATE INDEX "execution_workspaces_company_branch_idx" ON "execution_workspaces" USING btree ("company_id","branch_name");--> statement-breakpoint
CREATE INDEX "issue_work_products_company_issue_type_idx" ON "issue_work_products" USING btree ("company_id","issue_id","type");--> statement-breakpoint
CREATE INDEX "issue_work_products_company_execution_workspace_type_idx" ON "issue_work_products" USING btree ("company_id","execution_workspace_id","type");--> statement-breakpoint
CREATE INDEX "issue_work_products_company_provider_external_id_idx" ON "issue_work_products" USING btree ("company_id","provider","external_id");--> statement-breakpoint
CREATE INDEX "issue_work_products_company_updated_idx" ON "issue_work_products" USING btree ("company_id","updated_at");--> statement-breakpoint
ALTER TABLE "issues" ADD CONSTRAINT "issues_project_workspace_id_project_workspaces_id_fk" FOREIGN KEY ("project_workspace_id") REFERENCES "public"."project_workspaces"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "issues" ADD CONSTRAINT "issues_execution_workspace_id_execution_workspaces_id_fk" FOREIGN KEY ("execution_workspace_id") REFERENCES "public"."execution_workspaces"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "workspace_runtime_services" ADD CONSTRAINT "workspace_runtime_services_execution_workspace_id_execution_workspaces_id_fk" FOREIGN KEY ("execution_workspace_id") REFERENCES "public"."execution_workspaces"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "issues_company_project_workspace_idx" ON "issues" USING btree ("company_id","project_workspace_id");--> statement-breakpoint
CREATE INDEX "issues_company_execution_workspace_idx" ON "issues" USING btree ("company_id","execution_workspace_id");--> statement-breakpoint
CREATE INDEX "project_workspaces_project_source_type_idx" ON "project_workspaces" USING btree ("project_id","source_type");--> statement-breakpoint
CREATE INDEX "project_workspaces_company_shared_key_idx" ON "project_workspaces" USING btree ("company_id","shared_workspace_key");--> statement-breakpoint
CREATE UNIQUE INDEX "project_workspaces_project_remote_ref_idx" ON "project_workspaces" USING btree ("project_id","remote_provider","remote_workspace_ref");--> statement-breakpoint
CREATE INDEX "workspace_runtime_services_company_execution_workspace_status_idx" ON "workspace_runtime_services" USING btree ("company_id","execution_workspace_id","status");

View File

@@ -1,9 +0,0 @@
CREATE TABLE "instance_settings" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"singleton_key" text DEFAULT 'default' NOT NULL,
"experimental" jsonb DEFAULT '{}'::jsonb NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
CREATE UNIQUE INDEX "instance_settings_singleton_key_idx" ON "instance_settings" USING btree ("singleton_key");

View File

@@ -1,29 +0,0 @@
CREATE TABLE "workspace_operations" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"company_id" uuid NOT NULL,
"execution_workspace_id" uuid,
"heartbeat_run_id" uuid,
"phase" text NOT NULL,
"command" text,
"cwd" text,
"status" text DEFAULT 'running' NOT NULL,
"exit_code" integer,
"log_store" text,
"log_ref" text,
"log_bytes" bigint,
"log_sha256" text,
"log_compressed" boolean DEFAULT false NOT NULL,
"stdout_excerpt" text,
"stderr_excerpt" text,
"metadata" jsonb,
"started_at" timestamp with time zone DEFAULT now() NOT NULL,
"finished_at" timestamp with time zone,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
ALTER TABLE "workspace_operations" ADD CONSTRAINT "workspace_operations_company_id_companies_id_fk" FOREIGN KEY ("company_id") REFERENCES "public"."companies"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "workspace_operations" ADD CONSTRAINT "workspace_operations_execution_workspace_id_execution_workspaces_id_fk" FOREIGN KEY ("execution_workspace_id") REFERENCES "public"."execution_workspaces"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "workspace_operations" ADD CONSTRAINT "workspace_operations_heartbeat_run_id_heartbeat_runs_id_fk" FOREIGN KEY ("heartbeat_run_id") REFERENCES "public"."heartbeat_runs"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "workspace_operations_company_run_started_idx" ON "workspace_operations" USING btree ("company_id","heartbeat_run_id","started_at");--> statement-breakpoint
CREATE INDEX "workspace_operations_company_workspace_started_idx" ON "workspace_operations" USING btree ("company_id","execution_workspace_id","started_at");

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"id": "67faba5f-0106-4163-81f6-cd0d90488574",
"prevId": "ff007d90-e1a0-4df3-beab-a5be4a47273c",
"id": "fd2770d1-d831-4d2a-989b-ad4bf92e575e",
"prevId": "67faba5f-0106-4163-81f6-cd0d90488574",
"version": "7",
"dialect": "postgresql",
"tables": {
@@ -1193,6 +1193,18 @@
"notNull": true,
"default": 0
},
"pause_reason": {
"name": "pause_reason",
"type": "text",
"primaryKey": false,
"notNull": false
},
"paused_at": {
"name": "paused_at",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": false
},
"permissions": {
"name": "permissions",
"type": "jsonb",
@@ -2069,6 +2081,473 @@
"checkConstraints": {},
"isRLSEnabled": false
},
"public.budget_incidents": {
"name": "budget_incidents",
"schema": "",
"columns": {
"id": {
"name": "id",
"type": "uuid",
"primaryKey": true,
"notNull": true,
"default": "gen_random_uuid()"
},
"company_id": {
"name": "company_id",
"type": "uuid",
"primaryKey": false,
"notNull": true
},
"policy_id": {
"name": "policy_id",
"type": "uuid",
"primaryKey": false,
"notNull": true
},
"scope_type": {
"name": "scope_type",
"type": "text",
"primaryKey": false,
"notNull": true
},
"scope_id": {
"name": "scope_id",
"type": "uuid",
"primaryKey": false,
"notNull": true
},
"metric": {
"name": "metric",
"type": "text",
"primaryKey": false,
"notNull": true
},
"window_kind": {
"name": "window_kind",
"type": "text",
"primaryKey": false,
"notNull": true
},
"window_start": {
"name": "window_start",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": true
},
"window_end": {
"name": "window_end",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": true
},
"threshold_type": {
"name": "threshold_type",
"type": "text",
"primaryKey": false,
"notNull": true
},
"amount_limit": {
"name": "amount_limit",
"type": "integer",
"primaryKey": false,
"notNull": true
},
"amount_observed": {
"name": "amount_observed",
"type": "integer",
"primaryKey": false,
"notNull": true
},
"status": {
"name": "status",
"type": "text",
"primaryKey": false,
"notNull": true,
"default": "'open'"
},
"approval_id": {
"name": "approval_id",
"type": "uuid",
"primaryKey": false,
"notNull": false
},
"resolved_at": {
"name": "resolved_at",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": false
},
"created_at": {
"name": "created_at",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": true,
"default": "now()"
},
"updated_at": {
"name": "updated_at",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": true,
"default": "now()"
}
},
"indexes": {
"budget_incidents_company_status_idx": {
"name": "budget_incidents_company_status_idx",
"columns": [
{
"expression": "company_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "status",
"isExpression": false,
"asc": true,
"nulls": "last"
}
],
"isUnique": false,
"concurrently": false,
"method": "btree",
"with": {}
},
"budget_incidents_company_scope_idx": {
"name": "budget_incidents_company_scope_idx",
"columns": [
{
"expression": "company_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "scope_type",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "scope_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "status",
"isExpression": false,
"asc": true,
"nulls": "last"
}
],
"isUnique": false,
"concurrently": false,
"method": "btree",
"with": {}
},
"budget_incidents_policy_window_threshold_idx": {
"name": "budget_incidents_policy_window_threshold_idx",
"columns": [
{
"expression": "policy_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "window_start",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "threshold_type",
"isExpression": false,
"asc": true,
"nulls": "last"
}
],
"isUnique": true,
"concurrently": false,
"method": "btree",
"with": {}
}
},
"foreignKeys": {
"budget_incidents_company_id_companies_id_fk": {
"name": "budget_incidents_company_id_companies_id_fk",
"tableFrom": "budget_incidents",
"tableTo": "companies",
"columnsFrom": [
"company_id"
],
"columnsTo": [
"id"
],
"onDelete": "no action",
"onUpdate": "no action"
},
"budget_incidents_policy_id_budget_policies_id_fk": {
"name": "budget_incidents_policy_id_budget_policies_id_fk",
"tableFrom": "budget_incidents",
"tableTo": "budget_policies",
"columnsFrom": [
"policy_id"
],
"columnsTo": [
"id"
],
"onDelete": "no action",
"onUpdate": "no action"
},
"budget_incidents_approval_id_approvals_id_fk": {
"name": "budget_incidents_approval_id_approvals_id_fk",
"tableFrom": "budget_incidents",
"tableTo": "approvals",
"columnsFrom": [
"approval_id"
],
"columnsTo": [
"id"
],
"onDelete": "no action",
"onUpdate": "no action"
}
},
"compositePrimaryKeys": {},
"uniqueConstraints": {},
"policies": {},
"checkConstraints": {},
"isRLSEnabled": false
},
"public.budget_policies": {
"name": "budget_policies",
"schema": "",
"columns": {
"id": {
"name": "id",
"type": "uuid",
"primaryKey": true,
"notNull": true,
"default": "gen_random_uuid()"
},
"company_id": {
"name": "company_id",
"type": "uuid",
"primaryKey": false,
"notNull": true
},
"scope_type": {
"name": "scope_type",
"type": "text",
"primaryKey": false,
"notNull": true
},
"scope_id": {
"name": "scope_id",
"type": "uuid",
"primaryKey": false,
"notNull": true
},
"metric": {
"name": "metric",
"type": "text",
"primaryKey": false,
"notNull": true,
"default": "'billed_cents'"
},
"window_kind": {
"name": "window_kind",
"type": "text",
"primaryKey": false,
"notNull": true
},
"amount": {
"name": "amount",
"type": "integer",
"primaryKey": false,
"notNull": true,
"default": 0
},
"warn_percent": {
"name": "warn_percent",
"type": "integer",
"primaryKey": false,
"notNull": true,
"default": 80
},
"hard_stop_enabled": {
"name": "hard_stop_enabled",
"type": "boolean",
"primaryKey": false,
"notNull": true,
"default": true
},
"notify_enabled": {
"name": "notify_enabled",
"type": "boolean",
"primaryKey": false,
"notNull": true,
"default": true
},
"is_active": {
"name": "is_active",
"type": "boolean",
"primaryKey": false,
"notNull": true,
"default": true
},
"created_by_user_id": {
"name": "created_by_user_id",
"type": "text",
"primaryKey": false,
"notNull": false
},
"updated_by_user_id": {
"name": "updated_by_user_id",
"type": "text",
"primaryKey": false,
"notNull": false
},
"created_at": {
"name": "created_at",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": true,
"default": "now()"
},
"updated_at": {
"name": "updated_at",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": true,
"default": "now()"
}
},
"indexes": {
"budget_policies_company_scope_active_idx": {
"name": "budget_policies_company_scope_active_idx",
"columns": [
{
"expression": "company_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "scope_type",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "scope_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "is_active",
"isExpression": false,
"asc": true,
"nulls": "last"
}
],
"isUnique": false,
"concurrently": false,
"method": "btree",
"with": {}
},
"budget_policies_company_window_idx": {
"name": "budget_policies_company_window_idx",
"columns": [
{
"expression": "company_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "window_kind",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "metric",
"isExpression": false,
"asc": true,
"nulls": "last"
}
],
"isUnique": false,
"concurrently": false,
"method": "btree",
"with": {}
},
"budget_policies_company_scope_metric_unique_idx": {
"name": "budget_policies_company_scope_metric_unique_idx",
"columns": [
{
"expression": "company_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "scope_type",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "scope_id",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "metric",
"isExpression": false,
"asc": true,
"nulls": "last"
},
{
"expression": "window_kind",
"isExpression": false,
"asc": true,
"nulls": "last"
}
],
"isUnique": true,
"concurrently": false,
"method": "btree",
"with": {}
}
},
"foreignKeys": {
"budget_policies_company_id_companies_id_fk": {
"name": "budget_policies_company_id_companies_id_fk",
"tableFrom": "budget_policies",
"tableTo": "companies",
"columnsFrom": [
"company_id"
],
"columnsTo": [
"id"
],
"onDelete": "no action",
"onUpdate": "no action"
}
},
"compositePrimaryKeys": {},
"uniqueConstraints": {},
"policies": {},
"checkConstraints": {},
"isRLSEnabled": false
},
"public.companies": {
"name": "companies",
"schema": "",
@@ -6792,6 +7271,18 @@
"primaryKey": false,
"notNull": false
},
"pause_reason": {
"name": "pause_reason",
"type": "text",
"primaryKey": false,
"notNull": false
},
"paused_at": {
"name": "paused_at",
"type": "timestamp with time zone",
"primaryKey": false,
"notNull": false
},
"execution_workspace_policy": {
"name": "execution_workspace_policy",
"type": "jsonb",
@@ -7239,4 +7730,4 @@
"schemas": {},
"tables": {}
}
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -215,58 +215,16 @@
{
"idx": 30,
"version": "7",
"when": 1773670925214,
"tag": "0030_rich_magneto",
"when": 1773511922713,
"tag": "0030_zippy_magma",
"breakpoints": true
},
{
"idx": 31,
"version": "7",
"when": 1773511922713,
"tag": "0031_zippy_magma",
"breakpoints": true
},
{
"idx": 32,
"version": "7",
"when": 1773542934499,
"tag": "0032_pretty_doctor_octopus",
"breakpoints": true
},
{
"idx": 33,
"version": "7",
"when": 1773664961967,
"tag": "0033_shiny_black_tarantula",
"breakpoints": true
},
{
"idx": 34,
"version": "7",
"when": 1773697572188,
"tag": "0034_fat_dormammu",
"breakpoints": true
},
{
"idx": 35,
"version": "7",
"when": 1773698696169,
"tag": "0035_marvelous_satana",
"breakpoints": true
},
{
"idx": 36,
"version": "7",
"when": 1773756213455,
"tag": "0036_cheerful_nitro",
"breakpoints": true
},
{
"idx": 37,
"version": "7",
"when": 1773756922363,
"tag": "0037_friendly_eddie_brock",
"tag": "0031_pretty_doctor_octopus",
"breakpoints": true
}
]
}
}

View File

@@ -1,4 +1,3 @@
import { sql } from "drizzle-orm";
import { index, integer, pgTable, text, timestamp, uuid, uniqueIndex } from "drizzle-orm/pg-core";
import { approvals } from "./approvals.js";
import { budgetPolicies } from "./budget_policies.js";
@@ -37,6 +36,6 @@ export const budgetIncidents = pgTable(
table.policyId,
table.windowStart,
table.thresholdType,
).where(sql`${table.status} <> 'dismissed'`),
),
}),
);

View File

@@ -7,8 +7,6 @@ export const companies = pgTable(
name: text("name").notNull(),
description: text("description"),
status: text("status").notNull().default("active"),
pauseReason: text("pause_reason"),
pausedAt: timestamp("paused_at", { withTimezone: true }),
issuePrefix: text("issue_prefix").notNull().default("PAP"),
issueCounter: integer("issue_counter").notNull().default(0),
budgetMonthlyCents: integer("budget_monthly_cents").notNull().default(0),

View File

@@ -1,18 +0,0 @@
import { pgTable, uuid, timestamp, uniqueIndex } from "drizzle-orm/pg-core";
import { companies } from "./companies.js";
import { assets } from "./assets.js";
export const companyLogos = pgTable(
"company_logos",
{
id: uuid("id").primaryKey().defaultRandom(),
companyId: uuid("company_id").notNull().references(() => companies.id, { onDelete: "cascade" }),
assetId: uuid("asset_id").notNull().references(() => assets.id, { onDelete: "cascade" }),
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { withTimezone: true }).notNull().defaultNow(),
},
(table) => ({
companyUq: uniqueIndex("company_logos_company_uq").on(table.companyId),
assetUq: uniqueIndex("company_logos_asset_uq").on(table.assetId),
}),
);

View File

@@ -1,68 +0,0 @@
import {
type AnyPgColumn,
index,
jsonb,
pgTable,
text,
timestamp,
uuid,
} from "drizzle-orm/pg-core";
import { companies } from "./companies.js";
import { issues } from "./issues.js";
import { projectWorkspaces } from "./project_workspaces.js";
import { projects } from "./projects.js";
export const executionWorkspaces = pgTable(
"execution_workspaces",
{
id: uuid("id").primaryKey().defaultRandom(),
companyId: uuid("company_id").notNull().references(() => companies.id),
projectId: uuid("project_id").notNull().references(() => projects.id, { onDelete: "cascade" }),
projectWorkspaceId: uuid("project_workspace_id").references(() => projectWorkspaces.id, { onDelete: "set null" }),
sourceIssueId: uuid("source_issue_id").references((): AnyPgColumn => issues.id, { onDelete: "set null" }),
mode: text("mode").notNull(),
strategyType: text("strategy_type").notNull(),
name: text("name").notNull(),
status: text("status").notNull().default("active"),
cwd: text("cwd"),
repoUrl: text("repo_url"),
baseRef: text("base_ref"),
branchName: text("branch_name"),
providerType: text("provider_type").notNull().default("local_fs"),
providerRef: text("provider_ref"),
derivedFromExecutionWorkspaceId: uuid("derived_from_execution_workspace_id")
.references((): AnyPgColumn => executionWorkspaces.id, { onDelete: "set null" }),
lastUsedAt: timestamp("last_used_at", { withTimezone: true }).notNull().defaultNow(),
openedAt: timestamp("opened_at", { withTimezone: true }).notNull().defaultNow(),
closedAt: timestamp("closed_at", { withTimezone: true }),
cleanupEligibleAt: timestamp("cleanup_eligible_at", { withTimezone: true }),
cleanupReason: text("cleanup_reason"),
metadata: jsonb("metadata").$type<Record<string, unknown>>(),
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { withTimezone: true }).notNull().defaultNow(),
},
(table) => ({
companyProjectStatusIdx: index("execution_workspaces_company_project_status_idx").on(
table.companyId,
table.projectId,
table.status,
),
companyProjectWorkspaceStatusIdx: index("execution_workspaces_company_project_workspace_status_idx").on(
table.companyId,
table.projectWorkspaceId,
table.status,
),
companySourceIssueIdx: index("execution_workspaces_company_source_issue_idx").on(
table.companyId,
table.sourceIssueId,
),
companyLastUsedIdx: index("execution_workspaces_company_last_used_idx").on(
table.companyId,
table.lastUsedAt,
),
companyBranchIdx: index("execution_workspaces_company_branch_idx").on(
table.companyId,
table.branchName,
),
}),
);

View File

@@ -1,7 +1,5 @@
export { companies } from "./companies.js";
export { companyLogos } from "./company_logos.js";
export { authUsers, authSessions, authAccounts, authVerifications } from "./auth.js";
export { instanceSettings } from "./instance_settings.js";
export { instanceUserRoles } from "./instance_user_roles.js";
export { agents } from "./agents.js";
export { companyMemberships } from "./company_memberships.js";
@@ -17,13 +15,10 @@ export { agentTaskSessions } from "./agent_task_sessions.js";
export { agentWakeupRequests } from "./agent_wakeup_requests.js";
export { projects } from "./projects.js";
export { projectWorkspaces } from "./project_workspaces.js";
export { executionWorkspaces } from "./execution_workspaces.js";
export { workspaceOperations } from "./workspace_operations.js";
export { workspaceRuntimeServices } from "./workspace_runtime_services.js";
export { projectGoals } from "./project_goals.js";
export { goals } from "./goals.js";
export { issues } from "./issues.js";
export { issueWorkProducts } from "./issue_work_products.js";
export { labels } from "./labels.js";
export { issueLabels } from "./issue_labels.js";
export { issueApprovals } from "./issue_approvals.js";

View File

@@ -1,15 +0,0 @@
import { pgTable, uuid, text, timestamp, jsonb, uniqueIndex } from "drizzle-orm/pg-core";
export const instanceSettings = pgTable(
"instance_settings",
{
id: uuid("id").primaryKey().defaultRandom(),
singletonKey: text("singleton_key").notNull().default("default"),
experimental: jsonb("experimental").$type<Record<string, unknown>>().notNull().default({}),
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { withTimezone: true }).notNull().defaultNow(),
},
(table) => ({
singletonKeyIdx: uniqueIndex("instance_settings_singleton_key_idx").on(table.singletonKey),
}),
);

View File

@@ -1,64 +0,0 @@
import {
boolean,
index,
jsonb,
pgTable,
text,
timestamp,
uuid,
} from "drizzle-orm/pg-core";
import { companies } from "./companies.js";
import { executionWorkspaces } from "./execution_workspaces.js";
import { heartbeatRuns } from "./heartbeat_runs.js";
import { issues } from "./issues.js";
import { projects } from "./projects.js";
import { workspaceRuntimeServices } from "./workspace_runtime_services.js";
export const issueWorkProducts = pgTable(
"issue_work_products",
{
id: uuid("id").primaryKey().defaultRandom(),
companyId: uuid("company_id").notNull().references(() => companies.id),
projectId: uuid("project_id").references(() => projects.id, { onDelete: "set null" }),
issueId: uuid("issue_id").notNull().references(() => issues.id, { onDelete: "cascade" }),
executionWorkspaceId: uuid("execution_workspace_id")
.references(() => executionWorkspaces.id, { onDelete: "set null" }),
runtimeServiceId: uuid("runtime_service_id")
.references(() => workspaceRuntimeServices.id, { onDelete: "set null" }),
type: text("type").notNull(),
provider: text("provider").notNull(),
externalId: text("external_id"),
title: text("title").notNull(),
url: text("url"),
status: text("status").notNull(),
reviewState: text("review_state").notNull().default("none"),
isPrimary: boolean("is_primary").notNull().default(false),
healthStatus: text("health_status").notNull().default("unknown"),
summary: text("summary"),
metadata: jsonb("metadata").$type<Record<string, unknown>>(),
createdByRunId: uuid("created_by_run_id").references(() => heartbeatRuns.id, { onDelete: "set null" }),
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { withTimezone: true }).notNull().defaultNow(),
},
(table) => ({
companyIssueTypeIdx: index("issue_work_products_company_issue_type_idx").on(
table.companyId,
table.issueId,
table.type,
),
companyExecutionWorkspaceTypeIdx: index("issue_work_products_company_execution_workspace_type_idx").on(
table.companyId,
table.executionWorkspaceId,
table.type,
),
companyProviderExternalIdIdx: index("issue_work_products_company_provider_external_id_idx").on(
table.companyId,
table.provider,
table.externalId,
),
companyUpdatedIdx: index("issue_work_products_company_updated_idx").on(
table.companyId,
table.updatedAt,
),
}),
);

View File

@@ -14,8 +14,6 @@ import { projects } from "./projects.js";
import { goals } from "./goals.js";
import { companies } from "./companies.js";
import { heartbeatRuns } from "./heartbeat_runs.js";
import { projectWorkspaces } from "./project_workspaces.js";
import { executionWorkspaces } from "./execution_workspaces.js";
export const issues = pgTable(
"issues",
@@ -23,7 +21,6 @@ export const issues = pgTable(
id: uuid("id").primaryKey().defaultRandom(),
companyId: uuid("company_id").notNull().references(() => companies.id),
projectId: uuid("project_id").references(() => projects.id),
projectWorkspaceId: uuid("project_workspace_id").references(() => projectWorkspaces.id, { onDelete: "set null" }),
goalId: uuid("goal_id").references(() => goals.id),
parentId: uuid("parent_id").references((): AnyPgColumn => issues.id),
title: text("title").notNull(),
@@ -43,9 +40,6 @@ export const issues = pgTable(
requestDepth: integer("request_depth").notNull().default(0),
billingCode: text("billing_code"),
assigneeAdapterOverrides: jsonb("assignee_adapter_overrides").$type<Record<string, unknown>>(),
executionWorkspaceId: uuid("execution_workspace_id")
.references((): AnyPgColumn => executionWorkspaces.id, { onDelete: "set null" }),
executionWorkspacePreference: text("execution_workspace_preference"),
executionWorkspaceSettings: jsonb("execution_workspace_settings").$type<Record<string, unknown>>(),
startedAt: timestamp("started_at", { withTimezone: true }),
completedAt: timestamp("completed_at", { withTimezone: true }),
@@ -68,8 +62,6 @@ export const issues = pgTable(
),
parentIdx: index("issues_company_parent_idx").on(table.companyId, table.parentId),
projectIdx: index("issues_company_project_idx").on(table.companyId, table.projectId),
projectWorkspaceIdx: index("issues_company_project_workspace_idx").on(table.companyId, table.projectWorkspaceId),
executionWorkspaceIdx: index("issues_company_execution_workspace_idx").on(table.companyId, table.executionWorkspaceId),
identifierIdx: uniqueIndex("issues_identifier_idx").on(table.identifier),
}),
);

View File

@@ -5,7 +5,6 @@ import {
pgTable,
text,
timestamp,
uniqueIndex,
uuid,
} from "drizzle-orm/pg-core";
import { companies } from "./companies.js";
@@ -18,17 +17,9 @@ export const projectWorkspaces = pgTable(
companyId: uuid("company_id").notNull().references(() => companies.id),
projectId: uuid("project_id").notNull().references(() => projects.id, { onDelete: "cascade" }),
name: text("name").notNull(),
sourceType: text("source_type").notNull().default("local_path"),
cwd: text("cwd"),
repoUrl: text("repo_url"),
repoRef: text("repo_ref"),
defaultRef: text("default_ref"),
visibility: text("visibility").notNull().default("default"),
setupCommand: text("setup_command"),
cleanupCommand: text("cleanup_command"),
remoteProvider: text("remote_provider"),
remoteWorkspaceRef: text("remote_workspace_ref"),
sharedWorkspaceKey: text("shared_workspace_key"),
metadata: jsonb("metadata").$type<Record<string, unknown>>(),
isPrimary: boolean("is_primary").notNull().default(false),
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
@@ -37,9 +28,5 @@ export const projectWorkspaces = pgTable(
(table) => ({
companyProjectIdx: index("project_workspaces_company_project_idx").on(table.companyId, table.projectId),
projectPrimaryIdx: index("project_workspaces_project_primary_idx").on(table.projectId, table.isPrimary),
projectSourceTypeIdx: index("project_workspaces_project_source_type_idx").on(table.projectId, table.sourceType),
companySharedKeyIdx: index("project_workspaces_company_shared_key_idx").on(table.companyId, table.sharedWorkspaceKey),
projectRemoteRefIdx: uniqueIndex("project_workspaces_project_remote_ref_idx")
.on(table.projectId, table.remoteProvider, table.remoteWorkspaceRef),
}),
);
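For the reduced `projectWorkspaces` shape, a hedged creation sketch that touches only columns clearly retained by this hunk (`companyId`, `projectId`, `name`, `isPrimary`); `createPrimaryWorkspace` and the `db` import are illustrative:

```ts
import { db } from "../db.js"; // assumed application db handle
import { projectWorkspaces } from "./project_workspaces.js";

// sourceType, repo*, remote*, and sharedWorkspaceKey no longer exist on the
// table, so a minimal row is just the project linkage plus a name.
export async function createPrimaryWorkspace(companyId: string, projectId: string) {
  return db
    .insert(projectWorkspaces)
    .values({ companyId, projectId, name: "main", isPrimary: true })
    .returning();
}
```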


@@ -1,57 +0,0 @@
import {
bigint,
boolean,
index,
integer,
jsonb,
pgTable,
text,
timestamp,
uuid,
} from "drizzle-orm/pg-core";
import { companies } from "./companies.js";
import { executionWorkspaces } from "./execution_workspaces.js";
import { heartbeatRuns } from "./heartbeat_runs.js";
export const workspaceOperations = pgTable(
"workspace_operations",
{
id: uuid("id").primaryKey().defaultRandom(),
companyId: uuid("company_id").notNull().references(() => companies.id),
executionWorkspaceId: uuid("execution_workspace_id").references(() => executionWorkspaces.id, {
onDelete: "set null",
}),
heartbeatRunId: uuid("heartbeat_run_id").references(() => heartbeatRuns.id, {
onDelete: "set null",
}),
phase: text("phase").notNull(),
command: text("command"),
cwd: text("cwd"),
status: text("status").notNull().default("running"),
exitCode: integer("exit_code"),
logStore: text("log_store"),
logRef: text("log_ref"),
logBytes: bigint("log_bytes", { mode: "number" }),
logSha256: text("log_sha256"),
logCompressed: boolean("log_compressed").notNull().default(false),
stdoutExcerpt: text("stdout_excerpt"),
stderrExcerpt: text("stderr_excerpt"),
metadata: jsonb("metadata").$type<Record<string, unknown>>(),
startedAt: timestamp("started_at", { withTimezone: true }).notNull().defaultNow(),
finishedAt: timestamp("finished_at", { withTimezone: true }),
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { withTimezone: true }).notNull().defaultNow(),
},
(table) => ({
companyRunStartedIdx: index("workspace_operations_company_run_started_idx").on(
table.companyId,
table.heartbeatRunId,
table.startedAt,
),
companyWorkspaceStartedIdx: index("workspace_operations_company_workspace_started_idx").on(
table.companyId,
table.executionWorkspaceId,
table.startedAt,
),
}),
);


@@ -10,7 +10,6 @@ import {
import { companies } from "./companies.js";
import { projects } from "./projects.js";
import { projectWorkspaces } from "./project_workspaces.js";
import { executionWorkspaces } from "./execution_workspaces.js";
import { issues } from "./issues.js";
import { agents } from "./agents.js";
import { heartbeatRuns } from "./heartbeat_runs.js";
@@ -22,7 +21,6 @@ export const workspaceRuntimeServices = pgTable(
companyId: uuid("company_id").notNull().references(() => companies.id),
projectId: uuid("project_id").references(() => projects.id, { onDelete: "set null" }),
projectWorkspaceId: uuid("project_workspace_id").references(() => projectWorkspaces.id, { onDelete: "set null" }),
executionWorkspaceId: uuid("execution_workspace_id").references(() => executionWorkspaces.id, { onDelete: "set null" }),
issueId: uuid("issue_id").references(() => issues.id, { onDelete: "set null" }),
scopeType: text("scope_type").notNull(),
scopeId: text("scope_id"),
@@ -52,11 +50,6 @@ export const workspaceRuntimeServices = pgTable(
table.projectWorkspaceId,
table.status,
),
companyExecutionWorkspaceStatusIdx: index("workspace_runtime_services_company_execution_workspace_status_idx").on(
table.companyId,
table.executionWorkspaceId,
table.status,
),
companyProjectStatusIdx: index("workspace_runtime_services_company_project_status_idx").on(
table.companyId,
table.projectId,
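With the execution-workspace index gone, lookups fall back to the remaining company-scoped indexes. Below is a sketch matching `workspace_runtime_services_company_project_status_idx`; the `"running"` status value is an assumption, since the allowed set is not shown in this hunk:

```ts
import { and, eq } from "drizzle-orm";
import { db } from "../db.js"; // assumed application db handle
import { workspaceRuntimeServices } from "./workspace_runtime_services.js";

export async function listRunningProjectServices(companyId: string, projectId: string) {
  return db
    .select()
    .from(workspaceRuntimeServices)
    .where(
      and(
        eq(workspaceRuntimeServices.companyId, companyId),
        eq(workspaceRuntimeServices.projectId, projectId),
        eq(workspaceRuntimeServices.status, "running"), // assumed status literal
      ),
    );
}
```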


@@ -1,16 +1,6 @@
{
"name": "@paperclipai/create-paperclip-plugin",
"version": "0.1.0",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/plugins/create-paperclip-plugin"
},
"type": "module",
"bin": {
"create-paperclip-plugin": "./dist/index.js"


@@ -2,16 +2,6 @@
"name": "@paperclipai/plugin-sdk",
"version": "1.0.0",
"description": "Stable public API for Paperclip plugins — worker-side context and UI bridge hooks",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/plugins/sdk"
},
"type": "module",
"exports": {
".": {


@@ -103,10 +103,9 @@ export interface HostServices {
list(params: WorkerToHostMethods["entities.list"][0]): Promise<WorkerToHostMethods["entities.list"][1]>;
};
/** Provides `events.emit` and `events.subscribe`. */
/** Provides `events.emit`. */
events: {
emit(params: WorkerToHostMethods["events.emit"][0]): Promise<void>;
subscribe(params: WorkerToHostMethods["events.subscribe"][0]): Promise<void>;
};
/** Provides `http.fetch`. */
@@ -165,14 +164,6 @@ export interface HostServices {
createComment(params: WorkerToHostMethods["issues.createComment"][0]): Promise<WorkerToHostMethods["issues.createComment"][1]>;
};
/** Provides `issues.documents.list`, `issues.documents.get`, `issues.documents.upsert`, `issues.documents.delete`. */
issueDocuments: {
list(params: WorkerToHostMethods["issues.documents.list"][0]): Promise<WorkerToHostMethods["issues.documents.list"][1]>;
get(params: WorkerToHostMethods["issues.documents.get"][0]): Promise<WorkerToHostMethods["issues.documents.get"][1]>;
upsert(params: WorkerToHostMethods["issues.documents.upsert"][0]): Promise<WorkerToHostMethods["issues.documents.upsert"][1]>;
delete(params: WorkerToHostMethods["issues.documents.delete"][0]): Promise<WorkerToHostMethods["issues.documents.delete"][1]>;
};
/** Provides `agents.list`, `agents.get`, `agents.pause`, `agents.resume`, `agents.invoke`. */
agents: {
list(params: WorkerToHostMethods["agents.list"][0]): Promise<WorkerToHostMethods["agents.list"][1]>;
@@ -270,7 +261,6 @@ const METHOD_CAPABILITY_MAP: Record<WorkerToHostMethodName, PluginCapability | n
// Events
"events.emit": "events.emit",
"events.subscribe": "events.subscribe",
// HTTP
"http.fetch": "http.outbound",
@@ -306,12 +296,6 @@ const METHOD_CAPABILITY_MAP: Record<WorkerToHostMethodName, PluginCapability | n
"issues.listComments": "issue.comments.read",
"issues.createComment": "issue.comments.create",
// Issue Documents
"issues.documents.list": "issue.documents.read",
"issues.documents.get": "issue.documents.read",
"issues.documents.upsert": "issue.documents.write",
"issues.documents.delete": "issue.documents.write",
// Agents
"agents.list": "agents.read",
"agents.get": "agents.read",
@@ -423,9 +407,6 @@ export function createHostClientHandlers(
"events.emit": gated("events.emit", async (params) => {
return services.events.emit(params);
}),
"events.subscribe": gated("events.subscribe", async (params) => {
return services.events.subscribe(params);
}),
// HTTP
"http.fetch": gated("http.fetch", async (params) => {
@@ -497,20 +478,6 @@ export function createHostClientHandlers(
return services.issues.createComment(params);
}),
// Issue Documents
"issues.documents.list": gated("issues.documents.list", async (params) => {
return services.issueDocuments.list(params);
}),
"issues.documents.get": gated("issues.documents.get", async (params) => {
return services.issueDocuments.get(params);
}),
"issues.documents.upsert": gated("issues.documents.upsert", async (params) => {
return services.issueDocuments.upsert(params);
}),
"issues.documents.delete": gated("issues.documents.delete", async (params) => {
return services.issueDocuments.delete(params);
}),
// Agents
"agents.list": gated("agents.list", async (params) => {
return services.agents.list(params);
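The `gated(...)` wrapper paired with `METHOD_CAPABILITY_MAP` is the crux of this file: every host method is registered through it, so deleting the `issues.documents.*` and `events.subscribe` entries removes both the handlers and their capability gates in one motion. The real helper is not shown in this diff; a minimal sketch of the pattern, with all names illustrative:

```ts
type Handler<P, R> = (params: P) => Promise<R>;

// Builds a `gated` function closed over the plugin's granted capabilities and
// the method-to-capability map; a handler runs only if its capability is held.
function makeGated(
  granted: ReadonlySet<string>,
  capabilityOf: Record<string, string | null>,
) {
  return function gated<P, R>(method: string, handler: Handler<P, R>): Handler<P, R> {
    return async (params: P) => {
      const needed = capabilityOf[method];
      if (needed && !granted.has(needed)) {
        throw new Error(`capability "${needed}" is required for "${method}"`);
      }
      return handler(params);
    };
  };
}

// e.g. const gated = makeGated(grantedCapabilities, METHOD_CAPABILITY_MAP);
```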


@@ -25,8 +25,6 @@ import type {
Project,
Issue,
IssueComment,
IssueDocument,
IssueDocumentSummary,
Agent,
Goal,
} from "@paperclipai/shared";
@@ -484,10 +482,6 @@ export interface WorkerToHostMethods {
params: { name: string; companyId: string; payload: unknown },
result: void,
];
"events.subscribe": [
params: { eventPattern: string; filter?: Record<string, unknown> | null },
result: void,
];
// HTTP
"http.fetch": [
@@ -603,32 +597,6 @@ export interface WorkerToHostMethods {
result: IssueComment,
];
// Issue Documents
"issues.documents.list": [
params: { issueId: string; companyId: string },
result: IssueDocumentSummary[],
];
"issues.documents.get": [
params: { issueId: string; key: string; companyId: string },
result: IssueDocument | null,
];
"issues.documents.upsert": [
params: {
issueId: string;
key: string;
body: string;
companyId: string;
title?: string;
format?: string;
changeSummary?: string;
},
result: IssueDocument,
];
"issues.documents.delete": [
params: { issueId: string; key: string; companyId: string },
result: void,
];
// Agents (read)
"agents.list": [
params: { companyId: string; status?: string; limit?: number; offset?: number },
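Each entry in `WorkerToHostMethods` is a labeled `[params, result]` tuple, which is what lets a single generic RPC call stay fully typed; removing the `issues.documents.*` and `events.subscribe` entries therefore makes any stale call fail to compile. A type-level sketch (the import path is assumed):

```ts
import type { WorkerToHostMethods } from "./worker-to-host.js"; // assumed path

// Equivalent to WorkerToHostMethodName in the codebase.
type MethodName = keyof WorkerToHostMethods;

// Indexing the tuple by 0 and 1 types the params and the result respectively.
declare function callHost<M extends MethodName>(
  method: M,
  params: WorkerToHostMethods[M][0],
): Promise<WorkerToHostMethods[M][1]>;

// const comment = await callHost("issues.createComment", { issueId, body, companyId });
// `comment` is inferred as IssueComment; "issues.documents.get" no longer compiles.
```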


@@ -353,7 +353,6 @@ export function createTestHarness(options: TestHarnessOptions): TestHarness {
id: randomUUID(),
companyId: input.companyId,
projectId: input.projectId ?? null,
projectWorkspaceId: null,
goalId: input.goalId ?? null,
parentId: input.parentId ?? null,
title: input.title,
@@ -373,8 +372,6 @@ export function createTestHarness(options: TestHarnessOptions): TestHarness {
requestDepth: 0,
billingCode: null,
assigneeAdapterOverrides: null,
executionWorkspaceId: null,
executionWorkspacePreference: null,
executionWorkspaceSettings: null,
startedAt: null,
completedAt: null,
@@ -425,33 +422,6 @@ export function createTestHarness(options: TestHarnessOptions): TestHarness {
issueComments.set(issueId, current);
return comment;
},
documents: {
async list(issueId, companyId) {
requireCapability(manifest, capabilitySet, "issue.documents.read");
if (!isInCompany(issues.get(issueId), companyId)) return [];
return [];
},
async get(issueId, _key, companyId) {
requireCapability(manifest, capabilitySet, "issue.documents.read");
if (!isInCompany(issues.get(issueId), companyId)) return null;
return null;
},
async upsert(input) {
requireCapability(manifest, capabilitySet, "issue.documents.write");
const parentIssue = issues.get(input.issueId);
if (!isInCompany(parentIssue, input.companyId)) {
throw new Error(`Issue not found: ${input.issueId}`);
}
throw new Error("documents.upsert is not implemented in test context");
},
async delete(issueId, _key, companyId) {
requireCapability(manifest, capabilitySet, "issue.documents.write");
const parentIssue = issues.get(issueId);
if (!isInCompany(parentIssue, companyId)) {
throw new Error(`Issue not found: ${issueId}`);
}
},
},
},
agents: {
async list(input) {
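The harness stubs enforce the same capability checks as the real host before returning canned data, so capability regressions still fail in tests. An assumed shape for the `requireCapability` guard used throughout (the real implementation is not shown in this diff):

```ts
// Hypothetical sketch; the manifest and capability-set types are simplified.
function requireCapability(
  manifest: { name: string },
  granted: ReadonlySet<string>,
  capability: string,
): void {
  if (!granted.has(capability)) {
    throw new Error(`plugin "${manifest.name}" is missing capability "${capability}"`);
  }
}
```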


@@ -19,8 +19,6 @@ import type {
Project,
Issue,
IssueComment,
IssueDocument,
IssueDocumentSummary,
Agent,
Goal,
} from "@paperclipai/shared";
@@ -63,8 +61,6 @@ export type {
Project,
Issue,
IssueComment,
IssueDocument,
IssueDocumentSummary,
Agent,
Goal,
} from "@paperclipai/shared";
@@ -778,73 +774,6 @@ export interface PluginCompaniesClient {
get(companyId: string): Promise<Company | null>;
}
/**
* `ctx.issues.documents` — read and write issue documents.
*
* Requires:
* - `issue.documents.read` for `list` and `get`
* - `issue.documents.write` for `upsert` and `delete`
*
* @see PLUGIN_SPEC.md §14 — SDK Surface
*/
export interface PluginIssueDocumentsClient {
/**
* List all documents attached to an issue.
*
* Returns summary metadata (id, key, title, format, timestamps) without
* the full document body. Use `get()` to fetch a specific document's body.
*
* Requires the `issue.documents.read` capability.
*/
list(issueId: string, companyId: string): Promise<IssueDocumentSummary[]>;
/**
* Get a single document by key, including its full body content.
*
* Returns `null` if no document exists with the given key.
*
* Requires the `issue.documents.read` capability.
*
* @param issueId - UUID of the issue
* @param key - Document key (e.g. `"plan"`, `"design-spec"`)
* @param companyId - UUID of the company
*/
get(issueId: string, key: string, companyId: string): Promise<IssueDocument | null>;
/**
* Create or update a document on an issue.
*
* If a document with the given key already exists, it is updated and a new
* revision is created. If it does not exist, it is created.
*
* Requires the `issue.documents.write` capability.
*
* @param input - Document data including issueId, key, body, and optional title/format/changeSummary
*/
upsert(input: {
issueId: string;
key: string;
body: string;
companyId: string;
title?: string;
format?: string;
changeSummary?: string;
}): Promise<IssueDocument>;
/**
* Delete a document and all its revisions.
*
* No-ops silently if the document does not exist (idempotent).
*
* Requires the `issue.documents.write` capability.
*
* @param issueId - UUID of the issue
* @param key - Document key to delete
* @param companyId - UUID of the company
*/
delete(issueId: string, key: string, companyId: string): Promise<void>;
}
/**
* `ctx.issues` — read and mutate issues plus comments.
*
@@ -854,8 +783,6 @@ export interface PluginIssueDocumentsClient {
* - `issues.update` for update
* - `issue.comments.read` for `listComments`
* - `issue.comments.create` for `createComment`
* - `issue.documents.read` for `documents.list` and `documents.get`
* - `issue.documents.write` for `documents.upsert` and `documents.delete`
*/
export interface PluginIssuesClient {
list(input: {
@@ -887,8 +814,6 @@ export interface PluginIssuesClient {
): Promise<Issue>;
listComments(issueId: string, companyId: string): Promise<IssueComment[]>;
createComment(issueId: string, body: string, companyId: string): Promise<IssueComment>;
/** Read and write issue documents. Requires `issue.documents.read` / `issue.documents.write`. */
documents: PluginIssueDocumentsClient;
}
/**
@@ -1131,7 +1056,7 @@ export interface PluginContext {
/** Read company metadata. Requires `companies.read`. */
companies: PluginCompaniesClient;
/** Read and write issues, comments, and documents. Requires issue capabilities. */
/** Read and write issues/comments. Requires issue capabilities. */
issues: PluginIssuesClient;
/** Read and manage agents. Requires `agents.read` for reads; `agents.pause` / `agents.resume` / `agents.invoke` for write ops. */
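From a plugin author's perspective, the surviving `ctx.issues` surface is issue CRUD plus comments. An illustrative snippet using only methods whose signatures appear above, assuming a manifest that grants `issue.comments.read` and `issue.comments.create`:

```ts
import type { PluginContext } from "@paperclipai/plugin-sdk";

// Hypothetical plugin helper: seed a comment thread if an issue has none.
export async function ensureDiscussion(
  ctx: PluginContext,
  issueId: string,
  companyId: string,
): Promise<void> {
  const comments = await ctx.issues.listComments(issueId, companyId);
  if (comments.length === 0) {
    await ctx.issues.createComment(issueId, "No discussion yet; starting one.", companyId);
  }
}
```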


@@ -19,7 +19,8 @@
* |--- request(initialize) -------------> | → calls plugin.setup(ctx)
* |<-- response(ok:true) ---------------- |
* | |
* |--- notification(onEvent) -----------> | → dispatches to registered handler
* |--- request(onEvent) ----------------> | → dispatches to registered handler
* |<-- response(void) ------------------ |
* | |
* |<-- request(state.get) --------------- | ← SDK client call from plugin code
* |--- response(result) ----------------> |
@@ -386,13 +387,6 @@ export function startWorkerRpcHost(options: WorkerRpcHostOptions): WorkerRpcHost
registration = { name, filter: filterOrFn, fn: maybeFn };
}
eventHandlers.push(registration);
// Register subscription on the host so events are forwarded to this worker
void callHost("events.subscribe", { eventPattern: name, filter: registration.filter ?? null }).catch((err) => {
notifyHost("log", {
level: "warn",
message: `Failed to subscribe to event "${name}" on host: ${err instanceof Error ? err.message : String(err)}`,
});
});
return () => {
const idx = eventHandlers.indexOf(registration);
if (idx !== -1) eventHandlers.splice(idx, 1);
@@ -612,32 +606,6 @@ export function startWorkerRpcHost(options: WorkerRpcHostOptions): WorkerRpcHost
async createComment(issueId: string, body: string, companyId: string) {
return callHost("issues.createComment", { issueId, body, companyId });
},
documents: {
async list(issueId: string, companyId: string) {
return callHost("issues.documents.list", { issueId, companyId });
},
async get(issueId: string, key: string, companyId: string) {
return callHost("issues.documents.get", { issueId, key, companyId });
},
async upsert(input) {
return callHost("issues.documents.upsert", {
issueId: input.issueId,
key: input.key,
body: input.body,
companyId: input.companyId,
title: input.title,
format: input.format,
changeSummary: input.changeSummary,
});
},
async delete(issueId: string, key: string, companyId: string) {
return callHost("issues.documents.delete", { issueId, key, companyId });
},
},
},
agents: {
@@ -1139,14 +1107,6 @@ export function startWorkerRpcHost(options: WorkerRpcHostOptions): WorkerRpcHost
const event = notif.params as AgentSessionEvent;
const cb = sessionEventCallbacks.get(event.sessionId);
if (cb) cb(event);
} else if (notif.method === "onEvent" && notif.params) {
// Plugin event bus notifications — dispatch to registered event handlers
handleOnEvent(notif.params as OnEventParams).catch((err) => {
notifyHost("log", {
level: "error",
message: `Failed to handle event notification: ${err instanceof Error ? err.message : String(err)}`,
});
});
}
}
}
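Per the updated diagram, `onEvent` is now a request rather than a fire-and-forget notification, so the host awaits the worker's response and can observe handler failures directly instead of relying on the removed subscribe-plus-notify plumbing. A sketch of the worker-side handling under that assumption, with all names illustrative:

```ts
interface RpcRequest {
  id: number;
  method: string;
  params: unknown;
}

declare function dispatchToRegisteredHandlers(params: unknown): Promise<void>;
declare function respond(id: number, result: unknown): void;
declare function respondError(id: number, message: string): void;

async function handleHostRequest(req: RpcRequest): Promise<void> {
  if (req.method === "onEvent") {
    try {
      await dispatchToRegisteredHandlers(req.params);
      respond(req.id, null); // void result, per the protocol diagram
    } catch (err) {
      respondError(req.id, err instanceof Error ? err.message : String(err));
    }
  }
}
```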


@@ -1,16 +1,6 @@
{
"name": "@paperclipai/shared",
"version": "0.3.1",
"license": "MIT",
"homepage": "https://github.com/paperclipai/paperclip",
"bugs": {
"url": "https://github.com/paperclipai/paperclip/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/paperclipai/paperclip",
"directory": "packages/shared"
},
"type": "module",
"exports": {
".": "./src/index.ts",


@@ -30,7 +30,6 @@ export const AGENT_ADAPTER_TYPES = [
"pi_local",
"cursor",
"openclaw_gateway",
"hermes_local",
] as const;
export type AgentAdapterType = (typeof AGENT_ADAPTER_TYPES)[number];
@@ -385,7 +384,6 @@ export const PLUGIN_CAPABILITIES = [
"project.workspaces.read",
"issues.read",
"issue.comments.read",
"issue.documents.read",
"agents.read",
"goals.read",
"goals.create",
@@ -396,7 +394,6 @@ export const PLUGIN_CAPABILITIES = [
"issues.create",
"issues.update",
"issue.comments.create",
"issue.documents.write",
"agents.pause",
"agents.resume",
"agents.invoke",


@@ -120,8 +120,6 @@ export {
export type {
Company,
InstanceExperimentalSettings,
InstanceSettings,
Agent,
AgentPermissions,
AgentKeyCreated,
@@ -132,28 +130,14 @@ export type {
AdapterEnvironmentTestResult,
AssetImage,
Project,
ProjectCodebase,
ProjectCodebaseOrigin,
ProjectGoalRef,
ProjectWorkspace,
ExecutionWorkspace,
WorkspaceRuntimeService,
WorkspaceOperation,
WorkspaceOperationPhase,
WorkspaceOperationStatus,
ExecutionWorkspaceStrategyType,
ExecutionWorkspaceMode,
ExecutionWorkspaceProviderType,
ExecutionWorkspaceStatus,
ExecutionWorkspaceStrategy,
ProjectExecutionWorkspacePolicy,
ProjectExecutionWorkspaceDefaultMode,
IssueExecutionWorkspaceSettings,
IssueWorkProduct,
IssueWorkProductType,
IssueWorkProductProvider,
IssueWorkProductStatus,
IssueWorkProductReviewState,
Issue,
IssueAssigneeAdapterOverrides,
IssueComment,
@@ -244,12 +228,6 @@ export type {
ProviderQuotaResult,
} from "./types/index.js";
export {
instanceExperimentalSettingsSchema,
patchInstanceExperimentalSettingsSchema,
type PatchInstanceExperimentalSettings,
} from "./validators/index.js";
export {
createCompanySchema,
updateCompanySchema,
@@ -291,13 +269,6 @@ export {
addIssueCommentSchema,
linkIssueApprovalSchema,
createIssueAttachmentMetadataSchema,
createIssueWorkProductSchema,
updateIssueWorkProductSchema,
issueWorkProductTypeSchema,
issueWorkProductStatusSchema,
issueWorkProductReviewStateSchema,
updateExecutionWorkspaceSchema,
executionWorkspaceStatusSchema,
issueDocumentFormatSchema,
issueDocumentKeySchema,
upsertIssueDocumentSchema,
@@ -308,9 +279,6 @@ export {
type AddIssueComment,
type LinkIssueApproval,
type CreateIssueAttachmentMetadata,
type CreateIssueWorkProduct,
type UpdateIssueWorkProduct,
type UpdateExecutionWorkspace,
type IssueDocumentFormat,
type UpsertIssueDocument,
createGoalSchema,


@@ -1,20 +1,16 @@
import type { CompanyStatus, PauseReason } from "../constants.js";
import type { CompanyStatus } from "../constants.js";
export interface Company {
id: string;
name: string;
description: string | null;
status: CompanyStatus;
pauseReason: PauseReason | null;
pausedAt: Date | null;
issuePrefix: string;
issueCounter: number;
budgetMonthlyCents: number;
spentMonthlyCents: number;
requireBoardApprovalForNewAgents: boolean;
brandColor: string | null;
logoAssetId: string | null;
logoUrl: string | null;
createdAt: Date;
updatedAt: Date;
}
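The budget fields stay on `Company` in integer cents, consistent with the costs work in this compare. A hypothetical helper, not part of the diff, showing the intended arithmetic:

```ts
import type { Company } from "@paperclipai/shared";

// Remaining monthly budget, clamped at zero once spend exceeds budget.
export function remainingMonthlyBudgetCents(company: Company): number {
  return Math.max(0, company.budgetMonthlyCents - company.spentMonthlyCents);
}
```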


@@ -1,5 +1,4 @@
export type { Company } from "./company.js";
export type { InstanceExperimentalSettings, InstanceSettings } from "./instance.js";
export type {
Agent,
AgentPermissions,
@@ -11,31 +10,15 @@ export type {
AdapterEnvironmentTestResult,
} from "./agent.js";
export type { AssetImage } from "./asset.js";
export type { Project, ProjectCodebase, ProjectCodebaseOrigin, ProjectGoalRef, ProjectWorkspace } from "./project.js";
export type { Project, ProjectGoalRef, ProjectWorkspace } from "./project.js";
export type {
ExecutionWorkspace,
WorkspaceRuntimeService,
ExecutionWorkspaceStrategyType,
ExecutionWorkspaceMode,
ExecutionWorkspaceProviderType,
ExecutionWorkspaceStatus,
ExecutionWorkspaceStrategy,
ProjectExecutionWorkspacePolicy,
ProjectExecutionWorkspaceDefaultMode,
IssueExecutionWorkspaceSettings,
} from "./workspace-runtime.js";
export type {
WorkspaceOperation,
WorkspaceOperationPhase,
WorkspaceOperationStatus,
} from "./workspace-operation.js";
export type {
IssueWorkProduct,
IssueWorkProductType,
IssueWorkProductProvider,
IssueWorkProductStatus,
IssueWorkProductReviewState,
} from "./work-product.js";
export type {
Issue,
IssueAssigneeAdapterOverrides,


@@ -1,10 +0,0 @@
export interface InstanceExperimentalSettings {
enableIsolatedWorkspaces: boolean;
}
export interface InstanceSettings {
id: string;
experimental: InstanceExperimentalSettings;
createdAt: Date;
updatedAt: Date;
}


@@ -1,8 +1,7 @@
import type { IssuePriority, IssueStatus } from "../constants.js";
import type { Goal } from "./goal.js";
import type { Project, ProjectWorkspace } from "./project.js";
import type { ExecutionWorkspace, IssueExecutionWorkspaceSettings } from "./workspace-runtime.js";
import type { IssueWorkProduct } from "./work-product.js";
import type { IssueExecutionWorkspaceSettings } from "./workspace-runtime.js";
export interface IssueAncestorProject {
id: string;
@@ -98,7 +97,6 @@ export interface Issue {
id: string;
companyId: string;
projectId: string | null;
projectWorkspaceId: string | null;
goalId: string | null;
parentId: string | null;
ancestors?: IssueAncestor[];
@@ -119,8 +117,6 @@ export interface Issue {
requestDepth: number;
billingCode: string | null;
assigneeAdapterOverrides: IssueAssigneeAdapterOverrides | null;
executionWorkspaceId: string | null;
executionWorkspacePreference: string | null;
executionWorkspaceSettings: IssueExecutionWorkspaceSettings | null;
startedAt: Date | null;
completedAt: Date | null;
@@ -133,8 +129,6 @@ export interface Issue {
legacyPlanDocument?: LegacyPlanDocument | null;
project?: Project | null;
goal?: Goal | null;
currentExecutionWorkspace?: ExecutionWorkspace | null;
workProducts?: IssueWorkProduct[];
mentionedProjects?: Project[];
myLastTouchAt?: Date | null;
lastExternalCommentAt?: Date | null;


@@ -1,9 +1,6 @@
import type { PauseReason, ProjectStatus } from "../constants.js";
import type { ProjectExecutionWorkspacePolicy, WorkspaceRuntimeService } from "./workspace-runtime.js";
export type ProjectWorkspaceSourceType = "local_path" | "git_repo" | "remote_managed" | "non_git_path";
export type ProjectWorkspaceVisibility = "default" | "advanced";
export interface ProjectGoalRef {
id: string;
title: string;
@@ -14,17 +11,9 @@ export interface ProjectWorkspace {
companyId: string;
projectId: string;
name: string;
sourceType: ProjectWorkspaceSourceType;
cwd: string | null;
repoUrl: string | null;
repoRef: string | null;
defaultRef: string | null;
visibility: ProjectWorkspaceVisibility;
setupCommand: string | null;
cleanupCommand: string | null;
remoteProvider: string | null;
remoteWorkspaceRef: string | null;
sharedWorkspaceKey: string | null;
metadata: Record<string, unknown> | null;
isPrimary: boolean;
runtimeServices?: WorkspaceRuntimeService[];
@@ -32,20 +21,6 @@ export interface ProjectWorkspace {
updatedAt: Date;
}
export type ProjectCodebaseOrigin = "local_folder" | "managed_checkout";
export interface ProjectCodebase {
workspaceId: string | null;
repoUrl: string | null;
repoRef: string | null;
defaultRef: string | null;
repoName: string | null;
localFolder: string | null;
managedFolder: string;
effectiveLocalFolder: string;
origin: ProjectCodebaseOrigin;
}
export interface Project {
id: string;
companyId: string;
@@ -63,7 +38,6 @@ export interface Project {
pauseReason: PauseReason | null;
pausedAt: Date | null;
executionWorkspacePolicy: ProjectExecutionWorkspacePolicy | null;
codebase: ProjectCodebase;
workspaces: ProjectWorkspace[];
primaryWorkspace: ProjectWorkspace | null;
archivedAt: Date | null;


@@ -1,55 +0,0 @@
export type IssueWorkProductType =
| "preview_url"
| "runtime_service"
| "pull_request"
| "branch"
| "commit"
| "artifact"
| "document";
export type IssueWorkProductProvider =
| "paperclip"
| "github"
| "vercel"
| "s3"
| "custom";
export type IssueWorkProductStatus =
| "active"
| "ready_for_review"
| "approved"
| "changes_requested"
| "merged"
| "closed"
| "failed"
| "archived"
| "draft";
export type IssueWorkProductReviewState =
| "none"
| "needs_board_review"
| "approved"
| "changes_requested";
export interface IssueWorkProduct {
id: string;
companyId: string;
projectId: string | null;
issueId: string;
executionWorkspaceId: string | null;
runtimeServiceId: string | null;
type: IssueWorkProductType;
provider: IssueWorkProductProvider | string;
externalId: string | null;
title: string;
url: string | null;
status: IssueWorkProductStatus | string;
reviewState: IssueWorkProductReviewState;
isPrimary: boolean;
healthStatus: "unknown" | "healthy" | "unhealthy";
summary: string | null;
metadata: Record<string, unknown> | null;
createdByRunId: string | null;
createdAt: Date;
updatedAt: Date;
}


@@ -1,31 +0,0 @@
export type WorkspaceOperationPhase =
| "worktree_prepare"
| "workspace_provision"
| "workspace_teardown"
| "worktree_cleanup";
export type WorkspaceOperationStatus = "running" | "succeeded" | "failed" | "skipped";
export interface WorkspaceOperation {
id: string;
companyId: string;
executionWorkspaceId: string | null;
heartbeatRunId: string | null;
phase: WorkspaceOperationPhase;
command: string | null;
cwd: string | null;
status: WorkspaceOperationStatus;
exitCode: number | null;
logStore: string | null;
logRef: string | null;
logBytes: number | null;
logSha256: string | null;
logCompressed: boolean;
stdoutExcerpt: string | null;
stderrExcerpt: string | null;
metadata: Record<string, unknown> | null;
startedAt: Date;
finishedAt: Date | null;
createdAt: Date;
updatedAt: Date;
}


@@ -1,35 +1,6 @@
export type ExecutionWorkspaceStrategyType =
| "project_primary"
| "git_worktree"
| "adapter_managed"
| "cloud_sandbox";
export type ExecutionWorkspaceStrategyType = "project_primary" | "git_worktree";
export type ProjectExecutionWorkspaceDefaultMode =
| "shared_workspace"
| "isolated_workspace"
| "operator_branch"
| "adapter_default";
export type ExecutionWorkspaceMode =
| "inherit"
| "shared_workspace"
| "isolated_workspace"
| "operator_branch"
| "reuse_existing"
| "agent_default";
export type ExecutionWorkspaceProviderType =
| "local_fs"
| "git_worktree"
| "adapter_managed"
| "cloud_sandbox";
export type ExecutionWorkspaceStatus =
| "active"
| "idle"
| "in_review"
| "archived"
| "cleanup_failed";
export type ExecutionWorkspaceMode = "inherit" | "project_primary" | "isolated" | "agent_default";
export interface ExecutionWorkspaceStrategy {
type: ExecutionWorkspaceStrategyType;
@@ -42,14 +13,12 @@ export interface ExecutionWorkspaceStrategy {
export interface ProjectExecutionWorkspacePolicy {
enabled: boolean;
defaultMode?: ProjectExecutionWorkspaceDefaultMode;
defaultMode?: "project_primary" | "isolated";
allowIssueOverride?: boolean;
defaultProjectWorkspaceId?: string | null;
workspaceStrategy?: ExecutionWorkspaceStrategy | null;
workspaceRuntime?: Record<string, unknown> | null;
branchPolicy?: Record<string, unknown> | null;
pullRequestPolicy?: Record<string, unknown> | null;
runtimePolicy?: Record<string, unknown> | null;
cleanupPolicy?: Record<string, unknown> | null;
}
@@ -59,39 +28,11 @@ export interface IssueExecutionWorkspaceSettings {
workspaceRuntime?: Record<string, unknown> | null;
}
export interface ExecutionWorkspace {
id: string;
companyId: string;
projectId: string;
projectWorkspaceId: string | null;
sourceIssueId: string | null;
mode: Exclude<ExecutionWorkspaceMode, "inherit" | "reuse_existing" | "agent_default"> | "adapter_managed" | "cloud_sandbox";
strategyType: ExecutionWorkspaceStrategyType;
name: string;
status: ExecutionWorkspaceStatus;
cwd: string | null;
repoUrl: string | null;
baseRef: string | null;
branchName: string | null;
providerType: ExecutionWorkspaceProviderType;
providerRef: string | null;
derivedFromExecutionWorkspaceId: string | null;
lastUsedAt: Date;
openedAt: Date;
closedAt: Date | null;
cleanupEligibleAt: Date | null;
cleanupReason: string | null;
metadata: Record<string, unknown> | null;
createdAt: Date;
updatedAt: Date;
}
export interface WorkspaceRuntimeService {
id: string;
companyId: string;
projectId: string | null;
projectWorkspaceId: string | null;
executionWorkspaceId: string | null;
issueId: string | null;
scopeType: "project_workspace" | "execution_workspace" | "run" | "agent";
scopeId: string | null;
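Under the collapsed unions, a project policy now picks between `"project_primary"` and `"isolated"` directly. An example literal follows; every value is illustrative, and `workspaceStrategy` is left null since the rest of `ExecutionWorkspaceStrategy` is not shown in this hunk:

```ts
import type { ProjectExecutionWorkspacePolicy } from "@paperclipai/shared";

const policy: ProjectExecutionWorkspacePolicy = {
  enabled: true,
  defaultMode: "isolated",
  allowIssueOverride: true,
  defaultProjectWorkspaceId: null,
  workspaceStrategy: null,
};
```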

Some files were not shown because too many files have changed in this diff.