[codex] Harden heartbeat scheduling and runtime controls (#4223)

## Thinking Path

> - Paperclip orchestrates AI agents through issue checkout, heartbeat
runs, routines, and auditable control-plane state
> - The runtime path has to recover from lost local processes, transient
adapter failures, blocked dependencies, and routine coalescing without
stranding work
> - The existing branch carried several reliability fixes across
heartbeat scheduling, issue runtime controls, routine dispatch, and
operator-facing run state
> - These changes belong together because they share backend contracts,
migrations, and runtime status semantics
> - This pull request groups the control-plane/runtime slice so it can
merge independently from board UI polish and adapter sandbox work
> - The benefit is safer heartbeat recovery, clearer runtime controls,
and more predictable recurring execution behavior

## What Changed

- Adds bounded heartbeat retry scheduling, scheduled retry state, and
Codex transient failure recovery handling.
- Tightens heartbeat process recovery, blocker wake behavior, issue
comment wake handling, routine dispatch coalescing, and
activity/dashboard bounds.
- Adds runtime-control MCP tools and Paperclip skill docs for issue
workspace runtime management.
- Adds migrations `0061_lively_thor_girl.sql` and
`0062_routine_run_dispatch_fingerprint.sql`.
- Surfaces retry state in run ledger/agent UI and keeps related shared
types synchronized.

## Verification

- `pnpm exec vitest run
server/src/__tests__/heartbeat-retry-scheduling.test.ts
server/src/__tests__/heartbeat-process-recovery.test.ts
server/src/__tests__/routines-service.test.ts`
- `pnpm exec vitest run src/tools.test.ts` from `packages/mcp-server`

## Risks

- Medium risk: this touches heartbeat recovery and routine dispatch,
which are central execution paths.
- Migration order matters if split branches land out of order: merge
this PR before branches that assume the new runtime/routine fields.
- Runtime retry behavior should be watched in CI and in local operator
smoke tests because it changes how transient failures are resumed.

> For core feature work, check [`ROADMAP.md`](ROADMAP.md) first and
discuss it in `#dev` before opening the PR. Feature PRs that overlap
with planned core work may need to be redirected. See `CONTRIBUTING.md`.

## Model Used

- OpenAI Codex, GPT-5-based coding agent runtime, shell/git tool use
enabled. Exact hosted model build and context window are not exposed in
this Paperclip heartbeat environment.

## Checklist

- [x] I have included a thinking path that traces from project context
to this change
- [x] I have specified the model used (with version and capability
details)
- [x] I have checked ROADMAP.md and confirmed this PR does not duplicate
planned core work
- [x] I have run tests locally and they pass
- [x] I have added or updated tests where applicable
- [ ] If this change affects the UI, I have included before/after
screenshots
- [x] I have updated relevant documentation to reflect my changes
- [x] I have considered and documented any risks above
- [x] I will address all Greptile and reviewer comments before
requesting merge
This commit is contained in:
Dotta
2026-04-21 12:24:11 -05:00
committed by GitHub
parent ab9051b595
commit 09d0678840
61 changed files with 17622 additions and 456 deletions

View File

@@ -146,6 +146,8 @@ Use it for:
- explicit waiting relationships
- automatic wakeups when all blockers resolve
Blocked issues should stay idle while blockers remain unresolved. Paperclip should not create a queued heartbeat run for that issue until the final blocker is done and the `issue_blockers_resolved` wake can start real work.
If a parent is truly waiting on a child, model that with blockers. Do not rely on the parent/child relationship alone.
## 7. Consistent Execution Path Rules

View File

@@ -221,16 +221,6 @@ describe("runChildProcess", () => {
});
});
describe("appendWithByteCap", () => {
it("keeps valid UTF-8 when trimming through multibyte text", () => {
const output = appendWithByteCap("prefix ", "hello — world", 7);
expect(output).not.toContain("\uFFFD");
expect(Buffer.from(output, "utf8").toString("utf8")).toBe(output);
expect(Buffer.byteLength(output, "utf8")).toBeLessThanOrEqual(7);
});
});
describe("renderPaperclipWakePrompt", () => {
it("keeps the default local-agent prompt action-oriented", () => {
expect(DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE).toContain("Start actionable work in this heartbeat");
@@ -266,6 +256,42 @@ describe("renderPaperclipWakePrompt", () => {
expect(prompt).toContain("mark blocked work with the unblock owner/action");
});
it("renders dependency-blocked interaction guidance", () => {
const prompt = renderPaperclipWakePrompt({
reason: "issue_commented",
issue: {
id: "issue-1",
identifier: "PAP-1703",
title: "Blocked parent",
status: "todo",
},
dependencyBlockedInteraction: true,
unresolvedBlockerIssueIds: ["blocker-1"],
unresolvedBlockerSummaries: [
{
id: "blocker-1",
identifier: "PAP-1723",
title: "Finish blocker",
status: "todo",
priority: "medium",
},
],
commentWindow: {
requestedCount: 1,
includedCount: 1,
missingCount: 0,
},
commentIds: ["comment-1"],
latestCommentId: "comment-1",
comments: [{ id: "comment-1", body: "hello" }],
fallbackFetchNeeded: false,
});
expect(prompt).toContain("dependency-blocked interaction: yes");
expect(prompt).toContain("respond or triage the human comment");
expect(prompt).toContain("PAP-1723 Finish blocker (todo)");
});
it("includes continuation and child issue summaries in structured wake context", () => {
const payload = {
reason: "issue_children_completed",
@@ -335,3 +361,13 @@ describe("renderPaperclipWakePrompt", () => {
expect(prompt).toContain("Added the helper route and tests.");
});
});
// appendWithByteCap must trim on UTF-8 code-point boundaries: no U+FFFD
// replacement characters, a byte-stable round-trip, and a hard byte ceiling.
describe("appendWithByteCap", () => {
it("keeps valid UTF-8 when trimming through multibyte text", () => {
// The em dash is multibyte; a 7-byte cap forces a cut inside the appended text.
const output = appendWithByteCap("prefix ", "hello — world", 7);
expect(output).not.toContain("\uFFFD");
expect(Buffer.from(output, "utf8").toString("utf8")).toBe(output);
expect(Buffer.byteLength(output, "utf8")).toBeLessThanOrEqual(7);
});
});

View File

@@ -83,6 +83,7 @@ export const DEFAULT_PAPERCLIP_AGENT_PROMPT_TEMPLATE = [
"- Start actionable work in this heartbeat; do not stop at a plan unless the issue asks for planning.",
"- Leave durable progress in comments, documents, or work products with a clear next action.",
"- Use child issues for parallel or long delegated work instead of polling agents, sessions, or processes.",
"- If woken by a human comment on a dependency-blocked issue, respond or triage the comment without treating the blocked deliverable work as unblocked.",
"- If blocked, mark the issue blocked and name the unblock owner and action.",
"- Respect budget, pause/cancel, approval gates, and company boundaries.",
].join("\n");
@@ -313,10 +314,21 @@ type PaperclipWakeChildIssueSummary = {
summary: string | null;
};
// Blocker-issue summary carried in wake payloads. Every field is nullable:
// normalizePaperclipWakeBlockerSummary trims strings and maps empty values to null.
type PaperclipWakeBlockerSummary = {
id: string | null;
identifier: string | null;
title: string | null;
status: string | null;
priority: string | null;
};
type PaperclipWakePayload = {
reason: string | null;
issue: PaperclipWakeIssue | null;
checkedOutByHarness: boolean;
dependencyBlockedInteraction: boolean;
unresolvedBlockerIssueIds: string[];
unresolvedBlockerSummaries: PaperclipWakeBlockerSummary[];
executionStage: PaperclipWakeExecutionStage | null;
continuationSummary: PaperclipWakeContinuationSummary | null;
livenessContinuation: PaperclipWakeLivenessContinuation | null;
@@ -409,6 +421,17 @@ function normalizePaperclipWakeChildIssueSummary(value: unknown): PaperclipWakeC
return { id, identifier, title, status, priority, summary };
}
/**
 * Normalize an untyped wake-payload blocker entry into a
 * PaperclipWakeBlockerSummary. Empty or whitespace-only fields become null;
 * entries with no id, identifier, title, or status (priority alone is not
 * enough identity) are dropped by returning null.
 */
function normalizePaperclipWakeBlockerSummary(value: unknown): PaperclipWakeBlockerSummary | null {
  const blocker = parseObject(value);
  const readField = (field: unknown): string | null => {
    const text = asString(field, "").trim();
    return text.length > 0 ? text : null;
  };
  const summary = {
    id: readField(blocker.id),
    identifier: readField(blocker.identifier),
    title: readField(blocker.title),
    status: readField(blocker.status),
    priority: readField(blocker.priority),
  };
  const hasIdentity = summary.id || summary.identifier || summary.title || summary.status;
  return hasIdentity ? summary : null;
}
function normalizePaperclipWakeExecutionPrincipal(value: unknown): PaperclipWakeExecutionPrincipal | null {
const principal = parseObject(value);
const typeRaw = asString(principal.type, "").trim().toLowerCase();
@@ -474,8 +497,18 @@ export function normalizePaperclipWakePayload(value: unknown): PaperclipWakePayl
.map((entry) => normalizePaperclipWakeChildIssueSummary(entry))
.filter((entry): entry is PaperclipWakeChildIssueSummary => Boolean(entry))
: [];
const unresolvedBlockerIssueIds = Array.isArray(payload.unresolvedBlockerIssueIds)
? payload.unresolvedBlockerIssueIds
.map((entry) => asString(entry, "").trim())
.filter(Boolean)
: [];
const unresolvedBlockerSummaries = Array.isArray(payload.unresolvedBlockerSummaries)
? payload.unresolvedBlockerSummaries
.map((entry) => normalizePaperclipWakeBlockerSummary(entry))
.filter((entry): entry is PaperclipWakeBlockerSummary => Boolean(entry))
: [];
if (comments.length === 0 && commentIds.length === 0 && childIssueSummaries.length === 0 && !executionStage && !continuationSummary && !livenessContinuation && !normalizePaperclipWakeIssue(payload.issue)) {
if (comments.length === 0 && commentIds.length === 0 && childIssueSummaries.length === 0 && unresolvedBlockerIssueIds.length === 0 && unresolvedBlockerSummaries.length === 0 && !executionStage && !continuationSummary && !livenessContinuation && !normalizePaperclipWakeIssue(payload.issue)) {
return null;
}
@@ -483,6 +516,9 @@ export function normalizePaperclipWakePayload(value: unknown): PaperclipWakePayl
reason: asString(payload.reason, "").trim() || null,
issue: normalizePaperclipWakeIssue(payload.issue),
checkedOutByHarness: asBoolean(payload.checkedOutByHarness, false),
dependencyBlockedInteraction: asBoolean(payload.dependencyBlockedInteraction, false),
unresolvedBlockerIssueIds,
unresolvedBlockerSummaries,
executionStage,
continuationSummary,
livenessContinuation,
@@ -563,6 +599,18 @@ export function renderPaperclipWakePrompt(
if (normalized.checkedOutByHarness) {
lines.push("- checkout: already claimed by the harness for this run");
}
if (normalized.dependencyBlockedInteraction) {
lines.push("- dependency-blocked interaction: yes");
lines.push("- execution scope: respond or triage the human comment; do not treat blocker-dependent deliverable work as unblocked");
if (normalized.unresolvedBlockerSummaries.length > 0) {
const blockers = normalized.unresolvedBlockerSummaries
.map((blocker) => `${blocker.identifier ?? blocker.id ?? "unknown"}${blocker.title ? ` ${blocker.title}` : ""}${blocker.status ? ` (${blocker.status})` : ""}`)
.join("; ");
lines.push(`- unresolved blockers: ${blockers}`);
} else if (normalized.unresolvedBlockerIssueIds.length > 0) {
lines.push(`- unresolved blocker issue ids: ${normalized.unresolvedBlockerIssueIds.join(", ")}`);
}
}
if (normalized.missingCount > 0) {
lines.push(`- omitted comments: ${normalized.missingCount}`);
}

View File

@@ -22,7 +22,11 @@ import {
joinPromptSections,
runChildProcess,
} from "@paperclipai/adapter-utils/server-utils";
import { parseCodexJsonl, isCodexUnknownSessionError } from "./parse.js";
import {
parseCodexJsonl,
isCodexTransientUpstreamError,
isCodexUnknownSessionError,
} from "./parse.js";
import { pathExists, prepareManagedCodexHome, resolveManagedCodexHomeDir, resolveSharedCodexHomeDir } from "./codex-home.js";
import { resolveCodexDesiredSkillNames } from "./skills.js";
import { buildCodexExecArgs } from "./codex-args.js";
@@ -149,6 +153,52 @@ type EnsureCodexSkillsInjectedOptions = {
linkSkill?: (source: string, target: string) => Promise<void>;
};
/** Escalation ladder applied when Codex hits transient upstream failures. */
type CodexTransientFallbackMode =
  | "same_session"
  | "safer_invocation"
  | "fresh_session"
  | "fresh_session_safer_invocation";

const CODEX_TRANSIENT_FALLBACK_MODES: readonly CodexTransientFallbackMode[] = [
  "same_session",
  "safer_invocation",
  "fresh_session",
  "fresh_session_safer_invocation",
];

/**
 * Read the requested transient-fallback mode from the execution context.
 * Returns null when the context carries no value or an unrecognized one.
 */
function readCodexTransientFallbackMode(context: Record<string, unknown>): CodexTransientFallbackMode | null {
  const value = asString(context.codexTransientFallbackMode, "").trim();
  return CODEX_TRANSIENT_FALLBACK_MODES.find((mode) => mode === value) ?? null;
}
/** True when the fallback mode asks for safer (fast-mode-disabled) invocation settings. */
function fallbackModeUsesSaferInvocation(mode: CodexTransientFallbackMode | null): boolean {
  switch (mode) {
    case "safer_invocation":
    case "fresh_session_safer_invocation":
      return true;
    default:
      return false;
  }
}
/** True when the fallback mode abandons the prior Codex session entirely. */
function fallbackModeUsesFreshSession(mode: CodexTransientFallbackMode | null): boolean {
  switch (mode) {
    case "fresh_session":
    case "fresh_session_safer_invocation":
      return true;
    default:
      return false;
  }
}
/**
 * Build the handoff note injected into a fresh Codex session after the
 * previous session was rotated away due to repeated transient failures.
 * Optional lines (previous session id, continuation summary) are omitted when
 * absent; the continuation summary is truncated to 1,500 characters.
 */
function buildCodexTransientHandoffNote(input: {
  previousSessionId: string | null;
  fallbackMode: CodexTransientFallbackMode;
  continuationSummaryBody: string | null;
}): string {
  const lines: string[] = ["Paperclip session handoff:"];
  if (input.previousSessionId) {
    lines.push(`- Previous session: ${input.previousSessionId}`);
  }
  lines.push("- Rotation reason: repeated Codex transient remote-compaction failures");
  lines.push(`- Fallback mode: ${input.fallbackMode}`);
  if (input.continuationSummaryBody) {
    lines.push(`- Issue continuation summary: ${input.continuationSummaryBody.slice(0, 1_500)}`);
  }
  lines.push("Continue from the current task state. Rebuild only the minimum context you need.");
  return lines.join("\n");
}
export async function ensureCodexSkillsInjected(
onLog: AdapterExecutionContext["onLog"],
options: EnsureCodexSkillsInjectedOptions = {},
@@ -397,7 +447,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
const canResumeSession =
runtimeSessionId.length > 0 &&
(runtimeSessionCwd.length === 0 || path.resolve(runtimeSessionCwd) === path.resolve(cwd));
const sessionId = canResumeSession ? runtimeSessionId : null;
const codexTransientFallbackMode = readCodexTransientFallbackMode(context);
const forceSaferInvocation = fallbackModeUsesSaferInvocation(codexTransientFallbackMode);
const forceFreshSession = fallbackModeUsesFreshSession(codexTransientFallbackMode);
const sessionId = canResumeSession && !forceFreshSession ? runtimeSessionId : null;
if (runtimeSessionId && !canResumeSession) {
await onLog(
"stdout",
@@ -444,28 +497,66 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
const shouldUseResumeDeltaPrompt = Boolean(sessionId) && wakePrompt.length > 0;
const promptInstructionsPrefix = shouldUseResumeDeltaPrompt ? "" : instructionsPrefix;
instructionsChars = promptInstructionsPrefix.length;
const continuationSummary = parseObject(context.paperclipContinuationSummary);
const continuationSummaryBody = asString(continuationSummary.body, "").trim() || null;
const codexFallbackHandoffNote =
forceFreshSession
? buildCodexTransientHandoffNote({
previousSessionId: runtimeSessionId || runtime.sessionId || null,
fallbackMode: codexTransientFallbackMode ?? "fresh_session",
continuationSummaryBody,
})
: "";
const commandNotes = (() => {
if (!instructionsFilePath) {
return [repoAgentsNote];
const notes = [repoAgentsNote];
if (forceSaferInvocation) {
notes.push("Codex transient fallback requested safer invocation settings for this retry.");
}
if (forceFreshSession) {
notes.push("Codex transient fallback forced a fresh session with a continuation handoff.");
}
return notes;
}
if (instructionsPrefix.length > 0) {
if (shouldUseResumeDeltaPrompt) {
return [
const notes = [
`Loaded agent instructions from ${instructionsFilePath}`,
"Skipped stdin instruction reinjection because an existing Codex session is being resumed with a wake delta.",
repoAgentsNote,
];
if (forceSaferInvocation) {
notes.push("Codex transient fallback requested safer invocation settings for this retry.");
}
if (forceFreshSession) {
notes.push("Codex transient fallback forced a fresh session with a continuation handoff.");
}
return notes;
}
return [
const notes = [
`Loaded agent instructions from ${instructionsFilePath}`,
`Prepended instructions + path directive to stdin prompt (relative references from ${instructionsDir}).`,
repoAgentsNote,
];
if (forceSaferInvocation) {
notes.push("Codex transient fallback requested safer invocation settings for this retry.");
}
if (forceFreshSession) {
notes.push("Codex transient fallback forced a fresh session with a continuation handoff.");
}
return notes;
}
return [
const notes = [
`Configured instructionsFilePath ${instructionsFilePath}, but file could not be read; continuing without injected instructions.`,
repoAgentsNote,
];
if (forceSaferInvocation) {
notes.push("Codex transient fallback requested safer invocation settings for this retry.");
}
if (forceFreshSession) {
notes.push("Codex transient fallback forced a fresh session with a continuation handoff.");
}
return notes;
})();
const renderedPrompt = shouldUseResumeDeltaPrompt ? "" : renderTemplate(promptTemplate, templateData);
const sessionHandoffNote = asString(context.paperclipSessionHandoffMarkdown, "").trim();
@@ -473,6 +564,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
promptInstructionsPrefix,
renderedBootstrapPrompt,
wakePrompt,
codexFallbackHandoffNote,
sessionHandoffNote,
renderedPrompt,
]);
@@ -486,7 +578,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
};
const runAttempt = async (resumeSessionId: string | null) => {
const execArgs = buildCodexExecArgs(config, { resumeSessionId });
const execArgs = buildCodexExecArgs(
forceSaferInvocation ? { ...config, fastMode: false } : config,
{ resumeSessionId },
);
const args = execArgs.args;
const commandNotesWithFastMode =
execArgs.fastModeIgnoredReason == null
@@ -540,6 +635,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
const toResult = (
attempt: { proc: { exitCode: number | null; signal: string | null; timedOut: boolean; stdout: string; stderr: string }; rawStderr: string; parsed: ReturnType<typeof parseCodexJsonl> },
clearSessionOnMissingSession = false,
isRetry = false,
): AdapterExecutionResult => {
if (attempt.proc.timedOut) {
return {
@@ -551,7 +647,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
};
}
const resolvedSessionId = attempt.parsed.sessionId ?? runtimeSessionId ?? runtime.sessionId ?? null;
const canFallbackToRuntimeSession = !isRetry && !forceFreshSession;
const resolvedSessionId =
attempt.parsed.sessionId ??
(canFallbackToRuntimeSession ? (runtimeSessionId ?? runtime.sessionId ?? null) : null);
const resolvedSessionParams = resolvedSessionId
? ({
sessionId: resolvedSessionId,
@@ -576,6 +675,15 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
(attempt.proc.exitCode ?? 0) === 0
? null
: fallbackErrorMessage,
errorCode:
(attempt.proc.exitCode ?? 0) !== 0 &&
isCodexTransientUpstreamError({
stdout: attempt.proc.stdout,
stderr: attempt.proc.stderr,
errorMessage: fallbackErrorMessage,
})
? "codex_transient_upstream"
: null,
usage: attempt.parsed.usage,
sessionId: resolvedSessionId,
sessionParams: resolvedSessionParams,
@@ -590,7 +698,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
stderr: attempt.proc.stderr,
},
summary: attempt.parsed.summary,
clearSession: Boolean(clearSessionOnMissingSession && !resolvedSessionId),
clearSession: Boolean((clearSessionOnMissingSession || forceFreshSession) && !resolvedSessionId),
};
};
@@ -606,8 +714,8 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
`[paperclip] Codex resume session "${sessionId}" is unavailable; retrying with a fresh session.\n`,
);
const retry = await runAttempt(null);
return toResult(retry, true);
return toResult(retry, true, true);
}
return toResult(initial);
return toResult(initial, false, false);
}

View File

@@ -1,7 +1,7 @@
export { execute, ensureCodexSkillsInjected } from "./execute.js";
export { listCodexSkills, syncCodexSkills } from "./skills.js";
export { testEnvironment } from "./test.js";
export { parseCodexJsonl, isCodexUnknownSessionError } from "./parse.js";
export { parseCodexJsonl, isCodexTransientUpstreamError, isCodexUnknownSessionError } from "./parse.js";
export {
getQuotaWindows,
readCodexAuthInfo,

View File

@@ -1,5 +1,9 @@
import { describe, expect, it } from "vitest";
import { isCodexUnknownSessionError, parseCodexJsonl } from "./parse.js";
import {
isCodexTransientUpstreamError,
isCodexUnknownSessionError,
parseCodexJsonl,
} from "./parse.js";
describe("parseCodexJsonl", () => {
it("captures session id, assistant summary, usage, and error message", () => {
@@ -81,3 +85,36 @@ describe("isCodexUnknownSessionError", () => {
expect(isCodexUnknownSessionError("", "model overloaded")).toBe(false);
});
});
// Unit coverage for the Codex transient-upstream classifier that gates
// automatic retries of failed Codex attempts.
describe("isCodexTransientUpstreamError", () => {
it("classifies the remote-compaction high-demand failure as transient upstream", () => {
// Same phrase must match whether it arrives via the parsed errorMessage…
expect(
isCodexTransientUpstreamError({
errorMessage:
"Error running remote compact task: We're currently experiencing high demand, which may cause temporary errors.",
}),
).toBe(true);
// …or only on stderr.
expect(
isCodexTransientUpstreamError({
stderr: "We're currently experiencing high demand, which may cause temporary errors.",
}),
).toBe(true);
});
it("does not classify deterministic compaction errors as transient", () => {
// A structured invalid_request_error also mentions "remote compact task",
// but lacks the high-demand/temporary wording, so it must not be retried.
expect(
isCodexTransientUpstreamError({
errorMessage: [
"Error running remote compact task: {",
' "error": {',
' "message": "Unknown parameter: \'prompt_cache_retention\'.",',
' "type": "invalid_request_error",',
' "param": "prompt_cache_retention",',
' "code": "unknown_parameter"',
" }",
"}",
].join("\n"),
}),
).toBe(false);
});
});

View File

@@ -1,5 +1,9 @@
import { asString, asNumber, parseObject, parseJson } from "@paperclipai/adapter-utils/server-utils";
// Broad transient-failure phrasing: high demand, temporary errors, rate
// limiting, HTTP 429, overload, or "try again later".
const CODEX_TRANSIENT_UPSTREAM_RE =
/(?:we(?:'|)re\s+currently\s+experiencing\s+high\s+demand|temporary\s+errors|rate[-\s]?limit(?:ed)?|too\s+many\s+requests|\b429\b|server\s+overloaded|service\s+unavailable|try\s+again\s+later)/i;
// Matches Codex's "remote compact task" failure shape; used to keep automatic
// retries scoped to the observed remote-compaction errors.
const CODEX_REMOTE_COMPACTION_RE = /remote\s+compact\s+task/i;
export function parseCodexJsonl(stdout: string) {
let sessionId: string | null = null;
let finalMessage: string | null = null;
@@ -71,3 +75,25 @@ export function isCodexUnknownSessionError(stdout: string, stderr: string): bool
haystack,
);
}
/**
 * Classify a failed Codex attempt as a retryable transient upstream error.
 * All three channels (parsed error message, stdout, stderr) are normalized
 * into one haystack of trimmed, non-empty lines before matching.
 */
export function isCodexTransientUpstreamError(input: {
  stdout?: string | null;
  stderr?: string | null;
  errorMessage?: string | null;
}): boolean {
  const trimmedLines: string[] = [];
  for (const source of [input.errorMessage, input.stdout, input.stderr]) {
    for (const rawLine of (source ?? "").split(/\r?\n/)) {
      const line = rawLine.trim();
      if (line) trimmedLines.push(line);
    }
  }
  const haystack = trimmedLines.join("\n");
  if (!CODEX_TRANSIENT_UPSTREAM_RE.test(haystack)) {
    return false;
  }
  // Keep automatic retries scoped to the observed remote-compaction/high-demand
  // failure shape; broader 429s may be caused by user or account limits.
  return (
    CODEX_REMOTE_COMPACTION_RE.test(haystack) ||
    /high\s+demand|temporary\s+errors/i.test(haystack)
  );
}

View File

@@ -0,0 +1,3 @@
ALTER TABLE "heartbeat_runs" ADD COLUMN IF NOT EXISTS "scheduled_retry_at" timestamp with time zone;--> statement-breakpoint
ALTER TABLE "heartbeat_runs" ADD COLUMN IF NOT EXISTS "scheduled_retry_attempt" integer DEFAULT 0 NOT NULL;--> statement-breakpoint
ALTER TABLE "heartbeat_runs" ADD COLUMN IF NOT EXISTS "scheduled_retry_reason" text;

View File

@@ -0,0 +1,9 @@
ALTER TABLE "routine_runs" ADD COLUMN IF NOT EXISTS "dispatch_fingerprint" text;--> statement-breakpoint
ALTER TABLE "issues" ADD COLUMN IF NOT EXISTS "origin_fingerprint" text DEFAULT 'default' NOT NULL;--> statement-breakpoint
DROP INDEX IF EXISTS "issues_open_routine_execution_uq";--> statement-breakpoint
CREATE UNIQUE INDEX IF NOT EXISTS "issues_open_routine_execution_uq" ON "issues" USING btree ("company_id","origin_kind","origin_id","origin_fingerprint") WHERE "issues"."origin_kind" = 'routine_execution'
and "issues"."origin_id" is not null
and "issues"."hidden_at" is null
and "issues"."execution_run_id" is not null
and "issues"."status" in ('backlog', 'todo', 'in_progress', 'in_review', 'blocked');--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "routine_runs_dispatch_fingerprint_idx" ON "routine_runs" USING btree ("routine_id","dispatch_fingerprint");

File diff suppressed because it is too large Load Diff

View File

@@ -428,6 +428,20 @@
"when": 1776717606743,
"tag": "0060_orange_annihilus",
"breakpoints": true
},
{
"idx": 61,
"version": "7",
"when": 1776785165389,
"tag": "0061_lively_thor_girl",
"breakpoints": true
},
{
"idx": 62,
"version": "7",
"when": 1776785165390,
"tag": "0062_routine_run_dispatch_fingerprint",
"breakpoints": true
}
]
}
}

View File

@@ -38,6 +38,9 @@ export const heartbeatRuns = pgTable(
onDelete: "set null",
}),
processLossRetryCount: integer("process_loss_retry_count").notNull().default(0),
scheduledRetryAt: timestamp("scheduled_retry_at", { withTimezone: true }),
scheduledRetryAttempt: integer("scheduled_retry_attempt").notNull().default(0),
scheduledRetryReason: text("scheduled_retry_reason"),
issueCommentStatus: text("issue_comment_status").notNull().default("not_applicable"),
issueCommentSatisfiedByCommentId: uuid("issue_comment_satisfied_by_comment_id"),
issueCommentRetryQueuedAt: timestamp("issue_comment_retry_queued_at", { withTimezone: true }),

View File

@@ -44,6 +44,7 @@ export const issues = pgTable(
originKind: text("origin_kind").notNull().default("manual"),
originId: text("origin_id"),
originRunId: text("origin_run_id"),
originFingerprint: text("origin_fingerprint").notNull().default("default"),
requestDepth: integer("request_depth").notNull().default(0),
billingCode: text("billing_code"),
assigneeAdapterOverrides: jsonb("assignee_adapter_overrides").$type<Record<string, unknown>>(),
@@ -82,7 +83,7 @@ export const issues = pgTable(
identifierSearchIdx: index("issues_identifier_search_idx").using("gin", table.identifier.op("gin_trgm_ops")),
descriptionSearchIdx: index("issues_description_search_idx").using("gin", table.description.op("gin_trgm_ops")),
openRoutineExecutionIdx: uniqueIndex("issues_open_routine_execution_uq")
.on(table.companyId, table.originKind, table.originId)
.on(table.companyId, table.originKind, table.originId, table.originFingerprint)
.where(
sql`${table.originKind} = 'routine_execution'
and ${table.originId} is not null

View File

@@ -96,6 +96,7 @@ export const routineRuns = pgTable(
triggeredAt: timestamp("triggered_at", { withTimezone: true }).notNull().defaultNow(),
idempotencyKey: text("idempotency_key"),
triggerPayload: jsonb("trigger_payload").$type<Record<string, unknown>>(),
dispatchFingerprint: text("dispatch_fingerprint"),
linkedIssueId: uuid("linked_issue_id").references(() => issues.id, { onDelete: "set null" }),
coalescedIntoRunId: uuid("coalesced_into_run_id"),
failureReason: text("failure_reason"),
@@ -106,6 +107,7 @@ export const routineRuns = pgTable(
(table) => ({
companyRoutineIdx: index("routine_runs_company_routine_idx").on(table.companyId, table.routineId, table.createdAt),
triggerIdx: index("routine_runs_trigger_idx").on(table.triggerId, table.createdAt),
dispatchFingerprintIdx: index("routine_runs_dispatch_fingerprint_idx").on(table.routineId, table.dispatchFingerprint),
linkedIssueIdx: index("routine_runs_linked_issue_idx").on(table.linkedIssueId),
idempotencyIdx: index("routine_runs_trigger_idempotency_idx").on(table.triggerId, table.idempotencyKey),
}),

View File

@@ -47,6 +47,8 @@ Read tools:
- `paperclipListDocumentRevisions`
- `paperclipListProjects`
- `paperclipGetProject`
- `paperclipGetIssueWorkspaceRuntime`
- `paperclipWaitForIssueWorkspaceService`
- `paperclipListGoals`
- `paperclipGetGoal`
- `paperclipListApprovals`
@@ -63,6 +65,7 @@ Write tools:
- `paperclipAddComment`
- `paperclipUpsertIssueDocument`
- `paperclipRestoreIssueDocumentRevision`
- `paperclipControlIssueWorkspaceServices`
- `paperclipCreateApproval`
- `paperclipLinkIssueApproval`
- `paperclipUnlinkIssueApproval`

View File

@@ -107,6 +107,81 @@ describe("paperclip MCP tools", () => {
});
});
// End-to-end shape test for paperclipControlIssueWorkspaceServices:
// call 1 resolves the issue's current execution workspace from heartbeat
// context; call 2 posts the control action against that workspace.
it("controls issue workspace services through the current execution workspace", async () => {
const fetchMock = vi.fn()
.mockResolvedValueOnce(mockJsonResponse({
currentExecutionWorkspace: {
id: "44444444-4444-4444-8444-444444444444",
runtimeServices: [],
},
}))
.mockResolvedValueOnce(mockJsonResponse({
operation: { id: "operation-1" },
workspace: {
id: "44444444-4444-4444-8444-444444444444",
runtimeServices: [
{
id: "55555555-5555-4555-8555-555555555555",
serviceName: "web",
status: "running",
url: "http://127.0.0.1:5173",
},
],
},
}));
vi.stubGlobal("fetch", fetchMock);
const tool = getTool("paperclipControlIssueWorkspaceServices");
await tool.execute({
issueId: "PAP-1135",
action: "restart",
workspaceCommandId: "web",
});
expect(fetchMock).toHaveBeenCalledTimes(2);
// First request: workspace lookup via the issue's heartbeat context.
const [lookupUrl, lookupInit] = fetchMock.mock.calls[0] as [string, RequestInit];
expect(String(lookupUrl)).toBe("http://localhost:3100/api/issues/PAP-1135/heartbeat-context");
expect(lookupInit.method).toBe("GET");
// Second request: the restart action targets the resolved workspace id and
// forwards only the service selector payload.
const [controlUrl, controlInit] = fetchMock.mock.calls[1] as [string, RequestInit];
expect(String(controlUrl)).toBe(
"http://localhost:3100/api/execution-workspaces/44444444-4444-4444-8444-444444444444/runtime-services/restart",
);
expect(controlInit.method).toBe("POST");
expect(JSON.parse(String(controlInit.body))).toEqual({
workspaceCommandId: "web",
});
});
// paperclipWaitForIssueWorkspaceService should return immediately when the
// first poll already shows a running service with a URL (exactly one fetch).
it("waits for an issue workspace runtime service URL", async () => {
const fetchMock = vi.fn()
.mockResolvedValueOnce(mockJsonResponse({
currentExecutionWorkspace: {
id: "44444444-4444-4444-8444-444444444444",
runtimeServices: [
{
id: "55555555-5555-4555-8555-555555555555",
serviceName: "web",
status: "running",
healthStatus: "healthy",
url: "http://127.0.0.1:5173",
},
],
},
}));
vi.stubGlobal("fetch", fetchMock);
const tool = getTool("paperclipWaitForIssueWorkspaceService");
const response = await tool.execute({
issueId: "PAP-1135",
serviceName: "web",
timeoutSeconds: 1,
});
expect(fetchMock).toHaveBeenCalledTimes(1);
// The tool surfaces the resolved service URL in its text content.
expect(response.content[0]?.text).toContain("http://127.0.0.1:5173");
});
it("creates approvals with the expected company-scoped payload", async () => {
const fetchMock = vi.fn().mockResolvedValue(
mockJsonResponse({ id: "approval-1" }),

View File

@@ -124,6 +124,66 @@ const apiRequestSchema = z.object({
jsonBody: z.string().optional(),
});
// Optional selector for which runtime service a control call targets.
// NOTE(review): precedence among these selectors is resolved server-side — confirm.
const workspaceRuntimeControlTargetSchema = z.object({
workspaceCommandId: z.string().min(1).optional().nullable(),
runtimeServiceId: z.string().uuid().optional().nullable(),
serviceIndex: z.number().int().nonnegative().optional().nullable(),
});
// Input for paperclipControlIssueWorkspaceServices: issue + action, merged
// with the optional target selector fields above.
const issueWorkspaceRuntimeControlSchema = z.object({
issueId: issueIdSchema,
action: z.enum(["start", "stop", "restart"]),
}).merge(workspaceRuntimeControlTargetSchema);
// Input for paperclipWaitForIssueWorkspaceService; timeout is capped at 300s.
const waitForIssueWorkspaceServiceSchema = z.object({
issueId: issueIdSchema,
runtimeServiceId: z.string().uuid().optional().nullable(),
serviceName: z.string().min(1).optional().nullable(),
timeoutSeconds: z.number().int().positive().max(300).optional(),
});
/** Resolve after roughly `ms` milliseconds (setTimeout-based delay). */
function sleep(ms: number) {
  const delay = new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
  return delay;
}
/**
 * Pull `currentExecutionWorkspace` out of an untyped heartbeat-context payload.
 * Returns null when the payload or the workspace field is not an object.
 */
function readCurrentExecutionWorkspace(context: unknown): Record<string, unknown> | null {
  if (context === null || context === undefined || typeof context !== "object") {
    return null;
  }
  const { currentExecutionWorkspace } = context as { currentExecutionWorkspace?: unknown };
  if (!currentExecutionWorkspace || typeof currentExecutionWorkspace !== "object") {
    return null;
  }
  return currentExecutionWorkspace as Record<string, unknown>;
}
/**
 * Read `runtimeServices` from a workspace object, keeping only object entries.
 * A missing field or non-array value yields an empty list.
 */
function readWorkspaceRuntimeServices(workspace: Record<string, unknown> | null): Array<Record<string, unknown>> {
  if (workspace === null) {
    return [];
  }
  const raw = workspace.runtimeServices;
  if (!Array.isArray(raw)) {
    return [];
  }
  const services: Array<Record<string, unknown>> = [];
  for (const entry of raw) {
    if (entry && typeof entry === "object") {
      services.push(entry as Record<string, unknown>);
    }
  }
  return services;
}
/**
 * Pick a runtime service: by explicit id first, then by service name, and
 * otherwise prefer a running/starting service, falling back to the first
 * entry. Returns null when nothing matches the chosen selection rule.
 */
function selectRuntimeService(
  services: Array<Record<string, unknown>>,
  input: { runtimeServiceId?: string | null; serviceName?: string | null },
) {
  const { runtimeServiceId, serviceName } = input;
  if (runtimeServiceId) {
    for (const service of services) {
      if (service.id === runtimeServiceId) return service;
    }
    return null;
  }
  if (serviceName) {
    for (const service of services) {
      if (service.serviceName === serviceName) return service;
    }
    return null;
  }
  const active = services.find((service) => {
    const status = service.status;
    return status === "running" || status === "starting";
  });
  if (active) return active;
  return services.length > 0 ? services[0] : null;
}
/**
 * Fetch the heartbeat context for an issue and derive the current execution
 * workspace plus its runtime-service list from the response payload.
 */
async function getIssueWorkspaceRuntime(client: PaperclipApiClient, issueId: string) {
const contextPath = `/issues/${encodeURIComponent(issueId)}/heartbeat-context`;
const context = await client.requestJson("GET", contextPath);
const workspace = readCurrentExecutionWorkspace(context);
const runtimeServices = readWorkspaceRuntimeServices(workspace);
return { context, workspace, runtimeServices };
}
export function createToolDefinitions(client: PaperclipApiClient): ToolDefinition[] {
return [
makeTool(
@@ -247,6 +307,55 @@ export function createToolDefinitions(client: PaperclipApiClient): ToolDefinitio
return client.requestJson("GET", `/projects/${encodeURIComponent(projectId)}${qs}`);
},
),
makeTool(
"paperclipGetIssueWorkspaceRuntime",
"Get the current execution workspace and runtime services for an issue, including service URLs",
z.object({ issueId: issueIdSchema }),
async ({ issueId }) => getIssueWorkspaceRuntime(client, issueId),
),
makeTool(
"paperclipControlIssueWorkspaceServices",
"Start, stop, or restart the current issue execution workspace runtime services",
issueWorkspaceRuntimeControlSchema,
async ({ issueId, action, ...target }) => {
const runtime = await getIssueWorkspaceRuntime(client, issueId);
const workspaceId = typeof runtime.workspace?.id === "string" ? runtime.workspace.id : null;
if (!workspaceId) {
throw new Error("Issue has no current execution workspace");
}
return client.requestJson(
"POST",
`/execution-workspaces/${encodeURIComponent(workspaceId)}/runtime-services/${action}`,
{ body: target },
);
},
),
makeTool(
"paperclipWaitForIssueWorkspaceService",
"Wait until an issue execution workspace runtime service is running and has a URL when one is exposed",
waitForIssueWorkspaceServiceSchema,
async ({ issueId, runtimeServiceId, serviceName, timeoutSeconds }) => {
const deadline = Date.now() + (timeoutSeconds ?? 60) * 1000;
let latest: Awaited<ReturnType<typeof getIssueWorkspaceRuntime>> | null = null;
while (Date.now() <= deadline) {
latest = await getIssueWorkspaceRuntime(client, issueId);
const service = selectRuntimeService(latest.runtimeServices, { runtimeServiceId, serviceName });
if (service?.status === "running" && service.healthStatus !== "unhealthy") {
return {
workspace: latest.workspace,
service,
};
}
await sleep(1000);
}
return {
timedOut: true,
latestWorkspace: latest?.workspace ?? null,
latestRuntimeServices: latest?.runtimeServices ?? [],
};
},
),
makeTool(
"paperclipListGoals",
"List goals in a company",

View File

@@ -67,9 +67,7 @@ export const AGENT_ROLE_LABELS: Record<AgentRole, string> = {
};
export const AGENT_DEFAULT_MAX_CONCURRENT_RUNS = 5;
export const WORKSPACE_BRANCH_ROUTINE_VARIABLE = "workspaceBranch";
export const AGENT_ICON_NAMES = [
"bot",
"cpu",
@@ -353,6 +351,7 @@ export type WakeupRequestStatus = (typeof WAKEUP_REQUEST_STATUSES)[number];
export const HEARTBEAT_RUN_STATUSES = [
"queued",
"scheduled_retry",
"running",
"succeeded",
"failed",

View File

@@ -39,6 +39,10 @@ export interface HeartbeatRun {
processStartedAt: Date | null;
retryOfRunId: string | null;
processLossRetryCount: number;
scheduledRetryAt?: Date | null;
scheduledRetryAttempt?: number;
scheduledRetryReason?: string | null;
retryExhaustedReason?: string | null;
livenessState: RunLivenessState | null;
livenessReason: string | null;
continuationAttempt: number;

View File

@@ -217,6 +217,7 @@ export interface Issue {
originKind?: IssueOriginKind;
originId?: string | null;
originRunId?: string | null;
originFingerprint?: string | null;
requestDepth: number;
billingCode: string | null;
assigneeAdapterOverrides: IssueAssigneeAdapterOverrides | null;

View File

@@ -95,6 +95,7 @@ export interface RoutineRun {
triggeredAt: Date;
idempotencyKey: string | null;
triggerPayload: Record<string, unknown> | null;
dispatchFingerprint: string | null;
linkedIssueId: string | null;
coalescedIntoRunId: string | null;
failureReason: string | null;

View File

@@ -24,7 +24,6 @@ COMMITS=$(git log --since="${DATE}T00:00:00" --until="${NEXT_DATE}T00:00:00" mas
json_escape() {
python3 -c 'import json, sys; print(json.dumps(sys.stdin.read().rstrip("\n"))[1:-1])'
}
if [[ -z "$COMMITS" ]]; then
PAYLOAD=$(cat <<ENDJSON
{

65
scripts/kill-vitest.sh Executable file
View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
#
# Kill all running vitest processes.
#
# Usage:
# scripts/kill-vitest.sh # kill all
# scripts/kill-vitest.sh --dry # preview what would be killed
#
set -euo pipefail
DRY_RUN=false
if [[ "${1:-}" == "--dry" || "${1:-}" == "--dry-run" || "${1:-}" == "-n" ]]; then
DRY_RUN=true
fi
# Discover candidate vitest processes. The regex matches direct vitest
# binaries, node-invoked vitest entrypoints, .bin shims, and dist bundles;
# `grep -v grep` drops our own pipeline and `|| true` keeps `set -e` happy
# when nothing matches. PID is column 2 of `ps aux` output.
pids=()
lines=()
while IFS= read -r line; do
[[ -z "$line" ]] && continue
pid=$(echo "$line" | awk '{print $2}')
pids+=("$pid")
lines+=("$line")
done < <(ps aux | grep -E '(^|/)(vitest|node .*/vitest)( |$)|/\.bin/vitest|vitest/dist|vitest\.mjs' | grep -v grep || true)
if [[ ${#pids[@]} -eq 0 ]]; then
echo "No vitest processes found."
exit 0
fi
echo "Found ${#pids[@]} vitest process(es):"
echo ""
# Pretty-print each match: PID, start time (ps aux column 9), and the command
# (columns 11+), with $HOME stripped from paths for readability.
for i in "${!pids[@]}"; do
line="${lines[$i]}"
pid=$(echo "$line" | awk '{print $2}')
start=$(echo "$line" | awk '{print $9}')
cmd=$(echo "$line" | awk '{for(i=11;i<=NF;i++) printf "%s ", $i; print ""}')
cmd=$(echo "$cmd" | sed "s|$HOME/||g")
printf " PID %-7s started %-10s %s\n" "$pid" "$start" "$cmd"
done
echo ""
if [[ "$DRY_RUN" == true ]]; then
echo "Dry run — re-run without --dry to kill these processes."
exit 0
fi
# Graceful shutdown first: SIGTERM every PID, tolerating already-exited ones.
echo "Sending SIGTERM..."
for pid in "${pids[@]}"; do
kill -TERM "$pid" 2>/dev/null && echo " signaled $pid" || echo " $pid already gone"
done
# Give processes a moment to exit, then force-kill any survivors.
sleep 2
for pid in "${pids[@]}"; do
if kill -0 "$pid" 2>/dev/null; then
echo " $pid still alive, sending SIGKILL..."
kill -KILL "$pid" 2>/dev/null || true
fi
done
echo "Done."

View File

@@ -21,6 +21,10 @@ const mockIssueService = vi.hoisted(() => ({
vi.mock("../services/activity.js", () => ({
activityService: () => mockActivityService,
normalizeActivityLimit: (limit: number | undefined) => {
if (!Number.isFinite(limit)) return 100;
return Math.max(1, Math.min(500, Math.floor(limit ?? 100)));
},
}));
vi.mock("../services/index.js", () => ({
@@ -58,6 +62,38 @@ describe("activity routes", () => {
vi.clearAllMocks();
});
// Omitting `limit` must fall back to the service default of 100 events.
it("limits company activity lists by default", async () => {
mockActivityService.list.mockResolvedValue([]);
const app = await createApp();
const res = await request(app).get("/api/companies/company-1/activity");
expect(res.status).toBe(200);
expect(mockActivityService.list).toHaveBeenCalledWith({
companyId: "company-1",
agentId: undefined,
entityType: undefined,
entityId: undefined,
limit: 100,
});
});
// An oversized `limit` query parameter must be clamped to the 500-event cap
// before the request reaches the activity service.
it("caps requested company activity list limits", async () => {
mockActivityService.list.mockResolvedValue([]);
const app = await createApp();
const res = await request(app).get("/api/companies/company-1/activity?limit=5000&entityType=issue");
expect(res.status).toBe(200);
expect(mockActivityService.list).toHaveBeenCalledWith({
companyId: "company-1",
agentId: undefined,
entityType: "issue",
entityId: undefined,
limit: 500,
});
});
it("resolves issue identifiers before loading runs", async () => {
mockIssueService.getByIdentifier.mockResolvedValue({
id: "issue-uuid-1",

View File

@@ -1,6 +1,7 @@
import { randomUUID } from "node:crypto";
import { afterAll, afterEach, beforeAll, describe, expect, it } from "vitest";
import {
activityLog,
agents,
companies,
createDb,
@@ -56,6 +57,7 @@ describeEmbeddedPostgres("activity service", () => {
}, 20_000);
afterEach(async () => {
await db.delete(activityLog);
await db.delete(issueComments);
await db.delete(issueDocuments);
await db.delete(documentRevisions);
@@ -70,6 +72,51 @@ describeEmbeddedPostgres("activity service", () => {
await tempDb?.cleanup();
});
// Seeds three activity rows with ascending timestamps, then verifies the
// service returns events newest-first and honors the requested limit.
it("limits company activity lists", async () => {
const companyId = randomUUID();
await db.insert(companies).values({
id: companyId,
name: "Paperclip",
issuePrefix: `T${companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`,
requireBoardApprovalForNewAgents: false,
});
await db.insert(activityLog).values([
{
companyId,
actorType: "system",
actorId: "system",
action: "test.oldest",
entityType: "company",
entityId: companyId,
createdAt: new Date("2026-04-21T10:00:00.000Z"),
},
{
companyId,
actorType: "system",
actorId: "system",
action: "test.middle",
entityType: "company",
entityId: companyId,
createdAt: new Date("2026-04-21T11:00:00.000Z"),
},
{
companyId,
actorType: "system",
actorId: "system",
action: "test.newest",
entityType: "company",
entityId: companyId,
createdAt: new Date("2026-04-21T12:00:00.000Z"),
},
]);
// limit: 2 should drop the oldest event and keep newest-first ordering.
const result = await activityService(db).list({ companyId, limit: 2 });
expect(result.map((event) => event.action)).toEqual(["test.newest", "test.middle"]);
});
it("returns compact usage and result summaries for issue runs", async () => {
const companyId = randomUUID();
const agentId = randomUUID();

View File

@@ -29,6 +29,15 @@ console.log(JSON.stringify({ type: "turn.completed", usage: { input_tokens: 1, c
await fs.chmod(commandPath, 0o755);
}
// Write an executable stub "codex" command that prints a single JSON error
// event and exits non-zero, for exercising failure-classification paths.
async function writeFailingCodexCommand(commandPath: string, errorMessage: string): Promise<void> {
const scriptLines = [
"#!/usr/bin/env node",
`console.log(JSON.stringify({ type: "error", message: ${JSON.stringify(errorMessage)} }));`,
"process.exit(1);",
"",
];
await fs.writeFile(commandPath, scriptLines.join("\n"), "utf8");
await fs.chmod(commandPath, 0o755);
}
type CapturePayload = {
argv: string[];
prompt: string;
@@ -369,6 +378,131 @@ describe("codex execute", () => {
}
});
it("classifies remote-compaction high-demand failures as retryable transient upstream errors", async () => {
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-codex-execute-transient-"));
const workspace = path.join(root, "workspace");
const commandPath = path.join(root, "codex");
await fs.mkdir(workspace, { recursive: true });
await writeFailingCodexCommand(
commandPath,
"Error running remote compact task: We're currently experiencing high demand, which may cause temporary errors.",
);
const previousHome = process.env.HOME;
process.env.HOME = root;
try {
const result = await execute({
runId: "run-transient-error",
agent: {
id: "agent-1",
companyId: "company-1",
name: "Codex Coder",
adapterType: "codex_local",
adapterConfig: {},
},
runtime: {
sessionId: null,
sessionParams: null,
sessionDisplayId: null,
taskKey: null,
},
config: {
command: commandPath,
cwd: workspace,
promptTemplate: "Follow the paperclip heartbeat.",
},
context: {},
authToken: "run-jwt-token",
onLog: async () => {},
});
expect(result.exitCode).toBe(1);
expect(result.errorCode).toBe("codex_transient_upstream");
expect(result.errorMessage).toContain("high demand");
} finally {
if (previousHome === undefined) delete process.env.HOME;
else process.env.HOME = previousHome;
await fs.rm(root, { recursive: true, force: true });
}
});
it("uses safer invocation settings and a fresh-session handoff for codex transient fallback retries", async () => {
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-codex-execute-fallback-"));
const workspace = path.join(root, "workspace");
const commandPath = path.join(root, "codex");
const capturePath = path.join(root, "capture.json");
await fs.mkdir(workspace, { recursive: true });
await writeFakeCodexCommand(commandPath);
const previousHome = process.env.HOME;
process.env.HOME = root;
let commandNotes: string[] = [];
try {
const result = await execute({
runId: "run-fallback",
agent: {
id: "agent-1",
companyId: "company-1",
name: "Codex Coder",
adapterType: "codex_local",
adapterConfig: {},
},
runtime: {
sessionId: null,
sessionParams: {
sessionId: "codex-session-stale",
cwd: workspace,
},
sessionDisplayId: "codex-session-stale",
taskKey: null,
},
config: {
command: commandPath,
cwd: workspace,
fastMode: true,
model: "gpt-5.4",
env: {
PAPERCLIP_TEST_CAPTURE_PATH: capturePath,
},
promptTemplate: "Follow the paperclip heartbeat.",
},
context: {
codexTransientFallbackMode: "fresh_session_safer_invocation",
paperclipContinuationSummary: {
key: "continuation-summary",
title: "Continuation Summary",
body: "Issue continuation summary for the next fresh session.",
updatedAt: "2026-04-21T01:00:00.000Z",
},
},
authToken: "run-jwt-token",
onLog: async () => {},
onMeta: async (meta) => {
commandNotes = meta.commandNotes ?? [];
},
});
expect(result.exitCode).toBe(0);
expect(result.errorMessage).toBeNull();
const capture = JSON.parse(await fs.readFile(capturePath, "utf8")) as CapturePayload;
expect(capture.argv).toEqual(expect.arrayContaining(["exec", "--json", "-"]));
expect(capture.argv).not.toContain("resume");
expect(capture.argv).not.toContain('service_tier="fast"');
expect(capture.argv).not.toContain("features.fast_mode=true");
expect(capture.prompt).toContain("Paperclip session handoff:");
expect(capture.prompt).toContain("Issue continuation summary for the next fresh session.");
expect(commandNotes).toContain("Codex transient fallback requested safer invocation settings for this retry.");
expect(commandNotes).toContain("Codex transient fallback forced a fresh session with a continuation handoff.");
} finally {
if (previousHome === undefined) delete process.env.HOME;
else process.env.HOME = previousHome;
await fs.rm(root, { recursive: true, force: true });
}
});
it("renders execution-stage wake instructions for reviewer and executor roles", async () => {
const root = await fs.mkdtemp(path.join(os.tmpdir(), "paperclip-codex-execute-stage-wake-"));
const workspace = path.join(root, "workspace");

View File

@@ -5,7 +5,7 @@ import {
getEmbeddedPostgresTestSupport,
startEmbeddedPostgresTestDatabase,
} from "./helpers/embedded-postgres.js";
import { dashboardService } from "../services/dashboard.ts";
import { dashboardService, getUtcMonthStart } from "../services/dashboard.ts";
const embeddedPostgresSupport = await getEmbeddedPostgresTestSupport();
const describeEmbeddedPostgres = embeddedPostgresSupport.supported ? describe : describe.skip;
@@ -26,6 +26,17 @@ function utcDateKey(date: Date): string {
return date.toISOString().slice(0, 10);
}
// Unit coverage for the pure UTC month-boundary helper: timezone offsets in
// the input instant must not shift which UTC month the spend window anchors to.
describe("getUtcMonthStart", () => {
it("anchors the monthly spend window to UTC month boundaries", () => {
// 2026-03-31T20:30-05:00 is already 2026-04-01T01:30Z, so the UTC month is April.
expect(getUtcMonthStart(new Date("2026-03-31T20:30:00.000-05:00")).toISOString()).toBe(
"2026-04-01T00:00:00.000Z",
);
// 2026-04-01T00:30+14:00 is still 2026-03-31T10:30Z, so the UTC month is March.
expect(getUtcMonthStart(new Date("2026-04-01T00:30:00.000+14:00")).toISOString()).toBe(
"2026-03-01T00:00:00.000Z",
);
});
});
describeEmbeddedPostgres("dashboard service", () => {
let db!: ReturnType<typeof createDb>;
let tempDb: Awaited<ReturnType<typeof startEmbeddedPostgresTestDatabase>> | null = null;

View File

@@ -538,6 +538,144 @@ describe("heartbeat comment wake batching", () => {
}
}, 120_000);
it("promotes deferred comment wakes with their comments after the active run is cancelled", async () => {
const gateway = await createControlledGatewayServer();
const companyId = randomUUID();
const agentId = randomUUID();
const issueId = randomUUID();
const issuePrefix = `T${companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`;
const heartbeat = heartbeatService(db);
try {
await db.insert(companies).values({
id: companyId,
name: "Paperclip",
issuePrefix,
requireBoardApprovalForNewAgents: false,
});
await db.insert(agents).values({
id: agentId,
companyId,
name: "Gateway Agent",
role: "engineer",
status: "idle",
adapterType: "openclaw_gateway",
adapterConfig: {
url: gateway.url,
headers: {
"x-openclaw-token": "gateway-token",
},
payloadTemplate: {
message: "wake now",
},
waitTimeoutMs: 2_000,
},
runtimeConfig: {},
permissions: {},
});
await db.insert(issues).values({
id: issueId,
companyId,
title: "Interrupt queued comment",
status: "todo",
priority: "medium",
assigneeAgentId: agentId,
issueNumber: 2,
identifier: `${issuePrefix}-2`,
});
const comment1 = await db
.insert(issueComments)
.values({
companyId,
issueId,
authorUserId: "user-1",
body: "Start work",
})
.returning()
.then((rows) => rows[0]);
const firstRun = await heartbeat.wakeup(agentId, {
source: "automation",
triggerDetail: "system",
reason: "issue_commented",
payload: { issueId, commentId: comment1.id },
contextSnapshot: {
issueId,
taskId: issueId,
commentId: comment1.id,
wakeReason: "issue_commented",
},
requestedByActorType: "user",
requestedByActorId: "user-1",
});
expect(firstRun).not.toBeNull();
await waitFor(() => gateway.getAgentPayloads().length === 1);
const queuedComment = await db
.insert(issueComments)
.values({
companyId,
issueId,
authorUserId: "user-1",
body: "Queued follow-up",
})
.returning()
.then((rows) => rows[0]);
const followupRun = await heartbeat.wakeup(agentId, {
source: "automation",
triggerDetail: "system",
reason: "issue_commented",
payload: { issueId, commentId: queuedComment.id },
contextSnapshot: {
issueId,
taskId: issueId,
commentId: queuedComment.id,
wakeReason: "issue_commented",
},
requestedByActorType: "user",
requestedByActorId: "user-1",
});
expect(followupRun).toBeNull();
await heartbeat.cancelRun(firstRun!.id);
await waitFor(() => gateway.getAgentPayloads().length === 2);
const promotedPayload = gateway.getAgentPayloads()[1] ?? {};
expect(promotedPayload.paperclip).toMatchObject({
wake: {
commentIds: [queuedComment.id],
latestCommentId: queuedComment.id,
comments: [
expect.objectContaining({
id: queuedComment.id,
body: "Queued follow-up",
}),
],
commentWindow: {
requestedCount: 1,
includedCount: 1,
missingCount: 0,
},
},
});
expect(String(promotedPayload.message ?? "")).toContain("Queued follow-up");
gateway.releaseFirstWait();
await waitFor(async () => {
const runs = await db.select().from(heartbeatRuns).where(eq(heartbeatRuns.agentId, agentId));
return runs.length === 2 && runs.every((run) => ["cancelled", "succeeded"].includes(run.status));
}, 90_000);
} finally {
gateway.releaseFirstWait();
await gateway.close();
}
}, 120_000);
it("promotes deferred comment wakes after the active run closes the issue", async () => {
const gateway = await createControlledGatewayServer();
const companyId = randomUUID();

View File

@@ -132,7 +132,7 @@ describeEmbeddedPostgres("heartbeat dependency-aware queued run selection", () =
await tempDb?.cleanup();
});
it("keeps blocked descendants queued until their blockers resolve", async () => {
it("keeps blocked descendants idle until their blockers resolve", async () => {
const companyId = randomUUID();
const agentId = randomUUID();
const blockerId = randomUUID();
@@ -200,15 +200,72 @@ describeEmbeddedPostgres("heartbeat dependency-aware queued run selection", () =
payload: { issueId: blockedIssueId },
contextSnapshot: { issueId: blockedIssueId, wakeReason: "issue_assigned" },
});
expect(blockedWake).not.toBeNull();
expect(blockedWake).toBeNull();
const blockedWakeRequest = await waitForCondition(async () => {
const wakeup = await db
.select({
status: agentWakeupRequests.status,
reason: agentWakeupRequests.reason,
})
.from(agentWakeupRequests)
.where(
and(
eq(agentWakeupRequests.agentId, agentId),
sql`${agentWakeupRequests.payload} ->> 'issueId' = ${blockedIssueId}`,
),
)
.orderBy(agentWakeupRequests.requestedAt)
.then((rows) => rows[0] ?? null);
return Boolean(
wakeup &&
wakeup.status === "skipped" &&
wakeup.reason === "issue_dependencies_blocked",
);
});
expect(blockedWakeRequest).toBe(true);
const blockedRunsBeforeResolution = await db
.select({ count: sql<number>`count(*)::int` })
.from(heartbeatRuns)
.where(sql`${heartbeatRuns.contextSnapshot} ->> 'issueId' = ${blockedIssueId}`)
.then((rows) => rows[0]?.count ?? 0);
expect(blockedRunsBeforeResolution).toBe(0);
const interactionWake = await heartbeat.wakeup(agentId, {
source: "automation",
triggerDetail: "system",
reason: "issue_commented",
payload: { issueId: blockedIssueId, commentId: randomUUID() },
contextSnapshot: {
issueId: blockedIssueId,
wakeReason: "issue_commented",
},
});
expect(interactionWake).not.toBeNull();
await waitForCondition(async () => {
const run = await db
.select({ status: heartbeatRuns.status })
.from(heartbeatRuns)
.where(eq(heartbeatRuns.id, blockedWake!.id))
.where(eq(heartbeatRuns.id, interactionWake!.id))
.then((rows) => rows[0] ?? null);
return run?.status === "queued";
return run?.status === "succeeded";
});
const interactionRun = await db
.select({
status: heartbeatRuns.status,
contextSnapshot: heartbeatRuns.contextSnapshot,
})
.from(heartbeatRuns)
.where(eq(heartbeatRuns.id, interactionWake!.id))
.then((rows) => rows[0] ?? null);
expect(interactionRun?.status).toBe("succeeded");
expect(interactionRun?.contextSnapshot).toMatchObject({
dependencyBlockedInteraction: true,
unresolvedBlockerIssueIds: [blockerId],
});
const readyWake = await heartbeat.wakeup(agentId, {
@@ -229,12 +286,12 @@ describeEmbeddedPostgres("heartbeat dependency-aware queued run selection", () =
return run?.status === "succeeded";
});
const [blockedRun, readyRun] = await Promise.all([
db.select().from(heartbeatRuns).where(eq(heartbeatRuns.id, blockedWake!.id)).then((rows) => rows[0] ?? null),
db.select().from(heartbeatRuns).where(eq(heartbeatRuns.id, readyWake!.id)).then((rows) => rows[0] ?? null),
]);
const readyRun = await db
.select()
.from(heartbeatRuns)
.where(eq(heartbeatRuns.id, readyWake!.id))
.then((rows) => rows[0] ?? null);
expect(blockedRun?.status).toBe("queued");
expect(readyRun?.status).toBe("succeeded");
await db
@@ -242,7 +299,7 @@ describeEmbeddedPostgres("heartbeat dependency-aware queued run selection", () =
.set({ status: "done", updatedAt: new Date() })
.where(eq(issues.id, blockerId));
await heartbeat.wakeup(agentId, {
const promotedWake = await heartbeat.wakeup(agentId, {
source: "automation",
triggerDetail: "system",
reason: "issue_blockers_resolved",
@@ -253,12 +310,13 @@ describeEmbeddedPostgres("heartbeat dependency-aware queued run selection", () =
resolvedBlockerIssueId: blockerId,
},
});
expect(promotedWake).not.toBeNull();
await waitForCondition(async () => {
const run = await db
.select({ status: heartbeatRuns.status })
.from(heartbeatRuns)
.where(eq(heartbeatRuns.id, blockedWake!.id))
.where(eq(heartbeatRuns.id, promotedWake!.id))
.then((rows) => rows[0] ?? null);
return run?.status === "succeeded";
});
@@ -269,7 +327,7 @@ describeEmbeddedPostgres("heartbeat dependency-aware queued run selection", () =
status: heartbeatRuns.status,
})
.from(heartbeatRuns)
.where(eq(heartbeatRuns.id, blockedWake!.id))
.where(eq(heartbeatRuns.id, promotedWake!.id))
.then((rows) => rows[0] ?? null);
const blockedWakeRequestCount = await db
.select({ count: sql<number>`count(*)::int` })

View File

@@ -16,6 +16,7 @@ import {
heartbeatRuns,
issueComments,
issueDocuments,
issueRelations,
issues,
} from "@paperclipai/db";
import {
@@ -231,6 +232,7 @@ describeEmbeddedPostgres("heartbeat orphaned process recovery", () => {
await db.delete(issueDocuments);
await db.delete(documentRevisions);
await db.delete(documents);
await db.delete(issueRelations);
await db.delete(issues);
await db.delete(heartbeatRunEvents);
await db.delete(heartbeatRuns);
@@ -441,6 +443,87 @@ describeEmbeddedPostgres("heartbeat orphaned process recovery", () => {
return { companyId, agentId, runId, wakeupRequestId, issueId };
}
async function seedQueuedIssueRunFixture() {
const companyId = randomUUID();
const agentId = randomUUID();
const runId = randomUUID();
const wakeupRequestId = randomUUID();
const issueId = randomUUID();
const now = new Date("2026-03-19T00:00:00.000Z");
const issuePrefix = `T${companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`;
await db.insert(companies).values({
id: companyId,
name: "Paperclip",
issuePrefix,
requireBoardApprovalForNewAgents: false,
});
await db.insert(agents).values({
id: agentId,
companyId,
name: "CodexCoder",
role: "engineer",
status: "idle",
adapterType: "codex_local",
adapterConfig: {},
runtimeConfig: {
heartbeat: {
wakeOnDemand: true,
maxConcurrentRuns: 1,
},
},
permissions: {},
});
await db.insert(agentWakeupRequests).values({
id: wakeupRequestId,
companyId,
agentId,
source: "assignment",
triggerDetail: "system",
reason: "issue_assigned",
payload: { issueId },
status: "queued",
runId,
requestedAt: now,
updatedAt: now,
});
await db.insert(heartbeatRuns).values({
id: runId,
companyId,
agentId,
invocationSource: "assignment",
triggerDetail: "system",
status: "queued",
wakeupRequestId,
contextSnapshot: {
issueId,
taskId: issueId,
wakeReason: "issue_assigned",
},
updatedAt: now,
createdAt: now,
});
await db.insert(issues).values({
id: issueId,
companyId,
title: "Retry transient Codex failure without blocking",
status: "in_progress",
priority: "medium",
assigneeAgentId: agentId,
checkoutRunId: runId,
executionRunId: runId,
issueNumber: 1,
identifier: `${issuePrefix}-1`,
startedAt: now,
});
return { companyId, agentId, runId, wakeupRequestId, issueId };
}
it("keeps a local run active when the recorded pid is still alive", async () => {
const child = spawnAliveProcess();
childProcesses.add(child);
@@ -547,8 +630,11 @@ describeEmbeddedPostgres("heartbeat orphaned process recovery", () => {
expect(issue?.executionRunId).toBe(retryRun?.id ?? null);
});
it("does not queue a second retry after the first process-loss retry was already used", async () => {
it("blocks the issue when process-loss retry is exhausted and the immediate continuation recovery also fails", async () => {
mockAdapterExecute.mockRejectedValueOnce(new Error("continuation recovery failed"));
const { agentId, runId, issueId } = await seedRunFixture({
agentStatus: "idle",
processPid: 999_999_999,
processLossRetryCount: 1,
});
@@ -562,16 +648,74 @@ describeEmbeddedPostgres("heartbeat orphaned process recovery", () => {
.select()
.from(heartbeatRuns)
.where(eq(heartbeatRuns.agentId, agentId));
expect(runs).toHaveLength(1);
expect(runs[0]?.status).toBe("failed");
expect(runs).toHaveLength(2);
expect(runs.find((row) => row.id === runId)?.status).toBe("failed");
const continuationRun = runs.find((row) => row.id !== runId);
expect(continuationRun?.contextSnapshot as Record<string, unknown> | undefined).toMatchObject({
retryReason: "issue_continuation_needed",
retryOfRunId: runId,
});
const blockedIssue = await waitForValue(async () =>
db.select().from(issues).where(eq(issues.id, issueId)).then((rows) => {
const issue = rows[0] ?? null;
return issue?.status === "blocked" ? issue : null;
})
);
expect(blockedIssue?.status).toBe("blocked");
expect(blockedIssue?.executionRunId).toBeNull();
expect(blockedIssue?.checkoutRunId).toBe(continuationRun?.id ?? null);
const comments = await db.select().from(issueComments).where(eq(issueComments.issueId, issueId));
expect(comments).toHaveLength(1);
expect(comments[0]?.body).toContain("retried continuation");
});
// A codex_transient_upstream failure should park a bounded retry run in
// `scheduled_retry` instead of blocking the issue or posting an operator comment.
it("schedules a bounded retry for codex transient upstream failures instead of blocking the issue immediately", async () => {
// Simulate a single transient Codex upstream failure on the first execution.
mockAdapterExecute.mockResolvedValueOnce({
exitCode: 1,
signal: null,
timedOut: false,
errorCode: "codex_transient_upstream",
errorMessage:
"Error running remote compact task: We're currently experiencing high demand, which may cause temporary errors.",
provider: "openai",
model: "gpt-5.4",
});
const { agentId, runId, issueId } = await seedQueuedIssueRunFixture();
const heartbeat = heartbeatService(db);
await heartbeat.resumeQueuedRuns();
await waitForRunToSettle(heartbeat, runId);
// Wait for the retry run to be created alongside the failed original run.
const runs = await waitForValue(async () => {
const rows = await db
.select()
.from(heartbeatRuns)
.where(eq(heartbeatRuns.agentId, agentId));
return rows.length >= 2 ? rows : null;
});
expect(runs).toHaveLength(2);
const failedRun = runs?.find((row) => row.id === runId);
const retryRun = runs?.find((row) => row.id !== runId);
// The original run fails with the transient error code; the follow-up run is
// parked as a scheduled retry that reuses the same Codex session.
expect(failedRun?.status).toBe("failed");
expect(failedRun?.errorCode).toBe("codex_transient_upstream");
expect(retryRun?.status).toBe("scheduled_retry");
expect(retryRun?.scheduledRetryReason).toBe("transient_failure");
expect((retryRun?.contextSnapshot as Record<string, unknown> | null)?.codexTransientFallbackMode).toBe("same_session");
const issue = await db
.select()
.from(issues)
.where(eq(issues.id, issueId))
.then((rows) => rows[0] ?? null);
// The issue stays in progress, keeps the original run as its checkout, and
// hands execution to the scheduled retry run. (A stale `toBeNull()` assertion
// on executionRunId was removed: it contradicted the retry-run assertion
// below and could never pass alongside it once a retry run exists.)
expect(issue?.checkoutRunId).toBe(runId);
expect(issue?.status).toBe("in_progress");
expect(issue?.executionRunId).toBe(retryRun?.id ?? null);
// No operator-facing comment is posted for a transient, self-healing retry.
const comments = await db.select().from(issueComments).where(eq(issueComments.issueId, issueId));
expect(comments).toHaveLength(0);
});
it("clears the detached warning when the run reports activity again", async () => {
@@ -675,6 +819,107 @@ describeEmbeddedPostgres("heartbeat orphaned process recovery", () => {
expect(comments[0]?.body).toContain("Latest retry failure: `process_lost` - run failed before issue advanced.");
});
it("assigns open unassigned blockers back to their creator agent", async () => {
const companyId = randomUUID();
const creatorAgentId = randomUUID();
const blockedAssigneeAgentId = randomUUID();
const blockerIssueId = randomUUID();
const blockedIssueId = randomUUID();
const issuePrefix = `T${companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`;
await db.insert(companies).values({
id: companyId,
name: "Paperclip",
issuePrefix,
requireBoardApprovalForNewAgents: false,
});
await db.insert(agents).values([
{
id: creatorAgentId,
companyId,
name: "SecurityEngineer",
role: "engineer",
status: "idle",
adapterType: "codex_local",
adapterConfig: {},
runtimeConfig: {},
permissions: {},
},
{
id: blockedAssigneeAgentId,
companyId,
name: "CodexCoder",
role: "engineer",
status: "idle",
adapterType: "codex_local",
adapterConfig: {},
runtimeConfig: {},
permissions: {},
},
]);
await db.insert(issues).values([
{
id: blockerIssueId,
companyId,
title: "Fix blocker",
status: "todo",
priority: "high",
createdByAgentId: creatorAgentId,
issueNumber: 1,
identifier: `${issuePrefix}-1`,
},
{
id: blockedIssueId,
companyId,
title: "Blocked work",
status: "blocked",
priority: "high",
assigneeAgentId: blockedAssigneeAgentId,
issueNumber: 2,
identifier: `${issuePrefix}-2`,
},
]);
await db.insert(issueRelations).values({
companyId,
issueId: blockerIssueId,
relatedIssueId: blockedIssueId,
type: "blocks",
createdByAgentId: creatorAgentId,
});
const heartbeat = heartbeatService(db);
const result = await heartbeat.reconcileStrandedAssignedIssues();
expect(result.orphanBlockersAssigned).toBe(1);
expect(result.issueIds).toContain(blockerIssueId);
const blocker = await db
.select()
.from(issues)
.where(eq(issues.id, blockerIssueId))
.then((rows) => rows[0] ?? null);
expect(blocker?.assigneeAgentId).toBe(creatorAgentId);
const comments = await db.select().from(issueComments).where(eq(issueComments.issueId, blockerIssueId));
expect(comments[0]?.body).toContain("Assigned Orphan Blocker");
expect(comments[0]?.body).toContain(`[${issuePrefix}-2](/${issuePrefix}/issues/${issuePrefix}-2)`);
const wakeups = await db.select().from(agentWakeupRequests).where(eq(agentWakeupRequests.agentId, creatorAgentId));
expect(wakeups).toEqual([
expect.objectContaining({
reason: "issue_assigned",
payload: expect.objectContaining({
issueId: blockerIssueId,
mutation: "unassigned_blocker_recovery",
}),
}),
]);
const runId = wakeups[0]?.runId;
if (runId) {
await waitForRunToSettle(heartbeat, runId);
}
});
it("re-enqueues continuation for stranded in-progress work with no active run", async () => {
const { agentId, issueId, runId } = await seedStrandedIssueFixture({
status: "in_progress",
@@ -851,7 +1096,6 @@ describeEmbeddedPostgres("heartbeat orphaned process recovery", () => {
const wakes = await db.select().from(agentWakeupRequests).where(eq(agentWakeupRequests.agentId, agentId));
expect(wakes.some((row) => row.reason === "run_liveness_continuation")).toBe(false);
});
it("blocks stranded in-progress work after the continuation retry was already used", async () => {
const { issueId } = await seedStrandedIssueFixture({
status: "in_progress",

View File

@@ -0,0 +1,338 @@
import { randomUUID } from "node:crypto";
import { eq, sql } from "drizzle-orm";
import { afterAll, afterEach, beforeAll, describe, expect, it } from "vitest";
import {
agents,
agentWakeupRequests,
companies,
createDb,
heartbeatRunEvents,
heartbeatRuns,
} from "@paperclipai/db";
import {
getEmbeddedPostgresTestSupport,
startEmbeddedPostgresTestDatabase,
} from "./helpers/embedded-postgres.js";
import {
BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS,
heartbeatService,
} from "../services/heartbeat.ts";
// Probe host support for embedded Postgres exactly once at module load; the
// result selects between a real describe and describe.skip for the suite.
const embeddedPostgresSupport = await getEmbeddedPostgresTestSupport();
const { supported: embeddedPostgresSupported } = embeddedPostgresSupport;
const describeEmbeddedPostgres = embeddedPostgresSupported ? describe : describe.skip;
if (!embeddedPostgresSupported) {
  // Make the skip visible in test output instead of silently vanishing.
  console.warn(
    `Skipping embedded Postgres heartbeat retry scheduling tests on this host: ${embeddedPostgresSupport.reason ?? "unsupported environment"}`,
  );
}
// Integration suite for bounded transient-failure retry scheduling of
// heartbeat runs, exercised against a real embedded Postgres database:
// retries are persisted as durable "scheduled_retry" runs, promoted to
// "queued" only once due, capped at a hard attempt limit, and (for codex
// transient failures) advanced through escalating fallback modes.
describeEmbeddedPostgres("heartbeat bounded retry scheduling", () => {
  let db!: ReturnType<typeof createDb>;
  let heartbeat!: ReturnType<typeof heartbeatService>;
  let tempDb: Awaited<ReturnType<typeof startEmbeddedPostgresTestDatabase>> | null = null;

  // One embedded Postgres per suite; the 20s budget covers first-run setup.
  beforeAll(async () => {
    tempDb = await startEmbeddedPostgresTestDatabase("paperclip-heartbeat-retry-scheduling-");
    db = createDb(tempDb.connectionString);
    heartbeat = heartbeatService(db);
  }, 20_000);

  // Delete child tables before their parents so FK constraints never block
  // the per-test cleanup.
  afterEach(async () => {
    await db.delete(heartbeatRunEvents);
    await db.delete(heartbeatRuns);
    await db.delete(agentWakeupRequests);
    await db.delete(agents);
    await db.delete(companies);
  });

  afterAll(async () => {
    await tempDb?.cleanup();
  });

  /**
   * Seed a company, one active codex agent, and a single failed heartbeat run
   * carrying the given error code. `scheduledRetryAttempt` backdates the run
   * as having already consumed that many bounded retries; 0/omitted means the
   * failure has not been retried yet, so no retry reason is recorded.
   */
  async function seedRetryFixture(input: {
    runId: string;
    companyId: string;
    agentId: string;
    now: Date;
    errorCode: string;
    scheduledRetryAttempt?: number;
  }) {
    await db.insert(companies).values({
      id: input.companyId,
      name: "Paperclip",
      // Derive a short, unique issue prefix from the company id.
      issuePrefix: `T${input.companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`,
      requireBoardApprovalForNewAgents: false,
    });
    await db.insert(agents).values({
      id: input.agentId,
      companyId: input.companyId,
      name: "CodexCoder",
      role: "engineer",
      status: "active",
      adapterType: "codex_local",
      adapterConfig: {},
      runtimeConfig: {
        heartbeat: {
          wakeOnDemand: true,
          maxConcurrentRuns: 1,
        },
      },
      permissions: {},
    });
    await db.insert(heartbeatRuns).values({
      id: input.runId,
      companyId: input.companyId,
      agentId: input.agentId,
      invocationSource: "assignment",
      status: "failed",
      error: "upstream overload",
      errorCode: input.errorCode,
      finishedAt: input.now,
      scheduledRetryAttempt: input.scheduledRetryAttempt ?? 0,
      // Only runs that already consumed a retry carry a retry reason.
      scheduledRetryReason: input.scheduledRetryAttempt ? "transient_failure" : null,
      contextSnapshot: {
        issueId: randomUUID(),
        wakeReason: "issue_assigned",
      },
      updatedAt: input.now,
      createdAt: input.now,
    });
  }

  it("schedules a retry with durable metadata and only promotes it when due", async () => {
    const companyId = randomUUID();
    const agentId = randomUUID();
    const sourceRunId = randomUUID();
    const now = new Date("2026-04-20T12:00:00.000Z");
    await db.insert(companies).values({
      id: companyId,
      name: "Paperclip",
      issuePrefix: `T${companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`,
      requireBoardApprovalForNewAgents: false,
    });
    await db.insert(agents).values({
      id: agentId,
      companyId,
      name: "CodexCoder",
      role: "engineer",
      status: "active",
      adapterType: "codex_local",
      adapterConfig: {},
      runtimeConfig: {
        heartbeat: {
          wakeOnDemand: true,
          maxConcurrentRuns: 1,
        },
      },
      permissions: {},
    });
    // A failed run with a retryable error code and no prior retry attempts.
    await db.insert(heartbeatRuns).values({
      id: sourceRunId,
      companyId,
      agentId,
      invocationSource: "assignment",
      status: "failed",
      error: "upstream overload",
      errorCode: "adapter_failed",
      finishedAt: now,
      contextSnapshot: {
        issueId: randomUUID(),
        wakeReason: "issue_assigned",
      },
      updatedAt: now,
      createdAt: now,
    });
    // Fixed `random` pins any backoff jitter so the due time is deterministic.
    const scheduled = await heartbeat.scheduleBoundedRetry(sourceRunId, {
      now,
      random: () => 0.5,
    });
    expect(scheduled.outcome).toBe("scheduled");
    if (scheduled.outcome !== "scheduled") return;
    // Attempt 1 uses the first entry of the bounded delay table.
    const expectedDueAt = new Date(now.getTime() + BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS[0]);
    expect(scheduled.attempt).toBe(1);
    expect(scheduled.dueAt.toISOString()).toBe(expectedDueAt.toISOString());
    // The retry is persisted as a new run linked back to the failed source run.
    const retryRun = await db
      .select()
      .from(heartbeatRuns)
      .where(eq(heartbeatRuns.id, scheduled.run.id))
      .then((rows) => rows[0] ?? null);
    expect(retryRun).toMatchObject({
      status: "scheduled_retry",
      retryOfRunId: sourceRunId,
      scheduledRetryAttempt: 1,
      scheduledRetryReason: "transient_failure",
    });
    expect(retryRun?.scheduledRetryAt?.toISOString()).toBe(expectedDueAt.toISOString());
    // Promoting at a moment chosen to land before the due time is a no-op.
    const earlyPromotion = await heartbeat.promoteDueScheduledRetries(new Date("2026-04-20T12:01:59.000Z"));
    expect(earlyPromotion).toEqual({ promoted: 0, runIds: [] });
    const stillScheduled = await db
      .select({ status: heartbeatRuns.status })
      .from(heartbeatRuns)
      .where(eq(heartbeatRuns.id, scheduled.run.id))
      .then((rows) => rows[0] ?? null);
    expect(stillScheduled?.status).toBe("scheduled_retry");
    // Exactly at the due time the retry is promoted into the queue.
    const duePromotion = await heartbeat.promoteDueScheduledRetries(expectedDueAt);
    expect(duePromotion).toEqual({ promoted: 1, runIds: [scheduled.run.id] });
    const promotedRun = await db
      .select({ status: heartbeatRuns.status })
      .from(heartbeatRuns)
      .where(eq(heartbeatRuns.id, scheduled.run.id))
      .then((rows) => rows[0] ?? null);
    expect(promotedRun?.status).toBe("queued");
  });

  it("exhausts bounded retries after the hard cap", async () => {
    const companyId = randomUUID();
    const agentId = randomUUID();
    const cappedRunId = randomUUID();
    const now = new Date("2026-04-20T18:00:00.000Z");
    await db.insert(companies).values({
      id: companyId,
      name: "Paperclip",
      issuePrefix: `T${companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`,
      requireBoardApprovalForNewAgents: false,
    });
    await db.insert(agents).values({
      id: agentId,
      companyId,
      name: "CodexCoder",
      role: "engineer",
      status: "active",
      adapterType: "codex_local",
      adapterConfig: {},
      runtimeConfig: {
        heartbeat: {
          wakeOnDemand: true,
          maxConcurrentRuns: 1,
        },
      },
      permissions: {},
    });
    // This run has already consumed every delay slot, i.e. it sits at the cap.
    await db.insert(heartbeatRuns).values({
      id: cappedRunId,
      companyId,
      agentId,
      invocationSource: "automation",
      status: "failed",
      error: "still transient",
      errorCode: "adapter_failed",
      finishedAt: now,
      scheduledRetryAttempt: BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS.length,
      scheduledRetryReason: "transient_failure",
      contextSnapshot: {
        wakeReason: "transient_failure_retry",
      },
      updatedAt: now,
      createdAt: now,
    });
    const exhausted = await heartbeat.scheduleBoundedRetry(cappedRunId, {
      now,
      random: () => 0.5,
    });
    // The would-be next attempt exceeds the cap, so scheduling refuses.
    expect(exhausted).toEqual({
      outcome: "retry_exhausted",
      attempt: BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS.length + 1,
      maxAttempts: BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS.length,
    });
    // No new retry run may be created for the company on exhaustion.
    const runCount = await db
      .select({ count: sql<number>`count(*)::int` })
      .from(heartbeatRuns)
      .where(eq(heartbeatRuns.companyId, companyId))
      .then((rows) => rows[0]?.count ?? 0);
    expect(runCount).toBe(1);
    // Exhaustion is recorded as an auditable event on the source run.
    const exhaustionEvent = await db
      .select({
        message: heartbeatRunEvents.message,
        payload: heartbeatRunEvents.payload,
      })
      .from(heartbeatRunEvents)
      .where(eq(heartbeatRunEvents.runId, cappedRunId))
      .orderBy(sql`${heartbeatRunEvents.id} desc`)
      .then((rows) => rows[0] ?? null);
    expect(exhaustionEvent?.message).toContain("Bounded retry exhausted");
    expect(exhaustionEvent?.payload).toMatchObject({
      retryReason: "transient_failure",
      scheduledRetryAttempt: BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS.length,
      maxAttempts: BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS.length,
    });
  });

  it("advances codex transient fallback stages across bounded retry attempts", async () => {
    // Expected fallback mode for attempt index 0..3; each retry escalates one
    // stage further.
    const fallbackModes = [
      "same_session",
      "safer_invocation",
      "fresh_session",
      "fresh_session_safer_invocation",
    ] as const;
    for (const [index, expectedMode] of fallbackModes.entries()) {
      const companyId = randomUUID();
      const agentId = randomUUID();
      const runId = randomUUID();
      // Distinct hour per iteration (10:00..13:00 UTC) keeps timestamps unique.
      const now = new Date(`2026-04-20T1${index}:00:00.000Z`);
      await seedRetryFixture({
        runId,
        companyId,
        agentId,
        now,
        errorCode: "codex_transient_upstream",
        scheduledRetryAttempt: index,
      });
      const scheduled = await heartbeat.scheduleBoundedRetry(runId, {
        now,
        random: () => 0.5,
      });
      expect(scheduled.outcome).toBe("scheduled");
      if (scheduled.outcome !== "scheduled") continue;
      // The fallback mode must be stamped on both the retry run's context
      // snapshot and the wakeup request payload that drives it.
      const retryRun = await db
        .select({
          contextSnapshot: heartbeatRuns.contextSnapshot,
          wakeupRequestId: heartbeatRuns.wakeupRequestId,
        })
        .from(heartbeatRuns)
        .where(eq(heartbeatRuns.id, scheduled.run.id))
        .then((rows) => rows[0] ?? null);
      expect((retryRun?.contextSnapshot as Record<string, unknown> | null)?.codexTransientFallbackMode).toBe(expectedMode);
      const wakeupRequest = await db
        .select({ payload: agentWakeupRequests.payload })
        .from(agentWakeupRequests)
        .where(eq(agentWakeupRequests.id, retryRun?.wakeupRequestId ?? ""))
        .then((rows) => rows[0] ?? null);
      expect((wakeupRequest?.payload as Record<string, unknown> | null)?.codexTransientFallbackMode).toBe(expectedMode);
      // afterEach only runs between tests, so reset the tables manually to
      // isolate each fallback-stage iteration of this loop.
      await db.delete(heartbeatRunEvents);
      await db.delete(heartbeatRuns);
      await db.delete(agentWakeupRequests);
      await db.delete(agents);
      await db.delete(companies);
    }
  });
});

View File

@@ -7,6 +7,7 @@ const mockIssueService = vi.hoisted(() => ({
assertCheckoutOwner: vi.fn(),
update: vi.fn(),
addComment: vi.fn(),
getDependencyReadiness: vi.fn(),
findMentionedAgents: vi.fn(),
listWakeableBlockedDependents: vi.fn(),
getWakeableParentAfterChildCompletion: vi.fn(),
@@ -199,6 +200,7 @@ describe("issue comment reopen routes", () => {
mockIssueService.assertCheckoutOwner.mockReset();
mockIssueService.update.mockReset();
mockIssueService.addComment.mockReset();
mockIssueService.getDependencyReadiness.mockReset();
mockIssueService.findMentionedAgents.mockReset();
mockIssueService.listWakeableBlockedDependents.mockReset();
mockIssueService.getWakeableParentAfterChildCompletion.mockReset();
@@ -255,6 +257,14 @@ describe("issue comment reopen routes", () => {
authorUserId: "local-board",
});
mockIssueService.findMentionedAgents.mockResolvedValue([]);
mockIssueService.getDependencyReadiness.mockResolvedValue({
issueId: "11111111-1111-4111-8111-111111111111",
blockerIssueIds: [],
unresolvedBlockerIssueIds: [],
unresolvedBlockerCount: 0,
allBlockersDone: true,
isDependencyReady: true,
});
mockIssueService.listWakeableBlockedDependents.mockResolvedValue([]);
mockIssueService.getWakeableParentAfterChildCompletion.mockResolvedValue(null);
mockIssueService.assertCheckoutOwner.mockResolvedValue({ adoptedFromRunId: null });
@@ -442,6 +452,75 @@ describe("issue comment reopen routes", () => {
);
});
// A comment from a non-assignee on an assigned, manually "blocked" issue
// should implicitly move the issue back to "todo" and wake the assignee with
// a reopen-style wakeup carrying the originating comment.
it("moves assigned blocked issues back to todo via POST comments", async () => {
  mockIssueService.getById.mockResolvedValue(makeIssue("blocked"));
  // Echo the patch back so the route observes the post-update status.
  mockIssueService.update.mockImplementation(async (_id: string, patch: Record<string, unknown>) => ({
    ...makeIssue("blocked"),
    ...patch,
  }));
  const res = await request(await installActor(createApp()))
    .post("/api/issues/11111111-1111-4111-8111-111111111111/comments")
    .send({ body: "please continue" });
  expect(res.status).toBe(201);
  expect(mockIssueService.update).toHaveBeenCalledWith(
    "11111111-1111-4111-8111-111111111111",
    { status: "todo" },
  );
  // The assignee agent is woken with the reopen reason and full context.
  expect(mockHeartbeatService.wakeup).toHaveBeenCalledWith(
    "22222222-2222-4222-8222-222222222222",
    expect.objectContaining({
      reason: "issue_reopened_via_comment",
      payload: expect.objectContaining({
        commentId: "comment-1",
        reopenedFrom: "blocked",
        mutation: "comment",
      }),
      contextSnapshot: expect.objectContaining({
        issueId: "11111111-1111-4111-8111-111111111111",
        wakeCommentId: "comment-1",
        wakeReason: "issue_reopened_via_comment",
        reopenedFrom: "blocked",
      }),
    }),
  );
});

// When the blocked issue still has unresolved first-class blockers, a comment
// must NOT force it back to "todo"; the assignee is still woken, but only
// with a plain "issue_commented" wakeup.
it("does not move dependency-blocked issues to todo via POST comments", async () => {
  mockIssueService.getById.mockResolvedValue(makeIssue("blocked"));
  // Report one unresolved blocker so the issue is dependency-blocked.
  mockIssueService.getDependencyReadiness.mockResolvedValue({
    issueId: "11111111-1111-4111-8111-111111111111",
    blockerIssueIds: ["33333333-3333-4333-8333-333333333333"],
    unresolvedBlockerIssueIds: ["33333333-3333-4333-8333-333333333333"],
    unresolvedBlockerCount: 1,
    allBlockersDone: false,
    isDependencyReady: false,
  });
  const res = await request(await installActor(createApp()))
    .post("/api/issues/11111111-1111-4111-8111-111111111111/comments")
    .send({ body: "what is happening?" });
  expect(res.status).toBe(201);
  // No status change is attempted at all.
  expect(mockIssueService.update).not.toHaveBeenCalled();
  expect(mockHeartbeatService.wakeup).toHaveBeenCalledWith(
    "22222222-2222-4222-8222-222222222222",
    expect.objectContaining({
      reason: "issue_commented",
      payload: expect.objectContaining({
        commentId: "comment-1",
        mutation: "comment",
      }),
      contextSnapshot: expect.objectContaining({
        issueId: "11111111-1111-4111-8111-111111111111",
        wakeCommentId: "comment-1",
        wakeReason: "issue_commented",
      }),
    }),
  );
});
it("does not implicitly reopen closed issues via POST comments when no agent is assigned", async () => {
mockIssueService.getById.mockResolvedValue({
...makeIssue("done"),
@@ -457,6 +536,82 @@ describe("issue comment reopen routes", () => {
expect(mockIssueService.update).not.toHaveBeenCalled();
});
// Same blocked -> todo behavior as the POST comments route, but via the PATCH
// issue endpoint's `comment` field; the PATCH path additionally records the
// acting board user on the update.
it("moves assigned blocked issues back to todo via the PATCH comment path", async () => {
  mockIssueService.getById.mockResolvedValue(makeIssue("blocked"));
  // Echo the patch back so the route observes the post-update status.
  mockIssueService.update.mockImplementation(async (_id: string, patch: Record<string, unknown>) => ({
    ...makeIssue("blocked"),
    ...patch,
  }));
  const res = await request(await installActor(createApp()))
    .patch("/api/issues/11111111-1111-4111-8111-111111111111")
    .send({ comment: "please continue" });
  expect(res.status).toBe(200);
  expect(mockIssueService.update).toHaveBeenCalledWith(
    "11111111-1111-4111-8111-111111111111",
    expect.objectContaining({
      status: "todo",
      actorAgentId: null,
      actorUserId: "local-board",
    }),
  );
  expect(mockHeartbeatService.wakeup).toHaveBeenCalledWith(
    "22222222-2222-4222-8222-222222222222",
    expect.objectContaining({
      reason: "issue_reopened_via_comment",
      payload: expect.objectContaining({
        commentId: "comment-1",
        reopenedFrom: "blocked",
        mutation: "comment",
      }),
    }),
  );
});

// PATCH-path mirror of the dependency guard: an unresolved first-class
// blocker keeps the issue in "blocked" and downgrades the wake to a plain
// "issue_commented" notification.
it("does not move dependency-blocked issues to todo via the PATCH comment path", async () => {
  mockIssueService.getById.mockResolvedValue(makeIssue("blocked"));
  // Report one unresolved blocker so the issue is dependency-blocked.
  mockIssueService.getDependencyReadiness.mockResolvedValue({
    issueId: "11111111-1111-4111-8111-111111111111",
    blockerIssueIds: ["33333333-3333-4333-8333-333333333333"],
    unresolvedBlockerIssueIds: ["33333333-3333-4333-8333-333333333333"],
    unresolvedBlockerCount: 1,
    allBlockersDone: false,
    isDependencyReady: false,
  });
  mockIssueService.update.mockImplementation(async (_id: string, patch: Record<string, unknown>) => ({
    ...makeIssue("blocked"),
    ...patch,
  }));
  const res = await request(await installActor(createApp()))
    .patch("/api/issues/11111111-1111-4111-8111-111111111111")
    .send({ comment: "what is happening?" });
  expect(res.status).toBe(200);
  // The PATCH still records the actor on the issue...
  expect(mockIssueService.update).toHaveBeenCalledWith(
    "11111111-1111-4111-8111-111111111111",
    expect.objectContaining({
      actorAgentId: null,
      actorUserId: "local-board",
    }),
  );
  // ...but never attempts the blocked -> todo transition.
  expect(mockIssueService.update).not.toHaveBeenCalledWith(
    "11111111-1111-4111-8111-111111111111",
    expect.objectContaining({ status: "todo" }),
  );
  expect(mockHeartbeatService.wakeup).toHaveBeenCalledWith(
    "22222222-2222-4222-8222-222222222222",
    expect.objectContaining({
      reason: "issue_commented",
      payload: expect.objectContaining({
        commentId: "comment-1",
        mutation: "comment",
      }),
    }),
  );
});
it("wakes the assignee when an assigned blocked issue moves back to todo", async () => {
const issue = makeIssue("blocked");
mockIssueService.getById.mockResolvedValue(issue);

View File

@@ -27,6 +27,10 @@ const mockDocumentsService = vi.hoisted(() => ({
getIssueDocumentByKey: vi.fn(),
}));
const mockExecutionWorkspaceService = vi.hoisted(() => ({
getById: vi.fn(),
}));
vi.mock("../services/index.js", () => ({
accessService: () => ({
canUser: vi.fn(),
@@ -36,9 +40,7 @@ vi.mock("../services/index.js", () => ({
getById: vi.fn(),
}),
documentService: () => mockDocumentsService,
executionWorkspaceService: () => ({
getById: vi.fn(),
}),
executionWorkspaceService: () => mockExecutionWorkspaceService,
feedbackService: () => ({
listIssueVotesForUser: vi.fn(async () => []),
saveIssueVote: vi.fn(async () => ({ vote: null, consentEnabledNow: false, sharingEnabled: false })),
@@ -157,6 +159,7 @@ describe("issue goal context routes", () => {
mockIssueService.listAttachments.mockResolvedValue([]);
mockDocumentsService.getIssueDocumentPayload.mockResolvedValue({});
mockDocumentsService.getIssueDocumentByKey.mockResolvedValue(null);
mockExecutionWorkspaceService.getById.mockResolvedValue(null);
mockProjectService.getById.mockResolvedValue({
id: legacyProjectLinkedIssue.projectId,
companyId: "company-1",
@@ -285,4 +288,44 @@ describe("issue goal context routes", () => {
}),
]);
});
// heartbeat-context should hydrate the issue's linked execution workspace
// (including its runtime services) into the `currentExecutionWorkspace` field
// of the response.
it("surfaces the current execution workspace from GET /issues/:id/heartbeat-context", async () => {
  // Link the issue to an execution workspace id...
  mockIssueService.getById.mockResolvedValue({
    ...legacyProjectLinkedIssue,
    executionWorkspaceId: "55555555-5555-4555-8555-555555555555",
  });
  // ...and make the workspace service resolve that id to a live workspace
  // with one running service exposing a URL and health status.
  mockExecutionWorkspaceService.getById.mockResolvedValue({
    id: "55555555-5555-4555-8555-555555555555",
    name: "PAP-581 workspace",
    mode: "isolated_workspace",
    status: "active",
    cwd: "/tmp/pap-581",
    runtimeServices: [
      {
        id: "service-1",
        serviceName: "web",
        status: "running",
        url: "http://127.0.0.1:5173",
        healthStatus: "healthy",
      },
    ],
  });
  const res = await request(await createApp()).get(
    "/api/issues/11111111-1111-4111-8111-111111111111/heartbeat-context",
  );
  expect(res.status).toBe(200);
  // The route must look the workspace up by the id stored on the issue.
  expect(mockExecutionWorkspaceService.getById).toHaveBeenCalledWith("55555555-5555-4555-8555-555555555555");
  expect(res.body.currentExecutionWorkspace).toEqual(expect.objectContaining({
    id: "55555555-5555-4555-8555-555555555555",
    mode: "isolated_workspace",
    runtimeServices: [
      expect.objectContaining({
        serviceName: "web",
        url: "http://127.0.0.1:5173",
      }),
    ],
  }));
});
});

View File

@@ -469,6 +469,88 @@ describeEmbeddedPostgres("issueService.list participantAgentId", () => {
expect(result.map((issue) => issue.id)).toEqual([linkedIssueId]);
});
// The generic `workspaceId` filter must match both linkage styles: an
// execution-workspace id matches only issues linked to that execution
// workspace, while a project-workspace id matches every issue linked to that
// project workspace (directly or through an execution workspace built on it).
it("filters issues by generic workspace id across execution and project workspace links", async () => {
  const companyId = randomUUID();
  const projectId = randomUUID();
  const projectWorkspaceId = randomUUID();
  const executionWorkspaceId = randomUUID();
  const executionLinkedIssueId = randomUUID();
  const projectLinkedIssueId = randomUUID();
  const otherIssueId = randomUUID();
  await db.insert(companies).values({
    id: companyId,
    name: "Paperclip",
    issuePrefix: `T${companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`,
    requireBoardApprovalForNewAgents: false,
  });
  await db.insert(projects).values({
    id: projectId,
    companyId,
    name: "Workspace project",
    status: "in_progress",
  });
  await db.insert(projectWorkspaces).values({
    id: projectWorkspaceId,
    companyId,
    projectId,
    name: "Feature workspace",
    sourceType: "local_path",
    visibility: "default",
    isPrimary: false,
  });
  // Execution workspace carved out of the project workspace above.
  await db.insert(executionWorkspaces).values({
    id: executionWorkspaceId,
    companyId,
    projectId,
    projectWorkspaceId,
    mode: "isolated_workspace",
    strategyType: "git_worktree",
    name: "Execution workspace",
    status: "active",
    providerType: "git_worktree",
  });
  await db.insert(issues).values([
    {
      // Linked to both the project workspace and the execution workspace.
      id: executionLinkedIssueId,
      companyId,
      projectId,
      projectWorkspaceId,
      title: "Execution linked issue",
      status: "done",
      priority: "medium",
      executionWorkspaceId,
    },
    {
      // Linked only to the project workspace.
      id: projectLinkedIssueId,
      companyId,
      projectId,
      projectWorkspaceId,
      title: "Project linked issue",
      status: "todo",
      priority: "medium",
    },
    {
      // Control issue with no workspace link; must never match.
      id: otherIssueId,
      companyId,
      projectId,
      title: "Other issue",
      status: "todo",
      priority: "medium",
    },
  ]);
  const executionResult = await svc.list(companyId, { workspaceId: executionWorkspaceId });
  const projectResult = await svc.list(companyId, { workspaceId: projectWorkspaceId });
  expect(executionResult.map((issue) => issue.id)).toEqual([executionLinkedIssueId]);
  // Sorted comparison because list order is not part of this contract.
  expect(projectResult.map((issue) => issue.id).sort()).toEqual([executionLinkedIssueId, projectLinkedIssueId].sort());
});
it("hides archived inbox issues until new external activity arrives", async () => {
const companyId = randomUUID();
const userId = "user-1";
@@ -740,6 +822,33 @@ describeEmbeddedPostgres("issueService.list participantAgentId", () => {
expect(result?.executionState).toBeNull();
expect(result?.executionWorkspaceSettings).toBeNull();
});
// Regression guard for the description preview truncation: a non-ASCII
// character sitting exactly at the truncation boundary must survive intact
// rather than being split by a byte-level cut.
it("does not let description preview truncation split multibyte characters", async () => {
  const companyId = randomUUID();
  const issueId = randomUUID();
  // 1199 ASCII chars followed by an em dash places the multibyte character
  // right at the boundary — the preview is expected to keep 1200 chars.
  const description = `${"x".repeat(1199)}— still valid after truncation`;
  await db.insert(companies).values({
    id: companyId,
    name: "Paperclip",
    issuePrefix: `T${companyId.replace(/-/g, "").slice(0, 6).toUpperCase()}`,
    requireBoardApprovalForNewAgents: false,
  });
  await db.insert(issues).values({
    id: issueId,
    companyId,
    title: "Multibyte boundary issue",
    description,
    status: "todo",
    priority: "medium",
  });
  const [result] = await svc.list(companyId);
  // Exactly 1200 chars, ending with the intact em dash.
  expect(result?.description).toHaveLength(1200);
  expect(result?.description?.endsWith("—")).toBe(true);
});
});
describeEmbeddedPostgres("issueService.create workspace inheritance", () => {

View File

@@ -349,6 +349,60 @@ describeEmbeddedPostgres("routine service live-execution coalescing", () => {
expect(routineIssues[0]?.id).toBe(previousIssue.id);
});
// Coalescing of live routine runs must key on the resolved variable values:
// two runs of the same routine with different {{branch}} values are distinct
// work and must each create their own issue with a distinct fingerprint.
it("does not coalesce live routine runs with different resolved variables", async () => {
  const { companyId, agentId, projectId, svc } = await seedFixture();
  // Routine whose title/description interpolate a required `branch` variable,
  // with a policy that would otherwise coalesce onto an active run.
  const variableRoutine = await svc.create(
    companyId,
    {
      projectId,
      goalId: null,
      parentIssueId: null,
      title: "pre-pr for {{branch}}",
      description: "Create a pre-PR from {{branch}}",
      assigneeAgentId: agentId,
      priority: "medium",
      status: "active",
      concurrencyPolicy: "coalesce_if_active",
      catchUpPolicy: "skip_missed",
      variables: [
        { name: "branch", label: null, type: "text", defaultValue: null, required: true, options: [] },
      ],
    },
    {},
  );
  // Same routine, two different resolved variable sets.
  const first = await svc.runRoutine(variableRoutine.id, {
    source: "manual",
    variables: { branch: "feature/a" },
  });
  const second = await svc.runRoutine(variableRoutine.id, {
    source: "manual",
    variables: { branch: "feature/b" },
  });
  // Both runs must materialize their own issue — no coalescing.
  expect(first.status).toBe("issue_created");
  expect(second.status).toBe("issue_created");
  expect(first.linkedIssueId).toBeTruthy();
  expect(second.linkedIssueId).toBeTruthy();
  expect(first.linkedIssueId).not.toBe(second.linkedIssueId);
  const routineIssues = await db
    .select({
      id: issues.id,
      title: issues.title,
      originFingerprint: issues.originFingerprint,
    })
    .from(issues)
    .where(eq(issues.originId, variableRoutine.id));
  expect(routineIssues).toHaveLength(2);
  // Titles carry the interpolated branch values.
  expect(routineIssues.map((issue) => issue.title).sort()).toEqual([
    "pre-pr for feature/a",
    "pre-pr for feature/b",
  ]);
  // Fingerprints must differ, proving the variables feed the dispatch key.
  expect(new Set(routineIssues.map((issue) => issue.originFingerprint)).size).toBe(2);
});
it("interpolates routine variables into the execution issue and stores resolved values", async () => {
const { companyId, agentId, projectId, svc } = await seedFixture();
const variableRoutine = await svc.create(

View File

@@ -118,6 +118,7 @@ vi.mock("../services/index.js", () => ({
feedbackService: feedbackServiceFactoryMock,
heartbeatService: vi.fn(() => ({
reapOrphanedRuns: vi.fn(async () => undefined),
promoteDueScheduledRetries: vi.fn(async () => ({ promoted: 0, runIds: [] })),
resumeQueuedRuns: vi.fn(async () => undefined),
reconcileStrandedAssignedIssues: vi.fn(async () => ({
dispatchRequeued: 0,

View File

@@ -663,15 +663,20 @@ export async function startServer(): Promise<StartedServer> {
// then resume any persisted queued runs that were waiting on the previous process.
void heartbeat
.reapOrphanedRuns()
.then(() => heartbeat.resumeQueuedRuns())
.then(async () => {
.then(() => heartbeat.promoteDueScheduledRetries())
.then(async (promotion) => {
await heartbeat.resumeQueuedRuns();
const reconciled = await heartbeat.reconcileStrandedAssignedIssues();
if (
promotion.promoted > 0 ||
reconciled.dispatchRequeued > 0 ||
reconciled.continuationRequeued > 0 ||
reconciled.escalated > 0
) {
logger.warn({ ...reconciled }, "startup stranded-issue reconciliation changed assigned issue state");
logger.warn(
{ promotedScheduledRetries: promotion.promoted, promotedScheduledRetryRunIds: promotion.runIds, ...reconciled },
"startup heartbeat recovery changed assigned issue state",
);
}
})
.then(async () => {
@@ -710,15 +715,20 @@ export async function startServer(): Promise<StartedServer> {
// persisted queued work is still being driven forward.
void heartbeat
.reapOrphanedRuns({ staleThresholdMs: 5 * 60 * 1000 })
.then(() => heartbeat.resumeQueuedRuns())
.then(async () => {
.then(() => heartbeat.promoteDueScheduledRetries())
.then(async (promotion) => {
await heartbeat.resumeQueuedRuns();
const reconciled = await heartbeat.reconcileStrandedAssignedIssues();
if (
promotion.promoted > 0 ||
reconciled.dispatchRequeued > 0 ||
reconciled.continuationRequeued > 0 ||
reconciled.escalated > 0
) {
logger.warn({ ...reconciled }, "periodic stranded-issue reconciliation changed assigned issue state");
logger.warn(
{ promotedScheduledRetries: promotion.promoted, promotedScheduledRetryRunIds: promotion.runIds, ...reconciled },
"periodic heartbeat recovery changed assigned issue state",
);
}
})
.then(async () => {

View File

@@ -2,7 +2,7 @@ import { Router } from "express";
import { z } from "zod";
import type { Db } from "@paperclipai/db";
import { validate } from "../middleware/validate.js";
import { activityService } from "../services/activity.js";
import { activityService, normalizeActivityLimit } from "../services/activity.js";
import { assertAuthenticated, assertBoard, assertCompanyAccess } from "./authz.js";
import { heartbeatService, issueService } from "../services/index.js";
import { sanitizeRecord } from "../redaction.js";
@@ -39,6 +39,7 @@ export function activityRoutes(db: Db) {
agentId: req.query.agentId as string | undefined,
entityType: req.query.entityType as string | undefined,
entityId: req.query.entityId as string | undefined,
limit: normalizeActivityLimit(Number(req.query.limit)),
};
const result = await svc.list(filters);
res.json(result);

View File

@@ -2155,7 +2155,6 @@ export function agentRoutes(db: Db) {
res.status(409).json({ error: "Only pending approval agents can be approved" });
return;
}
const approval = await svc.activatePendingApproval(id);
if (!approval) {
res.status(404).json({ error: "Agent not found" });
@@ -2515,7 +2514,13 @@ export function agentRoutes(db: Db) {
return;
}
assertCompanyAccess(req, run.companyId);
res.json(redactCurrentUserValue(run, await getCurrentUserRedactionOptions()));
const retryExhaustedReason = await heartbeat.getRetryExhaustedReason(runId);
res.json(
redactCurrentUserValue(
{ ...run, retryExhaustedReason },
await getCurrentUserRedactionOptions(),
),
);
});
router.post("/heartbeat-runs/:runId/cancel", async (req, res) => {

View File

@@ -173,13 +173,13 @@ function isClosedIssueStatus(status: string | null | undefined): status is "done
return status === "done" || status === "cancelled";
}
function shouldImplicitlyReopenCommentForAgent(input: {
function shouldImplicitlyMoveCommentedIssueToTodoForAgent(input: {
issueStatus: string | null | undefined;
assigneeAgentId: string | null | undefined;
actorType: "agent" | "user";
actorId: string;
}) {
if (!isClosedIssueStatus(input.issueStatus)) return false;
if (!isClosedIssueStatus(input.issueStatus) && input.issueStatus !== "blocked") return false;
if (typeof input.assigneeAgentId !== "string" || input.assigneeAgentId.length === 0) return false;
if (input.actorType === "agent" && input.actorId === input.assigneeAgentId) return false;
return true;
@@ -721,6 +721,7 @@ export function issueRoutes(
inboxArchivedByUserId,
unreadForUserId,
projectId: req.query.projectId as string | undefined,
workspaceId: req.query.workspaceId as string | undefined,
executionWorkspaceId: req.query.executionWorkspaceId as string | undefined,
parentId: req.query.parentId as string | undefined,
labelId: req.query.labelId as string | undefined,
@@ -804,16 +805,29 @@ export function issueRoutes(
? req.query.wakeCommentId.trim()
: null;
const [{ project, goal }, ancestors, commentCursor, wakeComment, relations, attachments, continuationSummary] =
const currentExecutionWorkspacePromise = issue.executionWorkspaceId
? executionWorkspacesSvc.getById(issue.executionWorkspaceId)
: Promise.resolve(null);
const [
{ project, goal },
ancestors,
commentCursor,
wakeComment,
relations,
attachments,
continuationSummary,
currentExecutionWorkspace,
] =
await Promise.all([
resolveIssueProjectAndGoal(issue),
svc.getAncestors(issue.id),
svc.getCommentCursor(issue.id),
wakeCommentId ? svc.getComment(wakeCommentId) : null,
svc.getRelationSummaries(issue.id),
svc.listAttachments(issue.id),
documentsSvc.getIssueDocumentByKey(issue.id, ISSUE_CONTINUATION_SUMMARY_DOCUMENT_KEY),
]);
resolveIssueProjectAndGoal(issue),
svc.getAncestors(issue.id),
svc.getCommentCursor(issue.id),
wakeCommentId ? svc.getComment(wakeCommentId) : null,
svc.getRelationSummaries(issue.id),
svc.listAttachments(issue.id),
documentsSvc.getIssueDocumentByKey(issue.id, ISSUE_CONTINUATION_SUMMARY_DOCUMENT_KEY),
currentExecutionWorkspacePromise,
]);
res.json({
issue: {
@@ -879,6 +893,7 @@ export function issueRoutes(
updatedAt: continuationSummary.updatedAt,
}
: null,
currentExecutionWorkspace,
});
});
@@ -1590,6 +1605,7 @@ export function issueRoutes(
const actor = getActorInfo(req);
const isClosed = isClosedIssueStatus(existing.status);
const isBlocked = existing.status === "blocked";
const normalizedAssigneeAgentId = await normalizeIssueAssigneeAgentReference(
existing.companyId,
req.body.assigneeAgentId as string | null | undefined,
@@ -1608,10 +1624,10 @@ export function issueRoutes(
} = req.body;
const requestedAssigneeAgentId =
normalizedAssigneeAgentId === undefined ? existing.assigneeAgentId : normalizedAssigneeAgentId;
const effectiveReopenRequested =
const effectiveMoveToTodoRequested =
reopenRequested ||
(!!commentBody &&
shouldImplicitlyReopenCommentForAgent({
shouldImplicitlyMoveCommentedIssueToTodoForAgent({
issueStatus: existing.status,
assigneeAgentId: requestedAssigneeAgentId,
actorType: actor.actorType,
@@ -1620,6 +1636,10 @@ export function issueRoutes(
const updateReferenceSummaryBefore = titleOrDescriptionChanged
? await issueReferencesSvc.listIssueReferenceSummary(existing.id)
: null;
const hasUnresolvedFirstClassBlockers =
isBlocked && effectiveMoveToTodoRequested
? (await svc.getDependencyReadiness(existing.id)).unresolvedBlockerCount > 0
: false;
let interruptedRunId: string | null = null;
const closedExecutionWorkspace = await getClosedIssueExecutionWorkspace(existing);
const isAgentWorkUpdate = req.actor.type === "agent" && Object.keys(updateFields).length > 0;
@@ -1662,7 +1682,12 @@ export function issueRoutes(
if (hiddenAtRaw !== undefined) {
updateFields.hiddenAt = hiddenAtRaw ? new Date(hiddenAtRaw) : null;
}
if (commentBody && effectiveReopenRequested && isClosed && updateFields.status === undefined) {
if (
commentBody &&
effectiveMoveToTodoRequested &&
(isClosed || (isBlocked && !hasUnresolvedFirstClassBlockers)) &&
updateFields.status === undefined
) {
updateFields.status = "todo";
}
if (req.body.executionPolicy !== undefined) {
@@ -1836,8 +1861,8 @@ export function issueRoutes(
const hasFieldChanges = Object.keys(previous).length > 0;
const reopened =
commentBody &&
effectiveReopenRequested &&
isClosed &&
effectiveMoveToTodoRequested &&
(isClosed || (isBlocked && !hasUnresolvedFirstClassBlockers)) &&
previous.status !== undefined &&
issue.status === "todo";
const reopenFromStatus = reopened ? existing.status : null;
@@ -2025,7 +2050,7 @@ export function issueRoutes(
const statusChangedFromBlockedToTodo =
existing.status === "blocked" &&
issue.status === "todo" &&
req.body.status !== undefined;
(req.body.status !== undefined || reopened);
const previousExecutionState = parseIssueExecutionState(existing.executionState);
const nextExecutionState = parseIssueExecutionState(issue.executionState);
const executionStageWakeup = buildExecutionStageWakeup({
@@ -2596,21 +2621,26 @@ export function issueRoutes(
const reopenRequested = req.body.reopen === true;
const interruptRequested = req.body.interrupt === true;
const isClosed = isClosedIssueStatus(issue.status);
const effectiveReopenRequested =
const isBlocked = issue.status === "blocked";
const effectiveMoveToTodoRequested =
reopenRequested ||
shouldImplicitlyReopenCommentForAgent({
shouldImplicitlyMoveCommentedIssueToTodoForAgent({
issueStatus: issue.status,
assigneeAgentId: issue.assigneeAgentId,
actorType: actor.actorType,
actorId: actor.actorId,
});
const hasUnresolvedFirstClassBlockers =
isBlocked && effectiveMoveToTodoRequested
? (await svc.getDependencyReadiness(issue.id)).unresolvedBlockerCount > 0
: false;
let reopened = false;
let reopenFromStatus: string | null = null;
let interruptedRunId: string | null = null;
let currentIssue = issue;
const commentReferenceSummaryBefore = await issueReferencesSvc.listIssueReferenceSummary(issue.id);
if (effectiveReopenRequested && isClosed) {
if (effectiveMoveToTodoRequested && (isClosed || (isBlocked && !hasUnresolvedFirstClassBlockers))) {
const reopenedIssue = await svc.update(id, { status: "todo" });
if (!reopenedIssue) {
res.status(404).json({ error: "Issue not found" });

View File

@@ -1,4 +1,4 @@
import { and, desc, eq, isNull, or, sql } from "drizzle-orm";
import { and, asc, desc, eq, inArray, isNull, or, sql } from "drizzle-orm";
import type { Db } from "@paperclipai/db";
import {
activityLog,
@@ -21,6 +21,15 @@ export interface ActivityFilters {
agentId?: string;
entityType?: string;
entityId?: string;
limit?: number;
}
/** Default number of activity rows returned when the caller gives no limit. */
const DEFAULT_ACTIVITY_LIMIT = 100;
/** Hard cap protecting the activity query from unbounded result sets. */
const MAX_ACTIVITY_LIMIT = 500;

/**
 * Clamp a caller-supplied activity list limit into [1, MAX_ACTIVITY_LIMIT].
 *
 * @param limit - Requested row limit; `undefined`, `NaN`, and infinite values
 *   fall back to {@link DEFAULT_ACTIVITY_LIMIT}. Fractional values are floored,
 *   and values below 1 are raised to 1.
 * @returns An integer limit safe to pass to the database query.
 */
export function normalizeActivityLimit(limit: number | undefined) {
  // Checking `undefined` explicitly narrows the type, so the floor below no
  // longer needs the dead `?? DEFAULT_ACTIVITY_LIMIT` fallback the guard made
  // unreachable. Non-finite numbers (NaN, ±Infinity) also take the default.
  if (limit === undefined || !Number.isFinite(limit)) return DEFAULT_ACTIVITY_LIMIT;
  return Math.max(1, Math.min(MAX_ACTIVITY_LIMIT, Math.floor(limit)));
}
export function activityService(db: Db) {
@@ -316,6 +325,7 @@ export function activityService(db: Db) {
return {
list: (filters: ActivityFilters) => {
const conditions = [eq(activityLog.companyId, filters.companyId)];
const limit = normalizeActivityLimit(filters.limit);
if (filters.agentId) {
conditions.push(eq(activityLog.agentId, filters.agentId));
@@ -347,6 +357,7 @@ export function activityService(db: Db) {
),
)
.orderBy(desc(activityLog.createdAt))
.limit(limit)
.then((rows) => rows.map((r) => r.activityLog));
},
@@ -364,7 +375,7 @@ export function activityService(db: Db) {
runsForIssue: async (companyId: string, issueId: string) => {
scheduleRunLivenessBackfill(companyId, issueId);
return db
const runs = await db
.select({
runId: heartbeatRuns.id,
status: heartbeatRuns.status,
@@ -377,6 +388,10 @@ export function activityService(db: Db) {
usageJson: summarizedUsageJson,
resultJson: summarizedResultJson,
logBytes: heartbeatRuns.logBytes,
retryOfRunId: heartbeatRuns.retryOfRunId,
scheduledRetryAt: heartbeatRuns.scheduledRetryAt,
scheduledRetryAttempt: heartbeatRuns.scheduledRetryAttempt,
scheduledRetryReason: heartbeatRuns.scheduledRetryReason,
livenessState: heartbeatRuns.livenessState,
livenessReason: heartbeatRuns.livenessReason,
continuationAttempt: heartbeatRuns.continuationAttempt,
@@ -408,6 +423,34 @@ export function activityService(db: Db) {
),
)
.orderBy(desc(heartbeatRuns.createdAt));
if (runs.length === 0) return runs;
const exhaustionRows = await db
.select({
runId: heartbeatRunEvents.runId,
message: heartbeatRunEvents.message,
})
.from(heartbeatRunEvents)
.where(
and(
inArray(heartbeatRunEvents.runId, runs.map((run) => run.runId)),
eq(heartbeatRunEvents.eventType, "lifecycle"),
sql`${heartbeatRunEvents.message} like 'Bounded retry exhausted%'`,
),
)
.orderBy(asc(heartbeatRunEvents.runId), desc(heartbeatRunEvents.id));
const retryExhaustedReasonByRunId = new Map<string, string>();
for (const row of exhaustionRows) {
if (!row.message || retryExhaustedReasonByRunId.has(row.runId)) continue;
retryExhaustedReasonByRunId.set(row.runId, row.message);
}
return runs.map((run) => ({
...run,
retryExhaustedReason: retryExhaustedReasonByRunId.get(run.runId) ?? null,
}));
},
issuesForRun: async (runId: string) => {

View File

@@ -10,6 +10,10 @@ function formatUtcDateKey(date: Date): string {
return date.toISOString().slice(0, 10);
}
/**
 * Return midnight UTC on the first day of the month containing `date`.
 */
export function getUtcMonthStart(date: Date): Date {
  const utcYear = date.getUTCFullYear();
  const utcMonthIndex = date.getUTCMonth();
  // Date.UTC defaults the day-of-month argument to 1, i.e. the month start.
  return new Date(Date.UTC(utcYear, utcMonthIndex));
}
function getRecentUtcDateKeys(now: Date, days: number): string[] {
const todayUtc = Date.UTC(now.getUTCFullYear(), now.getUTCMonth(), now.getUTCDate());
return Array.from({ length: days }, (_, index) => {
@@ -76,7 +80,7 @@ export function dashboardService(db: Db) {
}
const now = new Date();
const monthStart = new Date(Date.UTC(now.getUTCFullYear(), now.getUTCMonth(), 1));
const monthStart = getUtcMonthStart(now);
const runActivityDays = getRecentUtcDateKeys(now, DASHBOARD_RUN_ACTIVITY_DAYS);
const runActivityStart = new Date(`${runActivityDays[0]}T00:00:00.000Z`);
const [{ monthSpend }] = await db

View File

@@ -3,7 +3,7 @@ import path from "node:path";
import { execFile as execFileCallback } from "node:child_process";
import { promisify } from "node:util";
import { randomUUID } from "node:crypto";
import { and, asc, desc, eq, getTableColumns, gt, inArray, isNull, notInArray, or, sql } from "drizzle-orm";
import { and, asc, desc, eq, getTableColumns, gt, inArray, isNull, lte, notInArray, or, sql } from "drizzle-orm";
import type { Db } from "@paperclipai/db";
import {
AGENT_DEFAULT_MAX_CONCURRENT_RUNS,
@@ -134,8 +134,31 @@ const MAX_INLINE_WAKE_COMMENTS = 8;
const MAX_INLINE_WAKE_COMMENT_BODY_CHARS = 4_000;
const MAX_INLINE_WAKE_COMMENT_BODY_TOTAL_CHARS = 12_000;
const execFile = promisify(execFileCallback);
// Runs currently occupying a worker slot (a live or about-to-be-live process).
const ACTIVE_HEARTBEAT_RUN_STATUSES = ["queued", "running"] as const;
// Runs that count as a live "execution path" for an issue: a scheduled retry
// still reserves the issue even though no process is running yet.
const EXECUTION_PATH_HEARTBEAT_RUN_STATUSES = ["queued", "running", "scheduled_retry"] as const;
// Runs a control-plane cancel request may still stop before/without a process.
const CANCELLABLE_HEARTBEAT_RUN_STATUSES = ["queued", "running", "scheduled_retry"] as const;
// Terminal statuses that did not complete successfully.
const UNSUCCESSFUL_HEARTBEAT_RUN_TERMINAL_STATUSES = ["failed", "cancelled", "timed_out"] as const;
// Base backoff schedule for bounded transient-failure retries: 2m, 10m, 30m, 2h.
// Attempt N (1-based) uses index N-1; running past the end means exhaustion.
export const BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS = [
  2 * 60 * 1000,
  10 * 60 * 1000,
  30 * 60 * 1000,
  2 * 60 * 60 * 1000,
] as const;
// Each base delay is jittered by up to ±25% to avoid synchronized retry storms.
const BOUNDED_TRANSIENT_HEARTBEAT_RETRY_JITTER_RATIO = 0.25;
// Reason string recorded on the scheduled retry run itself.
const BOUNDED_TRANSIENT_HEARTBEAT_RETRY_REASON = "transient_failure";
// Reason string used for the wakeup request that carries the retry.
const BOUNDED_TRANSIENT_HEARTBEAT_RETRY_WAKE_REASON = "transient_failure_retry";
// The attempt budget is implied by the length of the delay table above.
const BOUNDED_TRANSIENT_HEARTBEAT_RETRY_MAX_ATTEMPTS = BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS.length;
/**
 * Escalating fallback strategies used when retrying a Codex run after a
 * transient upstream failure: first reuse the existing session, then switch to
 * a safer invocation, then a fresh session, and finally both together.
 */
type CodexTransientFallbackMode =
  | "same_session"
  | "safer_invocation"
  | "fresh_session"
  | "fresh_session_safer_invocation";

/** Map a 1-based retry attempt number onto the fallback mode to use for it. */
function resolveCodexTransientFallbackMode(attempt: number): CodexTransientFallbackMode {
  return attempt <= 1
    ? "same_session"
    : attempt === 2
      ? "safer_invocation"
      : attempt === 3
        ? "fresh_session"
        : "fresh_session_safer_invocation";
}
const RUNNING_ISSUE_WAKE_REASONS_REQUIRING_FOLLOWUP = new Set(["approval_approved"]);
const SESSIONED_LOCAL_ADAPTERS = new Set([
"claude_local",
@@ -211,6 +234,26 @@ export function applyRunScopedMentionedSkillKeys(
]);
}
/**
 * Compute the timing of the next bounded transient-failure retry.
 *
 * @param attempt - 1-based retry attempt; must index into the delay table.
 * @param now - Clock override, injectable for tests.
 * @param random - Jitter source in [0, 1), injectable for tests.
 * @returns The schedule (attempt, base delay, jittered delay, due time, and
 *   the overall attempt budget), or `null` when `attempt` is not a positive
 *   integer or exceeds the delay table — i.e. the retry budget is exhausted.
 */
export function computeBoundedTransientHeartbeatRetrySchedule(
  attempt: number,
  now = new Date(),
  random: () => number = Math.random,
) {
  if (!Number.isInteger(attempt) || attempt <= 0) return null;
  const baseDelayMs = BOUNDED_TRANSIENT_HEARTBEAT_RETRY_DELAYS_MS[attempt - 1];
  if (typeof baseDelayMs !== "number") return null;
  // Clamp the sample into [0, 1] so the multiplier stays within ±jitter ratio,
  // then map it onto [1 - ratio, 1 + ratio].
  const clampedSample = Math.max(0, Math.min(1, random()));
  const jitterFactor = 1 + (clampedSample * 2 - 1) * BOUNDED_TRANSIENT_HEARTBEAT_RETRY_JITTER_RATIO;
  // Never schedule closer than one second out.
  const delayMs = Math.max(1_000, Math.round(baseDelayMs * jitterFactor));
  return {
    attempt,
    baseDelayMs,
    delayMs,
    dueAt: new Date(now.getTime() + delayMs),
    maxAttempts: BOUNDED_TRANSIENT_HEARTBEAT_RETRY_MAX_ATTEMPTS,
  };
}
async function resolveRunScopedMentionedSkillKeys(input: {
db: Db;
companyId: string;
@@ -466,6 +509,9 @@ const heartbeatRunListColumns = {
processStartedAt: heartbeatRuns.processStartedAt,
retryOfRunId: heartbeatRuns.retryOfRunId,
processLossRetryCount: heartbeatRuns.processLossRetryCount,
scheduledRetryAt: heartbeatRuns.scheduledRetryAt,
scheduledRetryAttempt: heartbeatRuns.scheduledRetryAttempt,
scheduledRetryReason: heartbeatRuns.scheduledRetryReason,
livenessState: heartbeatRuns.livenessState,
livenessReason: heartbeatRuns.livenessReason,
continuationAttempt: heartbeatRuns.continuationAttempt,
@@ -1192,6 +1238,51 @@ function shouldRequireIssueCommentForWake(
);
}
// Wake reasons that represent a direct comment/mention interaction on an
// issue, which may proceed even while the issue is dependency-blocked.
const BLOCKED_INTERACTION_WAKE_REASONS = new Set([
  "issue_commented",
  "issue_reopened_via_comment",
  "issue_comment_mentioned",
]);

/**
 * Decide whether a wake may run against a dependency-blocked issue.
 * Only interaction wakes that carry a resolvable comment id qualify, so
 * blocked issues wake solely in response to a concrete comment.
 */
function allowsBlockedIssueInteractionWake(
  contextSnapshot: Record<string, unknown> | null | undefined,
) {
  const wakeReason = readNonEmptyString(contextSnapshot?.wakeReason);
  if (!wakeReason) return false;
  if (!BLOCKED_INTERACTION_WAKE_REASONS.has(wakeReason)) return false;
  // The snapshot must also resolve to a concrete comment id.
  return Boolean(deriveCommentId(contextSnapshot, null));
}
/**
 * Load display summaries (identifier, title, status, priority, assignees) for
 * the given unresolved blocker issues of `issueId`, ordered by title.
 * Empty ids and duplicates in the input are dropped before querying.
 *
 * NOTE(review): assumes "blocks" relations point from the blocker (`issueId`
 * column) to the blocked issue (`relatedIssueId`) — confirm against schema.
 */
async function listUnresolvedBlockerSummaries(
  dbOrTx: Pick<Db, "select">,
  companyId: string,
  issueId: string,
  unresolvedBlockerIssueIds: string[],
) {
  const uniqueIds = Array.from(new Set(unresolvedBlockerIssueIds.filter((id) => Boolean(id))));
  if (uniqueIds.length === 0) return [];
  return dbOrTx
    .select({
      id: issues.id,
      identifier: issues.identifier,
      title: issues.title,
      status: issues.status,
      priority: issues.priority,
      assigneeAgentId: issues.assigneeAgentId,
      assigneeUserId: issues.assigneeUserId,
    })
    .from(issueRelations)
    .innerJoin(issues, eq(issueRelations.issueId, issues.id))
    .where(
      and(
        eq(issueRelations.companyId, companyId),
        eq(issueRelations.type, "blocks"),
        eq(issueRelations.relatedIssueId, issueId),
        inArray(issues.id, uniqueIds),
      ),
    )
    .orderBy(asc(issues.title));
}
export function formatRuntimeWorkspaceWarningLog(warning: string) {
return {
stream: "stdout" as const,
@@ -1525,6 +1616,13 @@ async function buildPaperclipWakePayload(input: {
}
: null,
checkedOutByHarness: input.contextSnapshot[PAPERCLIP_HARNESS_CHECKOUT_KEY] === true,
dependencyBlockedInteraction: input.contextSnapshot.dependencyBlockedInteraction === true,
unresolvedBlockerIssueIds: Array.isArray(input.contextSnapshot.unresolvedBlockerIssueIds)
? input.contextSnapshot.unresolvedBlockerIssueIds.filter((value): value is string => typeof value === "string" && value.length > 0)
: [],
unresolvedBlockerSummaries: Array.isArray(input.contextSnapshot.unresolvedBlockerSummaries)
? input.contextSnapshot.unresolvedBlockerSummaries
: [],
executionStage: Object.keys(executionStage).length > 0 ? executionStage : null,
continuationSummary: continuationSummary
? {
@@ -3057,6 +3155,219 @@ export function heartbeatService(db: Db) {
return queued;
}
// Schedule a bounded follow-up retry for a run that failed transiently.
// Creates a wakeup request plus a "scheduled_retry" heartbeat run inside one
// transaction, links them both ways, and hands the issue's execution lock from
// the failed run to the scheduled retry. Returns an object tagged either
// "scheduled" (with the new run and due time) or "retry_exhausted" once the
// attempt count runs past the bounded delay table.
async function scheduleBoundedRetryForRun(
  run: typeof heartbeatRuns.$inferSelect,
  agent: typeof agents.$inferSelect,
  opts?: {
    now?: Date; // injectable clock for tests
    random?: () => number; // injectable jitter source for tests
    retryReason?: string;
    wakeReason?: string;
  },
) {
  const now = opts?.now ?? new Date();
  const retryReason = opts?.retryReason ?? BOUNDED_TRANSIENT_HEARTBEAT_RETRY_REASON;
  const wakeReason = opts?.wakeReason ?? BOUNDED_TRANSIENT_HEARTBEAT_RETRY_WAKE_REASON;
  // Attempts are 1-based; the failed run's recorded attempt seeds the next one.
  const nextAttempt = (run.scheduledRetryAttempt ?? 0) + 1;
  const schedule = computeBoundedTransientHeartbeatRetrySchedule(nextAttempt, now, opts?.random);
  // Codex-local transient upstream failures escalate through fallback modes
  // (same session -> safer invocation -> fresh session -> both) per attempt.
  const codexTransientFallbackMode =
    agent.adapterType === "codex_local" && retryReason === BOUNDED_TRANSIENT_HEARTBEAT_RETRY_REASON && run.errorCode === "codex_transient_upstream"
      ? resolveCodexTransientFallbackMode(nextAttempt)
      : null;
  if (!schedule) {
    // Delay table exhausted: record a lifecycle warning on the failed run
    // (later surfaced as the "retry exhausted" reason) and stop retrying.
    await appendRunEvent(run, await nextRunEventSeq(run.id), {
      eventType: "lifecycle",
      stream: "system",
      level: "warn",
      message: `Bounded retry exhausted after ${run.scheduledRetryAttempt ?? 0} scheduled attempts; no further automatic retry will be queued`,
      payload: {
        retryReason,
        scheduledRetryAttempt: run.scheduledRetryAttempt ?? 0,
        maxAttempts: BOUNDED_TRANSIENT_HEARTBEAT_RETRY_MAX_ATTEMPTS,
      },
    });
    return {
      outcome: "retry_exhausted" as const,
      attempt: nextAttempt,
      maxAttempts: BOUNDED_TRANSIENT_HEARTBEAT_RETRY_MAX_ATTEMPTS,
    };
  }
  const contextSnapshot = parseObject(run.contextSnapshot);
  const issueId = readNonEmptyString(contextSnapshot.issueId);
  const taskKey = deriveTaskKeyWithHeartbeatFallback(contextSnapshot, null);
  // Resolve the session carried into the retry before opening the transaction.
  // NOTE(review): presumably this may touch the adapter — keep it outside tx.
  const sessionBefore = await resolveSessionBeforeForWakeup(agent, taskKey);
  // Carry the prior context forward, stamped with the retry bookkeeping.
  const retryContextSnapshot: Record<string, unknown> = {
    ...contextSnapshot,
    retryOfRunId: run.id,
    wakeReason,
    retryReason,
    scheduledRetryAttempt: schedule.attempt,
    scheduledRetryAt: schedule.dueAt.toISOString(),
    ...(codexTransientFallbackMode ? { codexTransientFallbackMode } : {}),
  };
  const retryRun = await db.transaction(async (tx) => {
    // 1) Queue the wakeup request that represents the retry.
    const wakeupRequest = await tx
      .insert(agentWakeupRequests)
      .values({
        companyId: run.companyId,
        agentId: run.agentId,
        source: "automation",
        triggerDetail: "system",
        reason: wakeReason,
        payload: {
          ...(issueId ? { issueId } : {}),
          retryOfRunId: run.id,
          retryReason,
          scheduledRetryAttempt: schedule.attempt,
          scheduledRetryAt: schedule.dueAt.toISOString(),
          ...(codexTransientFallbackMode ? { codexTransientFallbackMode } : {}),
        },
        status: "queued",
        requestedByActorType: "system",
        requestedByActorId: null,
        updatedAt: now,
      })
      .returning()
      .then((rows) => rows[0]);
    // 2) Create the scheduled_retry run that will be promoted once due.
    const scheduledRun = await tx
      .insert(heartbeatRuns)
      .values({
        companyId: run.companyId,
        agentId: run.agentId,
        invocationSource: "automation",
        triggerDetail: "system",
        status: "scheduled_retry",
        wakeupRequestId: wakeupRequest.id,
        contextSnapshot: retryContextSnapshot,
        sessionIdBefore: sessionBefore,
        retryOfRunId: run.id,
        scheduledRetryAt: schedule.dueAt,
        scheduledRetryAttempt: schedule.attempt,
        scheduledRetryReason: retryReason,
        continuationAttempt: readContinuationAttempt(retryContextSnapshot.livenessContinuationAttempt),
        updatedAt: now,
      })
      .returning()
      .then((rows) => rows[0]);
    // 3) Back-link the wakeup request to the run it produced.
    await tx
      .update(agentWakeupRequests)
      .set({
        runId: scheduledRun.id,
        updatedAt: now,
      })
      .where(eq(agentWakeupRequests.id, wakeupRequest.id));
    if (issueId) {
      // 4) Hand the issue's execution lock from the failed run to the retry.
      // The `executionRunId = run.id` predicate makes this a no-op if another
      // run already took over the issue.
      await tx
        .update(issues)
        .set({
          executionRunId: scheduledRun.id,
          executionAgentNameKey: normalizeAgentNameKey(agent.name),
          executionLockedAt: now,
          updatedAt: now,
        })
        .where(and(eq(issues.id, issueId), eq(issues.companyId, run.companyId), eq(issues.executionRunId, run.id)));
    }
    return scheduledRun;
  });
  // Record the scheduling decision on the original (failed) run's event log.
  await appendRunEvent(run, await nextRunEventSeq(run.id), {
    eventType: "lifecycle",
    stream: "system",
    level: "warn",
    message: `Scheduled bounded retry ${schedule.attempt}/${schedule.maxAttempts} for ${schedule.dueAt.toISOString()}`,
    payload: {
      retryRunId: retryRun.id,
      retryReason,
      scheduledRetryAttempt: schedule.attempt,
      scheduledRetryAt: schedule.dueAt.toISOString(),
      baseDelayMs: schedule.baseDelayMs,
      delayMs: schedule.delayMs,
      ...(codexTransientFallbackMode ? { codexTransientFallbackMode } : {}),
    },
  });
  return {
    outcome: "scheduled" as const,
    run: retryRun,
    dueAt: schedule.dueAt,
    attempt: schedule.attempt,
    maxAttempts: schedule.maxAttempts,
  };
}
// Promote "scheduled_retry" runs whose due time has passed into the queued run
// pool (up to 50 per sweep), logging a lifecycle event and publishing a live
// "heartbeat.run.queued" event for each promotion. Returns the count and ids
// of the runs promoted in this sweep.
async function promoteDueScheduledRetries(now = new Date()) {
  const dueRuns = await db
    .select()
    .from(heartbeatRuns)
    .where(
      and(
        eq(heartbeatRuns.status, "scheduled_retry"),
        lte(heartbeatRuns.scheduledRetryAt, now),
      ),
    )
    // Oldest due first; createdAt/id break ties deterministically.
    .orderBy(asc(heartbeatRuns.scheduledRetryAt), asc(heartbeatRuns.createdAt), asc(heartbeatRuns.id))
    .limit(50);
  const promotedRunIds: string[] = [];
  for (const dueRun of dueRuns) {
    // Compare-and-swap: re-check status and due time in the UPDATE predicate
    // so a concurrent cancel/promotion makes this a no-op (`promoted` = null).
    const promoted = await db
      .update(heartbeatRuns)
      .set({
        status: "queued",
        updatedAt: now,
      })
      .where(
        and(
          eq(heartbeatRuns.id, dueRun.id),
          eq(heartbeatRuns.status, "scheduled_retry"),
          lte(heartbeatRuns.scheduledRetryAt, now),
        ),
      )
      .returning()
      .then((rows) => rows[0] ?? null);
    if (!promoted) continue;
    promotedRunIds.push(promoted.id);
    await appendRunEvent(promoted, await nextRunEventSeq(promoted.id), {
      eventType: "lifecycle",
      stream: "system",
      level: "info",
      message: "Scheduled retry became due and was promoted to the queued run pool",
      payload: {
        scheduledRetryAttempt: promoted.scheduledRetryAttempt,
        scheduledRetryAt: promoted.scheduledRetryAt ? new Date(promoted.scheduledRetryAt).toISOString() : null,
        scheduledRetryReason: promoted.scheduledRetryReason,
      },
    });
    // Notify live subscribers so the run shows up as queued immediately.
    publishLiveEvent({
      companyId: promoted.companyId,
      type: "heartbeat.run.queued",
      payload: {
        runId: promoted.id,
        agentId: promoted.agentId,
        invocationSource: promoted.invocationSource,
        triggerDetail: promoted.triggerDetail,
        wakeupRequestId: promoted.wakeupRequestId,
      },
    });
  }
  return {
    promoted: promotedRunIds.length,
    runIds: promotedRunIds,
  };
}
function parseHeartbeatPolicy(agent: typeof agents.$inferSelect) {
const runtimeConfig = parseObject(agent.runtimeConfig);
const heartbeat = parseObject(runtimeConfig.heartbeat);
@@ -3133,7 +3444,7 @@ export function heartbeatService(db: Db) {
if (issueId) {
const dependencyReadiness = await issuesSvc.listDependencyReadiness(run.companyId, [issueId]);
const unresolvedBlockerCount = dependencyReadiness.get(issueId)?.unresolvedBlockerCount ?? 0;
if (unresolvedBlockerCount > 0) {
if (unresolvedBlockerCount > 0 && !allowsBlockedIssueInteractionWake(context)) {
logger.debug({ runId: run.id, issueId, unresolvedBlockerCount }, "claimQueuedRun: skipping blocked run");
return null;
}
@@ -3600,7 +3911,7 @@ export function heartbeatService(db: Db) {
.where(
and(
eq(heartbeatRuns.companyId, companyId),
inArray(heartbeatRuns.status, [...ACTIVE_HEARTBEAT_RUN_STATUSES]),
inArray(heartbeatRuns.status, [...EXECUTION_PATH_HEARTBEAT_RUN_STATUSES]),
sql`${heartbeatRuns.contextSnapshot} ->> 'issueId' = ${issueId}`,
),
)
@@ -3666,6 +3977,147 @@ export function heartbeatService(db: Db) {
return queued;
}
/**
 * Render up to five markdown links for the given related issues' identifiers.
 * Missing identifiers and duplicates are dropped (insertion order preserved);
 * when nothing remains, the generic phrase "another open issue" is returned.
 */
function formatIssueLinksForComment(relations: Array<{ identifier?: string | null }>) {
  // Set preserves first-seen order while deduplicating.
  const uniqueIdentifiers = new Set<string>();
  for (const relation of relations) {
    if (relation.identifier) uniqueIdentifiers.add(relation.identifier);
  }
  if (uniqueIdentifiers.size === 0) return "another open issue";
  const links: string[] = [];
  for (const identifier of uniqueIdentifiers) {
    if (links.length >= 5) break;
    // The identifier prefix (e.g. "PAP" in "PAP-12") doubles as the URL's
    // project segment; fall back to "PAP" if the split yields nothing.
    const projectPrefix = identifier.split("-")[0] || "PAP";
    links.push(`[${identifier}](/${projectPrefix}/issues/${identifier})`);
  }
  return links.join(", ");
}
// Recover "orphan blockers": open (todo/blocked) issues that block another
// still-open issue but have no assignee, so no heartbeat would ever pick them
// up. Each one is assigned back to the agent that created it, annotated with
// an explanatory comment, logged, and a wakeup is enqueued for that agent.
// Returns counts of issues assigned vs skipped, plus the assigned issue ids.
async function reconcileUnassignedBlockingIssues() {
  const candidates = await db
    .select({
      id: issues.id,
      companyId: issues.companyId,
      identifier: issues.identifier,
      status: issues.status,
      createdByAgentId: issues.createdByAgentId,
    })
    .from(issueRelations)
    .innerJoin(issues, eq(issueRelations.issueId, issues.id))
    .where(
      and(
        eq(issueRelations.type, "blocks"),
        inArray(issues.status, ["todo", "blocked"]),
        isNull(issues.assigneeAgentId),
        isNull(issues.assigneeUserId),
        sql`${issues.createdByAgentId} is not null`,
        // Only count blockers whose blocked issue is itself still open.
        sql`exists (
          select 1
          from issues blocked_issue
          where blocked_issue.id = ${issueRelations.relatedIssueId}
          and blocked_issue.company_id = ${issues.companyId}
          and blocked_issue.status not in ('done', 'cancelled')
        )`,
      ),
    );
  let assigned = 0;
  let skipped = 0;
  const issueIds: string[] = [];
  // The join can return one row per blocked issue; dedupe by blocker id.
  const seen = new Set<string>();
  for (const candidate of candidates) {
    if (seen.has(candidate.id)) continue;
    seen.add(candidate.id);
    const creatorAgentId = candidate.createdByAgentId;
    if (!creatorAgentId) {
      skipped += 1;
      continue;
    }
    // Only hand the issue back to a creator that is still invokable in the
    // same company.
    const creatorAgent = await getAgent(creatorAgentId);
    if (
      !creatorAgent ||
      creatorAgent.companyId !== candidate.companyId ||
      creatorAgent.status === "paused" ||
      creatorAgent.status === "terminated" ||
      creatorAgent.status === "pending_approval"
    ) {
      skipped += 1;
      continue;
    }
    const relations = await issuesSvc.getRelationSummaries(candidate.id);
    const blockingLinks = formatIssueLinksForComment(relations.blocks);
    const updated = await issuesSvc.update(candidate.id, {
      assigneeAgentId: creatorAgent.id,
      assigneeUserId: null,
    });
    if (!updated) {
      skipped += 1;
      continue;
    }
    // Leave an audit trail on the issue explaining the automatic assignment.
    await issuesSvc.addComment(
      candidate.id,
      [
        "## Assigned Orphan Blocker",
        "",
        `Paperclip found this issue is blocking ${blockingLinks} but had no assignee, so no heartbeat could pick it up.`,
        "",
        "- Assigned it back to the agent that created the blocker.",
        "- Next action: resolve this blocker or reassign it to the right owner.",
      ].join("\n"),
      {},
    );
    await logActivity(db, {
      companyId: candidate.companyId,
      actorType: "system",
      actorId: "system",
      agentId: null,
      runId: null,
      action: "issue.updated",
      entityType: "issue",
      entityId: candidate.id,
      details: {
        identifier: candidate.identifier,
        assigneeAgentId: creatorAgent.id,
        source: "heartbeat.reconcile_unassigned_blocking_issue",
      },
    });
    // Wake the creator so the newly assigned blocker gets worked promptly.
    const queued = await enqueueWakeup(creatorAgent.id, {
      source: "automation",
      triggerDetail: "system",
      reason: "issue_assigned",
      payload: {
        issueId: candidate.id,
        mutation: "unassigned_blocker_recovery",
      },
      requestedByActorType: "system",
      requestedByActorId: null,
      contextSnapshot: {
        issueId: candidate.id,
        taskId: candidate.id,
        wakeReason: "issue_assigned",
        source: "issue.unassigned_blocker_recovery",
      },
    });
    // Only count the issue as recovered if the wakeup was actually queued.
    if (queued) {
      assigned += 1;
      issueIds.push(candidate.id);
    } else {
      skipped += 1;
    }
  }
  return { assigned, skipped, issueIds };
}
async function escalateStrandedAssignedIssue(input: {
issue: typeof issues.$inferSelect;
previousStatus: "todo" | "in_progress";
@@ -3720,6 +4172,7 @@ export function heartbeatService(db: Db) {
const result = {
dispatchRequeued: 0,
continuationRequeued: 0,
orphanBlockersAssigned: 0,
escalated: 0,
skipped: 0,
issueIds: [] as string[],
@@ -3795,7 +4248,6 @@ export function heartbeatService(db: Db) {
result.skipped += 1;
continue;
}
if (didAutomaticRecoveryFail(latestRun, "issue_continuation_needed")) {
const failureSummary = summarizeRunFailureForIssueComment(latestRun);
const updated = await escalateStrandedAssignedIssue({
@@ -3832,6 +4284,11 @@ export function heartbeatService(db: Db) {
}
}
const orphanBlockerRecovery = await reconcileUnassignedBlockingIssues();
result.orphanBlockersAssigned = orphanBlockerRecovery.assigned;
result.skipped += orphanBlockerRecovery.skipped;
result.issueIds.push(...orphanBlockerRecovery.issueIds);
return result;
}
@@ -3895,7 +4352,7 @@ export function heartbeatService(db: Db) {
contextSnapshot: heartbeatRuns.contextSnapshot,
})
.from(heartbeatRuns)
.where(inArray(heartbeatRuns.status, [...ACTIVE_HEARTBEAT_RUN_STATUSES])),
.where(inArray(heartbeatRuns.status, [...EXECUTION_PATH_HEARTBEAT_RUN_STATUSES])),
db
.select({
companyId: agentWakeupRequests.companyId,
@@ -5209,6 +5666,9 @@ export function heartbeatService(db: Db) {
);
}
}
if (outcome === "failed" && livenessRun.errorCode === "codex_transient_upstream") {
await scheduleBoundedRetryForRun(livenessRun, agent);
}
await finalizeIssueCommentPolicy(livenessRun, agent);
await releaseIssueExecutionAndPromote(livenessRun);
await handleRunLivenessContinuation(livenessRun);
@@ -5360,9 +5820,41 @@ export function heartbeatService(db: Db) {
}
}
/**
 * Build the issue comment posted when immediate execution-path recovery was
 * already retried for an assigned issue but it still has no live execution
 * path, just before the issue is moved to `blocked`.
 */
function buildImmediateExecutionPathRecoveryComment(input: {
  status: "todo" | "in_progress";
  latestRun: Pick<typeof heartbeatRuns.$inferSelect, "error" | "errorCode"> | null | undefined;
}) {
  // Optional one-line failure summary derived from the latest run's error;
  // empty string when there is nothing to summarize.
  const failureSuffix = summarizeRunFailureForIssueComment(input.latestRun) ?? "";
  if (input.status === "todo") {
    return `Paperclip automatically retried dispatch for this assigned \`todo\` issue during terminal run recovery, but it still has no live execution path.${failureSuffix} Moving it to \`blocked\` so it is visible for intervention.`;
  }
  return `Paperclip automatically retried continuation for this assigned \`in_progress\` issue during terminal run recovery, but it still has no live execution path.${failureSuffix} Moving it to \`blocked\` so it is visible for intervention.`;
}
async function releaseIssueExecutionAndPromote(run: typeof heartbeatRuns.$inferSelect) {
const runContext = parseObject(run.contextSnapshot);
const contextIssueId = readNonEmptyString(runContext.issueId);
const taskKey = deriveTaskKeyWithHeartbeatFallback(runContext, null);
const recoveryAgent = await getAgent(run.agentId);
const recoveryAgentInvokable =
recoveryAgent &&
recoveryAgent.status !== "paused" &&
recoveryAgent.status !== "terminated" &&
recoveryAgent.status !== "pending_approval";
const recoverySessionBefore = recoveryAgentInvokable
? await resolveSessionBeforeForWakeup(recoveryAgent, taskKey)
: null;
const recoveryAgentNameKey = normalizeAgentNameKey(recoveryAgent?.name);
const promotionResult = await db.transaction(async (tx) => {
if (contextIssueId) {
await tx.execute(
@@ -5380,6 +5872,8 @@ export function heartbeatService(db: Db) {
companyId: issues.companyId,
identifier: issues.identifier,
status: issues.status,
assigneeAgentId: issues.assigneeAgentId,
assigneeUserId: issues.assigneeUserId,
executionRunId: issues.executionRunId,
})
.from(issues)
@@ -5421,7 +5915,7 @@ export function heartbeatService(db: Db) {
.limit(1)
.then((rows) => rows[0] ?? null);
if (!deferred) return null;
if (!deferred) break;
const deferredAgent = await tx
.select()
@@ -5562,16 +6056,165 @@ export function heartbeatService(db: Db) {
.where(eq(issues.id, issue.id));
return {
kind: "promoted" as const,
run: newRun,
reopenedActivity,
};
}
const issueNeedsImmediateRecovery =
(issue.status === "todo" || issue.status === "in_progress") &&
!issue.assigneeUserId &&
issue.assigneeAgentId === run.agentId &&
(run.status === "failed" || run.status === "timed_out" || run.status === "cancelled");
if (!issueNeedsImmediateRecovery) {
return { kind: "released" as const };
}
const existingExecutionPath = await tx
.select({ id: heartbeatRuns.id })
.from(heartbeatRuns)
.where(
and(
eq(heartbeatRuns.companyId, issue.companyId),
inArray(heartbeatRuns.status, [...EXECUTION_PATH_HEARTBEAT_RUN_STATUSES]),
sql`${heartbeatRuns.contextSnapshot} ->> 'issueId' = ${issue.id}`,
sql`${heartbeatRuns.id} <> ${run.id}`,
),
)
.limit(1)
.then((rows) => rows[0] ?? null);
if (existingExecutionPath) {
return { kind: "released" as const };
}
const shouldBlockImmediately =
!recoveryAgentInvokable ||
!recoveryAgent ||
didAutomaticRecoveryFail(run, issue.status === "todo" ? "assignment_recovery" : "issue_continuation_needed");
if (shouldBlockImmediately) {
const comment = buildImmediateExecutionPathRecoveryComment({
status: issue.status as "todo" | "in_progress",
latestRun: run,
});
await tx
.update(issues)
.set({
status: "blocked",
updatedAt: new Date(),
})
.where(eq(issues.id, issue.id));
return {
kind: "blocked" as const,
issueId: issue.id,
issueIdentifier: issue.identifier,
previousStatus: issue.status,
comment,
};
}
const retryReason = issue.status === "todo" ? "assignment_recovery" : "issue_continuation_needed";
const recoveryReason = issue.status === "todo" ? "issue_assignment_recovery" : "issue_continuation_needed";
const recoverySource =
issue.status === "todo" ? "issue.assignment_recovery" : "issue.continuation_recovery";
const now = new Date();
const wakeupRequest = await tx
.insert(agentWakeupRequests)
.values({
companyId: issue.companyId,
agentId: recoveryAgent.id,
source: "automation",
triggerDetail: "system",
reason: recoveryReason,
payload: {
issueId: issue.id,
retryOfRunId: run.id,
},
status: "queued",
requestedByActorType: "system",
requestedByActorId: null,
updatedAt: now,
})
.returning()
.then((rows) => rows[0]);
const queuedRun = await tx
.insert(heartbeatRuns)
.values({
companyId: issue.companyId,
agentId: recoveryAgent.id,
invocationSource: "automation",
triggerDetail: "system",
status: "queued",
wakeupRequestId: wakeupRequest.id,
contextSnapshot: {
issueId: issue.id,
taskId: issue.id,
wakeReason: recoveryReason,
retryReason,
source: recoverySource,
retryOfRunId: run.id,
},
sessionIdBefore: recoverySessionBefore,
retryOfRunId: run.id,
updatedAt: now,
})
.returning()
.then((rows) => rows[0]);
await tx
.update(agentWakeupRequests)
.set({
runId: queuedRun.id,
updatedAt: now,
})
.where(eq(agentWakeupRequests.id, wakeupRequest.id));
await tx
.update(issues)
.set({
executionRunId: queuedRun.id,
executionAgentNameKey: recoveryAgentNameKey,
executionLockedAt: now,
updatedAt: now,
})
.where(eq(issues.id, issue.id));
return {
kind: "queued_recovery" as const,
run: queuedRun,
};
});
if (promotionResult?.kind === "blocked") {
await issuesSvc.addComment(promotionResult.issueId, promotionResult.comment, {});
await logActivity(db, {
companyId: run.companyId,
actorType: "system",
actorId: "system",
agentId: null,
runId: run.id,
action: "issue.updated",
entityType: "issue",
entityId: promotionResult.issueId,
details: {
identifier: promotionResult.issueIdentifier,
status: "blocked",
previousStatus: promotionResult.previousStatus,
source: "heartbeat.release_issue_execution_and_promote",
latestRunId: run.id,
latestRunStatus: run.status,
latestRunErrorCode: run.errorCode ?? null,
},
});
return;
}
const promotedRun = promotionResult?.run ?? null;
if (!promotedRun) return;
if (promotionResult?.reopenedActivity) {
if (promotionResult?.kind === "promoted" && promotionResult.reopenedActivity) {
await logActivity(db, promotionResult.reopenedActivity);
}
@@ -5737,7 +6380,12 @@ export function heartbeatService(db: Db) {
.then((rows) => rows[0] ?? null)
: null;
if (activeExecutionRun && activeExecutionRun.status !== "queued" && activeExecutionRun.status !== "running") {
if (
activeExecutionRun &&
!EXECUTION_PATH_HEARTBEAT_RUN_STATUSES.includes(
activeExecutionRun.status as (typeof EXECUTION_PATH_HEARTBEAT_RUN_STATUSES)[number],
)
) {
activeExecutionRun = null;
}
@@ -5760,7 +6408,7 @@ export function heartbeatService(db: Db) {
.where(
and(
eq(heartbeatRuns.companyId, issue.companyId),
inArray(heartbeatRuns.status, ["queued", "running"]),
inArray(heartbeatRuns.status, [...EXECUTION_PATH_HEARTBEAT_RUN_STATUSES]),
sql`${heartbeatRuns.contextSnapshot} ->> 'issueId' = ${issue.id}`,
),
)
@@ -5790,6 +6438,53 @@ export function heartbeatService(db: Db) {
}
}
const dependencyReadiness = await issuesSvc.listDependencyReadiness(
issue.companyId,
[issue.id],
tx,
).then((rows) => rows.get(issue.id) ?? null);
// Blocked descendants should stay idle until the final blocker resolves.
// Human comment/mention wakes are the exception: they may run in a
// bounded interaction mode so the assignee can answer or triage.
const blockedInteractionWake =
dependencyReadiness &&
!dependencyReadiness.isDependencyReady &&
allowsBlockedIssueInteractionWake(enrichedContextSnapshot);
if (blockedInteractionWake) {
enrichedContextSnapshot.dependencyBlockedInteraction = true;
enrichedContextSnapshot.unresolvedBlockerIssueIds = dependencyReadiness.unresolvedBlockerIssueIds;
enrichedContextSnapshot.unresolvedBlockerCount = dependencyReadiness.unresolvedBlockerCount;
enrichedContextSnapshot.unresolvedBlockerSummaries = await listUnresolvedBlockerSummaries(
tx,
issue.companyId,
issue.id,
dependencyReadiness.unresolvedBlockerIssueIds,
);
}
if (!activeExecutionRun && dependencyReadiness && !dependencyReadiness.isDependencyReady && !blockedInteractionWake) {
await tx.insert(agentWakeupRequests).values({
companyId: agent.companyId,
agentId,
source,
triggerDetail,
reason: "issue_dependencies_blocked",
payload: {
...(payload ?? {}),
issueId,
unresolvedBlockerIssueIds: dependencyReadiness.unresolvedBlockerIssueIds,
},
status: "skipped",
requestedByActorType: opts.requestedByActorType ?? null,
requestedByActorId: opts.requestedByActorId ?? null,
idempotencyKey: opts.idempotencyKey ?? null,
finishedAt: new Date(),
});
return { kind: "skipped" as const };
}
if (activeExecutionRun) {
const executionAgent = await tx
.select({ name: agents.name })
@@ -5977,12 +6672,15 @@ export function heartbeatService(db: Db) {
const activeRuns = await db
.select()
.from(heartbeatRuns)
.where(and(eq(heartbeatRuns.agentId, agentId), inArray(heartbeatRuns.status, ["queued", "running"])))
.where(and(eq(heartbeatRuns.agentId, agentId), inArray(heartbeatRuns.status, [...EXECUTION_PATH_HEARTBEAT_RUN_STATUSES])))
.orderBy(desc(heartbeatRuns.createdAt));
const sameScopeQueuedRun = activeRuns.find(
(candidate) => candidate.status === "queued" && isSameTaskScope(runTaskKey(candidate), taskKey),
);
const sameScopeScheduledRetryRun = activeRuns.find(
(candidate) => candidate.status === "scheduled_retry" && isSameTaskScope(runTaskKey(candidate), taskKey),
);
const sameScopeRunningRun = activeRuns.find(
(candidate) => candidate.status === "running" && isSameTaskScope(runTaskKey(candidate), taskKey),
);
@@ -5993,6 +6691,7 @@ export function heartbeatService(db: Db) {
const coalescedTargetRun =
sameScopeQueuedRun ??
sameScopeScheduledRetryRun ??
(shouldQueueFollowupForRunningWake ? null : sameScopeRunningRun ?? null);
if (coalescedTargetRun) {
@@ -6103,7 +6802,7 @@ export function heartbeatService(db: Db) {
.where(
and(
eq(heartbeatRuns.companyId, companyId),
inArray(heartbeatRuns.status, ["queued", "running"]),
inArray(heartbeatRuns.status, [...CANCELLABLE_HEARTBEAT_RUN_STATUSES]),
sql`${effectiveProjectId} = ${projectId}`,
),
);
@@ -6188,7 +6887,7 @@ export function heartbeatService(db: Db) {
async function cancelRunInternal(runId: string, reason = "Cancelled by control plane") {
const run = await getRun(runId);
if (!run) throw notFound("Heartbeat run not found");
if (run.status !== "running" && run.status !== "queued") return run;
if (!CANCELLABLE_HEARTBEAT_RUN_STATUSES.includes(run.status as (typeof CANCELLABLE_HEARTBEAT_RUN_STATUSES)[number])) return run;
const agent = await getAgent(run.agentId);
const running = runningProcesses.get(run.id);
@@ -6244,7 +6943,7 @@ export function heartbeatService(db: Db) {
const runs = await db
.select()
.from(heartbeatRuns)
.where(and(eq(heartbeatRuns.agentId, agentId), inArray(heartbeatRuns.status, ["queued", "running"])));
.where(and(eq(heartbeatRuns.agentId, agentId), inArray(heartbeatRuns.status, [...CANCELLABLE_HEARTBEAT_RUN_STATUSES])));
for (const run of runs) {
await setRunStatus(run.id, "cancelled", {
@@ -6300,7 +6999,7 @@ export function heartbeatService(db: Db) {
.where(
and(
eq(heartbeatRuns.companyId, scope.companyId),
inArray(heartbeatRuns.status, ["queued", "running"]),
inArray(heartbeatRuns.status, [...CANCELLABLE_HEARTBEAT_RUN_STATUSES]),
),
)
.then((rows) => rows.map((row) => row.id))
@@ -6471,6 +7170,25 @@ export function heartbeatService(db: Db) {
.orderBy(asc(heartbeatRunEvents.seq))
.limit(Math.max(1, Math.min(limit, 1000))),
// Look up the lifecycle event message explaining why bounded retries were
// exhausted for this run; returns null when no such event was recorded.
getRetryExhaustedReason: async (runId: string) => {
  const rows = await db
    .select({ message: heartbeatRunEvents.message })
    .from(heartbeatRunEvents)
    .where(
      and(
        eq(heartbeatRunEvents.runId, runId),
        eq(heartbeatRunEvents.eventType, "lifecycle"),
        // Prefix match on the canonical exhaustion message written by the retry scheduler.
        sql`${heartbeatRunEvents.message} like 'Bounded retry exhausted%'`,
      ),
    )
    .orderBy(desc(heartbeatRunEvents.id))
    .limit(1);
  return rows[0]?.message ?? null;
},
readLog: async (
runOrLookup: string | {
id: string;
@@ -6525,8 +7243,26 @@ export function heartbeatService(db: Db) {
reapOrphanedRuns,
promoteDueScheduledRetries,
resumeQueuedRuns,
// Public wrapper around scheduleBoundedRetryForRun: resolves the run and its
// agent up front so callers receive a typed outcome instead of an exception
// when either record is missing.
scheduleBoundedRetry: async (
  runId: string,
  opts?: {
    now?: Date;
    random?: () => number;
    retryReason?: string;
    wakeReason?: string;
  },
) => {
  const run = await getRun(runId, { unsafeFullResultJson: true });
  if (!run) {
    return { outcome: "missing_run" as const };
  }
  const agent = await getAgent(run.agentId);
  if (!agent) {
    return { outcome: "missing_agent" as const };
  }
  return scheduleBoundedRetryForRun(run, agent, opts);
},
reconcileStrandedAssignedIssues,
reconcileIssueGraphLiveness,

View File

@@ -1,3 +1,4 @@
import { Buffer } from "node:buffer";
import { and, asc, desc, eq, inArray, isNull, ne, or, sql } from "drizzle-orm";
import type { Db } from "@paperclipai/db";
import {
@@ -79,6 +80,7 @@ export interface IssueFilters {
inboxArchivedByUserId?: string;
unreadForUserId?: string;
projectId?: string;
workspaceId?: string;
executionWorkspaceId?: string;
parentId?: string;
labelId?: string;
@@ -168,6 +170,7 @@ function sameRunLock(checkoutRunId: string | null, actorRunId: string | null) {
const TERMINAL_HEARTBEAT_RUN_STATUSES = new Set(["succeeded", "failed", "cancelled", "timed_out"]);
const ISSUE_LIST_DESCRIPTION_MAX_CHARS = 1200;
const ISSUE_LIST_DESCRIPTION_MAX_BYTES = ISSUE_LIST_DESCRIPTION_MAX_CHARS * 4;
function escapeLikePattern(value: string): string {
return value.replace(/[\\%_]/g, "\\$&");
@@ -191,6 +194,16 @@ function truncateInlineSummary(value: string | null | undefined, maxChars = CHIL
return normalized.length > maxChars ? `${normalized.slice(0, Math.max(0, maxChars - 15)).trimEnd()} [truncated]` : normalized;
}
/**
 * Truncate `value` to at most `maxChars` Unicode code points, never splitting
 * a surrogate pair. Returns the original string when it already fits.
 */
function truncateByCodePoint(value: string, maxChars: number): string {
  // Fast path: the UTF-16 length is an upper bound on the code-point count,
  // so a string short enough here can never need truncation.
  if (value.length <= maxChars) return value;
  const codePoints = [...value];
  return codePoints.slice(0, maxChars).join("");
}
/**
 * Decode a base64-encoded text preview column back to UTF-8 and cap it at
 * `maxChars` code points. Null/undefined input passes through as null.
 */
function decodeDatabaseTextPreview(value: string | null | undefined, maxChars: number): string | null {
  if (value == null) return null;
  const decoded = Buffer.from(value, "base64").toString("utf8");
  return truncateByCodePoint(decoded, maxChars);
}
function appendAcceptanceCriteriaToDescription(description: string | null | undefined, acceptanceCriteria: string[] | undefined) {
const criteria = (acceptanceCriteria ?? []).map((item) => item.trim()).filter(Boolean);
if (criteria.length === 0) return description ?? null;
@@ -275,7 +288,6 @@ async function listUnresolvedBlockerIssueIds(
)
.then((rows) => rows.map((row) => row.id));
}
async function getProjectDefaultGoalId(
db: ProjectGoalReader,
companyId: string,
@@ -681,7 +693,13 @@ const issueListSelect = {
description: sql<string | null>`
CASE
WHEN ${issues.description} IS NULL THEN NULL
ELSE substring(${issues.description} FROM 1 FOR ${ISSUE_LIST_DESCRIPTION_MAX_CHARS})
ELSE encode(
substring(
convert_to(${issues.description}, current_setting('server_encoding'))
FROM 1 FOR ${ISSUE_LIST_DESCRIPTION_MAX_BYTES}
),
'base64'
)
END
`,
status: issues.status,
@@ -699,6 +717,7 @@ const issueListSelect = {
originKind: issues.originKind,
originId: issues.originId,
originRunId: issues.originRunId,
originFingerprint: issues.originFingerprint,
requestDepth: issues.requestDepth,
billingCode: issues.billingCode,
assigneeAdapterOverrides: issues.assigneeAdapterOverrides,
@@ -1275,6 +1294,12 @@ export function issueService(db: Db) {
conditions.push(unreadForUserCondition(companyId, unreadForUserId));
}
if (filters?.projectId) conditions.push(eq(issues.projectId, filters.projectId));
if (filters?.workspaceId) {
conditions.push(or(
eq(issues.executionWorkspaceId, filters.workspaceId),
eq(issues.projectWorkspaceId, filters.workspaceId),
)!);
}
if (filters?.executionWorkspaceId) {
conditions.push(eq(issues.executionWorkspaceId, filters.executionWorkspaceId));
}
@@ -1327,7 +1352,10 @@ export function issueService(db: Db) {
desc(canonicalLastActivityAt),
desc(issues.updatedAt),
);
const rows = limit === undefined ? await baseQuery : await baseQuery.limit(limit);
const rows = (limit === undefined ? await baseQuery : await baseQuery.limit(limit)).map((row) => ({
...row,
description: decodeDatabaseTextPreview(row.description, ISSUE_LIST_DESCRIPTION_MAX_CHARS),
}));
const withLabels = await withIssueLabels(db, rows);
const runMap = await activeRunMapForIssues(db, withLabels);
const withRuns = withActiveRuns(withLabels, runMap);

View File

@@ -47,7 +47,7 @@ import { queueIssueAssignmentWakeup, type IssueAssignmentWakeupDeps } from "./is
import { logActivity } from "./activity-log.js";
const OPEN_ISSUE_STATUSES = ["backlog", "todo", "in_progress", "in_review", "blocked"];
const LIVE_HEARTBEAT_RUN_STATUSES = ["queued", "running"];
const LIVE_HEARTBEAT_RUN_STATUSES = ["queued", "running", "scheduled_retry"];
const TERMINAL_ISSUE_STATUSES = new Set(["done", "cancelled"]);
const MAX_CATCH_UP_RUNS = 25;
const WEEKDAY_INDEX: Record<string, number> = {
@@ -320,6 +320,37 @@ function mergeRoutineRunPayload(
};
}
/**
 * Canonicalize an arbitrary value for fingerprint hashing: `undefined` maps to
 * null, Dates become ISO strings, record keys are sorted recursively, and any
 * other non-JSON value is stringified. The output feeds JSON.stringify, so two
 * semantically equal dispatch inputs always produce the same canonical form.
 */
function normalizeRoutineDispatchFingerprintValue(value: unknown): unknown {
  if (value === undefined) return null;
  const isJsonPrimitive =
    value === null || typeof value === "string" || typeof value === "number" || typeof value === "boolean";
  if (isJsonPrimitive) return value;
  if (value instanceof Date) return value.toISOString();
  if (Array.isArray(value)) {
    return value.map((item) => normalizeRoutineDispatchFingerprintValue(item));
  }
  if (isPlainRecord(value)) {
    const sortedEntries = Object.keys(value)
      .sort()
      .map((key) => [key, normalizeRoutineDispatchFingerprintValue(value[key])]);
    return Object.fromEntries(sortedEntries);
  }
  // Anything else (functions, symbols, class instances) degrades to its string form.
  return String(value);
}
/**
 * Derive a stable sha256 hex fingerprint for a routine dispatch so duplicate
 * dispatches (same payload/target/workspace/title/description) can be
 * coalesced against each other and against open routine-execution issues.
 *
 * NOTE(review): an explicitly-passed `undefined` optional field normalizes to
 * null, but an *omitted* key changes the canonical JSON — callers should pass
 * every optional field explicitly (the current call site does, via `?? null`).
 */
function createRoutineDispatchFingerprint(input: {
  payload: Record<string, unknown> | null;
  projectId: string | null;
  assigneeAgentId: string | null;
  executionWorkspaceId?: string | null;
  executionWorkspacePreference?: string | null;
  executionWorkspaceSettings?: Record<string, unknown> | null;
  title: string;
  description: string | null;
}) {
  const canonicalInput = normalizeRoutineDispatchFingerprintValue(input);
  const hash = crypto.createHash("sha256");
  hash.update(JSON.stringify(canonicalInput));
  return hash.digest("hex");
}
function routineUsesWorkspaceBranch(routine: typeof routines.$inferSelect) {
return (routine.variables ?? []).some((variable) => variable.name === WORKSPACE_BRANCH_ROUTINE_VARIABLE)
|| extractRoutineVariableNames([routine.title, routine.description]).includes(WORKSPACE_BRANCH_ROUTINE_VARIABLE);
@@ -426,6 +457,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
triggeredAt: routineRuns.triggeredAt,
idempotencyKey: routineRuns.idempotencyKey,
triggerPayload: routineRuns.triggerPayload,
dispatchFingerprint: routineRuns.dispatchFingerprint,
linkedIssueId: routineRuns.linkedIssueId,
coalescedIntoRunId: routineRuns.coalescedIntoRunId,
failureReason: routineRuns.failureReason,
@@ -458,6 +490,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
triggeredAt: row.triggeredAt,
idempotencyKey: row.idempotencyKey,
triggerPayload: row.triggerPayload as Record<string, unknown> | null,
dispatchFingerprint: row.dispatchFingerprint,
linkedIssueId: row.linkedIssueId,
coalescedIntoRunId: row.coalescedIntoRunId,
failureReason: row.failureReason,
@@ -606,7 +639,22 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
}
}
async function findLiveExecutionIssue(routine: typeof routines.$inferSelect, executor: Db = db) {
/**
 * Build the SQL condition matching routine-execution issues for a given
 * dispatch fingerprint, or null when no fingerprint was supplied (in which
 * case callers apply no fingerprint filtering at all).
 */
function routineExecutionFingerprintCondition(dispatchFingerprint?: string | null) {
  if (!dispatchFingerprint) return null;
  const matchesFingerprint = eq(issues.originFingerprint, dispatchFingerprint);
  // The "default" arm preserves coalescing against pre-migration open issues.
  // It becomes inert once those legacy routine execution issues drain out.
  const matchesLegacyDefault = eq(issues.originFingerprint, "default");
  return or(matchesFingerprint, matchesLegacyDefault);
}
async function findLiveExecutionIssue(
routine: typeof routines.$inferSelect,
executor: Db = db,
dispatchFingerprint?: string | null,
) {
const fingerprintCondition = routineExecutionFingerprintCondition(dispatchFingerprint);
const executionBoundIssue = await executor
.select()
.from(issues)
@@ -624,6 +672,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
eq(issues.originId, routine.id),
inArray(issues.status, OPEN_ISSUE_STATUSES),
isNull(issues.hiddenAt),
...(fingerprintCondition ? [fingerprintCondition] : []),
),
)
.orderBy(desc(issues.updatedAt), desc(issues.createdAt))
@@ -649,6 +698,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
eq(issues.originId, routine.id),
inArray(issues.status, OPEN_ISSUE_STATUSES),
isNull(issues.hiddenAt),
...(fingerprintCondition ? [fingerprintCondition] : []),
),
)
.orderBy(desc(issues.updatedAt), desc(issues.createdAt))
@@ -745,6 +795,16 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
const title = interpolateRoutineTemplate(input.routine.title, allVariables) ?? input.routine.title;
const description = interpolateRoutineTemplate(input.routine.description, allVariables);
const triggerPayload = mergeRoutineRunPayload(input.payload, { ...automaticVariables, ...resolvedVariables });
const dispatchFingerprint = createRoutineDispatchFingerprint({
payload: triggerPayload,
projectId,
assigneeAgentId,
executionWorkspaceId: input.executionWorkspaceId ?? null,
executionWorkspacePreference: input.executionWorkspacePreference ?? null,
executionWorkspaceSettings: input.executionWorkspaceSettings ?? null,
title,
description,
});
const run = await db.transaction(async (tx) => {
const txDb = tx as unknown as Db;
await tx.execute(
@@ -782,6 +842,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
triggeredAt,
idempotencyKey: input.idempotencyKey ?? null,
triggerPayload,
dispatchFingerprint,
})
.returning();
@@ -791,7 +852,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
let createdIssue: Awaited<ReturnType<typeof issueSvc.create>> | null = null;
try {
const activeIssue = await findLiveExecutionIssue(input.routine, txDb);
const activeIssue = await findLiveExecutionIssue(input.routine, txDb, dispatchFingerprint);
if (activeIssue && input.routine.concurrencyPolicy !== "always_enqueue") {
const status = input.routine.concurrencyPolicy === "skip_if_active" ? "skipped" : "coalesced";
const updated = await finalizeRun(createdRun.id, {
@@ -824,6 +885,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
originKind: "routine_execution",
originId: input.routine.id,
originRunId: createdRun.id,
originFingerprint: dispatchFingerprint,
executionWorkspaceId: input.executionWorkspaceId ?? null,
executionWorkspacePreference: input.executionWorkspacePreference ?? null,
executionWorkspaceSettings: input.executionWorkspaceSettings ?? null,
@@ -840,7 +902,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
throw error;
}
const existingIssue = await findLiveExecutionIssue(input.routine, txDb);
const existingIssue = await findLiveExecutionIssue(input.routine, txDb, dispatchFingerprint);
if (!existingIssue) throw error;
const status = input.routine.concurrencyPolicy === "skip_if_active" ? "skipped" : "coalesced";
const updated = await finalizeRun(createdRun.id, {
@@ -994,6 +1056,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
triggeredAt: routineRuns.triggeredAt,
idempotencyKey: routineRuns.idempotencyKey,
triggerPayload: routineRuns.triggerPayload,
dispatchFingerprint: routineRuns.dispatchFingerprint,
linkedIssueId: routineRuns.linkedIssueId,
coalescedIntoRunId: routineRuns.coalescedIntoRunId,
failureReason: routineRuns.failureReason,
@@ -1025,6 +1088,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
triggeredAt: run.triggeredAt,
idempotencyKey: run.idempotencyKey,
triggerPayload: run.triggerPayload as Record<string, unknown> | null,
dispatchFingerprint: run.dispatchFingerprint,
linkedIssueId: run.linkedIssueId,
coalescedIntoRunId: run.coalescedIntoRunId,
failureReason: run.failureReason,
@@ -1437,6 +1501,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
triggeredAt: routineRuns.triggeredAt,
idempotencyKey: routineRuns.idempotencyKey,
triggerPayload: routineRuns.triggerPayload,
dispatchFingerprint: routineRuns.dispatchFingerprint,
linkedIssueId: routineRuns.linkedIssueId,
coalescedIntoRunId: routineRuns.coalescedIntoRunId,
failureReason: routineRuns.failureReason,
@@ -1468,6 +1533,7 @@ export function routineService(db: Db, deps: { heartbeat?: IssueAssignmentWakeup
triggeredAt: row.triggeredAt,
idempotencyKey: row.idempotencyKey,
triggerPayload: row.triggerPayload as Record<string, unknown> | null,
dispatchFingerprint: row.dispatchFingerprint,
linkedIssueId: row.linkedIssueId,
coalescedIntoRunId: row.coalescedIntoRunId,
failureReason: row.failureReason,

View File

@@ -42,15 +42,16 @@ Follow these steps every time you wake up:
**Step 3 — Get assignments.** Prefer `GET /api/agents/me/inbox-lite` for the normal heartbeat inbox. It returns the compact assignment list you need for prioritization. Fall back to `GET /api/companies/{companyId}/issues?assigneeAgentId={your-agent-id}&status=todo,in_progress,in_review,blocked` only when you need the full issue objects.
**Step 4 — Pick work (with mention exception).** Work on `in_progress` first, then `in_review` (if you were woken by a comment on it — check `PAPERCLIP_WAKE_COMMENT_ID`), then `todo`. Skip `blocked` unless you can unblock it.
**Blocked-task dedup:** Before working on a `blocked` task, fetch its comment thread. If your most recent comment was a blocked-status update AND no new comments from other agents or users have been posted since, skip the task entirely — do not checkout, do not post another comment. Exit the heartbeat (or move to the next task) instead. Only re-engage with a blocked task when new context exists (a new comment, status change, or event-based wake like `PAPERCLIP_WAKE_COMMENT_ID`).
If `PAPERCLIP_TASK_ID` is set and that task is assigned to you, prioritize it first for this heartbeat.
If this run was triggered by a comment on a task you own (`PAPERCLIP_WAKE_COMMENT_ID` set; `PAPERCLIP_WAKE_REASON=issue_commented`), you MUST read that comment, then checkout and address the feedback. This includes `in_review` tasks — if someone comments with feedback, re-checkout the task to address it.
If this run was triggered by a comment mention (`PAPERCLIP_WAKE_COMMENT_ID` set; `PAPERCLIP_WAKE_REASON=issue_comment_mentioned`), you MUST read that comment thread first, even if the task is not currently assigned to you.
If that mentioned comment explicitly asks you to take the task, you may self-assign by checking out `PAPERCLIP_TASK_ID` as yourself, then proceed normally.
If the comment asks for input/review but not ownership, respond in comments if useful, then continue with assigned work.
If the comment does not direct you to take ownership, do not self-assign.
If nothing is assigned and there is no valid mention-based ownership handoff, exit the heartbeat.
**Step 4 — Pick work.** Priority: `in_progress` → `in_review` (if woken by a comment on it — check `PAPERCLIP_WAKE_COMMENT_ID`) → `todo`. Skip `blocked` unless you can unblock.
Overrides and special cases:
- `PAPERCLIP_TASK_ID` set and assigned to you → prioritize that task first.
- `PAPERCLIP_WAKE_REASON=issue_commented` with `PAPERCLIP_WAKE_COMMENT_ID` → read the comment, then checkout and address the feedback (applies to `in_review` too).
- `PAPERCLIP_WAKE_REASON=issue_comment_mentioned` → read the comment thread first even if you're not the assignee. Self-assign (via checkout) only if the comment explicitly directs you to take the task. Otherwise respond in comments if useful and continue with your own assigned work; do not self-assign.
- Wake payload says `dependency-blocked interaction: yes` → the issue is still blocked for deliverable work. Do not try to unblock it. Read the comment, name the unresolved blocker(s), and respond/triage via comments or documents. Use the scoped wake context rather than treating a checkout failure as a blocker.
- **Blocked-task dedup:** before touching a `blocked` task, check the thread. If your most recent comment was a blocked-status update and no one has replied since, skip entirely — do not checkout, do not re-comment. Only re-engage on new context (comment, status change, event wake).
- Nothing assigned and no valid mention handoff → exit the heartbeat.
**Step 5 — Checkout.** You MUST checkout before doing any work. Include the run ID header:
@@ -64,48 +65,26 @@ If already checked out by you, returns normally. If owned by another agent: `409
**Step 6 — Understand context.** Prefer `GET /api/issues/{issueId}/heartbeat-context` first. It gives you compact issue state, ancestor summaries, goal/project info, and comment cursor metadata without forcing a full thread replay.
If `PAPERCLIP_WAKE_PAYLOAD_JSON` is present, inspect that payload before calling the API. It is the fastest path for comment wakes and may already include the exact new comments that triggered this run. For comment-driven wakes, explicitly reflect the new comment context first, then fetch broader history only if needed.
If `PAPERCLIP_WAKE_PAYLOAD_JSON` is present, inspect that payload before calling the API. It is the fastest path for comment wakes and may already include the exact new comments that triggered this run. For comment-driven wakes, reflect the new comment context first, then fetch broader history only if needed.
Use comments incrementally:
- if `PAPERCLIP_WAKE_COMMENT_ID` is set, fetch that exact comment first with `GET /api/issues/{issueId}/comments/{commentId}`
- if you already know the thread and only need updates, use `GET /api/issues/{issueId}/comments?after={last-seen-comment-id}&order=asc`
- use the full `GET /api/issues/{issueId}/comments` route only when you are cold-starting, when session memory is unreliable, or when the incremental path is not enough
- use the full `GET /api/issues/{issueId}/comments` route only when cold-starting or when incremental isn't enough
Read enough ancestor/comment context to understand _why_ the task exists and what changed. Do not reflexively reload the whole thread on every heartbeat.
**Execution-policy review/approval wakes.** If the issue is in `in_review` and includes `executionState`, inspect these fields immediately:
**Execution-policy review/approval wakes.** If the issue is `in_review` with `executionState`, inspect `currentStageType`, `currentParticipant`, `returnAssignee`, and `lastDecisionOutcome`.
- `executionState.currentStageType` tells you whether you are in a `review` or `approval` stage
- `executionState.currentParticipant` tells you who is currently allowed to act
- `executionState.returnAssignee` tells you who receives the task back if changes are requested
- `executionState.lastDecisionOutcome` tells you the latest review/approval outcome
If `currentParticipant` matches you, submit your decision via the normal update route — there is no separate execution-decision endpoint:
If `currentParticipant` matches you, you are the active reviewer/approver for this heartbeat. There is **no separate execution-decision endpoint**. Submit your decision through the normal issue update route:
- Approve: `PATCH /api/issues/{issueId}` with `{ "status": "done", "comment": "Approved: …" }`. If more stages remain, Paperclip keeps the issue in `in_review` and reassigns it to the next participant automatically.
- Request changes: `PATCH` with `{ "status": "in_progress", "comment": "Changes requested: …" }`. Paperclip converts this into a changes-requested decision and reassigns to `returnAssignee`.
```json
PATCH /api/issues/{issueId}
Headers: X-Paperclip-Run-Id: $PAPERCLIP_RUN_ID
{ "status": "done", "comment": "Approved: what you reviewed and why it passes." }
```
If `currentParticipant` does not match you, do not try to advance the stage — Paperclip will reject other actors with `422`.
That approves the current stage. If more stages remain, Paperclip keeps the issue in `in_review`, reassigns it to the next participant, and records the decision automatically.
To request changes, send a non-`done` status with a required comment. Prefer `in_progress`:
```json
PATCH /api/issues/{issueId}
Headers: X-Paperclip-Run-Id: $PAPERCLIP_RUN_ID
{ "status": "in_progress", "comment": "Changes requested: exactly what must be fixed." }
```
Paperclip converts that into a changes-requested decision, reassigns the issue to `returnAssignee`, and routes the task back through the same stage after the executor resubmits.
If `currentParticipant` does **not** match you, do not try to advance the stage. Only the active reviewer/approver can do that, and Paperclip will reject other actors with `422`.
**Step 7 — Do the work.** Use your tools and capabilities.
Execution contract:
**Step 7 — Do the work.** Use your tools and capabilities. Execution contract:
- If the issue is actionable, start concrete work in the same heartbeat. Do not stop at a plan unless the issue specifically asks for planning.
- Leave durable progress in comments, issue documents, or work products, and include the next action before you exit.
@@ -122,13 +101,9 @@ When writing issue descriptions or comments, follow the ticket-linking rule in *
PATCH /api/issues/{issueId}
Headers: X-Paperclip-Run-Id: $PAPERCLIP_RUN_ID
{ "status": "done", "comment": "What was done and why." }
PATCH /api/issues/{issueId}
Headers: X-Paperclip-Run-Id: $PAPERCLIP_RUN_ID
{ "status": "blocked", "comment": "What is blocked, why, and who needs to unblock it." }
```
For multiline markdown comments, do **not** hand-inline the markdown into a one-line JSON string. That is how comments get "smooshed" together. Use the helper below or an equivalent `jq --arg` pattern so literal newlines survive JSON encoding:
For multiline markdown comments, do **not** hand-inline the markdown into a one-line JSON string — that is how comments get "smooshed" together. Use the helper below (or an equivalent `jq --arg` pattern reading from a heredoc/file) so literal newlines survive JSON encoding:
```bash
scripts/paperclip-issue-update.sh --issue-id "$PAPERCLIP_TASK_ID" --status done <<'MD'
@@ -139,76 +114,48 @@ Done
MD
```
Status values: `backlog`, `todo`, `in_progress`, `in_review`, `done`, `blocked`, `cancelled`. Use the quick guide below when choosing one. Priority values: `critical`, `high`, `medium`, `low`. Other updatable fields: `title`, `description`, `priority`, `assigneeAgentId`, `projectId`, `goalId`, `parentId`, `billingCode`, `blockedByIssueIds`.
Status values: `backlog`, `todo`, `in_progress`, `in_review`, `done`, `blocked`, `cancelled`. Priority values: `critical`, `high`, `medium`, `low`. Other updatable fields: `title`, `description`, `priority`, `assigneeAgentId`, `projectId`, `goalId`, `parentId`, `billingCode`, `blockedByIssueIds`.
### Status Quick Guide
- `backlog` — not ready to execute yet. Use for parked or unscheduled work, not for something you are about to start this heartbeat.
- `todo` — ready and actionable, but not actively checked out yet. Use for newly assigned work or work that is ready to resume once someone picks it up.
- `in_progress` — actively owned work. For agents this means live execution-backed work; enter it by checkout, not by manually PATCHing the status.
- `in_review` — execution is paused pending reviewer, approver, or board/user feedback. Use this when handing work off for review, not as a generic synonym for done.
- `blocked` — cannot proceed until something specific changes. Always say what the blocker is, who must act, and use `blockedByIssueIds` when another issue is the blocker.
- `done` — the requested work is complete and no follow-up action remains on this issue.
- `cancelled` — the work is intentionally abandoned and should not be resumed.
Practical rules:
- For agent-assigned work, prefer `todo` until you actually checkout. Do not PATCH an issue into `in_progress` just to signal intent.
- If you are waiting on another ticket, use `blocked`, not `in_progress`, and set `blockedByIssueIds` instead of relying on `parentId` or a free-text comment alone.
- If a human asks to review or take the task back, usually reassign to that user and set `in_review`.
- `parentId` is structural only. It does not mean the parent or child is blocked unless `blockedByIssueIds` says so explicitly.
- `backlog` — parked/unscheduled, not something you're about to start this heartbeat.
- `todo` — ready and actionable, but not checked out yet. Use for newly assigned or resumable work; don't PATCH into `in_progress` just to signal intent — enter `in_progress` by checkout.
- `in_progress` — actively owned, execution-backed work.
- `in_review` — paused pending reviewer/approver/board/user feedback. Use when handing work off for review; not a synonym for done. If a human asks to take the task back, reassign to them and set `in_review`.
- `blocked` — cannot proceed until something specific changes. Always name the blocker and who must act, and prefer `blockedByIssueIds` over free-text when another issue is the blocker. `parentId` alone does not imply a blocker.
- `done` — work complete, no follow-up on this issue.
- `cancelled` — intentionally abandoned, not to be resumed.
**Step 9 — Delegate if needed.** Create subtasks with `POST /api/companies/{companyId}/issues`. Always set `parentId` and `goalId`. When a follow-up issue needs to stay on the same code change but is not a true child task, set `inheritExecutionWorkspaceFromIssueId` to the source issue. Set `billingCode` for cross-team work.
## Issue Dependencies (Blockers)
Paperclip supports first-class blocker relationships between issues. Use these to express "issue A is blocked by issue B" so that dependent work automatically resumes when blockers are resolved.
Express "A is blocked by B" as first-class blockers so dependent work auto-resumes.
### Setting blockers
Pass `blockedByIssueIds` (an array of issue IDs) when creating or updating an issue:
**Set blockers** via `blockedByIssueIds` (array of issue IDs) on create or update:
```json
// At creation time
POST /api/companies/{companyId}/issues
{ "title": "Deploy to prod", "blockedByIssueIds": ["issue-id-1", "issue-id-2"], "status": "blocked", ... }
{ "title": "Deploy to prod", "blockedByIssueIds": ["id-1","id-2"], "status": "blocked" }
// After the fact
PATCH /api/issues/{issueId}
{ "blockedByIssueIds": ["issue-id-1", "issue-id-2"] }
{ "blockedByIssueIds": ["id-1","id-2"] }
```
The `blockedByIssueIds` array **replaces** the existing blocker set on each update. To add a blocker, include the full list. To remove all blockers, send `[]`.
The array **replaces** the current set on each update — send `[]` to clear. Issues cannot block themselves; circular chains are rejected.
Constraints: issues cannot block themselves, and circular blocker chains are rejected.
**Read blockers** from `GET /api/issues/{issueId}`: `blockedBy` (issues blocking this one) and `blocks` (issues this one blocks), each with id/identifier/title/status/priority/assignee.
### Reading blockers
**Automatic wakes:**
`GET /api/issues/{issueId}` returns two relation arrays:
- `PAPERCLIP_WAKE_REASON=issue_blockers_resolved` — all `blockedBy` issues reached `done`; dependent's assignee is woken.
- `PAPERCLIP_WAKE_REASON=issue_children_completed` — all direct children reached a terminal state (`done`/`cancelled`); parent's assignee is woken.
- `blockedBy` — issues that block this one (with `id`, `identifier`, `title`, `status`, `priority`, assignee info)
- `blocks` — issues that this one blocks
### Automatic wake-on-dependency-resolved
Paperclip fires automatic wakes in two scenarios:
1. **All blockers done** (`PAPERCLIP_WAKE_REASON=issue_blockers_resolved`): When every issue in the `blockedBy` set reaches `done`, the dependent issue's assignee is woken to resume work.
2. **All children done** (`PAPERCLIP_WAKE_REASON=issue_children_completed`): When every direct child issue of a parent reaches a terminal state (`done` or `cancelled`), the parent issue's assignee is woken to finalize or close out.
If a blocker is moved to `cancelled`, it does **not** count as resolved for blocker wakeups. Remove or replace cancelled blockers explicitly before expecting `issue_blockers_resolved`.
When you receive one of these wake reasons, check the issue state and continue the work or mark it done.
`cancelled` blockers do **not** count as resolved — remove or replace them explicitly before expecting `issue_blockers_resolved`.
## Requesting Board Approval
Agents can create approval requests for arbitrary issue-linked work. Use this when you need the board to approve or deny a proposed action before continuing.
Recommended generic type:
- `request_board_approval` for open-ended approval requests like spend approval, vendor approval, launch approval, or other board decisions
Create the approval and link it to the relevant issue in one call:
Use `request_board_approval` when you need the board to approve/deny a proposed action:
```json
POST /api/companies/{companyId}/approvals
{
  "type": "request_board_approval",
  "title": "<what you want approved>",
  "description": "<why, expected cost/impact, and what happens next>",
  "issueIds": ["<issue-id>"]
}
```
Notes:
`issueIds` links the approval into the issue thread. When approved, Paperclip wakes the requester with `PAPERCLIP_APPROVAL_ID`/`PAPERCLIP_APPROVAL_STATUS`. Keep the payload concise and decision-ready.
- `issueIds` links the approval into the issue thread/UI.
- When the board approves it, Paperclip wakes the requesting agent and includes `PAPERCLIP_APPROVAL_ID` / `PAPERCLIP_APPROVAL_STATUS`.
- Keep the payload concise and decision-ready: what you want approved, why, expected cost/impact, and what happens next.
## Niche Workflow Pointers
## Project Setup Workflow (CEO/Manager Common Path)
Load `references/workflows.md` when the task matches one of these:
When asked to set up a new project with workspace config (local folder and/or GitHub repo), use:
1. `POST /api/companies/{companyId}/projects` with project fields.
2. Optionally include `workspace` in that same create call, or call `POST /api/projects/{projectId}/workspaces` right after create.
Workspace rules:
- Provide at least one of `cwd` (local folder) or `repoUrl` (remote repo).
- For repo-only setup, omit `cwd` and provide `repoUrl`.
- Include both `cwd` + `repoUrl` when local and remote references should both be tracked.
## OpenClaw Invite Workflow (CEO)
Use this when asked to invite a new OpenClaw employee.
1. Generate a fresh OpenClaw invite prompt:
```
POST /api/companies/{companyId}/openclaw/invite-prompt
{ "agentMessage": "optional onboarding note for OpenClaw" }
```
Access control:
- Board users with invite permission can call it.
- Agent callers: only the company CEO agent can call it.
2. Build the copy-ready OpenClaw prompt for the board:
- Use `onboardingTextUrl` from the response.
- Ask the board to paste that prompt into OpenClaw.
- If the issue includes an OpenClaw URL (for example `ws://127.0.0.1:18789`), include that URL in your comment so the board/OpenClaw uses it in `agentDefaultsPayload.url`.
3. Post the prompt in the issue comment so the human can paste it into OpenClaw.
4. After OpenClaw submits the join request, monitor approvals and continue onboarding (approval + API key claim + skill install).
- Set up a new project + workspace (CEO/Manager).
- Generate an OpenClaw invite prompt (CEO).
- Set or clear an agent's `instructions-path`.
- CEO-safe company imports/exports (preview/apply).
- App-level self-test playbook.
## Company Skills Workflow
@@ -292,28 +206,31 @@ Routines are recurring tasks. Each time a routine fires it creates an execution
If you are asked to create or manage routines you MUST read:
`skills/paperclip/references/routines.md`
## Issue Workspace Runtime Controls
When an issue needs browser/manual QA or a preview server, inspect its current execution workspace and use Paperclip's workspace runtime controls instead of starting unmanaged background servers yourself.
For commands, response fields, and MCP tools, read:
`skills/paperclip/references/issue-workspaces.md`
## Critical Rules
- **Always checkout** before working. Never PATCH to `in_progress` manually.
- **Never retry a 409.** The task belongs to someone else.
- **Never look for unassigned work.**
- **Self-assign only for explicit @-mention handoff.** This requires a mention-triggered wake with `PAPERCLIP_WAKE_COMMENT_ID` and a comment that clearly directs you to do the task. Use checkout (never direct assignee patch). Otherwise, no assignments = exit.
- **Honor "send it back to me" requests from board users.** If a board/user asks for review handoff (e.g. "let me review it", "assign it back to me"), reassign the issue to that user with `assigneeAgentId: null` and `assigneeUserId: "<requesting-user-id>"`, and typically set status to `in_review` instead of `done`.
Resolve requesting user id from the triggering comment thread (`authorUserId`) when available; otherwise use the issue's `createdByUserId` if it matches the requester context.
- **Always comment** on `in_progress` work before exiting a heartbeat — **except** for blocked tasks with no new context (see blocked-task dedup in Step 4).
- **Never look for unassigned work.** No assignments = exit.
- **Self-assign only for explicit @-mention handoff.** Requires a mention-triggered wake with `PAPERCLIP_WAKE_COMMENT_ID` and a comment that clearly directs you to do the task. Use checkout (never direct assignee patch).
- **Honor "send it back to me" requests from board users.** If a board/user asks for review handoff (e.g. "let me review it", "assign it back to me"), reassign to them with `assigneeAgentId: null` and `assigneeUserId: "<requesting-user-id>"`, typically setting status to `in_review` instead of `done`. Resolve the user id from the triggering comment's `authorUserId` when available, else the issue's `createdByUserId` if it matches the requester context.
- **Start actionable work before planning-only closure.** Do concrete work in the same heartbeat unless the task asks for a plan or review only.
- **Leave a next action.** Every progress comment should make clear what is complete, what remains, and who owns the next step.
- **Prefer child issues over polling.** Create bounded child issues for long or parallel delegated work and rely on Paperclip wake events or comments for completion.
- **Always set `parentId`** on subtasks (and `goalId` unless you're CEO/manager creating top-level work).
- **Preserve workspace continuity for follow-ups.** Child issues inherit execution workspace linkage server-side from `parentId`. For non-child follow-ups tied to the same checkout/worktree, send `inheritExecutionWorkspaceFromIssueId` explicitly instead of relying on free-text references or memory.
- **Preserve workspace continuity for follow-ups.** Child issues inherit execution workspace from `parentId` server-side. For non-child follow-ups on the same checkout/worktree, send `inheritExecutionWorkspaceFromIssueId` explicitly.
- **Never cancel cross-team tasks.** Reassign to your manager with a comment.
- **Always update blocked issues explicitly.** If blocked, PATCH status to `blocked` with a blocker comment before exiting, then escalate. On subsequent heartbeats, do NOT repeat the same blocked comment — see blocked-task dedup in Step 4.
- **Use first-class blockers** when a task depends on other tasks. Set `blockedByIssueIds` on the dependent issue so Paperclip automatically wakes the assignee when all blockers are done. Prefer this over ad-hoc "blocked by X" comments.
- **@-mentions** trigger heartbeats — use sparingly, they cost budget. For machine-authored comments, do not rely on raw `@AgentName` text. Resolve the target agent first, then emit a structured mention as `[@Agent Name](agent://<agent-id>)`.
- **Use first-class blockers** (`blockedByIssueIds`) rather than free-text "blocked by X" comments.
- **On a blocked task with no new context, don't re-comment** — see the blocked-task dedup rule in Step 4.
- **@-mentions** trigger heartbeats — use sparingly, they cost budget. For machine-authored comments, resolve the target agent and emit a structured mention as `[@Agent Name](agent://<agent-id>)` instead of raw `@AgentName` text.
- **Budget**: auto-paused at 100%. Above 80%, focus on critical tasks only.
- **Escalate** via `chainOfCommand` when stuck. Reassign to manager or create a task for them.
- **Hiring**: use `paperclip-create-agent` skill for new agent creation workflows. That skill links to reusable agent instruction templates, including `Coder` and `QA`, so hiring agents can start from proven `AGENTS.md` patterns without bloating this heartbeat skill.
- **Commit Co-author**: if you make a git commit you MUST add EXACTLY `Co-Authored-By: Paperclip <noreply@paperclip.ing>` to the end of each commit message. Do not substitute your agent name; use exactly `Co-Authored-By: Paperclip <noreply@paperclip.ing>`.
- **Hiring**: use the `paperclip-create-agent` skill for new agent creation workflows (links to reusable `AGENTS.md` templates like `Coder` and `QA`).
- **Commit Co-author**: if you make a git commit you MUST add EXACTLY `Co-Authored-By: Paperclip <noreply@paperclip.ing>` to the end of each commit message. Do not substitute your agent name; use exactly `Co-Authored-By: Paperclip <noreply@paperclip.ing>`.
## Comment Style (Required)
@@ -342,19 +259,7 @@ Never leave bare ticket ids in issue descriptions or comments when a clickable i
Do NOT use unprefixed paths like `/issues/PAP-123` or `/agents/cto` — always include the company prefix.
**Preserve markdown line breaks (required):** When posting comments through shell commands, build the JSON payload from multiline stdin or another multiline source. Do not flatten a list or multi-paragraph update into a single quoted JSON line. Preferred helper:
```bash
scripts/paperclip-issue-update.sh --issue-id "$PAPERCLIP_TASK_ID" --status in_progress <<'MD'
Investigating comment formatting
- Pulled the raw stored comment body
- Compared it with the run's final assistant message
- Traced whether the flattening happened before or after the API call
MD
```
If you cannot use the helper, use `jq -n --arg comment "$comment"` with `comment` read from a heredoc or file. Never manually compress markdown into a one-line JSON `comment` string unless you intentionally want a single paragraph.
**Preserve markdown line breaks (required):** build multiline JSON bodies from heredoc/file input (via the helper in Step 8 or `jq -n --arg comment "$comment"`). Never manually compress markdown into a one-line JSON `comment` string unless you intentionally want a single paragraph.
Example:
@@ -396,109 +301,32 @@ PUT /api/issues/{issueId}/documents/plan
If `plan` already exists, fetch the current document first and send its latest `baseRevisionId` when you update it.
## Setting Agent Instructions Path
## Key Endpoints (Hot Routes)
Use the dedicated route instead of generic `PATCH /api/agents/:id` when you need to set an agent's instructions markdown path (for example `AGENTS.md`).
| Action | Endpoint |
| ------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| My identity | `GET /api/agents/me` |
| My compact inbox | `GET /api/agents/me/inbox-lite` |
| My assignments | `GET /api/companies/:companyId/issues?assigneeAgentId=:id&status=todo,in_progress,in_review,blocked` |
| Checkout task | `POST /api/issues/:issueId/checkout` |
| Get task + ancestors | `GET /api/issues/:issueId` |
| Compact heartbeat context | `GET /api/issues/:issueId/heartbeat-context` |
| Update task | `PATCH /api/issues/:issueId` (optional `comment` field) |
| Get comments / delta / single | `GET /api/issues/:issueId/comments[?after=:commentId&order=asc]` / `GET …/comments/:commentId` |
| Add comment | `POST /api/issues/:issueId/comments` |
| Create subtask | `POST /api/companies/:companyId/issues` |
| Release task | `POST /api/issues/:issueId/release` |
| Search issues | `GET /api/companies/:companyId/issues?q=search+term` |
| Issue documents (list/get/put) | `GET\|PUT /api/issues/:issueId/documents[/:key]` |
| Create approval | `POST /api/companies/:companyId/approvals` |
| Upload attachment (multipart, `file`) | `POST /api/companies/:companyId/issues/:issueId/attachments` |
| List / get / delete attachment | `GET /api/issues/:issueId/attachments` / `GET\|DELETE /api/attachments/:attachmentId[/content]` |
| Execution workspace + runtime | `GET /api/execution-workspaces/:id` / `POST …/runtime-services/:action` |
| Set agent instructions path | `PATCH /api/agents/:agentId/instructions-path` |
| List agents | `GET /api/companies/:companyId/agents` |
| Dashboard | `GET /api/companies/:companyId/dashboard` |
```bash
PATCH /api/agents/{agentId}/instructions-path
{
"path": "agents/cmo/AGENTS.md"
}
```
Rules:
- Allowed for: the target agent itself, or an ancestor manager in that agent's reporting chain.
- For `codex_local` and `claude_local`, default config key is `instructionsFilePath`.
- Relative paths are resolved against the target agent's `adapterConfig.cwd`; absolute paths are accepted as-is.
- To clear the path, send `{ "path": null }`.
- For adapters with a different key, provide it explicitly:
```bash
PATCH /api/agents/{agentId}/instructions-path
{
"path": "/absolute/path/to/AGENTS.md",
"adapterConfigKey": "yourAdapterSpecificPathField"
}
```
## Key Endpoints (Quick Reference)
| Action | Endpoint |
| ----------------------------------------- | ------------------------------------------------------------------------------------------ |
| My identity | `GET /api/agents/me` |
| My compact inbox | `GET /api/agents/me/inbox-lite` |
| Report a user's Mine inbox view | `GET /api/agents/me/inbox/mine?userId=:userId` |
| My assignments | `GET /api/companies/:companyId/issues?assigneeAgentId=:id&status=todo,in_progress,in_review,blocked` |
| Checkout task | `POST /api/issues/:issueId/checkout` |
| Get task + ancestors | `GET /api/issues/:issueId` |
| List issue documents | `GET /api/issues/:issueId/documents` |
| Get issue document | `GET /api/issues/:issueId/documents/:key` |
| Create/update issue document | `PUT /api/issues/:issueId/documents/:key` |
| Get issue document revisions | `GET /api/issues/:issueId/documents/:key/revisions` |
| Get compact heartbeat context | `GET /api/issues/:issueId/heartbeat-context` |
| Get comments | `GET /api/issues/:issueId/comments` |
| Get comment delta | `GET /api/issues/:issueId/comments?after=:commentId&order=asc` |
| Get specific comment | `GET /api/issues/:issueId/comments/:commentId` |
| Update task | `PATCH /api/issues/:issueId` (optional `comment` field) |
| Add comment | `POST /api/issues/:issueId/comments` |
| Create subtask | `POST /api/companies/:companyId/issues` |
| Generate OpenClaw invite prompt (CEO) | `POST /api/companies/:companyId/openclaw/invite-prompt` |
| Create project | `POST /api/companies/:companyId/projects` |
| Create project workspace | `POST /api/projects/:projectId/workspaces` |
| Set instructions path | `PATCH /api/agents/:agentId/instructions-path` |
| Release task | `POST /api/issues/:issueId/release` |
| List agents | `GET /api/companies/:companyId/agents` |
| Create approval | `POST /api/companies/:companyId/approvals` |
| List company skills | `GET /api/companies/:companyId/skills` |
| Import company skills | `POST /api/companies/:companyId/skills/import` |
| Scan project workspaces for skills | `POST /api/companies/:companyId/skills/scan-projects` |
| Sync agent desired skills | `POST /api/agents/:agentId/skills/sync` |
| Preview CEO-safe company import | `POST /api/companies/:companyId/imports/preview` |
| Apply CEO-safe company import | `POST /api/companies/:companyId/imports/apply` |
| Preview company export | `POST /api/companies/:companyId/exports/preview` |
| Build company export | `POST /api/companies/:companyId/exports` |
| Dashboard | `GET /api/companies/:companyId/dashboard` |
| Search issues | `GET /api/companies/:companyId/issues?q=search+term` |
| Upload attachment (multipart, field=file) | `POST /api/companies/:companyId/issues/:issueId/attachments` |
| List issue attachments | `GET /api/issues/:issueId/attachments` |
| Get attachment content | `GET /api/attachments/:attachmentId/content` |
| Delete attachment | `DELETE /api/attachments/:attachmentId` |
| List routines | `GET /api/companies/:companyId/routines` |
| Get routine | `GET /api/routines/:routineId` |
| Create routine | `POST /api/companies/:companyId/routines` |
| Update routine | `PATCH /api/routines/:routineId` |
| Add trigger | `POST /api/routines/:routineId/triggers` |
| Update trigger | `PATCH /api/routine-triggers/:triggerId` |
| Delete trigger | `DELETE /api/routine-triggers/:triggerId` |
| Rotate webhook secret | `POST /api/routine-triggers/:triggerId/rotate-secret` |
| Manual run | `POST /api/routines/:routineId/run` |
| Fire webhook (external) | `POST /api/routine-triggers/public/:publicId/fire` |
| List runs | `GET /api/routines/:routineId/runs` |
## Company Import / Export
Use the company-scoped routes when a CEO agent needs to inspect or move package content.
- CEO-safe imports:
- `POST /api/companies/{companyId}/imports/preview`
- `POST /api/companies/{companyId}/imports/apply`
- Allowed callers: board users and the CEO agent of that same company.
- Safe import rules:
- existing-company imports are non-destructive
- `replace` is rejected
- collisions resolve with `rename` or `skip`
- issues are always created as new issues
- CEO agents may use the safe routes with `target.mode = "new_company"` to create a new company directly. Paperclip copies active user memberships from the source company so the new company is not orphaned.
For export, preview first and keep tasks explicit:
- `POST /api/companies/{companyId}/exports/preview`
- `POST /api/companies/{companyId}/exports`
- Export preview defaults to `issues: false`
- Add `issues` or `projectIssues` only when you intentionally need task files
- Use `selectedFiles` to narrow the final package to specific agents, skills, projects, or tasks after you inspect the preview inventory
Full endpoint table (company imports/exports, OpenClaw invites, company skills, routines, etc.) lives in `references/api-reference.md`.
## Searching Issues
@@ -510,43 +338,6 @@ GET /api/companies/{companyId}/issues?q=dockerfile
Results are ranked by relevance: title matches first, then identifier, description, and comments. You can combine `q` with other filters (`status`, `assigneeAgentId`, `projectId`, `labelId`).
## Self-Test Playbook (App-Level)
Use this when validating Paperclip itself (assignment flow, checkouts, run visibility, and status transitions).
1. Create a throwaway issue assigned to a known local agent (`claudecoder` or `codexcoder`):
```bash
npx paperclipai issue create \
--company-id "$PAPERCLIP_COMPANY_ID" \
--title "Self-test: assignment/watch flow" \
--description "Temporary validation issue" \
--status todo \
--assignee-agent-id "$PAPERCLIP_AGENT_ID"
```
2. Trigger and watch a heartbeat for that assignee:
```bash
npx paperclipai heartbeat run --agent-id "$PAPERCLIP_AGENT_ID"
```
3. Verify the issue transitions (`todo -> in_progress -> done` or `blocked`) and that comments are posted:
```bash
npx paperclipai issue get <issue-id-or-identifier>
```
4. Reassignment test (optional): move the same issue between `claudecoder` and `codexcoder` and confirm wake/run behavior:
```bash
npx paperclipai issue update <issue-id> --assignee-agent-id <other-agent-id> --status todo
```
5. Cleanup: mark temporary issues done/cancelled with a clear note.
If you use direct `curl` during these tests, include `X-Paperclip-Run-Id` on all mutating issue requests whenever running inside a heartbeat.
## Full Reference
For detailed API tables, JSON response schemas, worked examples (IC and Manager heartbeats), governance/approvals, cross-team delegation rules, error codes, issue lifecycle diagram, and the common mistakes table, read: `skills/paperclip/references/api-reference.md`

View File

@@ -747,6 +747,11 @@ Terminal states: `done`, `cancelled`
| GET | `/api/issues/:issueId/approvals` | List approvals linked to issue |
| POST | `/api/issues/:issueId/approvals` | Link approval to issue |
| DELETE | `/api/issues/:issueId/approvals/:approvalId` | Unlink approval from issue |
| GET | `/api/issues/:issueId/heartbeat-context` | Compact issue context including `currentExecutionWorkspace` when one is linked |
| GET | `/api/execution-workspaces/:workspaceId` | Execution workspace detail including runtime services and service URLs |
| POST | `/api/execution-workspaces/:workspaceId/runtime-services/start` | Start configured workspace services |
| POST | `/api/execution-workspaces/:workspaceId/runtime-services/restart` | Restart configured workspace services |
| POST | `/api/execution-workspaces/:workspaceId/runtime-services/stop` | Stop workspace runtime services |
### Companies, Projects, Goals

View File

@@ -0,0 +1,80 @@
# Issue Workspace Runtime Controls
Use this reference when an issue has an isolated execution workspace and you need to inspect or run that workspace's services, especially for QA/browser verification.
## Discover the Workspace
Start from the issue, not from memory:
```sh
curl -sS -H "Authorization: Bearer $PAPERCLIP_API_KEY" \
"$PAPERCLIP_API_URL/api/issues/$PAPERCLIP_TASK_ID/heartbeat-context"
```
Read `currentExecutionWorkspace`:
- `id` — execution workspace id for control endpoints
- `cwd` / `branchName` — local checkout context
- `status` / `closedAt` — whether the workspace is usable
- `runtimeServices[]` — current services, including `serviceName`, `status`, `healthStatus`, `url`, `port`, and `runtimeServiceId`
If `currentExecutionWorkspace` is `null`, the issue does not currently have a realized execution workspace. For child/follow-up work, create the child with `parentId` or use `inheritExecutionWorkspaceFromIssueId` so Paperclip preserves workspace continuity.
## Control Services
Prefer Paperclip-managed runtime service controls over manual `pnpm dev &` or ad-hoc background processes. These endpoints keep service state, URLs, logs, and ownership visible to other agents and the board.
```sh
# Start all configured services; waits for configured readiness checks.
curl -sS -X POST \
-H "Authorization: Bearer $PAPERCLIP_API_KEY" \
-H "X-Paperclip-Run-Id: $PAPERCLIP_RUN_ID" \
-H "Content-Type: application/json" \
"$PAPERCLIP_API_URL/api/execution-workspaces/<workspace-id>/runtime-services/start" \
-d '{}'
# Restart all configured services.
curl -sS -X POST \
-H "Authorization: Bearer $PAPERCLIP_API_KEY" \
-H "X-Paperclip-Run-Id: $PAPERCLIP_RUN_ID" \
-H "Content-Type: application/json" \
"$PAPERCLIP_API_URL/api/execution-workspaces/<workspace-id>/runtime-services/restart" \
-d '{}'
# Stop all running services.
curl -sS -X POST \
-H "Authorization: Bearer $PAPERCLIP_API_KEY" \
-H "X-Paperclip-Run-Id: $PAPERCLIP_RUN_ID" \
-H "Content-Type: application/json" \
"$PAPERCLIP_API_URL/api/execution-workspaces/<workspace-id>/runtime-services/stop" \
-d '{}'
```
To target a configured service, pass one of:
```json
{ "workspaceCommandId": "web" }
{ "runtimeServiceId": "<runtime-service-id>" }
{ "serviceIndex": 0 }
```
The response includes an updated `workspace.runtimeServices[]` list and a `workspaceOperation`/`operation` record for logs.
## Read the URL
After `start` or `restart`, read the service URL from:
- response `workspace.runtimeServices[].url`
- or a fresh `GET /api/issues/:issueId/heartbeat-context` response at `currentExecutionWorkspace.runtimeServices[].url`
For QA/browser checks, use the service whose `status` is `running` and whose `healthStatus` is not `unhealthy`. If multiple services are running, prefer the one named `web`, `preview`, or the configured service the issue mentions.
## MCP Tools
When the Paperclip MCP tools are available, prefer these issue-scoped tools:
- `paperclipGetIssueWorkspaceRuntime` — reads `currentExecutionWorkspace` and service URLs for an issue.
- `paperclipControlIssueWorkspaceServices` — starts, stops, or restarts the current issue workspace services.
- `paperclipWaitForIssueWorkspaceService` — waits until a selected service is running and returns its URL when exposed.
These tools resolve the issue's workspace id for you, so QA agents do not need to know the lower-level execution workspace endpoint first.

View File

@@ -0,0 +1,141 @@
# Paperclip Workflow Playbooks
Reference material for niche workflows that are pointed to from `SKILL.md`. Load only when the task matches.
---
## Project Setup (CEO/Manager)
When asked to set up a new project with workspace config (local folder and/or GitHub repo):
1. `POST /api/companies/{companyId}/projects` with project fields.
2. Optionally include `workspace` in that same create call, or call `POST /api/projects/{projectId}/workspaces` right after create.
Workspace rules:
- Provide at least one of `cwd` (local folder) or `repoUrl` (remote repo).
- For repo-only setup, omit `cwd` and provide `repoUrl`.
- Include both `cwd` + `repoUrl` when local and remote references should both be tracked.
---
## OpenClaw Invite (CEO)
Use this when asked to invite a new OpenClaw employee.
1. Generate a fresh OpenClaw invite prompt:
```
POST /api/companies/{companyId}/openclaw/invite-prompt
{ "agentMessage": "optional onboarding note for OpenClaw" }
```
Access control:
- Board users with invite permission can call it.
- Agent callers: only the company CEO agent can call it.
2. Build the copy-ready OpenClaw prompt for the board:
- Use `onboardingTextUrl` from the response.
- Ask the board to paste that prompt into OpenClaw.
- If the issue includes an OpenClaw URL (for example `ws://127.0.0.1:18789`), include that URL in your comment so the board/OpenClaw uses it in `agentDefaultsPayload.url`.
3. Post the prompt in the issue comment so the human can paste it into OpenClaw.
4. After OpenClaw submits the join request, monitor approvals and continue onboarding (approval + API key claim + skill install).
---
## Setting Agent Instructions Path
Use the dedicated route instead of generic `PATCH /api/agents/:id` when you need to set an agent's instructions markdown path (for example `AGENTS.md`).
```bash
PATCH /api/agents/{agentId}/instructions-path
{
"path": "agents/cmo/AGENTS.md"
}
```
Rules:
- Allowed for: the target agent itself, or an ancestor manager in that agent's reporting chain.
- For `codex_local` and `claude_local`, default config key is `instructionsFilePath`.
- Relative paths are resolved against the target agent's `adapterConfig.cwd`; absolute paths are accepted as-is.
- To clear the path, send `{ "path": null }`.
- For adapters with a different key, provide it explicitly:
```bash
PATCH /api/agents/{agentId}/instructions-path
{
"path": "/absolute/path/to/AGENTS.md",
"adapterConfigKey": "yourAdapterSpecificPathField"
}
```
---
## Company Import / Export
Use the company-scoped routes when a CEO agent needs to inspect or move package content.
- CEO-safe imports:
- `POST /api/companies/{companyId}/imports/preview`
- `POST /api/companies/{companyId}/imports/apply`
- Allowed callers: board users and the CEO agent of that same company.
- Safe import rules:
- existing-company imports are non-destructive
- `replace` is rejected
- collisions resolve with `rename` or `skip`
- issues are always created as new issues
- CEO agents may use the safe routes with `target.mode = "new_company"` to create a new company directly. Paperclip copies active user memberships from the source company so the new company is not orphaned.
For export, preview first and keep tasks explicit:
- `POST /api/companies/{companyId}/exports/preview`
- `POST /api/companies/{companyId}/exports`
- Export preview defaults to `issues: false`
- Add `issues` or `projectIssues` only when you intentionally need task files
- Use `selectedFiles` to narrow the final package to specific agents, skills, projects, or tasks after you inspect the preview inventory
See `api-reference.md` for full schema examples.
---
## Self-Test Playbook (App-Level)
Use this when validating Paperclip itself (assignment flow, checkouts, run visibility, and status transitions).
1. Create a throwaway issue assigned to a known local agent (`claudecoder` or `codexcoder`):
```bash
npx paperclipai issue create \
--company-id "$PAPERCLIP_COMPANY_ID" \
--title "Self-test: assignment/watch flow" \
--description "Temporary validation issue" \
--status todo \
--assignee-agent-id "$PAPERCLIP_AGENT_ID"
```
2. Trigger and watch a heartbeat for that assignee:
```bash
npx paperclipai heartbeat run --agent-id "$PAPERCLIP_AGENT_ID"
```
3. Verify the issue transitions (`todo -> in_progress -> done` or `blocked`) and that comments are posted:
```bash
npx paperclipai issue get <issue-id-or-identifier>
```
4. Reassignment test (optional): move the same issue between `claudecoder` and `codexcoder` and confirm wake/run behavior:
```bash
npx paperclipai issue update <issue-id> --assignee-agent-id <other-agent-id> --status todo
```
5. Cleanup: mark temporary issues done/cancelled with a clear note.
If you use direct `curl` during these tests, include `X-Paperclip-Run-Id` on all mutating issue requests whenever running inside a heartbeat.

View File

@@ -15,6 +15,11 @@ export interface RunForIssue {
usageJson: Record<string, unknown> | null;
resultJson: Record<string, unknown> | null;
logBytes?: number | null;
retryOfRunId?: string | null;
scheduledRetryAt?: string | null;
scheduledRetryAttempt?: number;
scheduledRetryReason?: string | null;
retryExhaustedReason?: string | null;
livenessState?: RunLivenessState | null;
livenessReason?: string | null;
continuationAttempt?: number;
@@ -31,11 +36,12 @@ export interface IssueForRun {
}
export const activityApi = {
list: (companyId: string, filters?: { entityType?: string; entityId?: string; agentId?: string }) => {
list: (companyId: string, filters?: { entityType?: string; entityId?: string; agentId?: string; limit?: number }) => {
const params = new URLSearchParams();
if (filters?.entityType) params.set("entityType", filters.entityType);
if (filters?.entityId) params.set("entityId", filters.entityId);
if (filters?.agentId) params.set("agentId", filters.agentId);
if (filters?.limit) params.set("limit", String(filters.limit));
const qs = params.toString();
return api.get<ActivityEvent[]>(`/companies/${companyId}/activity${qs ? `?${qs}` : ""}`);
},

View File

@@ -65,6 +65,9 @@ function createRun(overrides: Partial<HeartbeatRun> = {}): HeartbeatRun {
processStartedAt: null,
retryOfRunId: null,
processLossRetryCount: 0,
scheduledRetryAt: null,
scheduledRetryAttempt: 0,
scheduledRetryReason: null,
livenessState: null,
livenessReason: null,
continuationAttempt: 0,

View File

@@ -192,6 +192,40 @@ describe("IssueRunLedger", () => {
expect(container.textContent).not.toContain("initial attempt");
});
// Verifies the run ledger renders retry-scheduling state as plain badge/label
// copy, without the user having to open raw run logs:
// - a run in `scheduled_retry` status shows its attempt number, reason, and
//   the next-retry timestamp
// - a `failed` run carrying `retryExhaustedReason` shows the exhausted state
//   and the "no further automatic retry" message.
it("surfaces scheduled retry timing and exhaustion state without opening logs", () => {
renderLedger({
runs: [
// Pending automatic retry: no finish time or liveness data yet, only
// the scheduled retry metadata (attempt 2 of a transient failure).
createRun({
runId: "run-scheduled",
status: "scheduled_retry",
finishedAt: null,
livenessState: null,
livenessReason: null,
retryOfRunId: "run-root",
scheduledRetryAt: "2026-04-18T20:15:00.000Z",
scheduledRetryAttempt: 2,
scheduledRetryReason: "transient_failure",
}),
// Same retry chain (retryOfRunId: "run-root"), but the retry budget ran
// out: the run failed and carries a retryExhaustedReason.
createRun({
runId: "run-exhausted",
status: "failed",
createdAt: "2026-04-18T19:57:00.000Z",
retryOfRunId: "run-root",
scheduledRetryAttempt: 4,
scheduledRetryReason: "transient_failure",
retryExhaustedReason: "Bounded retry exhausted after 4 scheduled attempts; no further automatic retry will be queued",
}),
],
});
// Scheduled-retry run: badge, attempt counter, humanized reason, and timing.
expect(container.textContent).toContain("Retry scheduled");
expect(container.textContent).toContain("Attempt 2");
expect(container.textContent).toContain("Transient failure");
expect(container.textContent).toContain("Next retry");
// Exhausted run: terminal retry state is spelled out in the ledger copy.
expect(container.textContent).toContain("Retry exhausted");
expect(container.textContent).toContain("No further automatic retry queued");
});
it("shows timeout, cancel, and budget stop reasons without raw logs", () => {
renderLedger({
runs: [

View File

@@ -7,6 +7,7 @@ import { heartbeatsApi, type ActiveRunForIssue, type LiveRunForIssue } from "../
import { cn, relativeTime } from "../lib/utils";
import { queryKeys } from "../lib/queryKeys";
import { keepPreviousDataForSameQueryTail } from "../lib/query-placeholder-data";
import { describeRunRetryState } from "../lib/runRetryState";
type IssueRunLedgerProps = {
issueId: string;
@@ -80,6 +81,12 @@ const PENDING_LIVENESS_COPY: LivenessCopy = {
description: "Liveness is evaluated after the run finishes.",
};
const RETRY_PENDING_LIVENESS_COPY: LivenessCopy = {
label: "Retry pending",
tone: "border-cyan-500/30 bg-cyan-500/10 text-cyan-700 dark:text-cyan-300",
description: "Paperclip queued an automatic retry that has not started yet.",
};
const MISSING_LIVENESS_COPY: LivenessCopy = {
label: "No liveness data",
tone: "border-border bg-background text-muted-foreground",
@@ -174,10 +181,12 @@ function runSummary(run: LedgerRun, agentMap: ReadonlyMap<string, Pick<Agent, "n
const agentName = compactAgentName(run, agentMap);
if (run.status === "running") return `Running now by ${agentName}`;
if (run.status === "queued") return `Queued for ${agentName}`;
if (run.status === "scheduled_retry") return `Automatic retry scheduled for ${agentName}`;
return `${statusLabel(run.status)} by ${agentName}`;
}
function livenessCopyForRun(run: LedgerRun) {
if (run.status === "scheduled_retry") return RETRY_PENDING_LIVENESS_COPY;
if (run.livenessState) return LIVENESS_COPY[run.livenessState];
return isActiveRun(run) ? PENDING_LIVENESS_COPY : MISSING_LIVENESS_COPY;
}
@@ -204,6 +213,7 @@ function stopReasonLabel(run: RunForIssue) {
function stopStatusLabel(run: LedgerRun, stopReason: string | null) {
if (stopReason) return stopReason;
if (run.status === "scheduled_retry") return "Retry pending";
if (run.status === "queued") return "Waiting to start";
if (run.status === "running") return "Still running";
if (!run.livenessState) return "Unavailable";
@@ -211,6 +221,7 @@ function stopStatusLabel(run: LedgerRun, stopReason: string | null) {
}
function lastUsefulActionLabel(run: LedgerRun) {
if (run.status === "scheduled_retry") return "Waiting for next attempt";
if (run.lastUsefulActionAt) return relativeTime(run.lastUsefulActionAt);
if (isActiveRun(run)) return "No action recorded yet";
if (run.livenessState === "plan_only" || run.livenessState === "needs_followup") {
@@ -251,7 +262,7 @@ export function IssueRunLedger({
const { data: runs } = useQuery({
queryKey: queryKeys.issues.runs(issueId),
queryFn: () => activityApi.runsForIssue(issueId),
refetchInterval: hasLiveRuns ? 5000 : false,
refetchInterval: hasLiveRuns || issueStatus === "in_progress" ? 5000 : false,
placeholderData: keepPreviousDataForSameQueryTail<RunForIssue[]>(issueId),
});
const { data: liveRuns } = useQuery({
@@ -361,6 +372,7 @@ export function IssueRunLedgerContent({
const duration = formatDuration(run.startedAt, run.finishedAt);
const exhausted = hasExhaustedContinuation(run);
const continuation = continuationLabel(run);
const retryState = describeRunRetryState(run);
return (
<article key={run.runId} className="space-y-2 px-3 py-3">
<div className="flex flex-wrap items-center gap-2">
@@ -396,6 +408,16 @@ export function IssueRunLedgerContent({
{continuation ? (
<span className="text-[11px] text-muted-foreground">{continuation}</span>
) : null}
{retryState ? (
<span
className={cn(
"rounded-md border px-1.5 py-0.5 text-[11px] font-medium",
retryState.tone,
)}
>
{retryState.badgeLabel}
</span>
) : null}
</div>
<div className="grid gap-2 text-xs text-muted-foreground sm:grid-cols-3">
@@ -413,6 +435,24 @@ export function IssueRunLedgerContent({
</div>
</div>
{retryState ? (
<div className="rounded-md border border-border/70 bg-accent/20 px-2 py-2 text-xs leading-5 text-muted-foreground">
{retryState.detail ? <p>{retryState.detail}</p> : null}
{retryState.secondary ? <p>{retryState.secondary}</p> : null}
{retryState.retryOfRunId ? (
<p>
Retry of{" "}
<Link
to={`/agents/${run.agentId}/runs/${retryState.retryOfRunId}`}
className="font-mono text-foreground hover:underline"
>
{retryState.retryOfRunId.slice(0, 8)}
</Link>
</p>
) : null}
</div>
) : null}
{run.livenessReason ? (
<p className="min-w-0 break-words text-xs leading-5 text-muted-foreground">
{run.livenessReason}

View File

@@ -9,7 +9,7 @@ export function StatusBadge({ status }: { status: string }) {
statusBadge[status] ?? statusBadgeDefault
)}
>
{status.replace("_", " ")}
{status.replace(/_/g, " ")}
</span>
);
}

View File

@@ -0,0 +1,42 @@
import { describe, expect, it } from "vitest";
import { describeRunRetryState, formatRetryReason } from "./runRetryState";
describe("runRetryState", () => {
  it("formats internal retry reasons for operators", () => {
    // Known internal codes map to curated operator-facing labels.
    expect(formatRetryReason("transient_failure")).toBe("Transient failure");
    expect(formatRetryReason("issue_continuation_needed")).toBe("Continuation needed");
    // Unknown codes fall back to underscore-to-space formatting.
    expect(formatRetryReason("custom_reason")).toBe("custom reason");
  });

  it("describes scheduled retries", () => {
    const summary = describeRunRetryState({
      status: "scheduled_retry",
      retryOfRunId: "run-1",
      scheduledRetryAttempt: 2,
      scheduledRetryReason: "transient_failure",
      scheduledRetryAt: "2026-04-18T20:15:00.000Z",
    });
    expect(summary).toMatchObject({
      kind: "scheduled",
      badgeLabel: "Retry scheduled",
      detail: "Attempt 2 · Transient failure",
    });
  });

  it("describes exhausted retries", () => {
    const summary = describeRunRetryState({
      status: "failed",
      retryOfRunId: "run-1",
      scheduledRetryAttempt: 4,
      scheduledRetryReason: "transient_failure",
      retryExhaustedReason: "Bounded retry exhausted after 4 scheduled attempts; no further automatic retry will be queued",
    });
    expect(summary).toMatchObject({
      kind: "exhausted",
      badgeLabel: "Retry exhausted",
      detail: "Attempt 4 · Transient failure · No further automatic retry queued",
    });
  });
});

View File

@@ -0,0 +1,93 @@
import { formatDateTime } from "./utils";
// Minimal structural shape of a run that may carry retry metadata. All retry
// fields are optional so both ledger rows and full run records can be passed.
type RetryAwareRun = {
  status: string;
  retryOfRunId?: string | null;
  scheduledRetryAt?: string | Date | null;
  scheduledRetryAttempt?: number | null;
  scheduledRetryReason?: string | null;
  retryExhaustedReason?: string | null;
};

// Operator-facing summary of a run's retry state, rendered as a badge plus
// optional detail/secondary lines in the run ledger and run detail views.
export type RunRetryStateSummary = {
  kind: "scheduled" | "exhausted" | "attempted";
  badgeLabel: string;
  // Tailwind class string controlling the badge's border/background/text color.
  tone: string;
  detail: string | null;
  secondary: string | null;
  retryOfRunId: string | null;
};

// Curated labels for known internal retry reason codes; codes missing from
// this map fall back to underscore-to-space formatting in formatRetryReason.
const RETRY_REASON_LABELS: Record<string, string> = {
  transient_failure: "Transient failure",
  missing_issue_comment: "Missing issue comment",
  process_lost: "Process lost",
  assignment_recovery: "Assignment recovery",
  issue_continuation_needed: "Continuation needed",
};
// Return the trimmed value when it is a non-blank string; otherwise null.
function readNonEmptyString(value: unknown) {
  if (typeof value !== "string") return null;
  const trimmed = value.trim();
  return trimmed.length > 0 ? trimmed : null;
}
// Join the truthy fragments with a middle-dot separator; null when none remain.
function joinFragments(parts: Array<string | null>) {
  const kept: string[] = [];
  for (const part of parts) {
    if (part) kept.push(part);
  }
  return kept.length === 0 ? null : kept.join(" · ");
}
/**
 * Map an internal retry reason code to an operator-friendly label.
 * Blank/absent input yields null; unknown codes get underscores replaced
 * with spaces rather than being hidden.
 */
export function formatRetryReason(reason: string | null | undefined) {
  const code = readNonEmptyString(reason);
  if (code === null) return null;
  const curated = RETRY_REASON_LABELS[code];
  return curated ?? code.replace(/_/g, " ");
}
/**
 * Summarize a run's automatic-retry state for operator-facing UI.
 *
 * Returns null when the run carries no retry metadata at all; otherwise
 * classifies the run as a pending scheduled retry, an exhausted retry chain,
 * or a settled run that was itself a retry attempt.
 */
export function describeRunRetryState(run: RetryAwareRun): RunRetryStateSummary | null {
  const rawAttempt = run.scheduledRetryAttempt;
  const attemptNumber =
    typeof rawAttempt === "number" && Number.isFinite(rawAttempt) && rawAttempt > 0
      ? rawAttempt
      : null;
  const attemptLabel = attemptNumber === null ? null : `Attempt ${attemptNumber}`;
  const reasonLabel = formatRetryReason(run.scheduledRetryReason);
  const parentRunId = readNonEmptyString(run.retryOfRunId);
  const exhaustedReason = readNonEmptyString(run.retryExhaustedReason);
  const dueAt = run.scheduledRetryAt ? formatDateTime(run.scheduledRetryAt) : null;

  // Runs that carry no retry-related fields get no badge at all.
  const metadata = [parentRunId, reasonLabel, dueAt, attemptLabel, exhaustedReason];
  if (metadata.every((field) => !field)) return null;

  if (run.status === "scheduled_retry") {
    // A retry is queued but has not started yet.
    return {
      kind: "scheduled",
      badgeLabel: "Retry scheduled",
      tone: "border-cyan-500/30 bg-cyan-500/10 text-cyan-700 dark:text-cyan-300",
      detail: joinFragments([attemptLabel, reasonLabel]),
      secondary: dueAt ? `Next retry ${dueAt}` : "Next retry pending schedule",
      retryOfRunId: parentRunId,
    };
  }

  if (exhaustedReason) {
    // The bounded retry budget ran out; no further automatic retry is queued.
    return {
      kind: "exhausted",
      badgeLabel: "Retry exhausted",
      tone: "border-amber-500/30 bg-amber-500/10 text-amber-700 dark:text-amber-300",
      detail: joinFragments([attemptLabel, reasonLabel, "No further automatic retry queued"]),
      secondary: exhaustedReason,
      retryOfRunId: parentRunId,
    };
  }

  // A finished run that was itself a retry of an earlier run.
  return {
    kind: "attempted",
    badgeLabel: "Retried run",
    tone: "border-slate-500/20 bg-slate-500/10 text-slate-700 dark:text-slate-300",
    detail: joinFragments([attemptLabel, reasonLabel]),
    secondary: null,
    retryOfRunId: parentRunId,
  };
}

View File

@@ -43,6 +43,7 @@ export const statusBadge: Record<string, string> = {
// Agent statuses
active: "bg-green-100 text-green-700 dark:bg-green-900/50 dark:text-green-300",
running: "bg-cyan-100 text-cyan-700 dark:bg-cyan-900/50 dark:text-cyan-300",
scheduled_retry: "bg-sky-100 text-sky-700 dark:bg-sky-900/50 dark:text-sky-300",
paused: "bg-orange-100 text-orange-700 dark:bg-orange-900/50 dark:text-orange-300",
idle: "bg-yellow-100 text-yellow-700 dark:bg-yellow-900/50 dark:text-yellow-300",
archived: "bg-muted text-muted-foreground",

View File

@@ -1,11 +1,9 @@
import { useEffect, useMemo, useState } from "react";
import { useQuery } from "@tanstack/react-query";
import type { ActivityEvent, Agent } from "@paperclipai/shared";
import { activityApi } from "../api/activity";
import { accessApi } from "../api/access";
import { agentsApi } from "../api/agents";
import { issuesApi } from "../api/issues";
import { projectsApi } from "../api/projects";
import { goalsApi } from "../api/goals";
import { buildCompanyUserProfileMap } from "../lib/company-members";
import { useCompany } from "../context/CompanyContext";
import { useBreadcrumbs } from "../context/BreadcrumbContext";
@@ -21,7 +19,29 @@ import {
SelectValue,
} from "@/components/ui/select";
import { History } from "lucide-react";
import type { Agent } from "@paperclipai/shared";
const ACTIVITY_PAGE_LIMIT = 200;
// Return the first non-blank string found under any of the given detail keys
// (untrimmed, as stored); null when details are absent or no key matches.
function detailString(event: ActivityEvent, ...keys: string[]) {
  const details = event.details;
  if (details == null) return null;
  for (const key of keys) {
    const candidate = details[key];
    if (typeof candidate === "string" && candidate.trim().length > 0) return candidate;
  }
  return null;
}
// Pick a short display name for the event's entity from its detail payload,
// preferring entity-type-specific keys before generic name/title fallbacks.
function activityEntityName(event: ActivityEvent) {
  switch (event.entityType) {
    case "issue":
      return detailString(event, "identifier", "issueIdentifier");
    case "project":
      return detailString(event, "projectName", "name", "title");
    case "goal":
      return detailString(event, "goalTitle", "title", "name");
    default:
      return detailString(event, "name", "title");
  }
}
// Issues additionally carry a full title; other entity types have none.
function activityEntityTitle(event: ActivityEvent) {
  if (event.entityType !== "issue") return null;
  return detailString(event, "issueTitle", "title");
}
export function Activity() {
const { selectedCompanyId } = useCompany();
@@ -33,8 +53,8 @@ export function Activity() {
}, [setBreadcrumbs]);
const { data, isLoading, error } = useQuery({
queryKey: queryKeys.activity(selectedCompanyId!),
queryFn: () => activityApi.list(selectedCompanyId!),
queryKey: [...queryKeys.activity(selectedCompanyId!), { limit: ACTIVITY_PAGE_LIMIT }],
queryFn: () => activityApi.list(selectedCompanyId!, { limit: ACTIVITY_PAGE_LIMIT }),
enabled: !!selectedCompanyId,
});
@@ -44,24 +64,6 @@ export function Activity() {
enabled: !!selectedCompanyId,
});
const { data: issues } = useQuery({
queryKey: queryKeys.issues.list(selectedCompanyId!),
queryFn: () => issuesApi.list(selectedCompanyId!),
enabled: !!selectedCompanyId,
});
const { data: projects } = useQuery({
queryKey: queryKeys.projects.list(selectedCompanyId!),
queryFn: () => projectsApi.list(selectedCompanyId!),
enabled: !!selectedCompanyId,
});
const { data: goals } = useQuery({
queryKey: queryKeys.goals.list(selectedCompanyId!),
queryFn: () => goalsApi.list(selectedCompanyId!),
enabled: !!selectedCompanyId,
});
const { data: companyMembers } = useQuery({
queryKey: queryKeys.access.companyUserDirectory(selectedCompanyId!),
queryFn: () => accessApi.listUserDirectory(selectedCompanyId!),
@@ -81,18 +83,22 @@ export function Activity() {
const entityNameMap = useMemo(() => {
const map = new Map<string, string>();
for (const i of issues ?? []) map.set(`issue:${i.id}`, i.identifier ?? i.id.slice(0, 8));
for (const a of agents ?? []) map.set(`agent:${a.id}`, a.name);
for (const p of projects ?? []) map.set(`project:${p.id}`, p.name);
for (const g of goals ?? []) map.set(`goal:${g.id}`, g.title);
for (const event of data ?? []) {
const name = activityEntityName(event);
if (name) map.set(`${event.entityType}:${event.entityId}`, name);
}
return map;
}, [issues, agents, projects, goals]);
}, [data, agents]);
const entityTitleMap = useMemo(() => {
const map = new Map<string, string>();
for (const i of issues ?? []) map.set(`issue:${i.id}`, i.title);
for (const event of data ?? []) {
const title = activityEntityTitle(event);
if (title) map.set(`${event.entityType}:${event.entityId}`, title);
}
return map;
}, [issues]);
}, [data]);
if (!selectedCompanyId) {
return <EmptyState icon={History} message="Select a company to view activity." />;

View File

@@ -43,6 +43,7 @@ import { PackageFileTree, buildFileTree } from "../components/PackageFileTree";
import { ScrollToBottom } from "../components/ScrollToBottom";
import { formatCents, formatDate, relativeTime, formatTokens, visibleRunCostUsd } from "../lib/utils";
import { cn } from "../lib/utils";
import { describeRunRetryState } from "../lib/runRetryState";
import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { Tabs } from "@/components/ui/tabs";
@@ -104,6 +105,7 @@ const runStatusIcons: Record<string, { icon: typeof CheckCircle2; color: string
failed: { icon: XCircle, color: "text-red-600 dark:text-red-400" },
running: { icon: Loader2, color: "text-cyan-600 dark:text-cyan-400" },
queued: { icon: Clock, color: "text-yellow-600 dark:text-yellow-400" },
scheduled_retry: { icon: Clock, color: "text-sky-600 dark:text-sky-400" },
timed_out: { icon: Timer, color: "text-orange-600 dark:text-orange-400" },
cancelled: { icon: Slash, color: "text-neutral-500 dark:text-neutral-400" },
};
@@ -2342,26 +2344,39 @@ function PromptsTab({
</p>
</div>
</div>
{selectedFileExists && !selectedFileSummary?.deprecated && selectedOrEntryFile !== currentEntryFile && (
<Button
type="button"
size="sm"
variant="outline"
onClick={() => {
if (confirm(`Delete ${selectedOrEntryFile}?`)) {
deleteFile.mutate(selectedOrEntryFile, {
onSuccess: () => {
setSelectedFile(currentEntryFile);
setDraft(null);
},
});
}
}}
disabled={deleteFile.isPending}
>
Delete
</Button>
)}
<div className="flex items-center gap-2">
{!fileLoading && (
<CopyText
text={displayValue}
ariaLabel="Copy instructions file as markdown"
title="Copy as markdown"
copiedLabel="Copied"
className="inline-flex h-8 w-8 items-center justify-center rounded-md border border-border text-muted-foreground hover:bg-accent hover:text-foreground"
>
<Copy className="h-3.5 w-3.5" />
</CopyText>
)}
{selectedFileExists && !selectedFileSummary?.deprecated && selectedOrEntryFile !== currentEntryFile && (
<Button
type="button"
size="sm"
variant="outline"
onClick={() => {
if (confirm(`Delete ${selectedOrEntryFile}?`)) {
deleteFile.mutate(selectedOrEntryFile, {
onSuccess: () => {
setSelectedFile(currentEntryFile);
setDraft(null);
},
});
}
}}
disabled={deleteFile.isPending}
>
Delete
</Button>
)}
</div>
</div>
{selectedFileExists && fileLoading && !selectedFileDetail ? (
@@ -3141,6 +3156,7 @@ function RunDetail({ run: initialRun, agentRouteId, adapterType, adapterConfig }
const sessionChanged = run.sessionIdBefore && run.sessionIdAfter && run.sessionIdBefore !== run.sessionIdAfter;
const sessionId = run.sessionIdAfter || run.sessionIdBefore;
const hasNonZeroExit = run.exitCode !== null && run.exitCode !== 0;
const retryState = describeRunRetryState(run);
return (
<div className="space-y-4 min-w-0">
@@ -3295,6 +3311,30 @@ function RunDetail({ run: initialRun, agentRouteId, adapterType, adapterConfig }
{run.signal && <span className="text-muted-foreground ml-1">(signal: {run.signal})</span>}
</div>
)}
{retryState && (
<div className="rounded-md border border-border/70 bg-accent/20 px-3 py-2 text-xs leading-5">
<div className="flex flex-wrap items-center gap-2">
<span
className={cn(
"rounded-md border px-1.5 py-0.5 text-[11px] font-medium",
retryState.tone,
)}
>
{retryState.badgeLabel}
</span>
{retryState.retryOfRunId ? (
<Link
to={`/agents/${agentRouteId}/runs/${retryState.retryOfRunId}`}
className="font-mono text-foreground hover:underline"
>
{retryState.retryOfRunId.slice(0, 8)}
</Link>
) : null}
</div>
{retryState.detail ? <p className="mt-2 text-muted-foreground">{retryState.detail}</p> : null}
{retryState.secondary ? <p className="text-muted-foreground">{retryState.secondary}</p> : null}
</div>
)}
</div>
{/* Right column: metrics */}

View File

@@ -27,6 +27,8 @@ import { PageSkeleton } from "../components/PageSkeleton";
import type { Agent, Issue } from "@paperclipai/shared";
import { PluginSlotOutlet } from "@/plugins/slots";
const DASHBOARD_ACTIVITY_LIMIT = 10;
function getRecentIssues(issues: Issue[]): Issue[] {
return [...issues]
.sort((a, b) => new Date(b.updatedAt).getTime() - new Date(a.updatedAt).getTime());
@@ -58,8 +60,8 @@ export function Dashboard() {
});
const { data: activity } = useQuery({
queryKey: queryKeys.activity(selectedCompanyId!),
queryFn: () => activityApi.list(selectedCompanyId!),
queryKey: [...queryKeys.activity(selectedCompanyId!), { limit: DASHBOARD_ACTIVITY_LIMIT }],
queryFn: () => activityApi.list(selectedCompanyId!, { limit: DASHBOARD_ACTIVITY_LIMIT }),
enabled: !!selectedCompanyId,
});

View File

@@ -123,6 +123,10 @@ function makeHeartbeatRun(overrides: Partial<HeartbeatRun>): HeartbeatRun {
processStartedAt: createdAt,
retryOfRunId: null,
processLossRetryCount: 0,
scheduledRetryAt: null,
scheduledRetryAttempt: 0,
scheduledRetryReason: null,
retryExhaustedReason: null,
livenessState: "completed",
livenessReason: null,
continuationAttempt: 0,