mirror of
https://github.com/glittercowboy/get-shit-done
synced 2026-04-26 01:35:29 +02:00
Compare commits
77 Commits
feat/2071-
...
fix/2134-c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
eb03ba3dd8 | ||
|
|
637daa831b | ||
|
|
553d9db56e | ||
|
|
8009b67e3e | ||
|
|
6b7b6a0ae8 | ||
|
|
177cb544cb | ||
|
|
3d096cb83c | ||
|
|
805696bd03 | ||
|
|
e24cb18b72 | ||
|
|
d19b61a158 | ||
|
|
29f8bfeead | ||
|
|
d59d635560 | ||
|
|
ce1bb1f9ca | ||
|
|
121839e039 | ||
|
|
6b643b37f4 | ||
|
|
50be9321e3 | ||
|
|
190804fc73 | ||
|
|
0c266958e4 | ||
|
|
d8e7a1166b | ||
|
|
3e14904afe | ||
|
|
6d590dfe19 | ||
|
|
f1960fad67 | ||
|
|
898dbf03e6 | ||
|
|
362e5ac36c | ||
|
|
3865afd254 | ||
|
|
091793d2c6 | ||
|
|
06daaf4c68 | ||
|
|
4ad7ecc6c6 | ||
|
|
9d5d7d76e7 | ||
|
|
bae220c5ad | ||
|
|
8961322141 | ||
|
|
3c2cc7189a | ||
|
|
9ff6ca20cf | ||
|
|
73be20215e | ||
|
|
ae17848ef1 | ||
|
|
f425bf9142 | ||
|
|
4553d356d2 | ||
|
|
319663deb7 | ||
|
|
868e3d488f | ||
|
|
3f3fd0a723 | ||
|
|
21ebeb8713 | ||
|
|
53995faa8f | ||
|
|
9ac7b7f579 | ||
|
|
ff0b06b43a | ||
|
|
72e789432e | ||
|
|
23763f920b | ||
|
|
9435c4dd38 | ||
|
|
f34dc66fa9 | ||
|
|
1f7ca6b9e8 | ||
|
|
6b0e3904c2 | ||
|
|
aa4532b820 | ||
|
|
0e1711b460 | ||
|
|
b84dfd4c9b | ||
|
|
5a302f477a | ||
|
|
01f0b4b540 | ||
|
|
f1b3702be8 | ||
|
|
0a18fc3464 | ||
|
|
7752234e75 | ||
|
|
7be9affea2 | ||
|
|
42ad3fe853 | ||
|
|
67aeb049c2 | ||
|
|
5638448296 | ||
|
|
e5cc0bb48b | ||
|
|
bd7048985d | ||
|
|
e0b766a08b | ||
|
|
2efce9fd2a | ||
|
|
2cd0e0d8f0 | ||
|
|
cad40fff8b | ||
|
|
053269823b | ||
|
|
08d1767a1b | ||
|
|
6c2795598a | ||
|
|
1274e0e82c | ||
|
|
7a674c81b7 | ||
|
|
5c0e801322 | ||
|
|
96eef85c40 | ||
|
|
2b4b48401c | ||
|
|
f8cf54bd01 |
4
.github/workflows/auto-branch.yml
vendored
4
.github/workflows/auto-branch.yml
vendored
@@ -16,10 +16,10 @@ jobs:
|
||||
contains(fromJSON('["bug", "enhancement", "priority: critical", "type: chore", "area: docs"]'),
|
||||
github.event.label.name)
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Create branch
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
with:
|
||||
script: |
|
||||
const label = context.payload.label.name;
|
||||
|
||||
2
.github/workflows/auto-label-issues.yml
vendored
2
.github/workflows/auto-label-issues.yml
vendored
@@ -10,7 +10,7 @@ jobs:
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- uses: actions/github-script@v8
|
||||
- uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
with:
|
||||
script: |
|
||||
await github.rest.issues.addLabels({
|
||||
|
||||
123
.github/workflows/branch-cleanup.yml
vendored
Normal file
123
.github/workflows/branch-cleanup.yml
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
name: Branch Cleanup
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [closed]
|
||||
schedule:
|
||||
- cron: '0 4 * * 0' # Sunday 4am UTC — weekly orphan sweep
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
# Runs immediately when a PR is merged — deletes the head branch.
|
||||
# Belt-and-suspenders alongside the repo's delete_branch_on_merge setting,
|
||||
# which handles web/API merges but may be bypassed by some CLI paths.
|
||||
delete-merged-branch:
|
||||
name: Delete merged PR branch
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 2
|
||||
if: github.event_name == 'pull_request' && github.event.pull_request.merged == true
|
||||
steps:
|
||||
- name: Delete head branch
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
with:
|
||||
script: |
|
||||
const branch = context.payload.pull_request.head.ref;
|
||||
const protectedBranches = ['main', 'develop', 'release'];
|
||||
if (protectedBranches.includes(branch)) {
|
||||
core.info(`Skipping protected branch: ${branch}`);
|
||||
return;
|
||||
}
|
||||
try {
|
||||
await github.rest.git.deleteRef({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
ref: `heads/${branch}`,
|
||||
});
|
||||
core.info(`Deleted branch: ${branch}`);
|
||||
} catch (e) {
|
||||
// 422 = branch already deleted (e.g. by delete_branch_on_merge setting)
|
||||
if (e.status === 422) {
|
||||
core.info(`Branch already deleted: ${branch}`);
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
# Runs weekly to catch any orphaned branches whose PRs were merged
|
||||
# before this workflow existed, or that slipped through edge cases.
|
||||
sweep-orphaned-branches:
|
||||
name: Weekly orphaned branch sweep
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
||||
steps:
|
||||
- name: Delete branches from merged PRs
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
with:
|
||||
script: |
|
||||
const protectedBranches = new Set(['main', 'develop', 'release']);
|
||||
const deleted = [];
|
||||
const skipped = [];
|
||||
|
||||
// Paginate through all branches (100 per page)
|
||||
let page = 1;
|
||||
let allBranches = [];
|
||||
while (true) {
|
||||
const { data } = await github.rest.repos.listBranches({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
per_page: 100,
|
||||
page,
|
||||
});
|
||||
allBranches = allBranches.concat(data);
|
||||
if (data.length < 100) break;
|
||||
page++;
|
||||
}
|
||||
|
||||
core.info(`Scanning ${allBranches.length} branches...`);
|
||||
|
||||
for (const branch of allBranches) {
|
||||
if (protectedBranches.has(branch.name)) continue;
|
||||
|
||||
// Find the most recent closed PR for this branch
|
||||
const { data: prs } = await github.rest.pulls.list({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
head: `${context.repo.owner}:${branch.name}`,
|
||||
state: 'closed',
|
||||
per_page: 1,
|
||||
sort: 'updated',
|
||||
direction: 'desc',
|
||||
});
|
||||
|
||||
if (prs.length === 0 || !prs[0].merged_at) {
|
||||
skipped.push(branch.name);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
await github.rest.git.deleteRef({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
ref: `heads/${branch.name}`,
|
||||
});
|
||||
deleted.push(branch.name);
|
||||
} catch (e) {
|
||||
if (e.status !== 422) {
|
||||
core.warning(`Failed to delete ${branch.name}: ${e.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const summary = [
|
||||
`Deleted ${deleted.length} orphaned branch(es).`,
|
||||
deleted.length > 0 ? ` Removed: ${deleted.join(', ')}` : '',
|
||||
skipped.length > 0 ? ` Skipped (no merged PR): ${skipped.length} branch(es)` : '',
|
||||
].filter(Boolean).join('\n');
|
||||
|
||||
core.info(summary);
|
||||
await core.summary.addRaw(summary).write();
|
||||
2
.github/workflows/branch-naming.yml
vendored
2
.github/workflows/branch-naming.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
||||
timeout-minutes: 1
|
||||
steps:
|
||||
- name: Validate branch naming convention
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
with:
|
||||
script: |
|
||||
const branch = context.payload.pull_request.head.ref;
|
||||
|
||||
2
.github/workflows/close-draft-prs.yml
vendored
2
.github/workflows/close-draft-prs.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Comment and close draft PR
|
||||
uses: actions/github-script@v7
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
with:
|
||||
script: |
|
||||
const pr = context.payload.pull_request;
|
||||
|
||||
10
.github/workflows/hotfix.yml
vendored
10
.github/workflows/hotfix.yml
vendored
@@ -190,6 +190,16 @@ jobs:
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
- name: Create GitHub Release
|
||||
if: ${{ !inputs.dry_run }}
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
VERSION: ${{ inputs.version }}
|
||||
run: |
|
||||
gh release create "v${VERSION}" \
|
||||
--title "v${VERSION} (hotfix)" \
|
||||
--generate-notes
|
||||
|
||||
- name: Clean up next dist-tag
|
||||
if: ${{ !inputs.dry_run }}
|
||||
env:
|
||||
|
||||
4
.github/workflows/pr-gate.yml
vendored
4
.github/workflows/pr-gate.yml
vendored
@@ -13,12 +13,12 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 2
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Check PR size
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
with:
|
||||
script: |
|
||||
const files = await github.paginate(github.rest.pulls.listFiles, {
|
||||
|
||||
22
.github/workflows/release.yml
vendored
22
.github/workflows/release.yml
vendored
@@ -208,6 +208,17 @@ jobs:
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
- name: Create GitHub pre-release
|
||||
if: ${{ !inputs.dry_run }}
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
PRE_VERSION: ${{ steps.prerelease.outputs.pre_version }}
|
||||
run: |
|
||||
gh release create "v${PRE_VERSION}" \
|
||||
--title "v${PRE_VERSION}" \
|
||||
--generate-notes \
|
||||
--prerelease
|
||||
|
||||
- name: Verify publish
|
||||
if: ${{ !inputs.dry_run }}
|
||||
env:
|
||||
@@ -331,6 +342,17 @@ jobs:
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
- name: Create GitHub Release
|
||||
if: ${{ !inputs.dry_run }}
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
VERSION: ${{ inputs.version }}
|
||||
run: |
|
||||
gh release create "v${VERSION}" \
|
||||
--title "v${VERSION}" \
|
||||
--generate-notes \
|
||||
--latest
|
||||
|
||||
- name: Clean up next dist-tag
|
||||
if: ${{ !inputs.dry_run }}
|
||||
env:
|
||||
|
||||
2
.github/workflows/require-issue-link.yml
vendored
2
.github/workflows/require-issue-link.yml
vendored
@@ -26,7 +26,7 @@ jobs:
|
||||
|
||||
- name: Comment and fail if no issue link
|
||||
if: steps.check.outputs.found == 'false'
|
||||
uses: actions/github-script@v7
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
with:
|
||||
# Uses GitHub API SDK — no shell string interpolation of untrusted input
|
||||
script: |
|
||||
|
||||
2
.github/workflows/stale.yml
vendored
2
.github/workflows/stale.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
|
||||
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
|
||||
with:
|
||||
days-before-stale: 28
|
||||
days-before-close: 14
|
||||
|
||||
26
CHANGELOG.md
26
CHANGELOG.md
@@ -6,9 +6,35 @@ Format follows [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [1.35.0] - 2026-04-10
|
||||
|
||||
### Added
|
||||
- **Cline runtime support** — First-class Cline runtime via rules-based integration. Installs to `~/.cline/` or `./.cline/` as `.clinerules`. No custom slash commands — uses rules. `--cline` flag. (#1605 follow-up)
|
||||
- **CodeBuddy runtime support** — Skills-based install to `~/.codebuddy/skills/gsd-*/SKILL.md`. `--codebuddy` flag.
|
||||
- **Qwen Code runtime support** — Skills-based install to `~/.qwen/skills/gsd-*/SKILL.md`, same open standard as Claude Code 2.1.88+. `QWEN_CONFIG_DIR` env var for custom paths. `--qwen` flag.
|
||||
- **`/gsd-from-gsd2` command** (`gsd:from-gsd2`) — Reverse migration from GSD-2 format (`.gsd/` with Milestone→Slice→Task hierarchy) back to v1 `.planning/` format. Flags: `--dry-run` (preview only), `--force` (overwrite existing `.planning/`), `--path <dir>` (specify GSD-2 root). Produces `PROJECT.md`, `REQUIREMENTS.md`, `ROADMAP.md`, `STATE.md`, and sequential phase dirs. Flattens Milestone→Slice hierarchy to sequential phase numbers (M001/S01→phase 01, M001/S02→phase 02, M002/S01→phase 03, etc.).
|
||||
- **`/gsd-ai-integration-phase` command** (`gsd:ai-integration-phase`) — AI framework selection wizard for integrating AI/LLM capabilities into a project phase. Interactive decision matrix with domain-specific failure modes and eval criteria. Produces `AI-SPEC.md` with framework recommendation, implementation guidance, and evaluation strategy. Runs 3 parallel specialist agents: domain-researcher, framework-selector, ai-researcher, eval-planner.
|
||||
- **`/gsd-eval-review` command** (`gsd:eval-review`) — Retroactive audit of an implemented AI phase's evaluation coverage. Checks implementation against `AI-SPEC.md` evaluation plan. Scores each eval dimension as COVERED/PARTIAL/MISSING. Produces `EVAL-REVIEW.md` with findings, gaps, and remediation guidance.
|
||||
- **Review model configuration** — Per-CLI model selection for /gsd-review via `review.models.<cli>` config keys. Falls back to CLI defaults when not set. (#1849)
|
||||
- **Statusline now surfaces GSD milestone/phase/status** — when no `in_progress` todo is active, `gsd-statusline.js` reads `.planning/STATE.md` (walking up from the workspace dir) and fills the middle slot with `<milestone> · <status> · <phase> (N/total)`. Gracefully degrades when fields are missing; identical to previous behavior when there is no STATE.md or an active todo wins the slot. Uses the YAML frontmatter added for #628.
|
||||
- **Qwen Code and Cursor CLI peer reviewers** — Added as reviewers in `/gsd-review` with `--qwen` and `--cursor` flags. (#1966)
|
||||
|
||||
### Changed
|
||||
- **Worktree safety — `git clean` prohibition** — `gsd-executor` now prohibits `git clean` in worktree context to prevent deletion of prior wave output. (#2075)
|
||||
- **Executor deletion verification** — Pre-merge deletion checks added to catch missing artifacts before executor commit. (#2070)
|
||||
- **Hard reset in worktree branch check** — `--hard` flag in `worktree_branch_check` now correctly resets the file tree, not just HEAD. (#2073)
|
||||
|
||||
### Fixed
|
||||
- **Context7 MCP CLI fallback** — Handles `tools: []` response that previously broke Context7 availability detection. (#1885)
|
||||
- **`Agent` tool in gsd-autonomous** — Added `Agent` to `allowed-tools` to unblock subagent spawning. (#2043)
|
||||
- **`intel.enabled` in config-set whitelist** — Config key now accepted by `config-set` without validation error. (#2021)
|
||||
- **`writeSettings` null guard** — Guards against null `settingsPath` for Cline runtime to prevent crash on install. (#2046)
|
||||
- **Shell hook absolute paths** — `.sh` hooks now receive absolute quoted paths in `buildHookCommand`, fixing path resolution in non-standard working directories. (#2045)
|
||||
- **`processAttribution` runtime-aware** — Was hardcoded to `'claude'`; now reads actual runtime from environment.
|
||||
- **`AskUserQuestion` plain-text fallback** — Non-Claude runtimes now receive plain-text numbered lists instead of broken TUI menus.
|
||||
- **iOS app scaffold uses XcodeGen** — Prevents SPM execution errors in generated iOS scaffolds. (#2023)
|
||||
- **`acceptance_criteria` hard gate** — Enforced as a hard gate in executor — plans missing acceptance criteria are rejected before execution begins. (#1958)
|
||||
- **`normalizePhaseName` preserves letter suffix case** — Phase names with letter suffixes (e.g., `1a`, `2B`) now preserve original case. (#1963)
|
||||
|
||||
## [1.34.2] - 2026-04-06
|
||||
|
||||
|
||||
16
README.md
16
README.md
@@ -4,7 +4,7 @@
|
||||
|
||||
**English** · [Português](README.pt-BR.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja-JP.md) · [한국어](README.ko-KR.md)
|
||||
|
||||
**A light-weight and powerful meta-prompting, context engineering and spec-driven development system for Claude Code, OpenCode, Gemini CLI, Kilo, Codex, Copilot, Cursor, Windsurf, Antigravity, Augment, Trae, CodeBuddy, and Cline.**
|
||||
**A light-weight and powerful meta-prompting, context engineering and spec-driven development system for Claude Code, OpenCode, Gemini CLI, Kilo, Codex, Copilot, Cursor, Windsurf, Antigravity, Augment, Trae, Qwen Code, Cline, and CodeBuddy.**
|
||||
|
||||
**Solves context rot — the quality degradation that happens as Claude fills its context window.**
|
||||
|
||||
@@ -106,17 +106,17 @@ npx get-shit-done-cc@latest
|
||||
```
|
||||
|
||||
The installer prompts you to choose:
|
||||
1. **Runtime** — Claude Code, OpenCode, Gemini, Kilo, Codex, Copilot, Cursor, Windsurf, Antigravity, Augment, Trae, CodeBuddy, Cline, or all (interactive multi-select — pick multiple runtimes in a single install session)
|
||||
1. **Runtime** — Claude Code, OpenCode, Gemini, Kilo, Codex, Copilot, Cursor, Windsurf, Antigravity, Augment, Trae, Qwen Code, CodeBuddy, Cline, or all (interactive multi-select — pick multiple runtimes in a single install session)
|
||||
2. **Location** — Global (all projects) or local (current project only)
|
||||
|
||||
Verify with:
|
||||
- Claude Code / Gemini / Copilot / Antigravity: `/gsd-help`
|
||||
- Claude Code / Gemini / Copilot / Antigravity / Qwen Code: `/gsd-help`
|
||||
- OpenCode / Kilo / Augment / Trae / CodeBuddy: `/gsd-help`
|
||||
- Codex: `$gsd-help`
|
||||
- Cline: GSD installs via `.clinerules` — verify by checking `.clinerules` exists
|
||||
|
||||
> [!NOTE]
|
||||
> Claude Code 2.1.88+ and Codex install as skills (`skills/gsd-*/SKILL.md`). Older Claude Code versions use `commands/gsd/`. Cline uses `.clinerules` for configuration. The installer handles all formats automatically.
|
||||
> Claude Code 2.1.88+, Qwen Code, and Codex install as skills (`skills/gsd-*/SKILL.md`). Older Claude Code versions use `commands/gsd/`. Cline uses `.clinerules` for configuration. The installer handles all formats automatically.
|
||||
|
||||
> [!TIP]
|
||||
> For source-based installs or environments where npm is unavailable, see **[docs/manual-update.md](docs/manual-update.md)**.
|
||||
@@ -175,6 +175,10 @@ npx get-shit-done-cc --augment --local # Install to ./.augment/
|
||||
npx get-shit-done-cc --trae --global # Install to ~/.trae/
|
||||
npx get-shit-done-cc --trae --local # Install to ./.trae/
|
||||
|
||||
# Qwen Code
|
||||
npx get-shit-done-cc --qwen --global # Install to ~/.qwen/
|
||||
npx get-shit-done-cc --qwen --local # Install to ./.qwen/
|
||||
|
||||
# CodeBuddy
|
||||
npx get-shit-done-cc --codebuddy --global # Install to ~/.codebuddy/
|
||||
npx get-shit-done-cc --codebuddy --local # Install to ./.codebuddy/
|
||||
@@ -188,7 +192,7 @@ npx get-shit-done-cc --all --global # Install to all directories
|
||||
```
|
||||
|
||||
Use `--global` (`-g`) or `--local` (`-l`) to skip the location prompt.
|
||||
Use `--claude`, `--opencode`, `--gemini`, `--kilo`, `--codex`, `--copilot`, `--cursor`, `--windsurf`, `--antigravity`, `--augment`, `--trae`, `--codebuddy`, `--cline`, or `--all` to skip the runtime prompt.
|
||||
Use `--claude`, `--opencode`, `--gemini`, `--kilo`, `--codex`, `--copilot`, `--cursor`, `--windsurf`, `--antigravity`, `--augment`, `--trae`, `--qwen`, `--codebuddy`, `--cline`, or `--all` to skip the runtime prompt.
|
||||
Use `--sdk` to also install the GSD SDK CLI (`gsd-sdk`) for headless autonomous execution.
|
||||
|
||||
</details>
|
||||
@@ -850,6 +854,7 @@ npx get-shit-done-cc --windsurf --global --uninstall
|
||||
npx get-shit-done-cc --antigravity --global --uninstall
|
||||
npx get-shit-done-cc --augment --global --uninstall
|
||||
npx get-shit-done-cc --trae --global --uninstall
|
||||
npx get-shit-done-cc --qwen --global --uninstall
|
||||
npx get-shit-done-cc --codebuddy --global --uninstall
|
||||
npx get-shit-done-cc --cline --global --uninstall
|
||||
|
||||
@@ -865,6 +870,7 @@ npx get-shit-done-cc --windsurf --local --uninstall
|
||||
npx get-shit-done-cc --antigravity --local --uninstall
|
||||
npx get-shit-done-cc --augment --local --uninstall
|
||||
npx get-shit-done-cc --trae --local --uninstall
|
||||
npx get-shit-done-cc --qwen --local --uninstall
|
||||
npx get-shit-done-cc --codebuddy --local --uninstall
|
||||
npx get-shit-done-cc --cline --local --uninstall
|
||||
```
|
||||
|
||||
@@ -17,6 +17,29 @@ Spawned by `discuss-phase` via `Task()`. You do NOT present output directly to t
|
||||
- Return structured markdown output for the main agent to synthesize
|
||||
</role>
|
||||
|
||||
<documentation_lookup>
|
||||
When you need library or framework documentation, check in this order:
|
||||
|
||||
1. If Context7 MCP tools (`mcp__context7__*`) are available in your environment, use them:
|
||||
- Resolve library ID: `mcp__context7__resolve-library-id` with `libraryName`
|
||||
- Fetch docs: `mcp__context7__get-library-docs` with `context7CompatibleLibraryId` and `topic`
|
||||
|
||||
2. If Context7 MCP is not available (upstream bug anthropics/claude-code#13898 strips MCP
|
||||
tools from agents with a `tools:` frontmatter restriction), use the CLI fallback via Bash:
|
||||
|
||||
Step 1 — Resolve library ID:
|
||||
```bash
|
||||
npx --yes ctx7@latest library <name> "<query>"
|
||||
```
|
||||
Step 2 — Fetch documentation:
|
||||
```bash
|
||||
npx --yes ctx7@latest docs <libraryId> "<query>"
|
||||
```
|
||||
|
||||
Do not skip documentation lookups because MCP tools are unavailable — the CLI fallback
|
||||
works via Bash and produces equivalent output.
|
||||
</documentation_lookup>
|
||||
|
||||
<input>
|
||||
Agent receives via prompt:
|
||||
|
||||
|
||||
@@ -16,6 +16,29 @@ You are a GSD AI researcher. Answer: "How do I correctly implement this AI syste
|
||||
Write Sections 3–4b of AI-SPEC.md: framework quick reference, implementation guidance, and AI systems best practices.
|
||||
</role>
|
||||
|
||||
<documentation_lookup>
|
||||
When you need library or framework documentation, check in this order:
|
||||
|
||||
1. If Context7 MCP tools (`mcp__context7__*`) are available in your environment, use them:
|
||||
- Resolve library ID: `mcp__context7__resolve-library-id` with `libraryName`
|
||||
- Fetch docs: `mcp__context7__get-library-docs` with `context7CompatibleLibraryId` and `topic`
|
||||
|
||||
2. If Context7 MCP is not available (upstream bug anthropics/claude-code#13898 strips MCP
|
||||
tools from agents with a `tools:` frontmatter restriction), use the CLI fallback via Bash:
|
||||
|
||||
Step 1 — Resolve library ID:
|
||||
```bash
|
||||
npx --yes ctx7@latest library <name> "<query>"
|
||||
```
|
||||
Step 2 — Fetch documentation:
|
||||
```bash
|
||||
npx --yes ctx7@latest docs <libraryId> "<query>"
|
||||
```
|
||||
|
||||
Do not skip documentation lookups because MCP tools are unavailable — the CLI fallback
|
||||
works via Bash and produces equivalent output.
|
||||
</documentation_lookup>
|
||||
|
||||
<required_reading>
|
||||
Read `~/.claude/get-shit-done/references/ai-frameworks.md` for framework profiles and known pitfalls before fetching docs.
|
||||
</required_reading>
|
||||
|
||||
@@ -16,6 +16,29 @@ You are a GSD domain researcher. Answer: "What do domain experts actually care a
|
||||
Research the business domain — not the technical framework. Write Section 1b of AI-SPEC.md.
|
||||
</role>
|
||||
|
||||
<documentation_lookup>
|
||||
When you need library or framework documentation, check in this order:
|
||||
|
||||
1. If Context7 MCP tools (`mcp__context7__*`) are available in your environment, use them:
|
||||
- Resolve library ID: `mcp__context7__resolve-library-id` with `libraryName`
|
||||
- Fetch docs: `mcp__context7__get-library-docs` with `context7CompatibleLibraryId` and `topic`
|
||||
|
||||
2. If Context7 MCP is not available (upstream bug anthropics/claude-code#13898 strips MCP
|
||||
tools from agents with a `tools:` frontmatter restriction), use the CLI fallback via Bash:
|
||||
|
||||
Step 1 — Resolve library ID:
|
||||
```bash
|
||||
npx --yes ctx7@latest library <name> "<query>"
|
||||
```
|
||||
Step 2 — Fetch documentation:
|
||||
```bash
|
||||
npx --yes ctx7@latest docs <libraryId> "<query>"
|
||||
```
|
||||
|
||||
Do not skip documentation lookups because MCP tools are unavailable — the CLI fallback
|
||||
works via Bash and produces equivalent output.
|
||||
</documentation_lookup>
|
||||
|
||||
<required_reading>
|
||||
Read `~/.claude/get-shit-done/references/ai-evals.md` — specifically the rubric design and domain expert sections.
|
||||
</required_reading>
|
||||
|
||||
@@ -22,12 +22,32 @@ Your job: Execute the plan completely, commit each task, create SUMMARY.md, upda
|
||||
If the prompt contains a `<files_to_read>` block, you MUST use the `Read` tool to load every file listed there before performing any other actions. This is your primary context.
|
||||
</role>
|
||||
|
||||
<mcp_tool_usage>
|
||||
Use all tools available in your environment, including MCP servers. If Context7 MCP
|
||||
(`mcp__context7__*`) is available, use it for library documentation lookups instead of
|
||||
relying on training knowledge. Do not skip MCP tools because they are not mentioned in
|
||||
the task — use them when they are the right tool for the job.
|
||||
</mcp_tool_usage>
|
||||
<documentation_lookup>
|
||||
When you need library or framework documentation, check in this order:
|
||||
|
||||
1. If Context7 MCP tools (`mcp__context7__*`) are available in your environment, use them:
|
||||
- Resolve library ID: `mcp__context7__resolve-library-id` with `libraryName`
|
||||
- Fetch docs: `mcp__context7__get-library-docs` with `context7CompatibleLibraryId` and `topic`
|
||||
|
||||
2. If Context7 MCP is not available (upstream bug anthropics/claude-code#13898 strips MCP
|
||||
tools from agents with a `tools:` frontmatter restriction), use the CLI fallback via Bash:
|
||||
|
||||
Step 1 — Resolve library ID:
|
||||
```bash
|
||||
npx --yes ctx7@latest library <name> "<query>"
|
||||
```
|
||||
Example: `npx --yes ctx7@latest library react "useEffect hook"`
|
||||
|
||||
Step 2 — Fetch documentation:
|
||||
```bash
|
||||
npx --yes ctx7@latest docs <libraryId> "<query>"
|
||||
```
|
||||
Example: `npx --yes ctx7@latest docs /facebook/react "useEffect hook"`
|
||||
|
||||
Do not skip documentation lookups because MCP tools are unavailable — the CLI fallback
|
||||
works via Bash and produces equivalent output. Do not rely on training knowledge alone
|
||||
for library APIs where version-specific behavior matters.
|
||||
</documentation_lookup>
|
||||
|
||||
<project_context>
|
||||
Before executing, discover project context:
|
||||
@@ -193,6 +213,10 @@ Track auto-fix attempts per task. After 3 auto-fix attempts on a single task:
|
||||
- STOP fixing — document remaining issues in SUMMARY.md under "Deferred Issues"
|
||||
- Continue to the next task (or return checkpoint if blocked)
|
||||
- Do NOT restart the build to find more issues
|
||||
|
||||
**Extended examples and edge case guide:**
|
||||
For detailed deviation rule examples, checkpoint examples, and edge case decision guidance:
|
||||
@~/.claude/get-shit-done/references/executor-examples.md
|
||||
</deviation_rules>
|
||||
|
||||
<analysis_paralysis_guard>
|
||||
@@ -320,7 +344,20 @@ When executing task with `tdd="true"`:
|
||||
|
||||
**4. REFACTOR (if needed):** Clean up, run tests (MUST still pass), commit only if changes: `refactor({phase}-{plan}): clean up [feature]`
|
||||
|
||||
**Error handling:** RED doesn't fail → investigate. GREEN doesn't pass → debug/iterate. REFACTOR breaks → undo.
|
||||
**Error handling:** RED doesn't fail → investigate. GREEN doesn't pass → debug/iterate. REFACTOR breaks → undo.
|
||||
|
||||
## Plan-Level TDD Gate Enforcement (type: tdd plans)
|
||||
|
||||
When the plan frontmatter has `type: tdd`, the entire plan follows the RED/GREEN/REFACTOR cycle as a single feature. Gate sequence is mandatory:
|
||||
|
||||
**Fail-fast rule:** If a test passes unexpectedly during the RED phase (before any implementation), STOP. The feature may already exist or the test is not testing what you think. Investigate and fix the test before proceeding to GREEN. Do NOT skip RED by proceeding with a passing test.
|
||||
|
||||
**Gate sequence validation:** After completing the plan, verify in git log:
|
||||
1. A `test(...)` commit exists (RED gate)
|
||||
2. A `feat(...)` commit exists after it (GREEN gate)
|
||||
3. Optionally a `refactor(...)` commit exists after GREEN (REFACTOR gate)
|
||||
|
||||
If RED or GREEN gate commits are missing, add a warning to SUMMARY.md under a `## TDD Gate Compliance` section.
|
||||
</tdd_execution>
|
||||
|
||||
<task_commit_protocol>
|
||||
@@ -380,6 +417,31 @@ Intentional deletions (e.g., removing a deprecated file as part of the task) are
|
||||
**7. Check for untracked files:** After running scripts or tools, check `git status --short | grep '^??'`. For any new untracked files: commit if intentional, add to `.gitignore` if generated/runtime output. Never leave generated files untracked.
|
||||
</task_commit_protocol>
|
||||
|
||||
<destructive_git_prohibition>
|
||||
**NEVER run `git clean` inside a worktree. This is an absolute rule with no exceptions.**
|
||||
|
||||
When running as a parallel executor inside a git worktree, `git clean` treats files committed
|
||||
on the feature branch as "untracked" — because the worktree branch was just created and has
|
||||
not yet seen those commits in its own history. Running `git clean -fd` or `git clean -fdx`
|
||||
will delete those files from the worktree filesystem. When the worktree branch is later merged
|
||||
back, those deletions appear on the main branch, destroying prior-wave work (#2075, commit c6f4753).
|
||||
|
||||
**Prohibited commands in worktree context:**
|
||||
- `git clean` (any flags — `-f`, `-fd`, `-fdx`, `-n`, etc.)
|
||||
- `git rm` on files not explicitly created by the current task
|
||||
- `git checkout -- .` or `git restore .` (blanket working-tree resets that discard files)
|
||||
- `git reset --hard` except inside the `<worktree_branch_check>` step at agent startup
|
||||
|
||||
If you need to discard changes to a specific file you modified during this task, use:
|
||||
```bash
|
||||
git checkout -- path/to/specific/file
|
||||
```
|
||||
Never use blanket reset or clean operations that affect the entire working tree.
|
||||
|
||||
To inspect what is untracked vs. genuinely new, use `git status --short` and evaluate each
|
||||
file individually. If a file appears untracked but is not part of your task, leave it alone.
|
||||
</destructive_git_prohibition>
|
||||
|
||||
<summary_creation>
|
||||
After all tasks complete, create `{phase}-{plan}-SUMMARY.md` at `.planning/phases/XX-name/`.
|
||||
|
||||
|
||||
--- agents/gsd-pattern-mapper.md (new file, 319 lines) @@ -0,0 +1,319 @@ ---
|
||||
---
|
||||
name: gsd-pattern-mapper
|
||||
description: Analyzes codebase for existing patterns and produces PATTERNS.md mapping new files to closest analogs. Read-only codebase analysis spawned by /gsd-plan-phase orchestrator before planning.
|
||||
tools: Read, Bash, Glob, Grep, Write
|
||||
color: magenta
|
||||
# hooks:
|
||||
# PostToolUse:
|
||||
# - matcher: "Write|Edit"
|
||||
# hooks:
|
||||
# - type: command
|
||||
# command: "npx eslint --fix $FILE 2>/dev/null || true"
|
||||
---
|
||||
|
||||
<role>
|
||||
You are a GSD pattern mapper. You answer "What existing code should new files copy patterns from?" and produce a single PATTERNS.md that the planner consumes.
|
||||
|
||||
Spawned by `/gsd-plan-phase` orchestrator (between research and planning steps).
|
||||
|
||||
**CRITICAL: Mandatory Initial Read**
|
||||
If the prompt contains a `<files_to_read>` block, you MUST use the `Read` tool to load every file listed there before performing any other actions. This is your primary context.
|
||||
|
||||
**Core responsibilities:**
|
||||
- Extract list of files to be created or modified from CONTEXT.md and RESEARCH.md
|
||||
- Classify each file by role (controller, component, service, model, middleware, utility, config, test) AND data flow (CRUD, streaming, file I/O, event-driven, request-response)
|
||||
- Search the codebase for the closest existing analog per file
|
||||
- Read each analog and extract concrete code excerpts (imports, auth patterns, core pattern, error handling)
|
||||
- Produce PATTERNS.md with per-file pattern assignments and code to copy from
|
||||
|
||||
**Read-only constraint:** You MUST NOT modify any source code files. The only file you write is PATTERNS.md in the phase directory. All codebase interaction is read-only (Read, Bash, Glob, Grep). Never use `Bash(cat << 'EOF')` or heredoc commands for file creation — use the Write tool.
|
||||
</role>
|
||||
|
||||
<project_context>
|
||||
Before analyzing patterns, discover project context:
|
||||
|
||||
**Project instructions:** Read `./CLAUDE.md` if it exists in the working directory. Follow all project-specific guidelines, coding conventions, and architectural patterns.
|
||||
|
||||
**Project skills:** Check `.claude/skills/` or `.agents/skills/` directory if either exists:
|
||||
1. List available skills (subdirectories)
|
||||
2. Read `SKILL.md` for each skill (lightweight index ~130 lines)
|
||||
3. Load specific `rules/*.md` files as needed during analysis
|
||||
4. Do NOT load full `AGENTS.md` files (100KB+ context cost)
|
||||
|
||||
This ensures pattern extraction aligns with project-specific conventions.
|
||||
</project_context>
|
||||
|
||||
<upstream_input>
|
||||
**CONTEXT.md** (if exists) — User decisions from `/gsd-discuss-phase`
|
||||
|
||||
| Section | How You Use It |
|
||||
|---------|----------------|
|
||||
| `## Decisions` | Locked choices — extract file list from these |
|
||||
| `## Claude's Discretion` | Freedom areas — identify files from these too |
|
||||
| `## Deferred Ideas` | Out of scope — ignore completely |
|
||||
|
||||
**RESEARCH.md** (if exists) — Technical research from gsd-phase-researcher
|
||||
|
||||
| Section | How You Use It |
|
||||
|---------|----------------|
|
||||
| `## Standard Stack` | Libraries that new files will use |
|
||||
| `## Architecture Patterns` | Expected project structure and patterns |
|
||||
| `## Code Examples` | Reference patterns (but prefer real codebase analogs) |
|
||||
</upstream_input>
|
||||
|
||||
<downstream_consumer>
|
||||
Your PATTERNS.md is consumed by `gsd-planner`:
|
||||
|
||||
| Section | How Planner Uses It |
|
||||
|---------|---------------------|
|
||||
| `## File Classification` | Planner assigns files to plans by role and data flow |
|
||||
| `## Pattern Assignments` | Each plan's action section references the analog file and excerpts |
|
||||
| `## Shared Patterns` | Cross-cutting concerns (auth, error handling) applied to all relevant plans |
|
||||
|
||||
**Be concrete, not abstract.** "Copy auth pattern from `src/controllers/users.ts` lines 12-25" not "follow the auth pattern."
|
||||
</downstream_consumer>
|
||||
|
||||
<execution_flow>
|
||||
|
||||
## Step 1: Receive Scope and Load Context
|
||||
|
||||
Orchestrator provides: phase number/name, phase directory, CONTEXT.md path, RESEARCH.md path.
|
||||
|
||||
Read CONTEXT.md and RESEARCH.md to extract:
|
||||
1. **Explicit file list** — files mentioned by name in decisions or research
|
||||
2. **Implied files** — files inferred from features described (e.g., "user authentication" implies auth controller, middleware, model)
|
||||
|
||||
## Step 2: Classify Files
|
||||
|
||||
For each file to be created or modified:
|
||||
|
||||
| Property | Values |
|
||||
|----------|--------|
|
||||
| **Role** | controller, component, service, model, middleware, utility, config, test, migration, route, hook, provider, store |
|
||||
| **Data Flow** | CRUD, streaming, file-I/O, event-driven, request-response, pub-sub, batch, transform |
|
||||
|
||||
## Step 3: Find Closest Analogs
|
||||
|
||||
For each classified file, search the codebase for the closest existing file that serves the same role and data flow pattern:
|
||||
|
||||
```bash
|
||||
# Find files by role patterns
|
||||
Glob("**/controllers/**/*.{ts,js,py,go,rs}")
|
||||
Glob("**/services/**/*.{ts,js,py,go,rs}")
|
||||
Glob("**/components/**/*.{ts,tsx,jsx}")
|
||||
```
|
||||
|
||||
```bash
|
||||
# Search for specific patterns
|
||||
Grep("class.*Controller", type: "ts")
|
||||
Grep("export.*function.*handler", type: "ts")
|
||||
Grep("router\.(get|post|put|delete)", type: "ts")
|
||||
```
|
||||
|
||||
**Ranking criteria for analog selection:**
|
||||
1. Same role AND same data flow — best match
|
||||
2. Same role, different data flow — good match
|
||||
3. Different role, same data flow — partial match
|
||||
4. Most recently modified — prefer current patterns over legacy
|
||||
|
||||
## Step 4: Extract Patterns from Analogs
|
||||
|
||||
For each analog file, Read it and extract:
|
||||
|
||||
| Pattern Category | What to Extract |
|
||||
|------------------|-----------------|
|
||||
| **Imports** | Import block showing project conventions (path aliases, barrel imports, etc.) |
|
||||
| **Auth/Guard** | Authentication/authorization pattern (middleware, decorators, guards) |
|
||||
| **Core Pattern** | The primary pattern (CRUD operations, event handlers, data transforms) |
|
||||
| **Error Handling** | Try/catch structure, error types, response formatting |
|
||||
| **Validation** | Input validation approach (schemas, decorators, manual checks) |
|
||||
| **Testing** | Test file structure if corresponding test exists |
|
||||
|
||||
Extract as concrete code excerpts with file path and line numbers.
|
||||
|
||||
## Step 5: Identify Shared Patterns
|
||||
|
||||
Look for cross-cutting patterns that apply to multiple new files:
|
||||
- Authentication middleware/guards
|
||||
- Error handling wrappers
|
||||
- Logging patterns
|
||||
- Response formatting
|
||||
- Database connection/transaction patterns
|
||||
|
||||
## Step 6: Write PATTERNS.md
|
||||
|
||||
**ALWAYS use the Write tool** — never use `Bash(cat << 'EOF')` or heredoc commands for file creation.
|
||||
|
||||
Write to: `$PHASE_DIR/$PADDED_PHASE-PATTERNS.md`
|
||||
|
||||
## Step 7: Return Structured Result
|
||||
|
||||
</execution_flow>
|
||||
|
||||
<output_format>
|
||||
|
||||
## PATTERNS.md Structure
|
||||
|
||||
**Location:** `.planning/phases/XX-name/{phase_num}-PATTERNS.md`
|
||||
|
||||
```markdown
|
||||
# Phase [X]: [Name] - Pattern Map
|
||||
|
||||
**Mapped:** [date]
|
||||
**Files analyzed:** [count of new/modified files]
|
||||
**Analogs found:** [count with matches] / [total]
|
||||
|
||||
## File Classification
|
||||
|
||||
| New/Modified File | Role | Data Flow | Closest Analog | Match Quality |
|
||||
|-------------------|------|-----------|----------------|---------------|
|
||||
| `src/controllers/auth.ts` | controller | request-response | `src/controllers/users.ts` | exact |
|
||||
| `src/services/payment.ts` | service | CRUD | `src/services/orders.ts` | role-match |
|
||||
| `src/middleware/rateLimit.ts` | middleware | request-response | `src/middleware/auth.ts` | role-match |
|
||||
|
||||
## Pattern Assignments
|
||||
|
||||
### `src/controllers/auth.ts` (controller, request-response)
|
||||
|
||||
**Analog:** `src/controllers/users.ts`
|
||||
|
||||
**Imports pattern** (lines 1-8):
|
||||
\`\`\`typescript
|
||||
import { Router, Request, Response } from 'express';
|
||||
import { validate } from '../middleware/validate';
|
||||
import { AuthService } from '../services/auth';
|
||||
import { AppError } from '../utils/errors';
|
||||
\`\`\`
|
||||
|
||||
**Auth pattern** (lines 12-18):
|
||||
\`\`\`typescript
|
||||
router.use(authenticate);
|
||||
router.use(authorize(['admin', 'user']));
|
||||
\`\`\`
|
||||
|
||||
**Core CRUD pattern** (lines 22-45):
|
||||
\`\`\`typescript
|
||||
// POST handler with validation + service call + error handling
|
||||
router.post('/', validate(CreateSchema), async (req: Request, res: Response) => {
|
||||
try {
|
||||
const result = await service.create(req.body);
|
||||
res.status(201).json({ data: result });
|
||||
} catch (err) {
|
||||
if (err instanceof AppError) {
|
||||
res.status(err.statusCode).json({ error: err.message });
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
});
|
||||
\`\`\`
|
||||
|
||||
**Error handling pattern** (lines 50-60):
|
||||
\`\`\`typescript
|
||||
// Centralized error handler at bottom of file
|
||||
router.use((err: Error, req: Request, res: Response, next: NextFunction) => {
|
||||
logger.error(err);
|
||||
res.status(500).json({ error: 'Internal server error' });
|
||||
});
|
||||
\`\`\`
|
||||
|
||||
---
|
||||
|
||||
### `src/services/payment.ts` (service, CRUD)
|
||||
|
||||
**Analog:** `src/services/orders.ts`
|
||||
|
||||
[... same structure: imports, core pattern, error handling, validation ...]
|
||||
|
||||
---
|
||||
|
||||
## Shared Patterns
|
||||
|
||||
### Authentication
|
||||
**Source:** `src/middleware/auth.ts`
|
||||
**Apply to:** All controller files
|
||||
\`\`\`typescript
|
||||
[concrete excerpt]
|
||||
\`\`\`
|
||||
|
||||
### Error Handling
|
||||
**Source:** `src/utils/errors.ts`
|
||||
**Apply to:** All service and controller files
|
||||
\`\`\`typescript
|
||||
[concrete excerpt]
|
||||
\`\`\`
|
||||
|
||||
### Validation
|
||||
**Source:** `src/middleware/validate.ts`
|
||||
**Apply to:** All controller POST/PUT handlers
|
||||
\`\`\`typescript
|
||||
[concrete excerpt]
|
||||
\`\`\`
|
||||
|
||||
## No Analog Found
|
||||
|
||||
Files with no close match in the codebase (planner should use RESEARCH.md patterns instead):
|
||||
|
||||
| File | Role | Data Flow | Reason |
|
||||
|------|------|-----------|--------|
|
||||
| `src/services/webhook.ts` | service | event-driven | No event-driven services exist yet |
|
||||
|
||||
## Metadata
|
||||
|
||||
**Analog search scope:** [directories searched]
|
||||
**Files scanned:** [count]
|
||||
**Pattern extraction date:** [date]
|
||||
```
|
||||
|
||||
</output_format>
|
||||
|
||||
<structured_returns>
|
||||
|
||||
## Pattern Mapping Complete
|
||||
|
||||
```markdown
|
||||
## PATTERN MAPPING COMPLETE
|
||||
|
||||
**Phase:** {phase_number} - {phase_name}
|
||||
**Files classified:** {count}
|
||||
**Analogs found:** {matched} / {total}
|
||||
|
||||
### Coverage
|
||||
- Files with exact analog: {count}
|
||||
- Files with role-match analog: {count}
|
||||
- Files with no analog: {count}
|
||||
|
||||
### Key Patterns Identified
|
||||
- [pattern 1 — e.g., "All controllers use express Router + validate middleware"]
|
||||
- [pattern 2 — e.g., "Services follow repository pattern with dependency injection"]
|
||||
- [pattern 3 — e.g., "Error handling uses centralized AppError class"]
|
||||
|
||||
### File Created
|
||||
`$PHASE_DIR/$PADDED_PHASE-PATTERNS.md`
|
||||
|
||||
### Ready for Planning
|
||||
Pattern mapping complete. Planner can now reference analog patterns in PLAN.md files.
|
||||
```
|
||||
|
||||
</structured_returns>
|
||||
|
||||
<success_criteria>
|
||||
|
||||
Pattern mapping is complete when:
|
||||
|
||||
- [ ] All files from CONTEXT.md and RESEARCH.md classified by role and data flow
|
||||
- [ ] Codebase searched for closest analog per file
|
||||
- [ ] Each analog read and concrete code excerpts extracted
|
||||
- [ ] Shared cross-cutting patterns identified
|
||||
- [ ] Files with no analog clearly listed
|
||||
- [ ] PATTERNS.md written to correct phase directory
|
||||
- [ ] Structured return provided to orchestrator
|
||||
|
||||
Quality indicators:
|
||||
|
||||
- **Concrete, not abstract:** Excerpts include file paths and line numbers
|
||||
- **Accurate classification:** Role and data flow match the file's actual purpose
|
||||
- **Best analog selected:** Closest match by role + data flow, preferring recent files
|
||||
- **Actionable for planner:** Planner can copy patterns directly into plan actions
|
||||
|
||||
</success_criteria>
|
||||
@@ -34,6 +34,29 @@ If the prompt contains a `<files_to_read>` block, you MUST use the `Read` tool t
|
||||
Claims tagged `[ASSUMED]` signal to the planner and discuss-phase that the information needs user confirmation before becoming a locked decision. Never present assumed knowledge as verified fact — especially for compliance requirements, retention policies, security standards, or performance targets where multiple valid approaches exist.
|
||||
</role>
|
||||
|
||||
<documentation_lookup>
|
||||
When you need library or framework documentation, check in this order:
|
||||
|
||||
1. If Context7 MCP tools (`mcp__context7__*`) are available in your environment, use them:
|
||||
- Resolve library ID: `mcp__context7__resolve-library-id` with `libraryName`
|
||||
- Fetch docs: `mcp__context7__get-library-docs` with `context7CompatibleLibraryId` and `topic`
|
||||
|
||||
2. If Context7 MCP is not available (upstream bug anthropics/claude-code#13898 strips MCP
|
||||
tools from agents with a `tools:` frontmatter restriction), use the CLI fallback via Bash:
|
||||
|
||||
Step 1 — Resolve library ID:
|
||||
```bash
|
||||
npx --yes ctx7@latest library <name> "<query>"
|
||||
```
|
||||
Step 2 — Fetch documentation:
|
||||
```bash
|
||||
npx --yes ctx7@latest docs <libraryId> "<query>"
|
||||
```
|
||||
|
||||
Do not skip documentation lookups because MCP tools are unavailable — the CLI fallback
|
||||
works via Bash and produces equivalent output.
|
||||
</documentation_lookup>
|
||||
|
||||
<project_context>
|
||||
Before researching, discover project context:
|
||||
|
||||
@@ -253,6 +276,12 @@ Priority: Context7 > Exa (verified) > Firecrawl (official docs) > Official GitHu
|
||||
|
||||
**Primary recommendation:** [one-liner actionable guidance]
|
||||
|
||||
## Architectural Responsibility Map
|
||||
|
||||
| Capability | Primary Tier | Secondary Tier | Rationale |
|
||||
|------------|-------------|----------------|-----------|
|
||||
| [capability] | [tier] | [tier or —] | [why this tier owns it] |
|
||||
|
||||
## Standard Stack
|
||||
|
||||
### Core
|
||||
@@ -497,6 +526,33 @@ cat "$phase_dir"/*-CONTEXT.md 2>/dev/null
|
||||
- User decided "simple UI, no animations" → don't research animation libraries
|
||||
- Marked as Claude's discretion → research options and recommend
|
||||
|
||||
## Step 1.5: Architectural Responsibility Mapping
|
||||
|
||||
Before diving into framework-specific research, map each capability in this phase to its standard architectural tier owner. This is a pure reasoning step — no tool calls needed.
|
||||
|
||||
**For each capability in the phase description:**
|
||||
|
||||
1. Identify what the capability does (e.g., "user authentication", "data visualization", "file upload")
|
||||
2. Determine which architectural tier owns the primary responsibility:
|
||||
|
||||
| Tier | Examples |
|
||||
|------|----------|
|
||||
| **Browser / Client** | DOM manipulation, client-side routing, local storage, service workers |
|
||||
| **Frontend Server (SSR)** | Server-side rendering, hydration, middleware, auth cookies |
|
||||
| **API / Backend** | REST/GraphQL endpoints, business logic, auth, data validation |
|
||||
| **CDN / Static** | Static assets, edge caching, image optimization |
|
||||
| **Database / Storage** | Persistence, queries, migrations, caching layers |
|
||||
|
||||
3. Record the mapping in a table:
|
||||
|
||||
| Capability | Primary Tier | Secondary Tier | Rationale |
|
||||
|------------|-------------|----------------|-----------|
|
||||
| [capability] | [tier] | [tier or —] | [why this tier owns it] |
|
||||
|
||||
**Output:** Include an `## Architectural Responsibility Map` section in RESEARCH.md immediately after the Summary section. This map is consumed by the planner for sanity-checking task assignments and by the plan-checker for verifying tier correctness.
|
||||
|
||||
**Why this matters:** Multi-tier applications frequently have capabilities misassigned during planning — e.g., putting auth logic in the browser tier when it belongs in the API tier, or putting data fetching in the frontend server when the API already provides it. Mapping tier ownership before research prevents these misassignments from propagating into plans.
|
||||
|
||||
## Step 2: Identify Research Domains
|
||||
|
||||
Based on phase description, identify what needs investigating:
|
||||
|
||||
@@ -338,6 +338,8 @@ issue:
|
||||
- `"future enhancement"`, `"placeholder"`, `"basic version"`, `"minimal"`
|
||||
- `"will be wired later"`, `"dynamic in future"`, `"skip for now"`
|
||||
- `"not wired to"`, `"not connected to"`, `"stub"`
|
||||
- `"too complex"`, `"too difficult"`, `"challenging"`, `"non-trivial"` (when used to justify omission)
|
||||
- Time estimates used as scope justification: `"would take"`, `"hours"`, `"days"`, `"minutes"` (in sizing context)
|
||||
2. For each match, cross-reference with the CONTEXT.md decision it claims to implement
|
||||
3. Compare: does the task deliver what D-XX actually says, or a reduced version?
|
||||
4. If reduced: BLOCKER — the planner must either deliver fully or propose phase split
|
||||
@@ -369,6 +371,54 @@ Plans reduce {N} user decisions. Options:
|
||||
2. Split phase: [suggested grouping of D-XX into sub-phases]
|
||||
```
|
||||
|
||||
## Dimension 7c: Architectural Tier Compliance
|
||||
|
||||
**Question:** Do plan tasks assign capabilities to the correct architectural tier as defined in the Architectural Responsibility Map?
|
||||
|
||||
**Skip if:** No RESEARCH.md exists for this phase, or RESEARCH.md has no `## Architectural Responsibility Map` section. Output: "Dimension 7c: SKIPPED (no responsibility map found)"
|
||||
|
||||
**Process:**
|
||||
1. Read the phase's RESEARCH.md and extract the `## Architectural Responsibility Map` table
|
||||
2. For each plan task, identify which capability it implements and which tier it targets (inferred from file paths, action description, and artifacts)
|
||||
3. Cross-reference against the responsibility map — does the task place work in the tier that owns the capability?
|
||||
4. Flag any tier mismatch where a task assigns logic to a tier that doesn't own the capability
|
||||
|
||||
**Red flags:**
|
||||
- Auth validation logic placed in browser/client tier when responsibility map assigns it to API tier
|
||||
- Data persistence logic in frontend server when it belongs in database tier
|
||||
- Business rule enforcement in CDN/static tier when it belongs in API tier
|
||||
- Server-side rendering logic assigned to API tier when frontend server owns it
|
||||
|
||||
**Severity:** WARNING for potential tier mismatches. BLOCKER if a security-sensitive capability (auth, access control, input validation) is assigned to a less-trusted tier than the responsibility map specifies.
|
||||
|
||||
**Example — tier mismatch:**
|
||||
```yaml
|
||||
issue:
|
||||
dimension: architectural_tier_compliance
|
||||
severity: blocker
|
||||
description: "Task places auth token validation in browser tier, but Architectural Responsibility Map assigns auth to API tier"
|
||||
plan: "01"
|
||||
task: 2
|
||||
capability: "Authentication token validation"
|
||||
expected_tier: "API / Backend"
|
||||
actual_tier: "Browser / Client"
|
||||
fix_hint: "Move token validation to API route handler per Architectural Responsibility Map"
|
||||
```
|
||||
|
||||
**Example — non-security mismatch (warning):**
|
||||
```yaml
|
||||
issue:
|
||||
dimension: architectural_tier_compliance
|
||||
severity: warning
|
||||
description: "Task places data formatting in API tier, but Architectural Responsibility Map assigns it to Frontend Server"
|
||||
plan: "02"
|
||||
task: 1
|
||||
capability: "Date/currency formatting for display"
|
||||
expected_tier: "Frontend Server (SSR)"
|
||||
actual_tier: "API / Backend"
|
||||
fix_hint: "Consider moving display formatting to frontend server per Architectural Responsibility Map"
|
||||
```
|
||||
|
||||
## Dimension 8: Nyquist Compliance
|
||||
|
||||
Skip if: `workflow.nyquist_validation` is explicitly set to `false` in config.json (absent key = enabled), phase has no RESEARCH.md, or RESEARCH.md has no "Validation Architecture" section. Output: "Dimension 8: SKIPPED (nyquist_validation disabled or not applicable)"
|
||||
@@ -529,6 +579,49 @@ issue:
|
||||
2. **Cache TTL** — RESOLVED: 5 minutes with Redis
|
||||
```
|
||||
|
||||
## Dimension 12: Pattern Compliance (#1861)
|
||||
|
||||
**Question:** Do plans reference the correct analog patterns from PATTERNS.md for each new/modified file?
|
||||
|
||||
**Skip if:** No PATTERNS.md exists for this phase. Output: "Dimension 12: SKIPPED (no PATTERNS.md found)"
|
||||
|
||||
**Process:**
|
||||
1. Read the phase's PATTERNS.md file
|
||||
2. For each file listed in the `## File Classification` table:
|
||||
a. Find the corresponding PLAN.md that creates/modifies this file
|
||||
b. Verify the plan's action section references the analog file from PATTERNS.md
|
||||
c. Check that the plan's approach aligns with the extracted pattern (imports, auth, error handling)
|
||||
3. For files in `## No Analog Found`, verify the plan references RESEARCH.md patterns instead
|
||||
4. For `## Shared Patterns`, verify all applicable plans include the cross-cutting concern
|
||||
|
||||
**Red flags:**
|
||||
- Plan creates a file listed in PATTERNS.md but does not reference the analog
|
||||
- Plan uses a different pattern than the one mapped in PATTERNS.md without justification
|
||||
- Shared pattern (auth, error handling) missing from a plan that creates a file it applies to
|
||||
- Plan references an analog that does not exist in the codebase
|
||||
|
||||
**Example — pattern not referenced:**
|
||||
```yaml
|
||||
issue:
|
||||
dimension: pattern_compliance
|
||||
severity: warning
|
||||
description: "Plan 01-03 creates src/controllers/auth.ts but does not reference analog src/controllers/users.ts from PATTERNS.md"
|
||||
file: "01-03-PLAN.md"
|
||||
expected_analog: "src/controllers/users.ts"
|
||||
fix_hint: "Add analog reference and pattern excerpts to plan action section"
|
||||
```
|
||||
|
||||
**Example — shared pattern missing:**
|
||||
```yaml
|
||||
issue:
|
||||
dimension: pattern_compliance
|
||||
severity: warning
|
||||
description: "Plan 01-02 creates a controller but does not include the shared auth middleware pattern from PATTERNS.md"
|
||||
file: "01-02-PLAN.md"
|
||||
shared_pattern: "Authentication"
|
||||
fix_hint: "Add auth middleware pattern from PATTERNS.md ## Shared Patterns to plan"
|
||||
```
|
||||
|
||||
</verification_dimensions>
|
||||
|
||||
<verification_process>
|
||||
@@ -859,6 +952,7 @@ Plan verification complete when:
|
||||
- [ ] No tasks contradict locked decisions
|
||||
- [ ] Deferred ideas not included in plans
|
||||
- [ ] Overall status determined (passed | issues_found)
|
||||
- [ ] Architectural tier compliance checked (tasks match responsibility map tiers)
|
||||
- [ ] Cross-plan data contracts checked (no conflicting transforms on shared data)
|
||||
- [ ] CLAUDE.md compliance checked (plans respect project conventions)
|
||||
- [ ] Structured issues returned (if any found)
|
||||
|
||||
@@ -35,12 +35,15 @@ If the prompt contains a `<files_to_read>` block, you MUST use the `Read` tool t
|
||||
- Return structured results to orchestrator
|
||||
</role>
|
||||
|
||||
<mcp_tool_usage>
|
||||
Use all tools available in your environment, including MCP servers. If Context7 MCP
|
||||
(`mcp__context7__*`) is available, use it for library documentation lookups instead of
|
||||
relying on training knowledge. Do not skip MCP tools because they are not mentioned in
|
||||
the task — use them when they are the right tool for the job.
|
||||
</mcp_tool_usage>
|
||||
<documentation_lookup>
|
||||
For library docs: use Context7 MCP (`mcp__context7__*`) if available. If not (upstream
|
||||
bug #13898 strips MCP from `tools:`-restricted agents), use the Bash CLI fallback:
|
||||
```bash
|
||||
npx --yes ctx7@latest library <name> "<query>" # resolve library ID
|
||||
npx --yes ctx7@latest docs <libraryId> "<query>" # fetch docs
|
||||
```
|
||||
Do not skip — the CLI fallback works via Bash and produces equivalent output.
|
||||
</documentation_lookup>
|
||||
|
||||
<project_context>
|
||||
Before planning, discover project context:
|
||||
@@ -95,38 +98,47 @@ The orchestrator provides user decisions in `<user_decisions>` tags from `/gsd-d
|
||||
- "v1", "v2", "simplified version", "static for now", "hardcoded for now"
|
||||
- "future enhancement", "placeholder", "basic version", "minimal implementation"
|
||||
- "will be wired later", "dynamic in future phase", "skip for now"
|
||||
- Any language that reduces a CONTEXT.md decision to less than what the user decided
|
||||
- Any language that reduces a source artifact decision to less than what was specified
|
||||
|
||||
**The rule:** If D-XX says "display cost calculated from billing table in impulses", the plan MUST deliver cost calculated from billing table in impulses. NOT "static label /min" as a "v1".
|
||||
|
||||
**When the phase is too complex to implement ALL decisions:**
|
||||
**When the plan set cannot cover all source items within context budget:**
|
||||
|
||||
Do NOT silently simplify decisions. Instead:
|
||||
Do NOT silently omit features. Instead:
|
||||
|
||||
1. **Create a decision coverage matrix** mapping every D-XX to a plan/task
|
||||
2. **If any D-XX cannot fit** within the plan budget (too many tasks, too complex):
|
||||
1. **Create a multi-source coverage audit** (see below) covering ALL four artifact types
|
||||
2. **If any item cannot fit** within the plan budget (context cost exceeds capacity):
|
||||
- Return `## PHASE SPLIT RECOMMENDED` to the orchestrator
|
||||
- Propose how to split: which D-XX groups form natural sub-phases
|
||||
- Example: "D-01 to D-19 = Phase 17a (processing core), D-20 to D-27 = Phase 17b (billing + config UX)"
|
||||
3. The orchestrator will present the split to the user for approval
|
||||
- Propose how to split: which item groups form natural sub-phases
|
||||
3. The orchestrator presents the split to the user for approval
|
||||
4. After approval, plan each sub-phase within budget
|
||||
|
||||
**Why this matters:** The user spent time making decisions. Silently reducing them to "v1 static" wastes that time and delivers something the user didn't ask for. Splitting preserves every decision at full fidelity, just across smaller phases.
|
||||
## Multi-Source Coverage Audit (MANDATORY in every plan set)
|
||||
|
||||
**Decision coverage matrix (MANDATORY in every plan set):**
|
||||
@planner-source-audit.md for full format, examples, and gap-handling rules.
|
||||
|
||||
Before finalizing plans, produce internally:
|
||||
Audit ALL four source types before finalizing: **GOAL** (ROADMAP phase goal), **REQ** (phase_req_ids from REQUIREMENTS.md), **RESEARCH** (RESEARCH.md features/constraints), **CONTEXT** (D-XX decisions from CONTEXT.md).
|
||||
|
||||
```
|
||||
D-XX | Plan | Task | Full/Partial | Notes
|
||||
D-01 | 01 | 1 | Full |
|
||||
D-02 | 01 | 2 | Full |
|
||||
D-23 | 03 | 1 | PARTIAL | ← BLOCKER: must be Full or split phase
|
||||
```
|
||||
Every item must be COVERED by a plan. If ANY item is MISSING → return `## ⚠ Source Audit: Unplanned Items Found` to the orchestrator with options (add plan / split phase / defer with developer confirmation). Never finalize silently with gaps.
|
||||
|
||||
If ANY decision is "Partial" → either fix the task to deliver fully, or return PHASE SPLIT RECOMMENDED.
|
||||
Exclusions (not gaps): Deferred Ideas in CONTEXT.md, items scoped to other phases, RESEARCH.md "out of scope" items.
|
||||
</scope_reduction_prohibition>
|
||||
|
||||
<planner_authority_limits>
|
||||
## The Planner Does Not Decide What Is Too Hard
|
||||
|
||||
@planner-source-audit.md for constraint examples.
|
||||
|
||||
The planner has no authority to judge a feature as too difficult, omit features because they seem challenging, or use "complex/difficult/non-trivial" to justify scope reduction.
|
||||
|
||||
**Only three legitimate reasons to split or flag:**
|
||||
1. **Context cost:** implementation would consume >50% of a single agent's context window
|
||||
2. **Missing information:** required data not present in any source artifact
|
||||
3. **Dependency conflict:** feature cannot be built until another phase ships
|
||||
|
||||
If a feature has none of these three constraints, it gets planned. Period.
|
||||
</planner_authority_limits>
|
||||
|
||||
<philosophy>
|
||||
|
||||
## Solo Developer + Claude Workflow
|
||||
@@ -134,7 +146,7 @@ If ANY decision is "Partial" → either fix the task to deliver fully, or return
|
||||
Planning for ONE person (the user) and ONE implementer (Claude).
|
||||
- No teams, stakeholders, ceremonies, coordination overhead
|
||||
- User = visionary/product owner, Claude = builder
|
||||
- Estimate effort in Claude execution time, not human dev time
|
||||
- Estimate effort in context window cost, not time
|
||||
|
||||
## Plans Are Prompts
|
||||
|
||||
@@ -162,7 +174,8 @@ Plan -> Execute -> Ship -> Learn -> Repeat
|
||||
**Anti-enterprise patterns (delete if seen):**
|
||||
- Team structures, RACI matrices, stakeholder management
|
||||
- Sprint ceremonies, change management processes
|
||||
- Human dev time estimates (hours, days, weeks)
|
||||
- Time estimates in human units (see `<planner_authority_limits>`)
|
||||
- Complexity/difficulty as scope justification (see `<planner_authority_limits>`)
|
||||
- Documentation for documentation's sake
|
||||
|
||||
</philosophy>
|
||||
@@ -243,13 +256,19 @@ Every task has four required fields:
|
||||
|
||||
## Task Sizing
|
||||
|
||||
Each task: **15-60 minutes** Claude execution time.
|
||||
Each task targets **10–30% context consumption**.
|
||||
|
||||
| Duration | Action |
|
||||
|----------|--------|
|
||||
| < 15 min | Too small — combine with related task |
|
||||
| 15-60 min | Right size |
|
||||
| > 60 min | Too large — split |
|
||||
| Context Cost | Action |
|
||||
|--------------|--------|
|
||||
| < 10% context | Too small — combine with a related task |
|
||||
| 10-30% context | Right size — proceed |
|
||||
| > 30% context | Too large — split into two tasks |
|
||||
|
||||
**Context cost signals (use these, not time estimates):**
|
||||
- Files modified: 0-3 = ~10-15%, 4-6 = ~20-30%, 7+ = ~40%+ (split)
|
||||
- New subsystem: ~25-35%
|
||||
- Migration + data transform: ~30-40%
|
||||
- Pure config/wiring: ~5-10%
|
||||
|
||||
**Too large signals:** Touches >3-5 files, multiple distinct chunks, action section >1 paragraph.
|
||||
|
||||
@@ -265,20 +284,16 @@ When a plan creates new interfaces consumed by subsequent tasks:
|
||||
|
||||
This prevents the "scavenger hunt" anti-pattern where executors explore the codebase to understand contracts. They receive the contracts in the plan itself.
|
||||
|
||||
## Specificity Examples
|
||||
## Specificity
|
||||
|
||||
| TOO VAGUE | JUST RIGHT |
|
||||
|-----------|------------|
|
||||
| "Add authentication" | "Add JWT auth with refresh rotation using jose library, store in httpOnly cookie, 15min access / 7day refresh" |
|
||||
| "Create the API" | "Create POST /api/projects endpoint accepting {name, description}, validates name length 3-50 chars, returns 201 with project object" |
|
||||
| "Style the dashboard" | "Add Tailwind classes to Dashboard.tsx: grid layout (3 cols on lg, 1 on mobile), card shadows, hover states on action buttons" |
|
||||
| "Handle errors" | "Wrap API calls in try/catch, return {error: string} on 4xx/5xx, show toast via sonner on client" |
|
||||
| "Set up the database" | "Add User and Project models to schema.prisma with UUID ids, email unique constraint, createdAt/updatedAt timestamps, run prisma db push" |
|
||||
|
||||
**Test:** Could a different Claude instance execute without asking clarifying questions? If not, add specificity.
|
||||
**Test:** Could a different Claude instance execute without asking clarifying questions? If not, add specificity. See @~/.claude/get-shit-done/references/planner-antipatterns.md for vague-vs-specific comparison table.
|
||||
|
||||
## TDD Detection
|
||||
|
||||
**When `workflow.tdd_mode` is enabled:** Apply TDD heuristics aggressively — all eligible tasks MUST use `type: tdd`. Read @~/.claude/get-shit-done/references/tdd.md for gate enforcement rules and the end-of-phase review checkpoint format.
|
||||
|
||||
**When `workflow.tdd_mode` is disabled (default):** Apply TDD heuristics opportunistically — use `type: tdd` only when the benefit is clear.
|
||||
|
||||
**Heuristic:** Can you write `expect(fn(input)).toBe(output)` before writing `fn`?
|
||||
- Yes → Create a dedicated TDD plan (type: tdd)
|
||||
- No → Standard task in standard plan
|
||||
@@ -333,49 +348,9 @@ Record in `user_setup` frontmatter. Only include what Claude literally cannot do
|
||||
- `creates`: What this produces
|
||||
- `has_checkpoint`: Requires user interaction?
|
||||
|
||||
**Example with 6 tasks:**
|
||||
**Example:** A→C, B→D, C+D→E, E→F(checkpoint). Waves: {A,B} → {C,D} → {E} → {F}.
|
||||
|
||||
```
|
||||
Task A (User model): needs nothing, creates src/models/user.ts
|
||||
Task B (Product model): needs nothing, creates src/models/product.ts
|
||||
Task C (User API): needs Task A, creates src/api/users.ts
|
||||
Task D (Product API): needs Task B, creates src/api/products.ts
|
||||
Task E (Dashboard): needs Task C + D, creates src/components/Dashboard.tsx
|
||||
Task F (Verify UI): checkpoint:human-verify, needs Task E
|
||||
|
||||
Graph:
|
||||
A --> C --\
|
||||
--> E --> F
|
||||
B --> D --/
|
||||
|
||||
Wave analysis:
|
||||
Wave 1: A, B (independent roots)
|
||||
Wave 2: C, D (depend only on Wave 1)
|
||||
Wave 3: E (depends on Wave 2)
|
||||
Wave 4: F (checkpoint, depends on Wave 3)
|
||||
```
|
||||
|
||||
## Vertical Slices vs Horizontal Layers
|
||||
|
||||
**Vertical slices (PREFER):**
|
||||
```
|
||||
Plan 01: User feature (model + API + UI)
|
||||
Plan 02: Product feature (model + API + UI)
|
||||
Plan 03: Order feature (model + API + UI)
|
||||
```
|
||||
Result: All three run parallel (Wave 1)
|
||||
|
||||
**Horizontal layers (AVOID):**
|
||||
```
|
||||
Plan 01: Create User model, Product model, Order model
|
||||
Plan 02: Create User API, Product API, Order API
|
||||
Plan 03: Create User UI, Product UI, Order UI
|
||||
```
|
||||
Result: Fully sequential (02 needs 01, 03 needs 02)
|
||||
|
||||
**When vertical slices work:** Features are independent, self-contained, no cross-feature dependencies.
|
||||
|
||||
**When horizontal layers necessary:** Shared foundation required (auth before protected features), genuine type dependencies, infrastructure setup.
|
||||
**Prefer vertical slices** (User feature: model+API+UI) over horizontal layers (all models → all APIs → all UIs). Vertical = parallel. Horizontal = sequential. Use horizontal only when shared foundation is required.
|
||||
|
||||
## File Ownership for Parallel Execution
|
||||
|
||||
@@ -401,11 +376,11 @@ Plans should complete within ~50% context (not 80%). No context anxiety, quality
|
||||
|
||||
**Each plan: 2-3 tasks maximum.**
|
||||
|
||||
| Task Complexity | Tasks/Plan | Context/Task | Total |
|
||||
|-----------------|------------|--------------|-------|
|
||||
| Simple (CRUD, config) | 3 | ~10-15% | ~30-45% |
|
||||
| Complex (auth, payments) | 2 | ~20-30% | ~40-50% |
|
||||
| Very complex (migrations) | 1-2 | ~30-40% | ~30-50% |
|
||||
| Context Weight | Tasks/Plan | Context/Task | Total |
|
||||
|----------------|------------|--------------|-------|
|
||||
| Light (CRUD, config) | 3 | ~10-15% | ~30-45% |
|
||||
| Medium (auth, payments) | 2 | ~20-30% | ~40-50% |
|
||||
| Heavy (migrations, multi-subsystem) | 1-2 | ~30-40% | ~30-50% |
|
||||
|
||||
## Split Signals
|
||||
|
||||
@@ -416,7 +391,7 @@ Plans should complete within ~50% context (not 80%). No context anxiety, quality
|
||||
- Checkpoint + implementation in same plan
|
||||
- Discovery + implementation in same plan
|
||||
|
||||
**CONSIDER splitting:** >5 files total, complex domains, uncertainty about approach, natural semantic boundaries.
|
||||
**CONSIDER splitting:** >5 files total, natural semantic boundaries, context cost estimate exceeds 40% for a single plan. See `<planner_authority_limits>` for prohibited split reasons.
|
||||
|
||||
## Granularity Calibration
|
||||
|
||||
@@ -426,22 +401,7 @@ Plans should complete within ~50% context (not 80%). No context anxiety, quality
|
||||
| Standard | 3-5 | 2-3 |
|
||||
| Fine | 5-10 | 2-3 |
|
||||
|
||||
Derive plans from actual work. Granularity determines compression tolerance, not a target. Don't pad small work to hit a number. Don't compress complex work to look efficient.
|
||||
|
||||
## Context Per Task Estimates
|
||||
|
||||
| Files Modified | Context Impact |
|
||||
|----------------|----------------|
|
||||
| 0-3 files | ~10-15% (small) |
|
||||
| 4-6 files | ~20-30% (medium) |
|
||||
| 7+ files | ~40%+ (split) |
|
||||
|
||||
| Complexity | Context/Task |
|
||||
|------------|--------------|
|
||||
| Simple CRUD | ~15% |
|
||||
| Business logic | ~25% |
|
||||
| Complex algorithms | ~40% |
|
||||
| Domain modeling | ~35% |
|
||||
Derive plans from actual work. Granularity determines compression tolerance, not a target.
|
||||
|
||||
</scope_estimation>
|
||||
|
||||
@@ -794,36 +754,10 @@ When Claude tries CLI/API and gets auth error → creates checkpoint → user au
|
||||
|
||||
**DON'T:** Ask human to do work Claude can automate, mix multiple verifications, place checkpoints before automation completes.
|
||||
|
||||
## Anti-Patterns
|
||||
## Anti-Patterns and Extended Examples
|
||||
|
||||
**Bad - Asking human to automate:**
|
||||
```xml
|
||||
<task type="checkpoint:human-action">
|
||||
<action>Deploy to Vercel</action>
|
||||
<instructions>Visit vercel.com, import repo, click deploy...</instructions>
|
||||
</task>
|
||||
```
|
||||
Why bad: Vercel has a CLI. Claude should run `vercel --yes`.
|
||||
|
||||
**Bad - Too many checkpoints:**
|
||||
```xml
|
||||
<task type="auto">Create schema</task>
|
||||
<task type="checkpoint:human-verify">Check schema</task>
|
||||
<task type="auto">Create API</task>
|
||||
<task type="checkpoint:human-verify">Check API</task>
|
||||
```
|
||||
Why bad: Verification fatigue. Combine into one checkpoint at end.
|
||||
|
||||
**Good - Single verification checkpoint:**
|
||||
```xml
|
||||
<task type="auto">Create schema</task>
|
||||
<task type="auto">Create API</task>
|
||||
<task type="auto">Create UI</task>
|
||||
<task type="checkpoint:human-verify">
|
||||
<what-built>Complete auth flow (schema + API + UI)</what-built>
|
||||
<how-to-verify>Test full flow: register, login, access protected page</how-to-verify>
|
||||
</task>
|
||||
```
|
||||
For checkpoint anti-patterns, specificity comparison tables, context section anti-patterns, and scope reduction patterns:
|
||||
@~/.claude/get-shit-done/references/planner-antipatterns.md
|
||||
|
||||
</checkpoints>
|
||||
|
||||
@@ -1023,6 +957,8 @@ cat "$phase_dir"/*-DISCOVERY.md 2>/dev/null # From mandatory discovery
|
||||
**If CONTEXT.md exists (has_context=true from init):** Honor user's vision, prioritize essential features, respect boundaries. Locked decisions — do not revisit.
|
||||
|
||||
**If RESEARCH.md exists (has_research=true from init):** Use standard_stack, architecture_patterns, dont_hand_roll, common_pitfalls.
|
||||
|
||||
**Architectural Responsibility Map sanity check:** If RESEARCH.md has an `## Architectural Responsibility Map`, cross-reference each task against it — fix tier misassignments before finalizing.
|
||||
</step>
|
||||
|
||||
<step name="break_into_tasks">
|
||||
|
||||
@@ -32,6 +32,29 @@ Your files feed the roadmap:
|
||||
**Be comprehensive but opinionated.** "Use X because Y" not "Options are X, Y, Z."
|
||||
</role>
|
||||
|
||||
<documentation_lookup>
|
||||
When you need library or framework documentation, check in this order:
|
||||
|
||||
1. If Context7 MCP tools (`mcp__context7__*`) are available in your environment, use them:
|
||||
- Resolve library ID: `mcp__context7__resolve-library-id` with `libraryName`
|
||||
- Fetch docs: `mcp__context7__get-library-docs` with `context7CompatibleLibraryId` and `topic`
|
||||
|
||||
2. If Context7 MCP is not available (upstream bug anthropics/claude-code#13898 strips MCP
|
||||
tools from agents with a `tools:` frontmatter restriction), use the CLI fallback via Bash:
|
||||
|
||||
Step 1 — Resolve library ID:
|
||||
```bash
|
||||
npx --yes ctx7@latest library <name> "<query>"
|
||||
```
|
||||
Step 2 — Fetch documentation:
|
||||
```bash
|
||||
npx --yes ctx7@latest docs <libraryId> "<query>"
|
||||
```
|
||||
|
||||
Do not skip documentation lookups because MCP tools are unavailable — the CLI fallback
|
||||
works via Bash and produces equivalent output.
|
||||
</documentation_lookup>
|
||||
|
||||
<philosophy>
|
||||
|
||||
## Training Data = Hypothesis
|
||||
|
||||
@@ -27,6 +27,29 @@ If the prompt contains a `<files_to_read>` block, you MUST use the `Read` tool t
|
||||
- Return structured result to orchestrator
|
||||
</role>
|
||||
|
||||
<documentation_lookup>
|
||||
When you need library or framework documentation, check in this order:
|
||||
|
||||
1. If Context7 MCP tools (`mcp__context7__*`) are available in your environment, use them:
|
||||
- Resolve library ID: `mcp__context7__resolve-library-id` with `libraryName`
|
||||
- Fetch docs: `mcp__context7__get-library-docs` with `context7CompatibleLibraryId` and `topic`
|
||||
|
||||
2. If Context7 MCP is not available (upstream bug anthropics/claude-code#13898 strips MCP
|
||||
tools from agents with a `tools:` frontmatter restriction), use the CLI fallback via Bash:
|
||||
|
||||
Step 1 — Resolve library ID:
|
||||
```bash
|
||||
npx --yes ctx7@latest library <name> "<query>"
|
||||
```
|
||||
Step 2 — Fetch documentation:
|
||||
```bash
|
||||
npx --yes ctx7@latest docs <libraryId> "<query>"
|
||||
```
|
||||
|
||||
Do not skip documentation lookups because MCP tools are unavailable — the CLI fallback
|
||||
works via Bash and produces equivalent output.
|
||||
</documentation_lookup>
|
||||
|
||||
<project_context>
|
||||
Before researching, discover project context:
|
||||
|
||||
|
||||
126
bin/install.js
126
bin/install.js
@@ -70,6 +70,7 @@ const hasCursor = args.includes('--cursor');
|
||||
const hasWindsurf = args.includes('--windsurf');
|
||||
const hasAugment = args.includes('--augment');
|
||||
const hasTrae = args.includes('--trae');
|
||||
const hasQwen = args.includes('--qwen');
|
||||
const hasCodebuddy = args.includes('--codebuddy');
|
||||
const hasCline = args.includes('--cline');
|
||||
const hasBoth = args.includes('--both'); // Legacy flag, keeps working
|
||||
@@ -79,7 +80,7 @@ const hasUninstall = args.includes('--uninstall') || args.includes('-u');
|
||||
// Runtime selection - can be set by flags or interactive prompt
|
||||
let selectedRuntimes = [];
|
||||
if (hasAll) {
|
||||
selectedRuntimes = ['claude', 'kilo', 'opencode', 'gemini', 'codex', 'copilot', 'antigravity', 'cursor', 'windsurf', 'augment', 'trae', 'codebuddy', 'cline'];
|
||||
selectedRuntimes = ['claude', 'kilo', 'opencode', 'gemini', 'codex', 'copilot', 'antigravity', 'cursor', 'windsurf', 'augment', 'trae', 'qwen', 'codebuddy', 'cline'];
|
||||
} else if (hasBoth) {
|
||||
selectedRuntimes = ['claude', 'opencode'];
|
||||
} else {
|
||||
@@ -94,6 +95,7 @@ if (hasAll) {
|
||||
if (hasWindsurf) selectedRuntimes.push('windsurf');
|
||||
if (hasAugment) selectedRuntimes.push('augment');
|
||||
if (hasTrae) selectedRuntimes.push('trae');
|
||||
if (hasQwen) selectedRuntimes.push('qwen');
|
||||
if (hasCodebuddy) selectedRuntimes.push('codebuddy');
|
||||
if (hasCline) selectedRuntimes.push('cline');
|
||||
}
|
||||
@@ -144,6 +146,7 @@ function getDirName(runtime) {
|
||||
if (runtime === 'windsurf') return '.windsurf';
|
||||
if (runtime === 'augment') return '.augment';
|
||||
if (runtime === 'trae') return '.trae';
|
||||
if (runtime === 'qwen') return '.qwen';
|
||||
if (runtime === 'codebuddy') return '.codebuddy';
|
||||
if (runtime === 'cline') return '.cline';
|
||||
return '.claude';
|
||||
@@ -178,6 +181,7 @@ function getConfigDirFromHome(runtime, isGlobal) {
|
||||
if (runtime === 'windsurf') return "'.windsurf'";
|
||||
if (runtime === 'augment') return "'.augment'";
|
||||
if (runtime === 'trae') return "'.trae'";
|
||||
if (runtime === 'qwen') return "'.qwen'";
|
||||
if (runtime === 'codebuddy') return "'.codebuddy'";
|
||||
if (runtime === 'cline') return "'.cline'";
|
||||
return "'.claude'";
|
||||
@@ -342,6 +346,16 @@ function getGlobalDir(runtime, explicitDir = null) {
|
||||
return path.join(os.homedir(), '.trae');
|
||||
}
|
||||
|
||||
if (runtime === 'qwen') {
|
||||
if (explicitDir) {
|
||||
return expandTilde(explicitDir);
|
||||
}
|
||||
if (process.env.QWEN_CONFIG_DIR) {
|
||||
return expandTilde(process.env.QWEN_CONFIG_DIR);
|
||||
}
|
||||
return path.join(os.homedir(), '.qwen');
|
||||
}
|
||||
|
||||
if (runtime === 'codebuddy') {
|
||||
// CodeBuddy: --config-dir > CODEBUDDY_CONFIG_DIR > ~/.codebuddy
|
||||
if (explicitDir) {
|
||||
@@ -384,7 +398,7 @@ const banner = '\n' +
|
||||
'\n' +
|
||||
' Get Shit Done ' + dim + 'v' + pkg.version + reset + '\n' +
|
||||
' A meta-prompting, context engineering and spec-driven\n' +
|
||||
' development system for Claude Code, OpenCode, Gemini, Kilo, Codex, Copilot, Antigravity, Cursor, Windsurf, Augment, Trae, Cline and CodeBuddy by TÂCHES.\n';
|
||||
' development system for Claude Code, OpenCode, Gemini, Kilo, Codex, Copilot, Antigravity, Cursor, Windsurf, Augment, Trae, Qwen Code, Cline and CodeBuddy by TÂCHES.\n';
|
||||
|
||||
// Parse --config-dir argument
|
||||
function parseConfigDirArg() {
|
||||
@@ -422,7 +436,7 @@ if (hasUninstall) {
|
||||
|
||||
// Show help if requested
|
||||
if (hasHelp) {
|
||||
console.log(` ${yellow}Usage:${reset} npx get-shit-done-cc [options]\n\n ${yellow}Options:${reset}\n ${cyan}-g, --global${reset} Install globally (to config directory)\n ${cyan}-l, --local${reset} Install locally (to current directory)\n ${cyan}--claude${reset} Install for Claude Code only\n ${cyan}--opencode${reset} Install for OpenCode only\n ${cyan}--gemini${reset} Install for Gemini only\n ${cyan}--kilo${reset} Install for Kilo only\n ${cyan}--codex${reset} Install for Codex only\n ${cyan}--copilot${reset} Install for Copilot only\n ${cyan}--antigravity${reset} Install for Antigravity only\n ${cyan}--cursor${reset} Install for Cursor only\n ${cyan}--windsurf${reset} Install for Windsurf only\n ${cyan}--augment${reset} Install for Augment only\n ${cyan}--trae${reset} Install for Trae only\n ${cyan}--cline${reset} Install for Cline only\n ${cyan}--codebuddy${reset} Install for CodeBuddy only\n ${cyan}--all${reset} Install for all runtimes\n ${cyan}-u, --uninstall${reset} Uninstall GSD (remove all GSD files)\n ${cyan}-c, --config-dir <path>${reset} Specify custom config directory\n ${cyan}-h, --help${reset} Show this help message\n ${cyan}--force-statusline${reset} Replace existing statusline config\n\n ${yellow}Examples:${reset}\n ${dim}# Interactive install (prompts for runtime and location)${reset}\n npx get-shit-done-cc\n\n ${dim}# Install for Claude Code globally${reset}\n npx get-shit-done-cc --claude --global\n\n ${dim}# Install for Gemini globally${reset}\n npx get-shit-done-cc --gemini --global\n\n ${dim}# Install for Kilo globally${reset}\n npx get-shit-done-cc --kilo --global\n\n ${dim}# Install for Codex globally${reset}\n npx get-shit-done-cc --codex --global\n\n ${dim}# Install for Copilot globally${reset}\n npx get-shit-done-cc --copilot --global\n\n ${dim}# Install for Copilot locally${reset}\n npx get-shit-done-cc --copilot --local\n\n ${dim}# Install for Antigravity globally${reset}\n npx get-shit-done-cc --antigravity --global\n\n ${dim}# 
Install for Antigravity locally${reset}\n npx get-shit-done-cc --antigravity --local\n\n ${dim}# Install for Cursor globally${reset}\n npx get-shit-done-cc --cursor --global\n\n ${dim}# Install for Cursor locally${reset}\n npx get-shit-done-cc --cursor --local\n\n ${dim}# Install for Windsurf globally${reset}\n npx get-shit-done-cc --windsurf --global\n\n ${dim}# Install for Windsurf locally${reset}\n npx get-shit-done-cc --windsurf --local\n\n ${dim}# Install for Augment globally${reset}\n npx get-shit-done-cc --augment --global\n\n ${dim}# Install for Augment locally${reset}\n npx get-shit-done-cc --augment --local\n\n ${dim}# Install for Trae globally${reset}\n npx get-shit-done-cc --trae --global\n\n ${dim}# Install for Trae locally${reset}\n npx get-shit-done-cc --trae --local\n\n ${dim}# Install for Cline locally${reset}\n npx get-shit-done-cc --cline --local\n\n ${dim}# Install for CodeBuddy globally${reset}\n npx get-shit-done-cc --codebuddy --global\n\n ${dim}# Install for CodeBuddy locally${reset}\n npx get-shit-done-cc --codebuddy --local\n\n ${dim}# Install for all runtimes globally${reset}\n npx get-shit-done-cc --all --global\n\n ${dim}# Install to custom config directory${reset}\n npx get-shit-done-cc --kilo --global --config-dir ~/.kilo-work\n\n ${dim}# Install to current project only${reset}\n npx get-shit-done-cc --claude --local\n\n ${dim}# Uninstall GSD from Cursor globally${reset}\n npx get-shit-done-cc --cursor --global --uninstall\n\n ${yellow}Notes:${reset}\n The --config-dir option is useful when you have multiple configurations.\n It takes priority over CLAUDE_CONFIG_DIR / OPENCODE_CONFIG_DIR / GEMINI_CONFIG_DIR / KILO_CONFIG_DIR / CODEX_HOME / COPILOT_CONFIG_DIR / ANTIGRAVITY_CONFIG_DIR / CURSOR_CONFIG_DIR / WINDSURF_CONFIG_DIR / AUGMENT_CONFIG_DIR / TRAE_CONFIG_DIR / CLINE_CONFIG_DIR / CODEBUDDY_CONFIG_DIR environment variables.\n`);
|
||||
console.log(` ${yellow}Usage:${reset} npx get-shit-done-cc [options]\n\n ${yellow}Options:${reset}\n ${cyan}-g, --global${reset} Install globally (to config directory)\n ${cyan}-l, --local${reset} Install locally (to current directory)\n ${cyan}--claude${reset} Install for Claude Code only\n ${cyan}--opencode${reset} Install for OpenCode only\n ${cyan}--gemini${reset} Install for Gemini only\n ${cyan}--kilo${reset} Install for Kilo only\n ${cyan}--codex${reset} Install for Codex only\n ${cyan}--copilot${reset} Install for Copilot only\n ${cyan}--antigravity${reset} Install for Antigravity only\n ${cyan}--cursor${reset} Install for Cursor only\n ${cyan}--windsurf${reset} Install for Windsurf only\n ${cyan}--augment${reset} Install for Augment only\n ${cyan}--trae${reset} Install for Trae only\n ${cyan}--qwen${reset} Install for Qwen Code only\n ${cyan}--cline${reset} Install for Cline only\n ${cyan}--codebuddy${reset} Install for CodeBuddy only\n ${cyan}--all${reset} Install for all runtimes\n ${cyan}-u, --uninstall${reset} Uninstall GSD (remove all GSD files)\n ${cyan}-c, --config-dir <path>${reset} Specify custom config directory\n ${cyan}-h, --help${reset} Show this help message\n ${cyan}--force-statusline${reset} Replace existing statusline config\n\n ${yellow}Examples:${reset}\n ${dim}# Interactive install (prompts for runtime and location)${reset}\n npx get-shit-done-cc\n\n ${dim}# Install for Claude Code globally${reset}\n npx get-shit-done-cc --claude --global\n\n ${dim}# Install for Gemini globally${reset}\n npx get-shit-done-cc --gemini --global\n\n ${dim}# Install for Kilo globally${reset}\n npx get-shit-done-cc --kilo --global\n\n ${dim}# Install for Codex globally${reset}\n npx get-shit-done-cc --codex --global\n\n ${dim}# Install for Copilot globally${reset}\n npx get-shit-done-cc --copilot --global\n\n ${dim}# Install for Copilot locally${reset}\n npx get-shit-done-cc --copilot --local\n\n ${dim}# Install for Antigravity globally${reset}\n npx 
get-shit-done-cc --antigravity --global\n\n ${dim}# Install for Antigravity locally${reset}\n npx get-shit-done-cc --antigravity --local\n\n ${dim}# Install for Cursor globally${reset}\n npx get-shit-done-cc --cursor --global\n\n ${dim}# Install for Cursor locally${reset}\n npx get-shit-done-cc --cursor --local\n\n ${dim}# Install for Windsurf globally${reset}\n npx get-shit-done-cc --windsurf --global\n\n ${dim}# Install for Windsurf locally${reset}\n npx get-shit-done-cc --windsurf --local\n\n ${dim}# Install for Augment globally${reset}\n npx get-shit-done-cc --augment --global\n\n ${dim}# Install for Augment locally${reset}\n npx get-shit-done-cc --augment --local\n\n ${dim}# Install for Trae globally${reset}\n npx get-shit-done-cc --trae --global\n\n ${dim}# Install for Trae locally${reset}\n npx get-shit-done-cc --trae --local\n\n ${dim}# Install for Cline locally${reset}\n npx get-shit-done-cc --cline --local\n\n ${dim}# Install for CodeBuddy globally${reset}\n npx get-shit-done-cc --codebuddy --global\n\n ${dim}# Install for CodeBuddy locally${reset}\n npx get-shit-done-cc --codebuddy --local\n\n ${dim}# Install for all runtimes globally${reset}\n npx get-shit-done-cc --all --global\n\n ${dim}# Install to custom config directory${reset}\n npx get-shit-done-cc --kilo --global --config-dir ~/.kilo-work\n\n ${dim}# Install to current project only${reset}\n npx get-shit-done-cc --claude --local\n\n ${dim}# Uninstall GSD from Cursor globally${reset}\n npx get-shit-done-cc --cursor --global --uninstall\n\n ${yellow}Notes:${reset}\n The --config-dir option is useful when you have multiple configurations.\n It takes priority over CLAUDE_CONFIG_DIR / OPENCODE_CONFIG_DIR / GEMINI_CONFIG_DIR / KILO_CONFIG_DIR / CODEX_HOME / COPILOT_CONFIG_DIR / ANTIGRAVITY_CONFIG_DIR / CURSOR_CONFIG_DIR / WINDSURF_CONFIG_DIR / AUGMENT_CONFIG_DIR / TRAE_CONFIG_DIR / QWEN_CONFIG_DIR / CLINE_CONFIG_DIR / CODEBUDDY_CONFIG_DIR environment variables.\n`);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
@@ -3939,7 +3953,16 @@ function copyCommandsAsClaudeSkills(srcDir, skillsDir, prefix, pathPrefix, runti
|
||||
content = content.replace(/~\/\.claude\//g, pathPrefix);
|
||||
content = content.replace(/\$HOME\/\.claude\//g, pathPrefix);
|
||||
content = content.replace(/\.\/\.claude\//g, `./${getDirName(runtime)}/`);
|
||||
content = processAttribution(content, getCommitAttribution('claude'));
|
||||
content = content.replace(/~\/\.qwen\//g, pathPrefix);
|
||||
content = content.replace(/\$HOME\/\.qwen\//g, pathPrefix);
|
||||
content = content.replace(/\.\/\.qwen\//g, `./${getDirName(runtime)}/`);
|
||||
// Qwen reuses Claude skill format but needs runtime-specific content replacement
|
||||
if (runtime === 'qwen') {
|
||||
content = content.replace(/CLAUDE\.md/g, 'QWEN.md');
|
||||
content = content.replace(/\bClaude Code\b/g, 'Qwen Code');
|
||||
content = content.replace(/\.claude\//g, '.qwen/');
|
||||
}
|
||||
content = processAttribution(content, getCommitAttribution(runtime));
|
||||
content = convertClaudeCommandToClaudeSkill(content, skillName);
|
||||
|
||||
fs.writeFileSync(path.join(skillDir, 'SKILL.md'), content);
|
||||
@@ -4057,6 +4080,7 @@ function copyWithPathReplacement(srcDir, destDir, pathPrefix, runtime, isCommand
|
||||
const isWindsurf = runtime === 'windsurf';
|
||||
const isAugment = runtime === 'augment';
|
||||
const isTrae = runtime === 'trae';
|
||||
const isQwen = runtime === 'qwen';
|
||||
const isCline = runtime === 'cline';
|
||||
const dirName = getDirName(runtime);
|
||||
|
||||
@@ -4085,6 +4109,9 @@ function copyWithPathReplacement(srcDir, destDir, pathPrefix, runtime, isCommand
|
||||
content = content.replace(globalClaudeRegex, pathPrefix);
|
||||
content = content.replace(globalClaudeHomeRegex, pathPrefix);
|
||||
content = content.replace(localClaudeRegex, `./${dirName}/`);
|
||||
content = content.replace(/~\/\.qwen\//g, pathPrefix);
|
||||
content = content.replace(/\$HOME\/\.qwen\//g, pathPrefix);
|
||||
content = content.replace(/\.\/\.qwen\//g, `./${dirName}/`);
|
||||
}
|
||||
content = processAttribution(content, getCommitAttribution(runtime));
|
||||
|
||||
@@ -4128,6 +4155,11 @@ function copyWithPathReplacement(srcDir, destDir, pathPrefix, runtime, isCommand
|
||||
} else if (isCline) {
|
||||
content = convertClaudeToCliineMarkdown(content);
|
||||
fs.writeFileSync(destPath, content);
|
||||
} else if (isQwen) {
|
||||
content = content.replace(/CLAUDE\.md/g, 'QWEN.md');
|
||||
content = content.replace(/\bClaude Code\b/g, 'Qwen Code');
|
||||
content = content.replace(/\.claude\//g, '.qwen/');
|
||||
fs.writeFileSync(destPath, content);
|
||||
} else {
|
||||
fs.writeFileSync(destPath, content);
|
||||
}
|
||||
@@ -4172,6 +4204,13 @@ function copyWithPathReplacement(srcDir, destDir, pathPrefix, runtime, isCommand
|
||||
jsContent = jsContent.replace(/CLAUDE\.md/g, '.clinerules');
|
||||
jsContent = jsContent.replace(/\bClaude Code\b/g, 'Cline');
|
||||
fs.writeFileSync(destPath, jsContent);
|
||||
} else if (isQwen && (entry.name.endsWith('.cjs') || entry.name.endsWith('.js'))) {
|
||||
let jsContent = fs.readFileSync(srcPath, 'utf8');
|
||||
jsContent = jsContent.replace(/\.claude\/skills\//g, '.qwen/skills/');
|
||||
jsContent = jsContent.replace(/\.claude\//g, '.qwen/');
|
||||
jsContent = jsContent.replace(/CLAUDE\.md/g, 'QWEN.md');
|
||||
jsContent = jsContent.replace(/\bClaude Code\b/g, 'Qwen Code');
|
||||
fs.writeFileSync(destPath, jsContent);
|
||||
} else {
|
||||
fs.copyFileSync(srcPath, destPath);
|
||||
}
|
||||
@@ -4349,6 +4388,7 @@ function uninstall(isGlobal, runtime = 'claude') {
|
||||
const isWindsurf = runtime === 'windsurf';
|
||||
const isAugment = runtime === 'augment';
|
||||
const isTrae = runtime === 'trae';
|
||||
const isQwen = runtime === 'qwen';
|
||||
const isCodebuddy = runtime === 'codebuddy';
|
||||
const dirName = getDirName(runtime);
|
||||
|
||||
@@ -4372,6 +4412,7 @@ function uninstall(isGlobal, runtime = 'claude') {
|
||||
if (runtime === 'windsurf') runtimeLabel = 'Windsurf';
|
||||
if (runtime === 'augment') runtimeLabel = 'Augment';
|
||||
if (runtime === 'trae') runtimeLabel = 'Trae';
|
||||
if (runtime === 'qwen') runtimeLabel = 'Qwen Code';
|
||||
if (runtime === 'codebuddy') runtimeLabel = 'CodeBuddy';
|
||||
|
||||
console.log(` Uninstalling GSD from ${cyan}${runtimeLabel}${reset} at ${cyan}${locationLabel}${reset}\n`);
|
||||
@@ -4502,6 +4543,31 @@ function uninstall(isGlobal, runtime = 'claude') {
|
||||
console.log(` ${green}✓${reset} Removed ${skillCount} Antigravity skills`);
|
||||
}
|
||||
}
|
||||
} else if (isQwen) {
|
||||
const skillsDir = path.join(targetDir, 'skills');
|
||||
if (fs.existsSync(skillsDir)) {
|
||||
let skillCount = 0;
|
||||
const entries = fs.readdirSync(skillsDir, { withFileTypes: true });
|
||||
for (const entry of entries) {
|
||||
if (entry.isDirectory() && entry.name.startsWith('gsd-')) {
|
||||
fs.rmSync(path.join(skillsDir, entry.name), { recursive: true });
|
||||
skillCount++;
|
||||
}
|
||||
}
|
||||
if (skillCount > 0) {
|
||||
removedCount++;
|
||||
console.log(` ${green}✓${reset} Removed ${skillCount} Qwen Code skills`);
|
||||
}
|
||||
}
|
||||
|
||||
const legacyCommandsDir = path.join(targetDir, 'commands', 'gsd');
|
||||
if (fs.existsSync(legacyCommandsDir)) {
|
||||
const savedLegacyArtifacts = preserveUserArtifacts(legacyCommandsDir, ['dev-preferences.md']);
|
||||
fs.rmSync(legacyCommandsDir, { recursive: true });
|
||||
removedCount++;
|
||||
console.log(` ${green}✓${reset} Removed legacy commands/gsd/`);
|
||||
restoreUserArtifacts(legacyCommandsDir, savedLegacyArtifacts);
|
||||
}
|
||||
} else if (isGemini) {
|
||||
// Gemini: still uses commands/gsd/
|
||||
const gsdCommandsDir = path.join(targetDir, 'commands', 'gsd');
|
||||
@@ -5298,6 +5364,7 @@ function install(isGlobal, runtime = 'claude') {
|
||||
const isWindsurf = runtime === 'windsurf';
|
||||
const isAugment = runtime === 'augment';
|
||||
const isTrae = runtime === 'trae';
|
||||
const isQwen = runtime === 'qwen';
|
||||
const isCodebuddy = runtime === 'codebuddy';
|
||||
const isCline = runtime === 'cline';
|
||||
const dirName = getDirName(runtime);
|
||||
@@ -5338,6 +5405,7 @@ function install(isGlobal, runtime = 'claude') {
|
||||
if (isWindsurf) runtimeLabel = 'Windsurf';
|
||||
if (isAugment) runtimeLabel = 'Augment';
|
||||
if (isTrae) runtimeLabel = 'Trae';
|
||||
if (isQwen) runtimeLabel = 'Qwen Code';
|
||||
if (isCodebuddy) runtimeLabel = 'CodeBuddy';
|
||||
if (isCline) runtimeLabel = 'Cline';
|
||||
|
||||
@@ -5447,6 +5515,29 @@ function install(isGlobal, runtime = 'claude') {
|
||||
} else {
|
||||
failures.push('skills/gsd-*');
|
||||
}
|
||||
} else if (isQwen) {
|
||||
const skillsDir = path.join(targetDir, 'skills');
|
||||
const gsdSrc = path.join(src, 'commands', 'gsd');
|
||||
copyCommandsAsClaudeSkills(gsdSrc, skillsDir, 'gsd', pathPrefix, runtime, isGlobal);
|
||||
if (fs.existsSync(skillsDir)) {
|
||||
const count = fs.readdirSync(skillsDir, { withFileTypes: true })
|
||||
.filter(e => e.isDirectory() && e.name.startsWith('gsd-')).length;
|
||||
if (count > 0) {
|
||||
console.log(` ${green}✓${reset} Installed ${count} skills to skills/`);
|
||||
} else {
|
||||
failures.push('skills/gsd-*');
|
||||
}
|
||||
} else {
|
||||
failures.push('skills/gsd-*');
|
||||
}
|
||||
|
||||
const legacyCommandsDir = path.join(targetDir, 'commands', 'gsd');
|
||||
if (fs.existsSync(legacyCommandsDir)) {
|
||||
const savedLegacyArtifacts = preserveUserArtifacts(legacyCommandsDir, ['dev-preferences.md']);
|
||||
fs.rmSync(legacyCommandsDir, { recursive: true });
|
||||
console.log(` ${green}✓${reset} Removed legacy commands/gsd/ directory`);
|
||||
restoreUserArtifacts(legacyCommandsDir, savedLegacyArtifacts);
|
||||
}
|
||||
} else if (isCodebuddy) {
|
||||
const skillsDir = path.join(targetDir, 'skills');
|
||||
const gsdSrc = path.join(src, 'commands', 'gsd');
|
||||
@@ -5598,6 +5689,10 @@ function install(isGlobal, runtime = 'claude') {
|
||||
content = convertClaudeAgentToCodebuddyAgent(content);
|
||||
} else if (isCline) {
|
||||
content = convertClaudeAgentToClineAgent(content);
|
||||
} else if (isQwen) {
|
||||
content = content.replace(/CLAUDE\.md/g, 'QWEN.md');
|
||||
content = content.replace(/\bClaude Code\b/g, 'Qwen Code');
|
||||
content = content.replace(/\.claude\//g, '.qwen/');
|
||||
}
|
||||
const destName = isCopilot ? entry.name.replace('.md', '.agent.md') : entry.name;
|
||||
fs.writeFileSync(path.join(agentsDest, destName), content);
|
||||
@@ -5656,6 +5751,11 @@ function install(isGlobal, runtime = 'claude') {
|
||||
if (entry.endsWith('.js')) {
|
||||
let content = fs.readFileSync(srcFile, 'utf8');
|
||||
content = content.replace(/'\.claude'/g, configDirReplacement);
|
||||
content = content.replace(/\/\.claude\//g, `/${getDirName(runtime)}/`);
|
||||
if (isQwen) {
|
||||
content = content.replace(/CLAUDE\.md/g, 'QWEN.md');
|
||||
content = content.replace(/\bClaude Code\b/g, 'Qwen Code');
|
||||
}
|
||||
content = content.replace(/\{\{GSD_VERSION\}\}/g, pkg.version);
|
||||
fs.writeFileSync(destFile, content);
|
||||
// Ensure hook files are executable (fixes #1162 — missing +x permission)
|
||||
@@ -6188,6 +6288,7 @@ function finishInstall(settingsPath, settings, statuslineCommand, shouldInstallS
|
||||
if (runtime === 'augment') program = 'Augment';
|
||||
if (runtime === 'trae') program = 'Trae';
|
||||
if (runtime === 'cline') program = 'Cline';
|
||||
if (runtime === 'qwen') program = 'Qwen Code';
|
||||
|
||||
let command = '/gsd-new-project';
|
||||
if (runtime === 'opencode') command = '/gsd-new-project';
|
||||
@@ -6200,6 +6301,7 @@ function finishInstall(settingsPath, settings, statuslineCommand, shouldInstallS
|
||||
if (runtime === 'augment') command = '/gsd-new-project';
|
||||
if (runtime === 'trae') command = '/gsd-new-project';
|
||||
if (runtime === 'cline') command = '/gsd-new-project';
|
||||
if (runtime === 'qwen') command = '/gsd-new-project';
|
||||
console.log(`
|
||||
${green}Done!${reset} Open a blank directory in ${program} and run ${cyan}${command}${reset}.
|
||||
|
||||
@@ -6289,10 +6391,11 @@ function promptRuntime(callback) {
|
||||
'9': 'gemini',
|
||||
'10': 'kilo',
|
||||
'11': 'opencode',
|
||||
'12': 'trae',
|
||||
'13': 'windsurf'
|
||||
'12': 'qwen',
|
||||
'13': 'trae',
|
||||
'14': 'windsurf'
|
||||
};
|
||||
const allRuntimes = ['claude', 'antigravity', 'augment', 'cline', 'codebuddy', 'codex', 'copilot', 'cursor', 'gemini', 'kilo', 'opencode', 'trae', 'windsurf'];
|
||||
const allRuntimes = ['claude', 'antigravity', 'augment', 'cline', 'codebuddy', 'codex', 'copilot', 'cursor', 'gemini', 'kilo', 'opencode', 'qwen', 'trae', 'windsurf'];
|
||||
|
||||
console.log(` ${yellow}Which runtime(s) would you like to install for?${reset}\n\n ${cyan}1${reset}) Claude Code ${dim}(~/.claude)${reset}
|
||||
${cyan}2${reset}) Antigravity ${dim}(~/.gemini/antigravity)${reset}
|
||||
@@ -6305,9 +6408,10 @@ function promptRuntime(callback) {
|
||||
${cyan}9${reset}) Gemini ${dim}(~/.gemini)${reset}
|
||||
${cyan}10${reset}) Kilo ${dim}(~/.config/kilo)${reset}
|
||||
${cyan}11${reset}) OpenCode ${dim}(~/.config/opencode)${reset}
|
||||
${cyan}12${reset}) Trae ${dim}(~/.trae)${reset}
|
||||
${cyan}13${reset}) Windsurf ${dim}(~/.codeium/windsurf)${reset}
|
||||
${cyan}14${reset}) All
|
||||
${cyan}12${reset}) Qwen Code ${dim}(~/.qwen)${reset}
|
||||
${cyan}13${reset}) Trae ${dim}(~/.trae)${reset}
|
||||
${cyan}14${reset}) Windsurf ${dim}(~/.codeium/windsurf)${reset}
|
||||
${cyan}15${reset}) All
|
||||
|
||||
${dim}Select multiple: 1,2,6 or 1 2 6${reset}
|
||||
`);
|
||||
@@ -6318,7 +6422,7 @@ function promptRuntime(callback) {
|
||||
const input = answer.trim() || '1';
|
||||
|
||||
// "All" shortcut
|
||||
if (input === '14') {
|
||||
if (input === '15') {
|
||||
callback(allRuntimes);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: gsd:execute-phase
|
||||
description: Execute all plans in a phase with wave-based parallelization
|
||||
argument-hint: "<phase-number> [--wave N] [--gaps-only] [--interactive]"
|
||||
argument-hint: "<phase-number> [--wave N] [--gaps-only] [--interactive] [--tdd]"
|
||||
allowed-tools:
|
||||
- Read
|
||||
- Write
|
||||
|
||||
22
commands/gsd/extract_learnings.md
Normal file
22
commands/gsd/extract_learnings.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: gsd:extract-learnings
|
||||
description: Extract decisions, lessons, patterns, and surprises from completed phase artifacts
|
||||
argument-hint: <phase-number>
|
||||
allowed-tools:
|
||||
- Read
|
||||
- Write
|
||||
- Bash
|
||||
- Grep
|
||||
- Glob
|
||||
- Agent
|
||||
type: prompt
|
||||
---
|
||||
<objective>
|
||||
Extract structured learnings from completed phase artifacts (PLAN.md, SUMMARY.md, VERIFICATION.md, UAT.md, STATE.md) into a LEARNINGS.md file that captures decisions, lessons learned, patterns discovered, and surprises encountered.
|
||||
</objective>
|
||||
|
||||
<execution_context>
|
||||
@~/.claude/get-shit-done/workflows/extract_learnings.md
|
||||
</execution_context>
|
||||
|
||||
Execute the extract-learnings workflow from @~/.claude/get-shit-done/workflows/extract_learnings.md end-to-end.
|
||||
45
commands/gsd/from-gsd2.md
Normal file
45
commands/gsd/from-gsd2.md
Normal file
@@ -0,0 +1,45 @@
|
||||
---
|
||||
name: gsd:from-gsd2
|
||||
description: Import a GSD-2 (.gsd/) project back to GSD v1 (.planning/) format
|
||||
argument-hint: "[--path <dir>] [--force]"
|
||||
allowed-tools:
|
||||
- Read
|
||||
- Write
|
||||
- Bash
|
||||
type: prompt
|
||||
---
|
||||
|
||||
<objective>
|
||||
Reverse-migrate a GSD-2 project (`.gsd/` directory) back to GSD v1 (`.planning/`) format.
|
||||
|
||||
Maps the GSD-2 hierarchy (Milestone → Slice → Task) to the GSD v1 hierarchy (Milestone sections in ROADMAP.md → Phase → Plan), preserving completion state, research files, and summaries.
|
||||
</objective>
|
||||
|
||||
<process>
|
||||
|
||||
1. **Locate the .gsd/ directory** — check the current working directory (or `--path` argument):
|
||||
```bash
|
||||
node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" from-gsd2 --dry-run
|
||||
```
|
||||
If no `.gsd/` is found, report the error and stop.
|
||||
|
||||
2. **Show the dry-run preview** — present the full file list and migration statistics to the user. Ask for confirmation before writing anything.
|
||||
|
||||
3. **Run the migration** after confirmation:
|
||||
```bash
|
||||
node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" from-gsd2
|
||||
```
|
||||
Use `--force` if `.planning/` already exists and the user has confirmed overwrite.
|
||||
|
||||
4. **Report the result** — show the `filesWritten` count, `planningDir` path, and the preview summary.
|
||||
|
||||
</process>
|
||||
|
||||
<notes>
|
||||
- The migration is non-destructive: `.gsd/` is never modified or removed.
|
||||
- Pass `--path <dir>` to migrate a project at a different path than the current directory.
|
||||
- Slices are numbered sequentially across all milestones (M001/S01 → phase 01, M001/S02 → phase 02, M002/S01 → phase 03, etc.).
|
||||
- Tasks within each slice become plans (T01 → plan 01, T02 → plan 02, etc.).
|
||||
- Completed slices and tasks carry their done state into ROADMAP.md checkboxes and SUMMARY.md files.
|
||||
- GSD-2 cost/token ledger, database state, and VS Code extension state cannot be migrated.
|
||||
</notes>
|
||||
@@ -14,7 +14,9 @@ No arguments needed — reads STATE.md, ROADMAP.md, and phase directories to det
|
||||
|
||||
Designed for rapid multi-project workflows where remembering which phase/step you're on is overhead.
|
||||
|
||||
Supports `--force` flag to bypass safety gates (checkpoint, error state, verification failures).
|
||||
Supports `--force` flag to bypass safety gates (checkpoint, error state, verification failures, and prior-phase completeness scan).
|
||||
|
||||
Before routing to the next step, scans all prior phases for incomplete work: plans that ran without producing summaries, verification failures without overrides, and phases where discussion happened but planning never ran. When incomplete work is found, shows a structured report and offers three options: defer the gaps to the backlog and continue, stop and resolve manually, or force advance without recording. When prior phases are clean, routes silently with no interruption.
|
||||
</objective>
|
||||
|
||||
<execution_context>
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: gsd:plan-phase
|
||||
description: Create detailed phase plan (PLAN.md) with verification loop
|
||||
argument-hint: "[phase] [--auto] [--research] [--skip-research] [--gaps] [--skip-verify] [--prd <file>] [--reviews] [--text]"
|
||||
argument-hint: "[phase] [--auto] [--research] [--skip-research] [--gaps] [--skip-verify] [--prd <file>] [--reviews] [--text] [--tdd]"
|
||||
agent: gsd-planner
|
||||
allowed-tools:
|
||||
- Read
|
||||
|
||||
@@ -21,6 +21,7 @@ node gsd-tools.cjs <command> [args] [--raw] [--cwd <path>]
|
||||
|------|-------------|
|
||||
| `--raw` | Machine-readable output (JSON or plain text, no formatting) |
|
||||
| `--cwd <path>` | Override working directory (for sandboxed subagents) |
|
||||
| `--ws <name>` | Target a specific workstream context (SDK only) |
|
||||
|
||||
---
|
||||
|
||||
@@ -275,6 +276,10 @@ node gsd-tools.cjs init todos [area]
|
||||
node gsd-tools.cjs init milestone-op
|
||||
node gsd-tools.cjs init map-codebase
|
||||
node gsd-tools.cjs init progress
|
||||
|
||||
# Workstream-scoped init (SDK --ws flag)
|
||||
node gsd-tools.cjs init execute-phase <phase> --ws <name>
|
||||
node gsd-tools.cjs init plan-phase <phase> --ws <name>
|
||||
```
|
||||
|
||||
**Large payload handling:** When output exceeds ~50KB, the CLI writes to a temp file and returns `@file:/tmp/gsd-init-XXXXX.json`. Workflows check for the `@file:` prefix and read from disk:
|
||||
@@ -299,6 +304,22 @@ node gsd-tools.cjs requirements mark-complete <ids>
|
||||
|
||||
---
|
||||
|
||||
## Skill Manifest
|
||||
|
||||
Pre-compute and cache skill discovery for faster command loading.
|
||||
|
||||
```bash
|
||||
# Generate skill manifest (writes to .claude/skill-manifest.json)
|
||||
node gsd-tools.cjs skill-manifest
|
||||
|
||||
# Generate with custom output path
|
||||
node gsd-tools.cjs skill-manifest --output <path>
|
||||
```
|
||||
|
||||
Returns JSON mapping of all available GSD skills with their metadata (name, description, file path, argument hints). Used by the installer and session-start hooks to avoid repeated filesystem scans.
|
||||
|
||||
---
|
||||
|
||||
## Utility Commands
|
||||
|
||||
```bash
|
||||
|
||||
@@ -151,6 +151,8 @@ Research, plan, and verify a phase.
|
||||
| `--prd <file>` | Use a PRD file instead of discuss-phase for context |
|
||||
| `--reviews` | Replan with cross-AI review feedback from REVIEWS.md |
|
||||
| `--validate` | Run state validation before planning begins |
|
||||
| `--bounce` | Run external plan bounce validation after planning (uses `workflow.plan_bounce_script`) |
|
||||
| `--skip-bounce` | Skip plan bounce even if enabled in config |
|
||||
|
||||
**Prerequisites:** `.planning/ROADMAP.md` exists
|
||||
**Produces:** `{phase}-RESEARCH.md`, `{phase}-{N}-PLAN.md`, `{phase}-VALIDATION.md`
|
||||
@@ -160,6 +162,7 @@ Research, plan, and verify a phase.
|
||||
/gsd-plan-phase 3 --skip-research # Plan without research (familiar domain)
|
||||
/gsd-plan-phase --auto # Non-interactive planning
|
||||
/gsd-plan-phase 2 --validate # Validate state before planning
|
||||
/gsd-plan-phase 1 --bounce # Plan + external bounce validation
|
||||
```
|
||||
|
||||
---
|
||||
@@ -173,6 +176,8 @@ Execute all plans in a phase with wave-based parallelization, or run a specific
|
||||
| `N` | **Yes** | Phase number to execute |
|
||||
| `--wave N` | No | Execute only Wave `N` in the phase |
|
||||
| `--validate` | No | Run state validation before execution begins |
|
||||
| `--cross-ai` | No | Delegate execution to an external AI CLI (uses `workflow.cross_ai_command`) |
|
||||
| `--no-cross-ai` | No | Force local execution even if cross-AI is enabled in config |
|
||||
|
||||
**Prerequisites:** Phase has PLAN.md files
|
||||
**Produces:** per-plan `{phase}-{N}-SUMMARY.md`, git commits, and `{phase}-VERIFICATION.md` when the phase is fully complete
|
||||
@@ -181,6 +186,7 @@ Execute all plans in a phase with wave-based parallelization, or run a specific
|
||||
/gsd-execute-phase 1 # Execute phase 1
|
||||
/gsd-execute-phase 1 --wave 2 # Execute only Wave 2
|
||||
/gsd-execute-phase 1 --validate # Validate state before execution
|
||||
/gsd-execute-phase 2 --cross-ai # Delegate phase 2 to external AI CLI
|
||||
```
|
||||
|
||||
---
|
||||
@@ -593,6 +599,31 @@ Ingest an external plan file into the GSD planning system with conflict detectio
|
||||
|
||||
---
|
||||
|
||||
### `/gsd-from-gsd2`
|
||||
|
||||
Reverse migration from GSD-2 format (`.gsd/` with Milestone→Slice→Task hierarchy) back to v1 `.planning/` format.
|
||||
|
||||
| Flag | Required | Description |
|
||||
|------|----------|-------------|
|
||||
| `--dry-run` | No | Preview what would be migrated without writing anything |
|
||||
| `--force` | No | Overwrite existing `.planning/` directory |
|
||||
| `--path <dir>` | No | Specify GSD-2 root directory (defaults to current directory) |
|
||||
|
||||
**Flattening:** Milestone→Slice hierarchy is flattened to sequential phase numbers (M001/S01→phase 01, M001/S02→phase 02, M002/S01→phase 03, etc.).
|
||||
|
||||
**Produces:** `PROJECT.md`, `REQUIREMENTS.md`, `ROADMAP.md`, `STATE.md`, and sequential phase directories in `.planning/`.
|
||||
|
||||
**Safety:** Guards against overwriting an existing `.planning/` directory without `--force`.
|
||||
|
||||
```bash
|
||||
/gsd-from-gsd2 # Migrate .gsd/ in current directory
|
||||
/gsd-from-gsd2 --dry-run # Preview migration without writing
|
||||
/gsd-from-gsd2 --force # Overwrite existing .planning/
|
||||
/gsd-from-gsd2 --path /path/to/gsd2-project # Specify GSD-2 root
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `/gsd-quick`
|
||||
|
||||
Execute ad-hoc task with GSD guarantees.
|
||||
@@ -785,6 +816,36 @@ Post-mortem investigation of failed or stuck GSD workflows.
|
||||
|
||||
---
|
||||
|
||||
### `/gsd-extract-learnings`
|
||||
|
||||
Extract reusable patterns, anti-patterns, and architectural decisions from completed phase work.
|
||||
|
||||
| Argument | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `N` | **Yes** | Phase number to extract learnings from |
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--all` | Extract learnings from all completed phases |
|
||||
| `--format` | Output format: `markdown` (default), `json` |
|
||||
|
||||
**Prerequisites:** Phase has been executed (SUMMARY.md files exist)
|
||||
**Produces:** `.planning/learnings/{phase}-LEARNINGS.md`
|
||||
|
||||
**Extracts:**
|
||||
- Architectural decisions and their rationale
|
||||
- Patterns that worked well (reusable in future phases)
|
||||
- Anti-patterns encountered and how they were resolved
|
||||
- Technology-specific insights
|
||||
- Performance and testing observations
|
||||
|
||||
```bash
|
||||
/gsd-extract-learnings 3 # Extract learnings from phase 3
|
||||
/gsd-extract-learnings --all # Extract from all completed phases
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Workstream Management
|
||||
|
||||
### `/gsd-workstreams`
|
||||
@@ -900,6 +961,37 @@ Query, inspect, or refresh queryable codebase intelligence files stored in `.pla
|
||||
|
||||
---
|
||||
|
||||
## AI Integration Commands
|
||||
|
||||
### `/gsd-ai-integration-phase`
|
||||
|
||||
AI framework selection wizard for integrating AI/LLM capabilities into a project phase. Presents an interactive decision matrix, surfaces domain-specific failure modes and eval criteria, and produces `AI-SPEC.md` with a framework recommendation, implementation guidance, and evaluation strategy.
|
||||
|
||||
**Produces:** `{phase}-AI-SPEC.md` in the phase directory
|
||||
|
||||
**Spawns:** 3 parallel specialist agents: domain-researcher, framework-selector, ai-researcher, and eval-planner
|
||||
|
||||
```bash
|
||||
/gsd-ai-integration-phase # Wizard for the current phase
|
||||
/gsd-ai-integration-phase 3 # Wizard for a specific phase
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `/gsd-eval-review`
|
||||
|
||||
Retroactive audit of an implemented AI phase's evaluation coverage. Checks implementation against the `AI-SPEC.md` evaluation plan produced by `/gsd-ai-integration-phase`. Scores each eval dimension as COVERED/PARTIAL/MISSING.
|
||||
|
||||
**Prerequisites:** Phase has been executed and has an `AI-SPEC.md`
|
||||
**Produces:** `{phase}-EVAL-REVIEW.md` with findings, gaps, and remediation guidance
|
||||
|
||||
```bash
|
||||
/gsd-eval-review # Audit current phase
|
||||
/gsd-eval-review 3 # Audit a specific phase
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Update Commands
|
||||
|
||||
### `/gsd-update`
|
||||
|
||||
@@ -34,10 +34,18 @@ GSD stores project settings in `.planning/config.json`. Created during `/gsd-new
|
||||
"research_before_questions": false,
|
||||
"discuss_mode": "discuss",
|
||||
"skip_discuss": false,
|
||||
"tdd_mode": false,
|
||||
"text_mode": false,
|
||||
"use_worktrees": true,
|
||||
"code_review": true,
|
||||
"code_review_depth": "standard"
|
||||
"code_review_depth": "standard",
|
||||
"plan_bounce": false,
|
||||
"plan_bounce_script": null,
|
||||
"plan_bounce_passes": 2,
|
||||
"code_review_command": null,
|
||||
"cross_ai_execution": false,
|
||||
"cross_ai_command": null,
|
||||
"cross_ai_timeout": 300
|
||||
},
|
||||
"hooks": {
|
||||
"context_warnings": true,
|
||||
@@ -86,7 +94,8 @@ GSD stores project settings in `.planning/config.json`. Created during `/gsd-new
|
||||
},
|
||||
"intel": {
|
||||
"enabled": false
|
||||
}
|
||||
},
|
||||
"claude_md_path": null
|
||||
}
|
||||
```
|
||||
|
||||
@@ -102,6 +111,7 @@ GSD stores project settings in `.planning/config.json`. Created during `/gsd-new
|
||||
| `project_code` | string | any short string | (none) | Prefix for phase directory names (e.g., `"ABC"` produces `ABC-01-setup/`). Added in v1.31 |
|
||||
| `response_language` | string | language code | (none) | Language for agent responses (e.g., `"pt"`, `"ko"`, `"ja"`). Propagates to all spawned agents for cross-phase language consistency. Added in v1.32 |
|
||||
| `context_profile` | string | `dev`, `research`, `review` | (none) | Execution context preset that applies a pre-configured bundle of mode, model, and workflow settings for the current type of work. Added in v1.34 |
|
||||
| `claude_md_path` | string | any file path | (none) | Custom output path for the generated CLAUDE.md file. Useful for monorepos or projects that need CLAUDE.md in a non-root location. When set, GSD writes its CLAUDE.md content to this path instead of the project root. Added in v1.36 |
|
||||
|
||||
> **Note:** `granularity` was renamed from `depth` in v1.22.3. Existing configs are auto-migrated.
|
||||
|
||||
@@ -129,6 +139,14 @@ All workflow toggles follow the **absent = enabled** pattern. If a key is missin
|
||||
| `workflow.use_worktrees` | boolean | `true` | When `false`, disables git worktree isolation for parallel execution. Users who prefer sequential execution or whose environment does not support worktrees can disable this. Added in v1.31 |
|
||||
| `workflow.code_review` | boolean | `true` | Enable `/gsd-code-review` and `/gsd-code-review-fix` commands. When `false`, the commands exit with a configuration gate message. Added in v1.34 |
|
||||
| `workflow.code_review_depth` | string | `standard` | Default review depth for `/gsd-code-review`: `quick` (pattern-matching only), `standard` (per-file analysis), or `deep` (cross-file with import graphs). Can be overridden per-run with `--depth=`. Added in v1.34 |
|
||||
| `workflow.plan_bounce` | boolean | `false` | Run external validation script against generated plans. When enabled, the plan-phase orchestrator pipes each PLAN.md through the script specified by `plan_bounce_script` and blocks on non-zero exit. Added in v1.36 |
|
||||
| `workflow.plan_bounce_script` | string | (none) | Path to the external script invoked for plan bounce validation. Receives the PLAN.md path as its first argument. Required when `plan_bounce` is `true`. Added in v1.36 |
|
||||
| `workflow.plan_bounce_passes` | number | `2` | Number of sequential bounce passes to run. Each pass feeds the previous pass's output back into the validator. Higher values increase rigor at the cost of latency. Added in v1.36 |
|
||||
| `workflow.code_review_command` | string | (none) | Shell command for external code review integration in `/gsd-ship`. Receives changed file paths via stdin. Non-zero exit blocks the ship workflow. Added in v1.36 |
|
||||
| `workflow.tdd_mode` | boolean | `false` | Enable TDD pipeline as a first-class execution mode. When `true`, the planner aggressively applies `type: tdd` to eligible tasks (business logic, APIs, validations, algorithms) and the executor enforces RED/GREEN/REFACTOR gate sequence. An end-of-phase collaborative review checkpoint verifies gate compliance. Added in v1.37 |
|
||||
| `workflow.cross_ai_execution` | boolean | `false` | Delegate phase execution to an external AI CLI instead of spawning local executor agents. Useful for leveraging a different model's strengths for specific phases. Added in v1.36 |
|
||||
| `workflow.cross_ai_command` | string | (none) | Shell command template for cross-AI execution. Receives the phase prompt via stdin. Must produce SUMMARY.md-compatible output. Required when `cross_ai_execution` is `true`. Added in v1.36 |
|
||||
| `workflow.cross_ai_timeout` | number | `300` | Timeout in seconds for cross-AI execution commands. Prevents runaway external processes. Added in v1.36 |
|
||||
|
||||
### Recommended Presets
|
||||
|
||||
@@ -360,6 +378,36 @@ Settings for the security enforcement feature (v1.31). All follow the **absent =
|
||||
|
||||
---
|
||||
|
||||
## Review Settings
|
||||
|
||||
Configure per-CLI model selection for `/gsd-review`. When set, overrides the CLI's default model for that reviewer.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `review.models.gemini` | string | (CLI default) | Model used when `--gemini` reviewer is invoked |
|
||||
| `review.models.claude` | string | (CLI default) | Model used when `--claude` reviewer is invoked |
|
||||
| `review.models.codex` | string | (CLI default) | Model used when `--codex` reviewer is invoked |
|
||||
| `review.models.opencode` | string | (CLI default) | Model used when `--opencode` reviewer is invoked |
|
||||
| `review.models.qwen` | string | (CLI default) | Model used when `--qwen` reviewer is invoked |
|
||||
| `review.models.cursor` | string | (CLI default) | Model used when `--cursor` reviewer is invoked |
|
||||
|
||||
### Example
|
||||
|
||||
```json
|
||||
{
|
||||
"review": {
|
||||
"models": {
|
||||
"gemini": "gemini-2.5-pro",
|
||||
"qwen": "qwen-max"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Falls back to each CLI's configured default when a key is absent. Added in v1.35.0 (#1849).
|
||||
|
||||
---
|
||||
|
||||
## Manager Passthrough Flags
|
||||
|
||||
Configure per-step flags that `/gsd-manager` appends to each dispatched command. This allows customizing how the manager runs discuss, plan, and execute steps without manual flag entry.
|
||||
|
||||
256
docs/FEATURES.md
256
docs/FEATURES.md
@@ -102,6 +102,20 @@
|
||||
- [Hard Stop Safety Gates in /gsd-next](#101-hard-stop-safety-gates-in-gsd-next)
|
||||
- [Adaptive Model Preset](#102-adaptive-model-preset)
|
||||
- [Post-Merge Hunk Verification](#103-post-merge-hunk-verification)
|
||||
- [v1.35.0 Features](#v1350-features)
|
||||
- [New Runtime Support (Cline, CodeBuddy, Qwen Code)](#104-new-runtime-support-cline-codebuddy-qwen-code)
|
||||
- [GSD-2 Reverse Migration](#105-gsd-2-reverse-migration)
|
||||
- [AI Integration Phase Wizard](#106-ai-integration-phase-wizard)
|
||||
- [AI Eval Review](#107-ai-eval-review)
|
||||
- [v1.36.0 Features](#v1360-features)
|
||||
- [Plan Bounce](#108-plan-bounce)
|
||||
- [External Code Review Command](#109-external-code-review-command)
|
||||
- [Cross-AI Execution Delegation](#110-cross-ai-execution-delegation)
|
||||
- [Architectural Responsibility Mapping](#111-architectural-responsibility-mapping)
|
||||
- [Extract Learnings](#112-extract-learnings)
|
||||
- [SDK Workstream Support](#113-sdk-workstream-support)
|
||||
- [Context-Window-Aware Prompt Thinning](#114-context-window-aware-prompt-thinning)
|
||||
- [Configurable CLAUDE.md Path](#115-configurable-claudemd-path)
|
||||
- [v1.32 Features](#v132-features)
|
||||
- [STATE.md Consistency Gates](#69-statemd-consistency-gates)
|
||||
- [Autonomous `--to N` Flag](#70-autonomous---to-n-flag)
|
||||
@@ -917,7 +931,7 @@ fix(03-01): correct auth token expiry
|
||||
**Purpose:** Run GSD across multiple AI coding agent runtimes.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-RUNTIME-01: System MUST support Claude Code, OpenCode, Gemini CLI, Kilo, Codex, Copilot, Antigravity, Trae, Cline, Augment Code
|
||||
- REQ-RUNTIME-01: System MUST support Claude Code, OpenCode, Gemini CLI, Kilo, Codex, Copilot, Antigravity, Trae, Cline, Augment Code, CodeBuddy, Qwen Code
|
||||
- REQ-RUNTIME-02: Installer MUST transform content per runtime (tool names, paths, frontmatter)
|
||||
- REQ-RUNTIME-03: Installer MUST support interactive and non-interactive (`--claude --global`) modes
|
||||
- REQ-RUNTIME-04: Installer MUST support both global and local installation
|
||||
@@ -926,12 +940,12 @@ fix(03-01): correct auth token expiry
|
||||
|
||||
**Runtime Transformations:**
|
||||
|
||||
| Aspect | Claude Code | OpenCode | Gemini | Kilo | Codex | Copilot | Antigravity | Trae | Cline | Augment |
|
||||
|--------|------------|----------|--------|-------|-------|---------|-------------|------|-------|---------|
|
||||
| Commands | Slash commands | Slash commands | Slash commands | Slash commands | Skills (TOML) | Slash commands | Skills | Skills | Rules | Skills |
|
||||
| Agent format | Claude native | `mode: subagent` | Claude native | `mode: subagent` | Skills | Tool mapping | Skills | Skills | Rules | Skills |
|
||||
| Hook events | `PostToolUse` | N/A | `AfterTool` | N/A | N/A | N/A | N/A | N/A | N/A | N/A |
|
||||
| Config | `settings.json` | `opencode.json(c)` | `settings.json` | `kilo.json(c)` | TOML | Instructions | Config | Config | Config | Config |
|
||||
| Aspect | Claude Code | OpenCode | Gemini | Kilo | Codex | Copilot | Antigravity | Trae | Cline | Augment | CodeBuddy | Qwen Code |
|
||||
|--------|------------|----------|--------|-------|-------|---------|-------------|------|-------|---------|-----------|-----------|
|
||||
| Commands | Slash commands | Slash commands | Slash commands | Slash commands | Skills (TOML) | Slash commands | Skills | Skills | Rules | Skills | Skills | Skills |
|
||||
| Agent format | Claude native | `mode: subagent` | Claude native | `mode: subagent` | Skills | Tool mapping | Skills | Skills | Rules | Skills | Skills | Skills |
|
||||
| Hook events | `PostToolUse` | N/A | `AfterTool` | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A |
|
||||
| Config | `settings.json` | `opencode.json(c)` | `settings.json` | `kilo.json(c)` | TOML | Instructions | Config | Config | `.clinerules` | Config | Config | Config |
|
||||
|
||||
---
|
||||
|
||||
@@ -2179,3 +2193,231 @@ Test suite that scans all agent, workflow, and command files for embedded inject
|
||||
- REQ-PATCH-VERIFY-01: Reapply-patches MUST verify each hunk was applied after the merge
|
||||
- REQ-PATCH-VERIFY-02: Dropped or partial hunks MUST be reported to the user with file and line context
|
||||
- REQ-PATCH-VERIFY-03: Verification MUST run after all patches are applied, not per-patch
|
||||
|
||||
---
|
||||
|
||||
## v1.35.0 Features
|
||||
|
||||
- [New Runtime Support (Cline, CodeBuddy, Qwen Code)](#104-new-runtime-support-cline-codebuddy-qwen-code)
|
||||
- [GSD-2 Reverse Migration](#105-gsd-2-reverse-migration)
|
||||
- [AI Integration Phase Wizard](#106-ai-integration-phase-wizard)
|
||||
- [AI Eval Review](#107-ai-eval-review)
|
||||
|
||||
---
|
||||
|
||||
### 104. New Runtime Support (Cline, CodeBuddy, Qwen Code)
|
||||
|
||||
**Part of:** `npx get-shit-done-cc`
|
||||
|
||||
**Purpose:** Extend GSD installation to Cline, CodeBuddy, and Qwen Code runtimes.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-CLINE-02: Cline install MUST write `.clinerules` to `~/.cline/` (global) or `./.cline/` (local). No custom slash commands — rules-based integration only. Flag: `--cline`.
|
||||
- REQ-CODEBUDDY-01: CodeBuddy install MUST deploy skills to `~/.codebuddy/skills/gsd-*/SKILL.md`. Flag: `--codebuddy`.
|
||||
- REQ-QWEN-01: Qwen Code install MUST deploy skills to `~/.qwen/skills/gsd-*/SKILL.md`, following the open standard used by Claude Code 2.1.88+. `QWEN_CONFIG_DIR` env var overrides the default path. Flag: `--qwen`.
|
||||
|
||||
**Runtime summary:**
|
||||
|
||||
| Runtime | Install Format | Config Path | Flag |
|
||||
|---------|---------------|-------------|------|
|
||||
| Cline | `.clinerules` | `~/.cline/` or `./.cline/` | `--cline` |
|
||||
| CodeBuddy | Skills (`SKILL.md`) | `~/.codebuddy/skills/` | `--codebuddy` |
|
||||
| Qwen Code | Skills (`SKILL.md`) | `~/.qwen/skills/` | `--qwen` |
|
||||
|
||||
---
|
||||
|
||||
### 105. GSD-2 Reverse Migration
|
||||
|
||||
**Command:** `/gsd-from-gsd2 [--dry-run] [--force] [--path <dir>]`
|
||||
|
||||
**Purpose:** Migrate a project from GSD-2 format (`.gsd/` directory with Milestone→Slice→Task hierarchy) back to the v1 `.planning/` format, restoring full compatibility with all GSD v1 commands.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-FROM-GSD2-01: Importer MUST read `.gsd/` from the specified or current directory
|
||||
- REQ-FROM-GSD2-02: Milestone→Slice hierarchy MUST be flattened to sequential phase numbers (M001/S01→phase 01, M001/S02→phase 02, M002/S01→phase 03, etc.)
|
||||
- REQ-FROM-GSD2-03: System MUST guard against overwriting an existing `.planning/` directory without `--force`
|
||||
- REQ-FROM-GSD2-04: `--dry-run` MUST preview all changes without writing any files
|
||||
- REQ-FROM-GSD2-05: Migration MUST produce `PROJECT.md`, `REQUIREMENTS.md`, `ROADMAP.md`, `STATE.md`, and sequential phase directories
|
||||
|
||||
**Flags:**
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--dry-run` | Preview migration output without writing files |
|
||||
| `--force` | Overwrite an existing `.planning/` directory |
|
||||
| `--path <dir>` | Specify the GSD-2 root directory |
|
||||
|
||||
---
|
||||
|
||||
### 106. AI Integration Phase Wizard
|
||||
|
||||
**Command:** `/gsd-ai-integration-phase [N]`
|
||||
|
||||
**Purpose:** Guide developers through selecting, integrating, and planning evaluation for AI/LLM capabilities in a project phase. Produces a structured `AI-SPEC.md` that feeds into planning and verification.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-AISPEC-01: Wizard MUST present an interactive decision matrix covering framework selection, model choice, and integration approach
|
||||
- REQ-AISPEC-02: System MUST surface domain-specific failure modes and eval criteria relevant to the project type
|
||||
- REQ-AISPEC-03: System MUST spawn 3 parallel specialist agents: domain-researcher, framework-selector, and eval-planner
|
||||
- REQ-AISPEC-04: Output MUST produce `{phase}-AI-SPEC.md` with framework recommendation, implementation guidance, and evaluation strategy
|
||||
|
||||
**Produces:** `{phase}-AI-SPEC.md` in the phase directory
|
||||
|
||||
---
|
||||
|
||||
### 107. AI Eval Review
|
||||
|
||||
**Command:** `/gsd-eval-review [N]`
|
||||
|
||||
**Purpose:** Retroactively audit an executed AI phase's evaluation coverage against the `AI-SPEC.md` plan. Identifies gaps between planned and implemented evaluation before the phase is closed.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-EVALREVIEW-01: Review MUST read `AI-SPEC.md` from the specified phase
|
||||
- REQ-EVALREVIEW-02: Each eval dimension MUST be scored as COVERED, PARTIAL, or MISSING
|
||||
- REQ-EVALREVIEW-03: Output MUST include findings, gap descriptions, and remediation guidance
|
||||
- REQ-EVALREVIEW-04: `EVAL-REVIEW.md` MUST be written to the phase directory
|
||||
|
||||
**Produces:** `{phase}-EVAL-REVIEW.md` with scored eval dimensions, gap analysis, and remediation steps
|
||||
|
||||
---
|
||||
|
||||
## v1.36.0 Features
|
||||
|
||||
### 108. Plan Bounce
|
||||
|
||||
**Command:** `/gsd-plan-phase N --bounce`
|
||||
|
||||
**Purpose:** After plans pass the checker, optionally refine them through an external script (a second AI, a linter, a custom validator). The bounce step backs up each plan, runs the script, validates YAML frontmatter integrity on the result, re-runs the plan checker, and restores the original if anything fails.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-BOUNCE-01: `--bounce` flag or `workflow.plan_bounce: true` activates the step; `--skip-bounce` always disables it
|
||||
- REQ-BOUNCE-02: `workflow.plan_bounce_script` must point to a valid executable; missing script produces a warning and skips
|
||||
- REQ-BOUNCE-03: Each plan is backed up to `*-PLAN.pre-bounce.md` before the script runs
|
||||
- REQ-BOUNCE-04: Bounced plans with broken YAML frontmatter or that fail the plan checker are restored from backup
|
||||
- REQ-BOUNCE-05: `workflow.plan_bounce_passes` (default: 2) controls how many refinement passes the script receives
|
||||
|
||||
**Configuration:** `workflow.plan_bounce`, `workflow.plan_bounce_script`, `workflow.plan_bounce_passes`
|
||||
|
||||
---
|
||||
|
||||
### 109. External Code Review Command
|
||||
|
||||
**Command:** `/gsd-ship` (enhanced)
|
||||
|
||||
**Purpose:** Before the manual review step in `/gsd-ship`, automatically run an external code review command if configured. The command receives the diff and phase context via stdin and returns a JSON verdict (`APPROVED` or `REVISE`). Falls through to the existing manual review flow regardless of outcome.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-EXTREVIEW-01: `workflow.code_review_command` must be set to a command string; null means skip
|
||||
- REQ-EXTREVIEW-02: Diff is generated against `BASE_BRANCH` with `--stat` summary included
|
||||
- REQ-EXTREVIEW-03: Review prompt is piped via stdin (never shell-interpolated)
|
||||
- REQ-EXTREVIEW-04: 120-second timeout; stderr captured on failure
|
||||
- REQ-EXTREVIEW-05: JSON output parsed for `verdict`, `confidence`, `summary`, `issues` fields
|
||||
|
||||
**Configuration:** `workflow.code_review_command`
|
||||
|
||||
---
|
||||
|
||||
### 110. Cross-AI Execution Delegation
|
||||
|
||||
**Command:** `/gsd-execute-phase N --cross-ai`
|
||||
|
||||
**Purpose:** Delegate individual plans to an external AI runtime for execution. Plans with `cross_ai: true` in their frontmatter (or all plans when `--cross-ai` is used) are sent to the configured command via stdin. Successfully handled plans are removed from the normal executor queue.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-CROSSAI-01: `--cross-ai` forces all plans through cross-AI; `--no-cross-ai` disables it
|
||||
- REQ-CROSSAI-02: `workflow.cross_ai_execution: true` and plan frontmatter `cross_ai: true` required for per-plan activation
|
||||
- REQ-CROSSAI-03: Task prompt is piped via stdin to prevent injection
|
||||
- REQ-CROSSAI-04: Dirty working tree produces a warning before execution
|
||||
- REQ-CROSSAI-05: On failure, user chooses: retry, skip (fall back to normal executor), or abort
|
||||
|
||||
**Configuration:** `workflow.cross_ai_execution`, `workflow.cross_ai_command`, `workflow.cross_ai_timeout`
|
||||
|
||||
---
|
||||
|
||||
### 111. Architectural Responsibility Mapping
|
||||
|
||||
**Command:** `/gsd-plan-phase` (enhanced research step)
|
||||
|
||||
**Purpose:** During phase research, the phase-researcher now maps each capability to its architectural tier owner (browser, frontend server, API, CDN/static, database). The planner cross-references tasks against this map, and the plan-checker enforces tier compliance as Dimension 7c.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-ARM-01: Phase researcher produces an Architectural Responsibility Map table in RESEARCH.md (Step 1.5)
|
||||
- REQ-ARM-02: Planner sanity-checks task-to-tier assignments against the map
|
||||
- REQ-ARM-03: Plan checker validates tier compliance as Dimension 7c (WARNING for general mismatches, BLOCKER for security-sensitive ones)
|
||||
|
||||
**Produces:** `## Architectural Responsibility Map` section in `{phase}-RESEARCH.md`
|
||||
|
||||
---
|
||||
|
||||
### 112. Extract Learnings
|
||||
|
||||
**Command:** `/gsd-extract-learnings N`
|
||||
|
||||
**Purpose:** Extract structured knowledge from completed phase artifacts. Reads PLAN.md and SUMMARY.md (required) plus VERIFICATION.md, UAT.md, and STATE.md (optional) to produce four categories of learnings: decisions, lessons, patterns, and surprises. Optionally captures each item to an external knowledge base via `capture_thought` tool.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-LEARN-01: Requires PLAN.md and SUMMARY.md; exits with clear error if missing
|
||||
- REQ-LEARN-02: Each extracted item includes source attribution (artifact and section)
|
||||
- REQ-LEARN-03: If `capture_thought` tool is available, captures items with `source`, `project`, and `phase` metadata
|
||||
- REQ-LEARN-04: If `capture_thought` is unavailable, completes successfully and logs that external capture was skipped
|
||||
- REQ-LEARN-05: Running twice overwrites the previous `LEARNINGS.md`
|
||||
|
||||
**Produces:** `{phase}-LEARNINGS.md` with YAML frontmatter (phase, project, counts per category, missing_artifacts)
|
||||
|
||||
---
|
||||
|
||||
### 113. SDK Workstream Support
|
||||
|
||||
**Command:** `gsd-sdk init @prd.md --ws my-workstream`
|
||||
|
||||
**Purpose:** Route all SDK `.planning/` paths to `.planning/workstreams/<name>/`, enabling multi-workstream projects without "Project already exists" errors. The `--ws` flag validates the workstream name and propagates to all subsystems (tools, config, context engine).
|
||||
|
||||
**Requirements:**
|
||||
- REQ-WS-01: `--ws <name>` routes all `.planning/` paths to `.planning/workstreams/<name>/`
|
||||
- REQ-WS-02: Without `--ws`, behavior is unchanged (flat mode)
|
||||
- REQ-WS-03: Name validated to alphanumeric, hyphens, underscores, and dots only
|
||||
- REQ-WS-04: Config resolves from workstream path first, falls back to root `.planning/config.json`
|
||||
|
||||
---
|
||||
|
||||
### 114. Context-Window-Aware Prompt Thinning
|
||||
|
||||
**Purpose:** Reduce static prompt overhead by ~40% for models with context windows under 200K tokens. Extended examples and anti-pattern lists are extracted from agent definitions into reference files loaded on demand via `@` required_reading.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-THIN-01: When `CONTEXT_WINDOW < 200000`, executor and planner agent prompts omit inline examples
|
||||
- REQ-THIN-02: Extracted content lives in `references/executor-examples.md` and `references/planner-antipatterns.md`
|
||||
- REQ-THIN-03: Standard (200K-500K) and enriched (500K+) tiers are unaffected
|
||||
- REQ-THIN-04: Core rules and decision logic remain inline; only verbose examples are extracted
|
||||
|
||||
**Reference files:** `executor-examples.md`, `planner-antipatterns.md`
|
||||
|
||||
---
|
||||
|
||||
### 115. Configurable CLAUDE.md Path
|
||||
|
||||
**Purpose:** Allow projects to store their CLAUDE.md in a non-root location. The `claude_md_path` config key controls where `/gsd-profile-user` and related commands write the generated CLAUDE.md file.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-CMDPATH-01: `claude_md_path` defaults to `./CLAUDE.md`
|
||||
- REQ-CMDPATH-02: Profile generation commands read the path from config and write to the specified location
|
||||
- REQ-CMDPATH-03: Relative paths are resolved from the project root
|
||||
|
||||
**Configuration:** `claude_md_path`
|
||||
|
||||
---
|
||||
|
||||
### 116. TDD Pipeline Mode
|
||||
|
||||
**Purpose:** Opt-in TDD (red-green-refactor) as a first-class phase execution mode. When enabled, the planner aggressively selects `type: tdd` for eligible tasks and the executor enforces RED/GREEN/REFACTOR gate sequence with fail-fast on unexpected GREEN before RED.
|
||||
|
||||
**Requirements:**
|
||||
- REQ-TDD-01: `workflow.tdd_mode` config key (boolean, default `false`)
|
||||
- REQ-TDD-02: When enabled, planner applies TDD heuristics from `references/tdd.md` to all eligible tasks (business logic, APIs, validations, algorithms, state machines)
|
||||
- REQ-TDD-03: Executor enforces gate sequence for `type: tdd` plans — RED commit (`test(...)`) must precede GREEN commit (`feat(...)`)
|
||||
- REQ-TDD-04: Executor fails fast if tests pass unexpectedly during RED phase (feature already exists or test is wrong)
|
||||
- REQ-TDD-05: End-of-phase collaborative review checkpoint verifies gate compliance across all TDD plans (advisory, non-blocking)
|
||||
- REQ-TDD-06: Gate violations surfaced in SUMMARY.md under `## TDD Gate Compliance` section
|
||||
|
||||
**Configuration:** `workflow.tdd_mode`
|
||||
**Reference files:** `tdd.md`, `checkpoints.md`
|
||||
|
||||
@@ -868,6 +868,40 @@ The installer auto-configures `resolve_model_ids: "omit"` for Gemini CLI, OpenCo
|
||||
|
||||
See the [Configuration Reference](CONFIGURATION.md#non-claude-runtimes-codex-opencode-gemini-cli-kilo) for the full explanation.
|
||||
|
||||
### Installing for Cline
|
||||
|
||||
Cline uses a rules-based integration — GSD installs as `.clinerules` rather than slash commands.
|
||||
|
||||
```bash
|
||||
# Global install (applies to all projects)
|
||||
npx get-shit-done-cc --cline --global
|
||||
|
||||
# Local install (this project only)
|
||||
npx get-shit-done-cc --cline --local
|
||||
```
|
||||
|
||||
Global installs write to `~/.cline/`. Local installs write to `./.cline/`. No custom slash commands are registered — GSD rules are loaded automatically by Cline from the rules file.
|
||||
|
||||
### Installing for CodeBuddy
|
||||
|
||||
CodeBuddy uses a skills-based integration.
|
||||
|
||||
```bash
|
||||
npx get-shit-done-cc --codebuddy --global
|
||||
```
|
||||
|
||||
Skills are installed to `~/.codebuddy/skills/gsd-*/SKILL.md`.
|
||||
|
||||
### Installing for Qwen Code
|
||||
|
||||
Qwen Code uses the same open skills standard as Claude Code 2.1.88+.
|
||||
|
||||
```bash
|
||||
npx get-shit-done-cc --qwen --global
|
||||
```
|
||||
|
||||
Skills are installed to `~/.qwen/skills/gsd-*/SKILL.md`. Use the `QWEN_CONFIG_DIR` environment variable to override the default install path.
|
||||
|
||||
### Using Claude Code with Non-Anthropic Providers (OpenRouter, Local)
|
||||
|
||||
If GSD subagents call Anthropic models and you're paying through OpenRouter or a local provider, switch to the `inherit` profile: `/gsd-set-profile inherit`. This makes all agents use your current session model instead of specific Anthropic models. See also `/gsd-settings` → Model Profile → Inherit.
|
||||
|
||||
@@ -1049,9 +1049,9 @@ fix(03-01): correct auth token expiry
|
||||
|
||||
### 42. クロス AI ピアレビュー
|
||||
|
||||
**コマンド:** `/gsd-review --phase N [--gemini] [--claude] [--codex] [--coderabbit] [--all]`
|
||||
**コマンド:** `/gsd-review --phase N [--gemini] [--claude] [--codex] [--coderabbit] [--opencode] [--qwen] [--cursor] [--all]`
|
||||
|
||||
**目的:** 外部の AI CLI(Gemini、Claude、Codex、CodeRabbit)を呼び出して、フェーズプランを独立してレビューします。レビュアーごとのフィードバックを含む構造化された REVIEWS.md を生成します。
|
||||
**目的:** 外部の AI CLI(Gemini、Claude、Codex、CodeRabbit、OpenCode、Qwen Code、Cursor)を呼び出して、フェーズプランを独立してレビューします。レビュアーごとのフィードバックを含む構造化された REVIEWS.md を生成します。
|
||||
|
||||
**要件:**
|
||||
- REQ-REVIEW-01: システムはシステム上で利用可能な AI CLI を検出しなければならない
|
||||
|
||||
@@ -1049,9 +1049,9 @@ fix(03-01): correct auth token expiry
|
||||
|
||||
### 42. Cross-AI Peer Review
|
||||
|
||||
**명령어:** `/gsd-review --phase N [--gemini] [--claude] [--codex] [--coderabbit] [--all]`
|
||||
**명령어:** `/gsd-review --phase N [--gemini] [--claude] [--codex] [--coderabbit] [--opencode] [--qwen] [--cursor] [--all]`
|
||||
|
||||
**목적:** 외부 AI CLI(Gemini, Claude, Codex, CodeRabbit)를 호출하여 페이즈 계획을 독립적으로 검토합니다. 검토자별 피드백이 담긴 구조화된 REVIEWS.md를 생성합니다.
|
||||
**목적:** 외부 AI CLI(Gemini, Claude, Codex, CodeRabbit, OpenCode, Qwen Code, Cursor)를 호출하여 페이즈 계획을 독립적으로 검토합니다. 검토자별 피드백이 담긴 구조화된 REVIEWS.md를 생성합니다.
|
||||
|
||||
**요구사항:**
|
||||
- REQ-REVIEW-01: 시스템에서 사용 가능한 AI CLI를 감지해야 합니다.
|
||||
|
||||
@@ -154,6 +154,10 @@
|
||||
* learnings copy Copy from current project's LEARNINGS.md
|
||||
* learnings prune --older-than <dur> Remove entries older than duration (e.g. 90d)
|
||||
* learnings delete <id> Delete a learning by ID
|
||||
*
|
||||
* GSD-2 Migration:
|
||||
* from-gsd2 [--path <dir>] [--force] [--dry-run]
|
||||
* Import a GSD-2 (.gsd/) project back to GSD v1 (.planning/) format
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
@@ -466,6 +470,9 @@ async function runCommand(command, args, cwd, raw, defaultValue) {
|
||||
} else if (subcommand === 'sync') {
|
||||
const { verify } = parseNamedArgs(args, [], ['verify']);
|
||||
state.cmdStateSync(cwd, { verify }, raw);
|
||||
} else if (subcommand === 'prune') {
|
||||
const { 'keep-recent': keepRecent, 'dry-run': dryRun } = parseNamedArgs(args, ['keep-recent'], ['dry-run']);
|
||||
state.cmdStatePrune(cwd, { keepRecent: keepRecent || '3', dryRun: !!dryRun }, raw);
|
||||
} else {
|
||||
state.cmdStateLoad(cwd, raw);
|
||||
}
|
||||
@@ -634,6 +641,11 @@ async function runCommand(command, args, cwd, raw, defaultValue) {
|
||||
break;
|
||||
}
|
||||
|
||||
case 'skill-manifest': {
|
||||
init.cmdSkillManifest(cwd, args, raw);
|
||||
break;
|
||||
}
|
||||
|
||||
case 'history-digest': {
|
||||
commands.cmdHistoryDigest(cwd, raw);
|
||||
break;
|
||||
@@ -795,13 +807,13 @@ async function runCommand(command, args, cwd, raw, defaultValue) {
|
||||
const workflow = args[1];
|
||||
switch (workflow) {
|
||||
case 'execute-phase': {
|
||||
const { validate: epValidate } = parseNamedArgs(args, [], ['validate']);
|
||||
init.cmdInitExecutePhase(cwd, args[2], raw, { validate: epValidate });
|
||||
const { validate: epValidate, tdd: epTdd } = parseNamedArgs(args, [], ['validate', 'tdd']);
|
||||
init.cmdInitExecutePhase(cwd, args[2], raw, { validate: epValidate, tdd: epTdd });
|
||||
break;
|
||||
}
|
||||
case 'plan-phase': {
|
||||
const { validate: ppValidate } = parseNamedArgs(args, [], ['validate']);
|
||||
init.cmdInitPlanPhase(cwd, args[2], raw, { validate: ppValidate });
|
||||
const { validate: ppValidate, tdd: ppTdd } = parseNamedArgs(args, [], ['validate', 'tdd']);
|
||||
init.cmdInitPlanPhase(cwd, args[2], raw, { validate: ppValidate, tdd: ppTdd });
|
||||
break;
|
||||
}
|
||||
case 'new-project':
|
||||
@@ -1070,6 +1082,14 @@ async function runCommand(command, args, cwd, raw, defaultValue) {
|
||||
break;
|
||||
}
|
||||
|
||||
// ─── GSD-2 Reverse Migration ───────────────────────────────────────────
|
||||
|
||||
case 'from-gsd2': {
|
||||
const gsd2Import = require('./lib/gsd2-import.cjs');
|
||||
gsd2Import.cmdFromGsd2(args.slice(1), cwd, raw);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
error(`Unknown command: ${command}`);
|
||||
}
|
||||
|
||||
@@ -17,17 +17,26 @@ const VALID_CONFIG_KEYS = new Set([
|
||||
'workflow.research', 'workflow.plan_check', 'workflow.verifier',
|
||||
'workflow.nyquist_validation', 'workflow.ai_integration_phase', 'workflow.ui_phase', 'workflow.ui_safety_gate',
|
||||
'workflow.auto_advance', 'workflow.node_repair', 'workflow.node_repair_budget',
|
||||
'workflow.tdd_mode',
|
||||
'workflow.text_mode',
|
||||
'workflow.research_before_questions',
|
||||
'workflow.discuss_mode',
|
||||
'workflow.skip_discuss',
|
||||
'workflow.auto_prune_state',
|
||||
'workflow._auto_chain_active',
|
||||
'workflow.use_worktrees',
|
||||
'workflow.code_review',
|
||||
'workflow.code_review_depth',
|
||||
'workflow.code_review_command',
|
||||
'workflow.pattern_mapper',
|
||||
'workflow.plan_bounce',
|
||||
'workflow.plan_bounce_script',
|
||||
'workflow.plan_bounce_passes',
|
||||
'git.branching_strategy', 'git.base_branch', 'git.phase_branch_template', 'git.milestone_branch_template', 'git.quick_branch_template',
|
||||
'planning.commit_docs', 'planning.search_gitignored',
|
||||
'workflow.cross_ai_execution', 'workflow.cross_ai_command', 'workflow.cross_ai_timeout',
|
||||
'workflow.subagent_timeout',
|
||||
'workflow.inline_plan_threshold',
|
||||
'hooks.context_warnings',
|
||||
'features.thinking_partner',
|
||||
'context',
|
||||
@@ -37,6 +46,7 @@ const VALID_CONFIG_KEYS = new Set([
|
||||
'manager.flags.discuss', 'manager.flags.plan', 'manager.flags.execute',
|
||||
'response_language',
|
||||
'intel.enabled',
|
||||
'claude_md_path',
|
||||
]);
|
||||
|
||||
/**
|
||||
@@ -64,6 +74,7 @@ const CONFIG_KEY_SUGGESTIONS = {
|
||||
'hooks.research_questions': 'workflow.research_before_questions',
|
||||
'workflow.research_questions': 'workflow.research_before_questions',
|
||||
'workflow.codereview': 'workflow.code_review',
|
||||
'workflow.review_command': 'workflow.code_review_command',
|
||||
'workflow.review': 'workflow.code_review',
|
||||
'workflow.code_review_level': 'workflow.code_review_depth',
|
||||
'workflow.review_depth': 'workflow.code_review_depth',
|
||||
@@ -148,12 +159,19 @@ function buildNewProjectConfig(userChoices) {
|
||||
ui_phase: true,
|
||||
ui_safety_gate: true,
|
||||
ai_integration_phase: true,
|
||||
tdd_mode: false,
|
||||
text_mode: false,
|
||||
research_before_questions: false,
|
||||
discuss_mode: 'discuss',
|
||||
skip_discuss: false,
|
||||
code_review: true,
|
||||
code_review_depth: 'standard',
|
||||
code_review_command: null,
|
||||
pattern_mapper: true,
|
||||
plan_bounce: false,
|
||||
plan_bounce_script: null,
|
||||
plan_bounce_passes: 2,
|
||||
auto_prune_state: false,
|
||||
},
|
||||
hooks: {
|
||||
context_warnings: true,
|
||||
@@ -161,6 +179,7 @@ function buildNewProjectConfig(userChoices) {
|
||||
project_code: null,
|
||||
phase_naming: 'sequential',
|
||||
agent_skills: {},
|
||||
claude_md_path: './CLAUDE.md',
|
||||
};
|
||||
|
||||
// Three-level deep merge: hardcoded <- userDefaults <- choices
|
||||
|
||||
@@ -159,14 +159,25 @@ function findProjectRoot(startDir) {
|
||||
* @param {number} opts.maxAgeMs - max age in ms before removal (default: 5 min)
|
||||
* @param {boolean} opts.dirsOnly - if true, only remove directories (default: false)
|
||||
*/
|
||||
/**
|
||||
* Dedicated GSD temp directory: path.join(os.tmpdir(), 'gsd').
|
||||
* Created on first use. Keeps GSD temp files isolated from the system
|
||||
* temp directory so reap scans only GSD files (#1975).
|
||||
*/
|
||||
const GSD_TEMP_DIR = path.join(require('os').tmpdir(), 'gsd');
|
||||
|
||||
// Create the dedicated GSD temp directory if it does not exist yet.
// `recursive: true` makes this idempotent — mkdirSync does not throw
// when the directory is already present.
function ensureGsdTempDir() {
  fs.mkdirSync(GSD_TEMP_DIR, { recursive: true });
}
|
||||
|
||||
function reapStaleTempFiles(prefix = 'gsd-', { maxAgeMs = 5 * 60 * 1000, dirsOnly = false } = {}) {
|
||||
try {
|
||||
const tmpDir = require('os').tmpdir();
|
||||
ensureGsdTempDir();
|
||||
const now = Date.now();
|
||||
const entries = fs.readdirSync(tmpDir);
|
||||
const entries = fs.readdirSync(GSD_TEMP_DIR);
|
||||
for (const entry of entries) {
|
||||
if (!entry.startsWith(prefix)) continue;
|
||||
const fullPath = path.join(tmpDir, entry);
|
||||
const fullPath = path.join(GSD_TEMP_DIR, entry);
|
||||
try {
|
||||
const stat = fs.statSync(fullPath);
|
||||
if (now - stat.mtimeMs > maxAgeMs) {
|
||||
@@ -195,7 +206,8 @@ function output(result, raw, rawValue) {
|
||||
// Write to tmpfile and output the path prefixed with @file: so callers can detect it.
|
||||
if (json.length > 50000) {
|
||||
reapStaleTempFiles();
|
||||
const tmpPath = path.join(require('os').tmpdir(), `gsd-${Date.now()}.json`);
|
||||
ensureGsdTempDir();
|
||||
const tmpPath = path.join(GSD_TEMP_DIR, `gsd-${Date.now()}.json`);
|
||||
fs.writeFileSync(tmpPath, json, 'utf-8');
|
||||
data = '@file:' + tmpPath;
|
||||
} else {
|
||||
@@ -313,7 +325,7 @@ function loadConfig(cwd) {
|
||||
// Section containers that hold nested sub-keys
|
||||
'git', 'workflow', 'planning', 'hooks', 'features',
|
||||
// Internal keys loadConfig reads but config-set doesn't expose
|
||||
'model_overrides', 'agent_skills', 'context_window', 'resolve_model_ids',
|
||||
'model_overrides', 'agent_skills', 'context_window', 'resolve_model_ids', 'claude_md_path',
|
||||
// Deprecated keys (still accepted for migration, not in config-set)
|
||||
'depth', 'multiRepo',
|
||||
]);
|
||||
@@ -363,6 +375,7 @@ function loadConfig(cwd) {
|
||||
brave_search: get('brave_search') ?? defaults.brave_search,
|
||||
firecrawl: get('firecrawl') ?? defaults.firecrawl,
|
||||
exa_search: get('exa_search') ?? defaults.exa_search,
|
||||
tdd_mode: get('tdd_mode', { section: 'workflow', field: 'tdd_mode' }) ?? false,
|
||||
text_mode: get('text_mode', { section: 'workflow', field: 'text_mode' }) ?? defaults.text_mode,
|
||||
sub_repos: get('sub_repos', { section: 'planning', field: 'sub_repos' }) ?? defaults.sub_repos,
|
||||
resolve_model_ids: get('resolve_model_ids') ?? defaults.resolve_model_ids,
|
||||
@@ -374,6 +387,7 @@ function loadConfig(cwd) {
|
||||
agent_skills: parsed.agent_skills || {},
|
||||
manager: parsed.manager || {},
|
||||
response_language: get('response_language') || null,
|
||||
claude_md_path: get('claude_md_path') || null,
|
||||
};
|
||||
} catch {
|
||||
// Fall back to ~/.gsd/defaults.json only for truly pre-project contexts (#1683)
|
||||
@@ -1578,6 +1592,7 @@ module.exports = {
|
||||
findProjectRoot,
|
||||
detectSubRepos,
|
||||
reapStaleTempFiles,
|
||||
GSD_TEMP_DIR,
|
||||
MODEL_ALIAS_MAP,
|
||||
CONFIG_DEFAULTS,
|
||||
planningDir,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { safeReadFile, normalizeMd, output, error } = require('./core.cjs');
|
||||
const { safeReadFile, normalizeMd, output, error, atomicWriteFileSync } = require('./core.cjs');
|
||||
|
||||
// ─── Parsing engine ───────────────────────────────────────────────────────────
|
||||
|
||||
@@ -42,11 +42,9 @@ function splitInlineArray(body) {
|
||||
|
||||
function extractFrontmatter(content) {
|
||||
const frontmatter = {};
|
||||
// Find ALL frontmatter blocks at the start of the file.
|
||||
// If multiple blocks exist (corruption from CRLF mismatch), use the LAST one
|
||||
// since it represents the most recent state sync.
|
||||
const allBlocks = [...content.matchAll(/(?:^|\n)\s*---\r?\n([\s\S]+?)\r?\n---/g)];
|
||||
const match = allBlocks.length > 0 ? allBlocks[allBlocks.length - 1] : null;
|
||||
// Match frontmatter only at byte 0 — a `---` block later in the document
|
||||
// body (YAML examples, horizontal rules) must never be treated as frontmatter.
|
||||
const match = content.match(/^---\r?\n([\s\S]+?)\r?\n---/);
|
||||
if (!match) return frontmatter;
|
||||
|
||||
const yaml = match[1];
|
||||
@@ -337,7 +335,7 @@ function cmdFrontmatterSet(cwd, filePath, field, value, raw) {
|
||||
try { parsedValue = JSON.parse(value); } catch { parsedValue = value; }
|
||||
fm[field] = parsedValue;
|
||||
const newContent = spliceFrontmatter(content, fm);
|
||||
fs.writeFileSync(fullPath, normalizeMd(newContent), 'utf-8');
|
||||
atomicWriteFileSync(fullPath, normalizeMd(newContent));
|
||||
output({ updated: true, field, value: parsedValue }, raw, 'true');
|
||||
}
|
||||
|
||||
@@ -351,7 +349,7 @@ function cmdFrontmatterMerge(cwd, filePath, data, raw) {
|
||||
try { mergeData = JSON.parse(data); } catch { error('Invalid JSON for --data'); return; }
|
||||
Object.assign(fm, mergeData);
|
||||
const newContent = spliceFrontmatter(content, fm);
|
||||
fs.writeFileSync(fullPath, normalizeMd(newContent), 'utf-8');
|
||||
atomicWriteFileSync(fullPath, normalizeMd(newContent));
|
||||
output({ merged: true, fields: Object.keys(mergeData) }, raw, 'true');
|
||||
}
|
||||
|
||||
|
||||
511
get-shit-done/bin/lib/gsd2-import.cjs
Normal file
511
get-shit-done/bin/lib/gsd2-import.cjs
Normal file
@@ -0,0 +1,511 @@
|
||||
'use strict';
|
||||
|
||||
/**
|
||||
* gsd2-import — Reverse migration from GSD-2 (.gsd/) to GSD v1 (.planning/)
|
||||
*
|
||||
* Reads a GSD-2 project directory structure and produces a complete
|
||||
* .planning/ artifact tree in GSD v1 format.
|
||||
*
|
||||
* GSD-2 hierarchy: Milestone → Slice → Task
|
||||
* GSD v1 hierarchy: Milestone (in ROADMAP.md) → Phase → Plan
|
||||
*
|
||||
* Mapping rules:
|
||||
* - Slices are numbered sequentially across all milestones (01, 02, …)
|
||||
* - Tasks within a slice become plans (01-01, 01-02, …)
|
||||
* - Completed slices ([x] in ROADMAP) → [x] phases in ROADMAP.md
|
||||
* - Tasks with a SUMMARY file → SUMMARY.md written
|
||||
* - Slice RESEARCH.md → phase XX-RESEARCH.md
|
||||
*/
|
||||
|
||||
const fs = require('node:fs');
|
||||
const path = require('node:path');
|
||||
|
||||
// ─── Utilities ──────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Read a file as UTF-8, returning null instead of throwing when the
 * file is missing or unreadable.
 *
 * @param {string} filePath - Path of the file to read.
 * @returns {string|null} File contents, or null on any read error.
 */
function readOptional(filePath) {
  let contents = null;
  try {
    contents = fs.readFileSync(filePath, 'utf8');
  } catch {
    // Absent optional artifacts are expected — signal with null.
  }
  return contents;
}
|
||||
|
||||
/**
 * Left-pad a value with zeros to a minimum width.
 *
 * @param {number|string} n - Value to pad.
 * @param {number} [width=2] - Minimum length of the result.
 * @returns {string} Zero-padded string (unchanged if already wide enough).
 */
function zeroPad(n, width = 2) {
  const text = `${n}`;
  return text.padStart(width, '0');
}
|
||||
|
||||
/**
 * Convert a human-readable title into a lowercase hyphen-separated slug.
 * Runs of non-alphanumeric characters collapse to a single hyphen, and
 * any leading/trailing hyphen is stripped.
 *
 * @param {string} title - Arbitrary title text.
 * @returns {string} Filename-safe slug (may be empty).
 */
function slugify(title) {
  const lowered = title.toLowerCase();
  const hyphenated = lowered.replace(/[^a-z0-9]+/g, '-');
  return hyphenated.replace(/^-|-$/g, '');
}
|
||||
|
||||
// ─── GSD-2 Parser ───────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Find the .gsd/ directory starting from a project root.
|
||||
* Returns the absolute path or null if not found.
|
||||
*/
|
||||
/**
 * Find the GSD-2 `.gsd/` directory for a project.
 *
 * Accepts either the `.gsd` directory itself or a project root that
 * contains one.
 *
 * @param {string} startPath - Project root, or the `.gsd` directory path.
 * @returns {string|null} Path to the `.gsd` directory, or null if not found.
 */
function findGsd2Root(startPath) {
  // statSync-in-try avoids the existsSync/statSync TOCTOU race, and also
  // rejects a plain FILE named `.gsd` — the original only checked
  // existence when startPath itself was named `.gsd`.
  const isDirectory = (p) => {
    try { return fs.statSync(p).isDirectory(); } catch { return false; }
  };

  if (path.basename(startPath) === '.gsd' && isDirectory(startPath)) {
    return startPath;
  }
  const candidate = path.join(startPath, '.gsd');
  return isDirectory(candidate) ? candidate : null;
}
|
||||
|
||||
/**
|
||||
* Parse the ## Slices section from a GSD-2 milestone ROADMAP.md.
|
||||
* Each slice entry looks like:
|
||||
* - [x] **S01: Title** `risk:medium` `depends:[S00]`
|
||||
*/
|
||||
/**
 * Parse the `## Slices` section from a GSD-2 milestone ROADMAP.md.
 *
 * Each slice entry looks like:
 *   - [x] **S01: Title** `risk:medium` `depends:[S00]`
 *
 * @param {string} content - Full ROADMAP.md text.
 * @returns {Array<{done: boolean, id: string, title: string}>} Slices in
 *   document order; empty when no `## Slices` section is found.
 */
function parseSlicesFromRoadmap(content) {
  const slices = [];
  // `\r?\n` tolerates CRLF files — the original required a bare LF after
  // the heading and silently returned no slices for Windows-authored
  // roadmaps (the rest of this codebase already matches `\r?\n`).
  const sectionMatch = content.match(/## Slices\r?\n([\s\S]*?)(?:\n## |\n# |$)/);
  if (!sectionMatch) return slices;

  for (const line of sectionMatch[1].split('\n')) {
    const m = line.match(/^- \[([x ])\]\s+\*\*(\w+):\s*([^*]+)\*\*/);
    if (!m) continue;
    slices.push({ done: m[1] === 'x', id: m[2].trim(), title: m[3].trim() });
  }
  return slices;
}
|
||||
|
||||
/**
|
||||
* Parse the milestone title from the first heading in a GSD-2 ROADMAP.md.
|
||||
* Format: # M001: Title
|
||||
*/
|
||||
/**
 * Extract the milestone title from the first heading of a GSD-2
 * ROADMAP.md. Heading format: `# M001: Title`.
 *
 * @param {string} content - ROADMAP.md text.
 * @returns {string|null} Trimmed title, or null when no heading matches.
 */
function parseMilestoneTitle(content) {
  const heading = /^# \w+:\s*(.+)/m.exec(content);
  if (heading === null) return null;
  return heading[1].trim();
}
|
||||
|
||||
/**
|
||||
* Parse a task title from a GSD-2 T##-PLAN.md.
|
||||
* Format: # T01: Title
|
||||
*/
|
||||
/**
 * Extract a task title from a GSD-2 T##-PLAN.md heading
 * (format: `# T01: Title`), falling back to a caller-supplied value.
 *
 * @param {string} content - Task plan text.
 * @param {string} fallback - Value returned when no heading matches.
 * @returns {string} Trimmed title or the fallback.
 */
function parseTaskTitle(content, fallback) {
  const heading = /^# \w+:\s*(.+)/m.exec(content);
  return heading === null ? fallback : heading[1].trim();
}
|
||||
|
||||
/**
 * Parse the ## Description body from a GSD-2 task plan.
 *
 * @param {string} content - Task plan text.
 * @returns {string} Trimmed description body, or '' when absent.
 */
function parseTaskDescription(content) {
  const body = content.match(/## Description\n+([\s\S]+?)(?:\n## |\n# |$)/)?.[1];
  return body ? body.trim() : '';
}
|
||||
|
||||
/**
 * Parse ## Must-Haves checklist items from a GSD-2 task plan.
 * Items look like `- [ ] text` or `- [x] text`; the checkbox state is
 * discarded — only the item text is returned.
 *
 * @param {string} content - Task plan text.
 * @returns {string[]} Trimmed item texts; empty when the section is absent.
 */
function parseTaskMustHaves(content) {
  const section = content.match(/## Must-Haves\n+([\s\S]+?)(?:\n## |\n# |$)/);
  if (!section) return [];

  const items = [];
  for (const line of section[1].split('\n')) {
    const item = line.match(/^- \[[ x]\]\s*(.+)/);
    if (item) items.push(item[1].trim());
  }
  return items;
}
|
||||
|
||||
/**
 * Read all task plan files from a GSD-2 tasks/ directory.
 *
 * Task plans are named `T##-PLAN.md`; a sibling `T##-SUMMARY.md` marks
 * the task as done. Returns an empty array when the directory is missing.
 *
 * @param {string} tasksDir - Path to the slice's tasks/ directory.
 * @returns {Array<{id: string, title: string, description: string,
 *   mustHaves: string[], plan: ?string, summary: ?string, done: boolean}>}
 *   One descriptor per plan file, sorted by filename.
 */
function readTasksDir(tasksDir) {
  if (!fs.existsSync(tasksDir)) return [];

  const planFiles = fs.readdirSync(tasksDir)
    .filter(name => name.endsWith('-PLAN.md'))
    .sort();

  return planFiles.map(planFile => {
    const taskId = planFile.replace('-PLAN.md', '');
    const planText = readOptional(path.join(tasksDir, planFile));
    const summaryText = readOptional(path.join(tasksDir, `${taskId}-SUMMARY.md`));
    return {
      id: taskId,
      title: planText ? parseTaskTitle(planText, taskId) : taskId,
      description: planText ? parseTaskDescription(planText) : '',
      mustHaves: planText ? parseTaskMustHaves(planText) : [],
      plan: planText,
      summary: summaryText,
      // A task counts as done once any summary file exists for it.
      done: Boolean(summaryText),
    };
  });
}
|
||||
|
||||
/**
 * Parse a complete GSD-2 .gsd/ directory into a structured representation.
 *
 * Reads PROJECT.md / REQUIREMENTS.md at the root, then every milestone
 * directory under milestones/, its ROADMAP, and each slice's files.
 *
 * @param {string} gsdDir - Path to the .gsd/ directory.
 * @returns {{projectContent: ?string, requirements: ?string,
 *   milestones: Array<object>}} Parsed project tree.
 */
function parseGsd2(gsdDir) {
  const result = {
    projectContent: readOptional(path.join(gsdDir, 'PROJECT.md')),
    requirements: readOptional(path.join(gsdDir, 'REQUIREMENTS.md')),
    milestones: [],
  };

  const milestonesBase = path.join(gsdDir, 'milestones');
  if (!fs.existsSync(milestonesBase)) return result;

  const milestoneIds = fs.readdirSync(milestonesBase)
    .filter(name => fs.statSync(path.join(milestonesBase, name)).isDirectory())
    .sort();

  for (const milestoneId of milestoneIds) {
    const milestoneDir = path.join(milestonesBase, milestoneId);
    const roadmap = readOptional(path.join(milestoneDir, `${milestoneId}-ROADMAP.md`));
    const slicesDir = path.join(milestoneDir, 'slices');

    // Slice identity/order comes from the ROADMAP checklist, not from
    // the slices/ directory listing.
    const sliceInfos = roadmap ? parseSlicesFromRoadmap(roadmap) : [];

    const slices = sliceInfos.map(info => {
      const sliceDir = path.join(slicesDir, info.id);
      const present = fs.existsSync(sliceDir);
      // Per-slice artifact reader; null when the slice dir is missing.
      const readSliceFile = suffix =>
        present ? readOptional(path.join(sliceDir, `${info.id}-${suffix}.md`)) : null;

      return {
        id: info.id,
        title: info.title,
        done: info.done,
        plan: readSliceFile('PLAN'),
        summary: readSliceFile('SUMMARY'),
        research: readSliceFile('RESEARCH'),
        context: readSliceFile('CONTEXT'),
        tasks: present ? readTasksDir(path.join(sliceDir, 'tasks')) : [],
      };
    });

    result.milestones.push({
      id: milestoneId,
      title: roadmap ? (parseMilestoneTitle(roadmap) ?? milestoneId) : milestoneId,
      research: readOptional(path.join(milestoneDir, `${milestoneId}-RESEARCH.md`)),
      slices,
    });
  }

  return result;
}
|
||||
|
||||
// ─── Artifact Builders ──────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Build a GSD v1 PLAN.md from a GSD-2 task.
 *
 * @param {{title: string, description: string, mustHaves: string[]}} task
 * @param {string} phasePrefix - Zero-padded phase number, e.g. "01".
 * @param {string} planPrefix - Zero-padded plan number within the phase.
 * @param {string} phaseSlug - Slugified phase title.
 * @param {string} milestoneTitle - Title of the owning milestone.
 * @returns {string} Complete PLAN.md text with trailing newline.
 */
function buildPlanMd(task, phasePrefix, planPrefix, phaseSlug, milestoneTitle) {
  const header = [
    '---',
    `phase: "${phasePrefix}"`,
    `plan: "${planPrefix}"`,
    'type: "implementation"',
    '---',
    '',
    '<objective>',
    task.title,
    '</objective>',
    '',
  ];

  const context = [
    '<context>',
    `Phase: ${phasePrefix} (${phaseSlug}) — Milestone: ${milestoneTitle}`,
  ];
  if (task.description) {
    context.push('', task.description);
  }
  context.push('</context>');

  // <must_haves> is emitted only when the task actually lists items.
  const mustHaves = task.mustHaves.length > 0
    ? ['', '<must_haves>', ...task.mustHaves.map(item => `- ${item}`), '</must_haves>']
    : [];

  return [...header, ...context, ...mustHaves].join('\n') + '\n';
}
|
||||
|
||||
/**
 * Build a GSD v1 SUMMARY.md from a GSD-2 task summary.
 * Strips the GSD-2 frontmatter block and preserves the body; a fallback
 * body is used when the summary is empty.
 *
 * @param {{summary: ?string}} task - Task whose summary text is migrated.
 * @param {string} phasePrefix - Zero-padded phase number.
 * @param {string} planPrefix - Zero-padded plan number.
 * @returns {string} Complete SUMMARY.md text.
 */
function buildSummaryMd(task, phasePrefix, planPrefix) {
  const source = task.summary || '';
  // Drop a leading GSD-2 frontmatter block (--- ... ---) when present.
  const stripped = source.match(/^---[\s\S]*?---\n+([\s\S]*)$/);
  const body = (stripped ? stripped[1] : source).trim();

  const lines = [
    '---',
    `phase: "${phasePrefix}"`,
    `plan: "${planPrefix}"`,
    '---',
    '',
    body || 'Task completed (migrated from GSD-2).',
    '',
  ];
  return lines.join('\n');
}
|
||||
|
||||
/**
 * Build a GSD v1 XX-CONTEXT.md from a GSD-2 slice.
 * Any non-blank GSD-2 CONTEXT text is appended after the migration banner.
 *
 * @param {{id: string, title: string, context: ?string}} slice
 * @param {string} phasePrefix - Zero-padded phase number.
 * @returns {string} Complete CONTEXT.md text with trailing newline.
 */
function buildContextMd(slice, phasePrefix) {
  const parts = [
    `# Phase ${phasePrefix} Context`,
    '',
    `Migrated from GSD-2 slice ${slice.id}: ${slice.title}`,
  ];

  const extraText = (slice.context || '').trim();
  if (extraText) {
    parts.push('', extraText);
  }

  return parts.join('\n') + '\n';
}
|
||||
|
||||
/**
 * Build the GSD v1 ROADMAP.md with milestone-sectioned format.
 * Each milestone becomes an H2 section listing its phases as checkboxes.
 *
 * @param {Array<{id: string, title: string}>} milestones - Parsed milestones.
 * @param {Array<{milestoneId: string, slice: object, phaseNum: number}>} phaseMap
 *   Flattened slice→phase mapping (see buildPlanningArtifacts).
 * @returns {string} Complete ROADMAP.md text.
 */
function buildRoadmapMd(milestones, phaseMap) {
  const out = ['# Roadmap', ''];

  for (const milestone of milestones) {
    out.push(`## ${milestone.id}: ${milestone.title}`, '');
    for (const entry of phaseMap) {
      if (entry.milestoneId !== milestone.id) continue;
      const prefix = zeroPad(entry.phaseNum);
      const slug = slugify(entry.slice.title);
      const marker = entry.slice.done ? 'x' : ' ';
      out.push(`- [${marker}] **Phase ${prefix}: ${slug}** — ${entry.slice.title}`);
    }
    out.push('');
  }

  return out.join('\n');
}
|
||||
|
||||
/**
 * Build the GSD v1 STATE.md reflecting the current position in the project.
 * The "current" phase is the first not-done entry in phaseMap; when all
 * phases are done, the last phase number is shown with slug "complete".
 *
 * @param {Array<{slice: object, phaseNum: number}>} phaseMap - Flattened
 *   slice→phase mapping in execution order.
 * @returns {string} Complete STATE.md text.
 */
function buildStateMd(phaseMap) {
  const total = phaseMap.length;
  const done = phaseMap.filter(entry => entry.slice.done).length;
  const active = phaseMap.find(entry => !entry.slice.done);

  const pct = total > 0 ? Math.round((done / total) * 100) : 0;
  const phaseNumText = active ? zeroPad(active.phaseNum) : zeroPad(total);
  const slug = active ? slugify(active.slice.title) : 'complete';
  const statusText = active ? 'Ready to plan' : 'All phases complete';

  // Ten-cell progress bar, one cell per 10%.
  const filledCells = Math.round(pct / 10);
  const progressBar = `[${'█'.repeat(filledCells)}${'░'.repeat(10 - filledCells)}]`;
  const today = new Date().toISOString().split('T')[0];

  return [
    '# Project State',
    '',
    '## Project Reference',
    '',
    'See: .planning/PROJECT.md',
    '',
    `**Current focus:** Phase ${phaseNumText} (${slug})`,
    '',
    '## Current Position',
    '',
    `Phase: ${phaseNumText} of ${zeroPad(total)} (${slug})`,
    `Status: ${statusText}`,
    `Last activity: ${today} — Migrated from GSD-2`,
    '',
    `Progress: ${progressBar} ${pct}%`,
    '',
    '## Accumulated Context',
    '',
    '### Decisions',
    '',
    'Migrated from GSD-2. Review PROJECT.md for key decisions.',
    '',
    '### Blockers/Concerns',
    '',
    'None.',
    '',
    '## Session Continuity',
    '',
    `Last session: ${today}`,
    'Stopped at: Migration from GSD-2 completed',
    'Resume file: None',
    '',
  ].join('\n');
}
|
||||
|
||||
// ─── Transformer ─────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Convert parsed GSD-2 data into a map of relative path → file content.
 * All paths are relative to the .planning/ root.
 *
 * Milestones → Slices are flattened into sequential v1 phases; each
 * slice's tasks become numbered plans within its phase.
 *
 * @param {{projectContent: ?string, requirements: ?string,
 *   milestones: Array<object>}} gsd2Data - Output of parseGsd2().
 * @returns {Map<string, string>} Relative path → file content.
 */
function buildPlanningArtifacts(gsd2Data) {
  const artifacts = new Map();

  // Passthrough files
  artifacts.set('PROJECT.md', gsd2Data.projectContent || '# Project\n\n(Migrated from GSD-2)\n');
  if (gsd2Data.requirements) {
    artifacts.set('REQUIREMENTS.md', gsd2Data.requirements);
  }

  // Minimal valid v1 config
  artifacts.set('config.json', JSON.stringify({ version: 1 }, null, 2) + '\n');

  // Flatten Milestones → Slices into sequentially numbered phases.
  const phaseMap = [];
  for (const milestone of gsd2Data.milestones) {
    for (const slice of milestone.slices) {
      phaseMap.push({
        milestoneId: milestone.id,
        milestoneTitle: milestone.title,
        slice,
        phaseNum: phaseMap.length + 1,
      });
    }
  }

  artifacts.set('ROADMAP.md', buildRoadmapMd(gsd2Data.milestones, phaseMap));
  artifacts.set('STATE.md', buildStateMd(phaseMap));

  for (const entry of phaseMap) {
    const { slice, milestoneTitle } = entry;
    const prefix = zeroPad(entry.phaseNum);
    const slug = slugify(slice.title);
    const phaseDir = `phases/${prefix}-${slug}`;

    artifacts.set(`${phaseDir}/${prefix}-CONTEXT.md`, buildContextMd(slice, prefix));

    if (slice.research) {
      artifacts.set(`${phaseDir}/${prefix}-RESEARCH.md`, slice.research);
    }

    slice.tasks.forEach((task, taskIdx) => {
      const planPrefix = zeroPad(taskIdx + 1);

      artifacts.set(
        `${phaseDir}/${prefix}-${planPrefix}-PLAN.md`,
        buildPlanMd(task, prefix, planPrefix, slug, milestoneTitle)
      );

      // A SUMMARY is only migrated for tasks that actually completed.
      if (task.done && task.summary) {
        artifacts.set(
          `${phaseDir}/${prefix}-${planPrefix}-SUMMARY.md`,
          buildSummaryMd(task, prefix, planPrefix)
        );
      }
    });
  }

  return artifacts;
}
|
||||
|
||||
// ─── Preview ─────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Format a dry-run preview string for display before writing.
 * Lists every artifact path plus milestone/slice/task counts and the
 * items that cannot be migrated automatically.
 *
 * @param {{milestones: Array<object>}} gsd2Data - Output of parseGsd2().
 * @param {Map<string, string>} artifacts - Output of buildPlanningArtifacts().
 * @returns {string} Multi-line preview text.
 */
function buildPreview(gsd2Data, artifacts) {
  const out = ['Preview — files that will be created in .planning/:'];
  for (const relPath of artifacts.keys()) {
    out.push(`  ${relPath}`);
  }

  let totalSlices = 0;
  let doneSlices = 0;
  const allTasks = [];
  for (const milestone of gsd2Data.milestones) {
    for (const slice of milestone.slices) {
      totalSlices += 1;
      if (slice.done) doneSlices += 1;
      allTasks.push(...slice.tasks);
    }
  }
  const doneTasks = allTasks.filter(task => task.done).length;

  out.push(
    '',
    `Milestones: ${gsd2Data.milestones.length}`,
    `Phases (slices): ${totalSlices} (${doneSlices} completed)`,
    `Plans (tasks): ${allTasks.length} (${doneTasks} completed)`,
    '',
    'Cannot migrate automatically:',
    ' - GSD-2 cost/token ledger (no v1 equivalent)',
    ' - GSD-2 database state (rebuilt from files on first /gsd-health)',
    ' - VS Code extension state',
  );

  return out.join('\n');
}
|
||||
|
||||
// ─── Writer ───────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Write all artifacts to the .planning/ directory, creating intermediate
 * directories as needed. Existing files at the same paths are overwritten.
 *
 * @param {Map<string, string>} artifacts - Relative path → file content.
 * @param {string} planningRoot - Absolute path of the .planning/ directory.
 */
function writePlanningDir(artifacts, planningRoot) {
  for (const [relPath, content] of artifacts.entries()) {
    const target = path.join(planningRoot, relPath);
    fs.mkdirSync(path.dirname(target), { recursive: true });
    fs.writeFileSync(target, content, 'utf8');
  }
}
|
||||
|
||||
// ─── Command Handler ──────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Entry point called from gsd-tools.cjs.
 *
 * Migrates a GSD-2 .gsd/ directory to a GSD v1 .planning/ directory.
 * Supported flags:
 *   --force       Overwrite an existing .planning/ directory.
 *   --dry-run     Show the preview without writing anything.
 *   --path <dir>  Project root to search (defaults to cwd).
 *
 * Fixes vs. previous version: removed the unused `error` binding from
 * the core.cjs require, and `--path` no longer consumes a following
 * flag as its value (e.g. `--path --force` previously resolved the
 * literal string "--force" as a directory).
 *
 * @param {string[]} args - CLI arguments after the command name.
 * @param {string} cwd - Current working directory.
 * @param {boolean} raw - Raw-output mode flag passed through to output().
 * @returns {*} Whatever output() returns.
 */
function cmdFromGsd2(args, cwd, raw) {
  const { output } = require('./core.cjs');

  const force = args.includes('--force');
  const dryRun = args.includes('--dry-run');

  // --path takes the next argument as its value, but only when that
  // argument is a real value rather than another flag.
  const pathIdx = args.indexOf('--path');
  const pathArg = pathIdx >= 0 ? args[pathIdx + 1] : undefined;
  const projectDir = pathArg && !pathArg.startsWith('--')
    ? path.resolve(cwd, pathArg)
    : cwd;

  const gsdDir = findGsd2Root(projectDir);
  if (!gsdDir) {
    return output({ success: false, error: `No .gsd/ directory found in ${projectDir}` }, raw);
  }

  // .planning/ lives next to .gsd/, never inside it.
  const planningRoot = path.join(path.dirname(gsdDir), '.planning');
  if (fs.existsSync(planningRoot) && !force) {
    return output({
      success: false,
      error: `.planning/ already exists at ${planningRoot}. Pass --force to overwrite.`,
    }, raw);
  }

  const gsd2Data = parseGsd2(gsdDir);
  const artifacts = buildPlanningArtifacts(gsd2Data);
  const preview = buildPreview(gsd2Data, artifacts);

  if (dryRun) {
    return output({ success: true, dryRun: true, preview }, raw);
  }

  writePlanningDir(artifacts, planningRoot);

  return output({
    success: true,
    planningDir: planningRoot,
    filesWritten: artifacts.size,
    milestones: gsd2Data.milestones.length,
    preview,
  }, raw);
}
|
||||
|
||||
// Public API. The first group is the migration surface used by callers;
// the second group is exported only so unit tests can exercise individual
// parsers and builders directly.
module.exports = {
  findGsd2Root,
  parseGsd2,
  buildPlanningArtifacts,
  buildPreview,
  writePlanningDir,
  cmdFromGsd2,
  // Exported for unit tests
  parseSlicesFromRoadmap,
  parseMilestoneTitle,
  parseTaskTitle,
  parseTaskDescription,
  parseTaskMustHaves,
  buildPlanMd,
  buildSummaryMd,
  buildContextMd,
  buildRoadmapMd,
  buildStateMd,
  slugify,
  zeroPad,
};
|
||||
@@ -88,6 +88,7 @@ function cmdInitExecutePhase(cwd, phase, raw, options = {}) {
|
||||
verifier_model: resolveModelInternal(cwd, 'gsd-verifier'),
|
||||
|
||||
// Config flags
|
||||
tdd_mode: options.tdd || config.tdd_mode || false,
|
||||
commit_docs: config.commit_docs,
|
||||
sub_repos: config.sub_repos,
|
||||
parallelization: config.parallelization,
|
||||
@@ -211,6 +212,7 @@ function cmdInitPlanPhase(cwd, phase, raw, options = {}) {
|
||||
checker_model: resolveModelInternal(cwd, 'gsd-plan-checker'),
|
||||
|
||||
// Workflow flags
|
||||
tdd_mode: options.tdd || config.tdd_mode || false,
|
||||
research_enabled: config.research,
|
||||
plan_checker_enabled: config.plan_checker,
|
||||
nyquist_validation_enabled: config.nyquist_validation,
|
||||
@@ -241,6 +243,9 @@ function cmdInitPlanPhase(cwd, phase, raw, options = {}) {
|
||||
state_path: toPosixPath(path.relative(cwd, path.join(planningDir(cwd), 'STATE.md'))),
|
||||
roadmap_path: toPosixPath(path.relative(cwd, path.join(planningDir(cwd), 'ROADMAP.md'))),
|
||||
requirements_path: toPosixPath(path.relative(cwd, path.join(planningDir(cwd), 'REQUIREMENTS.md'))),
|
||||
|
||||
// Pattern mapper output (null until PATTERNS.md exists in phase dir)
|
||||
patterns_path: null,
|
||||
};
|
||||
|
||||
if (phaseInfo?.directory) {
|
||||
@@ -268,6 +273,10 @@ function cmdInitPlanPhase(cwd, phase, raw, options = {}) {
|
||||
if (reviewsFile) {
|
||||
result.reviews_path = toPosixPath(path.join(phaseInfo.directory, reviewsFile));
|
||||
}
|
||||
const patternsFile = files.find(f => f.endsWith('-PATTERNS.md') || f === 'PATTERNS.md');
|
||||
if (patternsFile) {
|
||||
result.patterns_path = toPosixPath(path.join(phaseInfo.directory, patternsFile));
|
||||
}
|
||||
} catch { /* intentionally empty */ }
|
||||
}
|
||||
|
||||
@@ -1095,7 +1104,9 @@ function cmdInitManager(cwd, raw) {
|
||||
return true;
|
||||
});
|
||||
|
||||
const completedCount = phases.filter(p => p.disk_status === 'complete').length;
|
||||
// Exclude backlog phases (999.x) from completion accounting (#2129)
|
||||
const nonBacklogPhases = phases.filter(p => !/^999(?:\.|$)/.test(p.number));
|
||||
const completedCount = nonBacklogPhases.filter(p => p.disk_status === 'complete').length;
|
||||
|
||||
// Read manager flags from config (passthrough flags for each step)
|
||||
// Validate: flags must be CLI-safe (only --flags, alphanumeric, hyphens, spaces)
|
||||
@@ -1126,7 +1137,7 @@ function cmdInitManager(cwd, raw) {
|
||||
in_progress_count: phases.filter(p => ['partial', 'planned', 'discussed', 'researched'].includes(p.disk_status)).length,
|
||||
recommended_actions: filteredActions,
|
||||
waiting_signal: waitingSignal,
|
||||
all_complete: completedCount === phases.length && phases.length > 0,
|
||||
all_complete: completedCount === nonBacklogPhases.length && nonBacklogPhases.length > 0,
|
||||
project_exists: pathExistsInternal(cwd, '.planning/PROJECT.md'),
|
||||
roadmap_exists: true,
|
||||
state_exists: true,
|
||||
@@ -1456,6 +1467,8 @@ function cmdInitRemoveWorkspace(cwd, name, raw) {
|
||||
*/
|
||||
function buildAgentSkillsBlock(config, agentType, projectRoot) {
|
||||
const { validatePath } = require('./security.cjs');
|
||||
const os = require('os');
|
||||
const globalSkillsBase = path.join(os.homedir(), '.claude', 'skills');
|
||||
|
||||
if (!config || !config.agent_skills || !agentType) return '';
|
||||
|
||||
@@ -1470,6 +1483,37 @@ function buildAgentSkillsBlock(config, agentType, projectRoot) {
|
||||
for (const skillPath of skillPaths) {
|
||||
if (typeof skillPath !== 'string') continue;
|
||||
|
||||
// Support global: prefix for skills installed at ~/.claude/skills/ (#1992)
|
||||
if (skillPath.startsWith('global:')) {
|
||||
const skillName = skillPath.slice(7);
|
||||
// Explicit empty-name guard before regex for clearer error message
|
||||
if (!skillName) {
|
||||
process.stderr.write(`[agent-skills] WARNING: "global:" prefix with empty skill name — skipping\n`);
|
||||
continue;
|
||||
}
|
||||
// Sanitize: skill name must be alphanumeric, hyphens, or underscores only
|
||||
if (!/^[a-zA-Z0-9_-]+$/.test(skillName)) {
|
||||
process.stderr.write(`[agent-skills] WARNING: Invalid global skill name "${skillName}" — skipping\n`);
|
||||
continue;
|
||||
}
|
||||
const globalSkillDir = path.join(globalSkillsBase, skillName);
|
||||
const globalSkillMd = path.join(globalSkillDir, 'SKILL.md');
|
||||
if (!fs.existsSync(globalSkillMd)) {
|
||||
process.stderr.write(`[agent-skills] WARNING: Global skill not found at "~/.claude/skills/${skillName}/SKILL.md" — skipping\n`);
|
||||
continue;
|
||||
}
|
||||
// Symlink escape guard: validatePath resolves symlinks and enforces
|
||||
// containment within globalSkillsBase. Prevents a skill directory
|
||||
// symlinked to an arbitrary location from being injected (#1992).
|
||||
const pathCheck = validatePath(globalSkillMd, globalSkillsBase, { allowAbsolute: true });
|
||||
if (!pathCheck.safe) {
|
||||
process.stderr.write(`[agent-skills] WARNING: Global skill "${skillName}" failed path check (symlink escape?) — skipping\n`);
|
||||
continue;
|
||||
}
|
||||
validPaths.push({ ref: `${globalSkillDir}/SKILL.md`, display: `~/.claude/skills/${skillName}` });
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate path safety — must resolve within project root
|
||||
const pathCheck = validatePath(skillPath, projectRoot);
|
||||
if (!pathCheck.safe) {
|
||||
@@ -1484,12 +1528,12 @@ function buildAgentSkillsBlock(config, agentType, projectRoot) {
|
||||
continue;
|
||||
}
|
||||
|
||||
validPaths.push(skillPath);
|
||||
validPaths.push({ ref: `${skillPath}/SKILL.md`, display: skillPath });
|
||||
}
|
||||
|
||||
if (validPaths.length === 0) return '';
|
||||
|
||||
const lines = validPaths.map(p => `- @${p}/SKILL.md`).join('\n');
|
||||
const lines = validPaths.map(p => `- @${p.ref}`).join('\n');
|
||||
return `<agent_skills>\nRead these user-configured skills:\n${lines}\n</agent_skills>`;
|
||||
}
|
||||
|
||||
@@ -1513,6 +1557,105 @@ function cmdAgentSkills(cwd, agentType, raw) {
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
/**
 * Generate a skill manifest from a skills directory.
 *
 * Scans the given skills directory for subdirectories containing SKILL.md,
 * extracts frontmatter (name, description) and "TRIGGER when:" conditions
 * from the body text, and returns an array of skill descriptors sorted by
 * name for deterministic output.
 *
 * @param {string} skillsDir - Absolute path to the skills directory
 * @returns {Array<{name: string, description: string, triggers: string[], path: string}>}
 */
function buildSkillManifest(skillsDir) {
  const { extractFrontmatter } = require('./frontmatter.cjs');

  if (!fs.existsSync(skillsDir)) return [];

  let dirEntries;
  try {
    dirEntries = fs.readdirSync(skillsDir, { withFileTypes: true });
  } catch {
    return []; // unreadable directory — treat as empty, best-effort scan
  }

  const skills = [];
  for (const dirEntry of dirEntries) {
    if (!dirEntry.isDirectory()) continue;

    const skillFile = path.join(skillsDir, dirEntry.name, 'SKILL.md');
    if (!fs.existsSync(skillFile)) continue;

    let text;
    try {
      text = fs.readFileSync(skillFile, 'utf-8');
    } catch {
      continue; // unreadable SKILL.md — skip this entry
    }

    const fm = extractFrontmatter(text);

    // Collect "TRIGGER when: ..." lines from the body (after frontmatter).
    const triggers = [];
    const afterFrontmatter = text.match(/^---[\s\S]*?---\s*\n([\s\S]*)$/);
    if (afterFrontmatter) {
      const triggerLines = afterFrontmatter[1].match(/^TRIGGER\s+when:\s*(.+)$/gmi) || [];
      for (const line of triggerLines) {
        const cond = line.match(/^TRIGGER\s+when:\s*(.+)$/i);
        if (cond) triggers.push(cond[1].trim());
      }
    }

    skills.push({
      name: fm.name || dirEntry.name,
      description: fm.description || '',
      triggers,
      path: dirEntry.name,
    });
  }

  // Sort by name for deterministic output
  skills.sort((a, b) => a.name.localeCompare(b.name));
  return skills;
}
|
||||
|
||||
/**
 * Command: generate skill manifest JSON.
 *
 * Options:
 *   --skills-dir <path>  Path to skills directory (required; outputs []
 *                        when missing)
 *   --write              Also write to .planning/skill-manifest.json
 *
 * @param {string} cwd - Current working directory.
 * @param {string[]} args - CLI arguments after the command name.
 * @param {boolean} raw - Raw-output mode flag passed through to output().
 */
function cmdSkillManifest(cwd, args, raw) {
  const dirFlagIdx = args.indexOf('--skills-dir');
  const skillsDir = dirFlagIdx >= 0 ? (args[dirFlagIdx + 1] || null) : null;

  if (!skillsDir) {
    output([], raw);
    return;
  }

  const manifest = buildSkillManifest(skillsDir);

  // Optionally persist to .planning/skill-manifest.json (only when the
  // .planning/ directory already exists — never create it here).
  if (args.includes('--write')) {
    const planningDirPath = path.join(cwd, '.planning');
    if (fs.existsSync(planningDirPath)) {
      fs.writeFileSync(
        path.join(planningDirPath, 'skill-manifest.json'),
        JSON.stringify(manifest, null, 2),
        'utf-8'
      );
    }
  }

  output(manifest, raw);
}
|
||||
|
||||
module.exports = {
|
||||
cmdInitExecutePhase,
|
||||
cmdInitPlanPhase,
|
||||
@@ -1533,4 +1676,6 @@ module.exports = {
|
||||
detectChildRepos,
|
||||
buildAgentSkillsBlock,
|
||||
cmdAgentSkills,
|
||||
buildSkillManifest,
|
||||
cmdSkillManifest,
|
||||
};
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { escapeRegex, getMilestonePhaseFilter, extractOneLinerFromBody, normalizeMd, planningPaths, output, error } = require('./core.cjs');
|
||||
const { escapeRegex, getMilestonePhaseFilter, extractOneLinerFromBody, normalizeMd, planningPaths, output, error, atomicWriteFileSync } = require('./core.cjs');
|
||||
const { extractFrontmatter } = require('./frontmatter.cjs');
|
||||
const { writeStateMd, stateReplaceFieldWithFallback } = require('./state.cjs');
|
||||
|
||||
@@ -74,7 +74,7 @@ function cmdRequirementsMarkComplete(cwd, reqIdsRaw, raw) {
|
||||
}
|
||||
|
||||
if (updated.length > 0) {
|
||||
fs.writeFileSync(reqPath, reqContent, 'utf-8');
|
||||
atomicWriteFileSync(reqPath, reqContent);
|
||||
}
|
||||
|
||||
output({
|
||||
@@ -178,21 +178,21 @@ function cmdMilestoneComplete(cwd, version, options, raw) {
|
||||
const existing = fs.readFileSync(milestonesPath, 'utf-8');
|
||||
if (!existing.trim()) {
|
||||
// Empty file — treat like new
|
||||
fs.writeFileSync(milestonesPath, normalizeMd(`# Milestones\n\n${milestoneEntry}`), 'utf-8');
|
||||
atomicWriteFileSync(milestonesPath, normalizeMd(`# Milestones\n\n${milestoneEntry}`));
|
||||
} else {
|
||||
// Insert after the header line(s) for reverse chronological order (newest first)
|
||||
const headerMatch = existing.match(/^(#{1,3}\s+[^\n]*\n\n?)/);
|
||||
if (headerMatch) {
|
||||
const header = headerMatch[1];
|
||||
const rest = existing.slice(header.length);
|
||||
fs.writeFileSync(milestonesPath, normalizeMd(header + milestoneEntry + rest), 'utf-8');
|
||||
atomicWriteFileSync(milestonesPath, normalizeMd(header + milestoneEntry + rest));
|
||||
} else {
|
||||
// No recognizable header — prepend the entry
|
||||
fs.writeFileSync(milestonesPath, normalizeMd(milestoneEntry + existing), 'utf-8');
|
||||
atomicWriteFileSync(milestonesPath, normalizeMd(milestoneEntry + existing));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fs.writeFileSync(milestonesPath, normalizeMd(`# Milestones\n\n${milestoneEntry}`), 'utf-8');
|
||||
atomicWriteFileSync(milestonesPath, normalizeMd(`# Milestones\n\n${milestoneEntry}`));
|
||||
}
|
||||
|
||||
// Update STATE.md — use shared helpers that handle both **bold:** and plain Field: formats
|
||||
|
||||
@@ -19,6 +19,7 @@ const MODEL_PROFILES = {
|
||||
'gsd-plan-checker': { quality: 'sonnet', balanced: 'sonnet', budget: 'haiku', adaptive: 'haiku' },
|
||||
'gsd-integration-checker': { quality: 'sonnet', balanced: 'sonnet', budget: 'haiku', adaptive: 'haiku' },
|
||||
'gsd-nyquist-auditor': { quality: 'sonnet', balanced: 'sonnet', budget: 'haiku', adaptive: 'haiku' },
|
||||
'gsd-pattern-mapper': { quality: 'sonnet', balanced: 'sonnet', budget: 'haiku', adaptive: 'haiku' },
|
||||
'gsd-ui-researcher': { quality: 'opus', balanced: 'sonnet', budget: 'haiku', adaptive: 'sonnet' },
|
||||
'gsd-ui-checker': { quality: 'sonnet', balanced: 'sonnet', budget: 'haiku', adaptive: 'haiku' },
|
||||
'gsd-ui-auditor': { quality: 'sonnet', balanced: 'sonnet', budget: 'haiku', adaptive: 'haiku' },
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { escapeRegex, loadConfig, normalizePhaseName, comparePhaseNum, findPhaseInternal, getArchivedPhaseDirs, generateSlugInternal, getMilestonePhaseFilter, stripShippedMilestones, extractCurrentMilestone, replaceInCurrentMilestone, toPosixPath, planningDir, withPlanningLock, output, error, readSubdirectories, phaseTokenMatches } = require('./core.cjs');
|
||||
const { escapeRegex, loadConfig, normalizePhaseName, comparePhaseNum, findPhaseInternal, getArchivedPhaseDirs, generateSlugInternal, getMilestonePhaseFilter, stripShippedMilestones, extractCurrentMilestone, replaceInCurrentMilestone, toPosixPath, planningDir, withPlanningLock, output, error, readSubdirectories, phaseTokenMatches, atomicWriteFileSync } = require('./core.cjs');
|
||||
const { extractFrontmatter } = require('./frontmatter.cjs');
|
||||
const { writeStateMd, readModifyWriteStateMd, stateExtractField, stateReplaceField, stateReplaceFieldWithFallback, updatePerformanceMetricsSection } = require('./state.cjs');
|
||||
|
||||
@@ -392,7 +392,7 @@ function cmdPhaseAdd(cwd, description, raw, customId) {
|
||||
updatedContent = rawContent + phaseEntry;
|
||||
}
|
||||
|
||||
fs.writeFileSync(roadmapPath, updatedContent, 'utf-8');
|
||||
atomicWriteFileSync(roadmapPath, updatedContent);
|
||||
return { newPhaseId: _newPhaseId, dirName: _dirName };
|
||||
});
|
||||
|
||||
@@ -493,7 +493,7 @@ function cmdPhaseInsert(cwd, afterPhase, description, raw) {
|
||||
}
|
||||
|
||||
const updatedContent = rawContent.slice(0, insertIdx) + phaseEntry + rawContent.slice(insertIdx);
|
||||
fs.writeFileSync(roadmapPath, updatedContent, 'utf-8');
|
||||
atomicWriteFileSync(roadmapPath, updatedContent);
|
||||
return { decimalPhase: _decimalPhase, dirName: _dirName };
|
||||
});
|
||||
|
||||
@@ -607,7 +607,7 @@ function updateRoadmapAfterPhaseRemoval(roadmapPath, targetPhase, isDecimal, rem
|
||||
}
|
||||
}
|
||||
|
||||
fs.writeFileSync(roadmapPath, content, 'utf-8');
|
||||
atomicWriteFileSync(roadmapPath, content);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -783,7 +783,7 @@ function cmdPhaseComplete(cwd, phaseNum, raw) {
|
||||
roadmapContent = roadmapContent.replace(planCheckboxPattern, '$1x$2');
|
||||
}
|
||||
|
||||
fs.writeFileSync(roadmapPath, roadmapContent, 'utf-8');
|
||||
atomicWriteFileSync(roadmapPath, roadmapContent);
|
||||
|
||||
// Update REQUIREMENTS.md traceability for this phase's requirements
|
||||
const reqPath = path.join(planningDir(cwd), 'REQUIREMENTS.md');
|
||||
@@ -816,7 +816,7 @@ function cmdPhaseComplete(cwd, phaseNum, raw) {
|
||||
);
|
||||
}
|
||||
|
||||
fs.writeFileSync(reqPath, reqContent, 'utf-8');
|
||||
atomicWriteFileSync(reqPath, reqContent);
|
||||
requirementsUpdated = true;
|
||||
}
|
||||
}
|
||||
@@ -838,9 +838,11 @@ function cmdPhaseComplete(cwd, phaseNum, raw) {
|
||||
.sort((a, b) => comparePhaseNum(a, b));
|
||||
|
||||
// Find the next phase directory after current
|
||||
// Skip backlog phases (999.x) — they are parked ideas, not sequential work (#2129)
|
||||
for (const dir of dirs) {
|
||||
const dm = dir.match(/^(\d+[A-Z]?(?:\.\d+)*)-?(.*)/i);
|
||||
if (dm) {
|
||||
if (/^999(?:\.|$)/.test(dm[1])) continue;
|
||||
if (comparePhaseNum(dm[1], phaseNum) > 0) {
|
||||
nextPhaseNum = dm[1];
|
||||
nextPhaseName = dm[2] || null;
|
||||
@@ -937,6 +939,21 @@ function cmdPhaseComplete(cwd, phaseNum, raw) {
|
||||
}, cwd);
|
||||
}
|
||||
|
||||
// Auto-prune STATE.md on phase boundary when configured (#2087)
|
||||
let autoPruned = false;
|
||||
try {
|
||||
const configPath = path.join(planningDir(cwd), 'config.json');
|
||||
if (fs.existsSync(configPath)) {
|
||||
const rawConfig = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
|
||||
const autoPruneEnabled = rawConfig.workflow && rawConfig.workflow.auto_prune_state === true;
|
||||
if (autoPruneEnabled && fs.existsSync(statePath)) {
|
||||
const { cmdStatePrune } = require('./state.cjs');
|
||||
cmdStatePrune(cwd, { keepRecent: '3', dryRun: false, silent: true }, true);
|
||||
autoPruned = true;
|
||||
}
|
||||
}
|
||||
} catch { /* intentionally empty — auto-prune is best-effort */ }
|
||||
|
||||
const result = {
|
||||
completed_phase: phaseNum,
|
||||
phase_name: phaseInfo.phase_name,
|
||||
@@ -948,6 +965,7 @@ function cmdPhaseComplete(cwd, phaseNum, raw) {
|
||||
roadmap_updated: fs.existsSync(roadmapPath),
|
||||
state_updated: fs.existsSync(statePath),
|
||||
requirements_updated: requirementsUpdated,
|
||||
auto_pruned: autoPruned,
|
||||
warnings,
|
||||
has_warnings: warnings.length > 0,
|
||||
};
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const os = require('os');
|
||||
const { output, error, safeReadFile } = require('./core.cjs');
|
||||
const { output, error, safeReadFile, loadConfig } = require('./core.cjs');
|
||||
|
||||
// ─── Constants ────────────────────────────────────────────────────────────────
|
||||
|
||||
@@ -870,7 +870,13 @@ function cmdGenerateClaudeProfile(cwd, options, raw) {
|
||||
} else if (options.output) {
|
||||
targetPath = path.isAbsolute(options.output) ? options.output : path.join(cwd, options.output);
|
||||
} else {
|
||||
targetPath = path.join(cwd, 'CLAUDE.md');
|
||||
// Read claude_md_path from config, default to ./CLAUDE.md
|
||||
let configClaudeMdPath = './CLAUDE.md';
|
||||
try {
|
||||
const config = loadConfig(cwd);
|
||||
if (config.claude_md_path) configClaudeMdPath = config.claude_md_path;
|
||||
} catch { /* use default */ }
|
||||
targetPath = path.isAbsolute(configClaudeMdPath) ? configClaudeMdPath : path.join(cwd, configClaudeMdPath);
|
||||
}
|
||||
|
||||
let action;
|
||||
@@ -944,7 +950,13 @@ function cmdGenerateClaudeMd(cwd, options, raw) {
|
||||
|
||||
let outputPath = options.output;
|
||||
if (!outputPath) {
|
||||
outputPath = path.join(cwd, 'CLAUDE.md');
|
||||
// Read claude_md_path from config, default to ./CLAUDE.md
|
||||
let configClaudeMdPath = './CLAUDE.md';
|
||||
try {
|
||||
const config = loadConfig(cwd);
|
||||
if (config.claude_md_path) configClaudeMdPath = config.claude_md_path;
|
||||
} catch { /* use default */ }
|
||||
outputPath = path.isAbsolute(configClaudeMdPath) ? configClaudeMdPath : path.join(cwd, configClaudeMdPath);
|
||||
} else if (!path.isAbsolute(outputPath)) {
|
||||
outputPath = path.join(cwd, outputPath);
|
||||
}
|
||||
|
||||
@@ -7,6 +7,11 @@ const path = require('path');
|
||||
const { escapeRegex, loadConfig, getMilestoneInfo, getMilestonePhaseFilter, normalizeMd, planningDir, planningPaths, output, error, atomicWriteFileSync } = require('./core.cjs');
|
||||
const { extractFrontmatter, reconstructFrontmatter } = require('./frontmatter.cjs');
|
||||
|
||||
// Cache disk scan results from buildStateFrontmatter per cwd per process (#1967).
|
||||
// Avoids re-reading N+1 directories on every state write when the phase structure
|
||||
// hasn't changed within the same gsd-tools invocation.
|
||||
const _diskScanCache = new Map();
|
||||
|
||||
/** Shorthand — every state command needs this path */
|
||||
function getStatePath(cwd) {
|
||||
return planningPaths(cwd).state;
|
||||
@@ -737,28 +742,40 @@ function buildStateFrontmatter(bodyContent, cwd) {
|
||||
try {
|
||||
const phasesDir = planningPaths(cwd).phases;
|
||||
if (fs.existsSync(phasesDir)) {
|
||||
const isDirInMilestone = getMilestonePhaseFilter(cwd);
|
||||
const phaseDirs = fs.readdirSync(phasesDir, { withFileTypes: true })
|
||||
.filter(e => e.isDirectory()).map(e => e.name)
|
||||
.filter(isDirInMilestone);
|
||||
let diskTotalPlans = 0;
|
||||
let diskTotalSummaries = 0;
|
||||
let diskCompletedPhases = 0;
|
||||
// Use cached disk scan when available — avoids N+1 readdirSync calls
|
||||
// on repeated buildStateFrontmatter invocations within the same process (#1967)
|
||||
let cached = _diskScanCache.get(cwd);
|
||||
if (!cached) {
|
||||
const isDirInMilestone = getMilestonePhaseFilter(cwd);
|
||||
const phaseDirs = fs.readdirSync(phasesDir, { withFileTypes: true })
|
||||
.filter(e => e.isDirectory()).map(e => e.name)
|
||||
.filter(isDirInMilestone);
|
||||
let diskTotalPlans = 0;
|
||||
let diskTotalSummaries = 0;
|
||||
let diskCompletedPhases = 0;
|
||||
|
||||
for (const dir of phaseDirs) {
|
||||
const files = fs.readdirSync(path.join(phasesDir, dir));
|
||||
const plans = files.filter(f => f.match(/-PLAN\.md$/i)).length;
|
||||
const summaries = files.filter(f => f.match(/-SUMMARY\.md$/i)).length;
|
||||
diskTotalPlans += plans;
|
||||
diskTotalSummaries += summaries;
|
||||
if (plans > 0 && summaries >= plans) diskCompletedPhases++;
|
||||
for (const dir of phaseDirs) {
|
||||
const files = fs.readdirSync(path.join(phasesDir, dir));
|
||||
const plans = files.filter(f => f.match(/-PLAN\.md$/i)).length;
|
||||
const summaries = files.filter(f => f.match(/-SUMMARY\.md$/i)).length;
|
||||
diskTotalPlans += plans;
|
||||
diskTotalSummaries += summaries;
|
||||
if (plans > 0 && summaries >= plans) diskCompletedPhases++;
|
||||
}
|
||||
cached = {
|
||||
totalPhases: isDirInMilestone.phaseCount > 0
|
||||
? Math.max(phaseDirs.length, isDirInMilestone.phaseCount)
|
||||
: phaseDirs.length,
|
||||
completedPhases: diskCompletedPhases,
|
||||
totalPlans: diskTotalPlans,
|
||||
completedPlans: diskTotalSummaries,
|
||||
};
|
||||
_diskScanCache.set(cwd, cached);
|
||||
}
|
||||
totalPhases = isDirInMilestone.phaseCount > 0
|
||||
? Math.max(phaseDirs.length, isDirInMilestone.phaseCount)
|
||||
: phaseDirs.length;
|
||||
completedPhases = diskCompletedPhases;
|
||||
totalPlans = diskTotalPlans;
|
||||
completedPlans = diskTotalSummaries;
|
||||
totalPhases = cached.totalPhases;
|
||||
completedPhases = cached.completedPhases;
|
||||
totalPlans = cached.totalPlans;
|
||||
completedPlans = cached.completedPlans;
|
||||
}
|
||||
} catch { /* intentionally empty */ }
|
||||
}
|
||||
@@ -904,6 +921,10 @@ function releaseStateLock(lockPath) {
|
||||
* each other's changes (race condition with read-modify-write cycle).
|
||||
*/
|
||||
function writeStateMd(statePath, content, cwd) {
|
||||
// Invalidate disk scan cache before computing new frontmatter — the write
|
||||
// may create new PLAN/SUMMARY files that buildStateFrontmatter must see.
|
||||
// Safe for any calling pattern, not just short-lived CLI processes (#1967).
|
||||
if (cwd) _diskScanCache.delete(cwd);
|
||||
const synced = syncStateFrontmatter(content, cwd);
|
||||
const lockPath = acquireStateLock(statePath);
|
||||
try {
|
||||
@@ -1386,6 +1407,187 @@ function cmdStateSync(cwd, options, raw) {
|
||||
output({ synced: true, changes, dry_run: false }, raw);
|
||||
}
|
||||
|
||||
/**
|
||||
* Prune old entries from STATE.md sections that grow unboundedly (#1970).
|
||||
* Moves decisions, recently-completed summaries, and resolved blockers
|
||||
* older than keepRecent phases to STATE-ARCHIVE.md.
|
||||
*
|
||||
* Options:
|
||||
* keepRecent: number of recent phases to retain (default: 3)
|
||||
* dryRun: if true, return what would be pruned without modifying STATE.md
|
||||
*/
|
||||
function cmdStatePrune(cwd, options, raw) {
|
||||
const silent = !!options.silent;
|
||||
const emit = silent ? () => {} : (result, r, v) => output(result, r, v);
|
||||
const statePath = planningPaths(cwd).state;
|
||||
if (!fs.existsSync(statePath)) { emit({ error: 'STATE.md not found' }, raw); return; }
|
||||
|
||||
const keepRecent = parseInt(options.keepRecent, 10) || 3;
|
||||
const dryRun = !!options.dryRun;
|
||||
const currentPhaseRaw = stateExtractField(fs.readFileSync(statePath, 'utf-8'), 'Current Phase');
|
||||
const currentPhase = parseInt(currentPhaseRaw, 10) || 0;
|
||||
const cutoff = currentPhase - keepRecent;
|
||||
|
||||
if (cutoff <= 0) {
|
||||
emit({ pruned: false, reason: `Only ${currentPhase} phases — nothing to prune with --keep-recent ${keepRecent}` }, raw, 'false');
|
||||
return;
|
||||
}
|
||||
|
||||
const archivePath = path.join(path.dirname(statePath), 'STATE-ARCHIVE.md');
|
||||
const archived = [];
|
||||
|
||||
// Shared pruning logic applied to both dry-run and real passes.
|
||||
// Returns { newContent, archivedSections }.
|
||||
function prunePass(content) {
|
||||
const sections = [];
|
||||
|
||||
// Prune Decisions section: entries like "- [Phase N]: ..."
|
||||
const decisionPattern = /(###?\s*(?:Decisions|Decisions Made|Accumulated.*Decisions)\s*\n)([\s\S]*?)(?=\n###?|\n##[^#]|$)/i;
|
||||
const decMatch = content.match(decisionPattern);
|
||||
if (decMatch) {
|
||||
const lines = decMatch[2].split('\n');
|
||||
const keep = [];
|
||||
const archive = [];
|
||||
for (const line of lines) {
|
||||
const phaseMatch = line.match(/^\s*-\s*\[Phase\s+(\d+)/i);
|
||||
if (phaseMatch && parseInt(phaseMatch[1], 10) <= cutoff) {
|
||||
archive.push(line);
|
||||
} else {
|
||||
keep.push(line);
|
||||
}
|
||||
}
|
||||
if (archive.length > 0) {
|
||||
sections.push({ section: 'Decisions', count: archive.length, lines: archive });
|
||||
content = content.replace(decisionPattern, (_m, header) => `${header}${keep.join('\n')}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Prune Recently Completed section: entries mentioning phase numbers
|
||||
const recentPattern = /(###?\s*Recently Completed\s*\n)([\s\S]*?)(?=\n###?|\n##[^#]|$)/i;
|
||||
const recMatch = content.match(recentPattern);
|
||||
if (recMatch) {
|
||||
const lines = recMatch[2].split('\n');
|
||||
const keep = [];
|
||||
const archive = [];
|
||||
for (const line of lines) {
|
||||
const phaseMatch = line.match(/Phase\s+(\d+)/i);
|
||||
if (phaseMatch && parseInt(phaseMatch[1], 10) <= cutoff) {
|
||||
archive.push(line);
|
||||
} else {
|
||||
keep.push(line);
|
||||
}
|
||||
}
|
||||
if (archive.length > 0) {
|
||||
sections.push({ section: 'Recently Completed', count: archive.length, lines: archive });
|
||||
content = content.replace(recentPattern, (_m, header) => `${header}${keep.join('\n')}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Prune resolved blockers: lines marked as resolved (strikethrough ~~text~~
|
||||
// or "[RESOLVED]" prefix) with a phase reference older than cutoff
|
||||
const blockersPattern = /(###?\s*(?:Blockers|Blockers\/Concerns|Blockers\s*&\s*Concerns)\s*\n)([\s\S]*?)(?=\n###?|\n##[^#]|$)/i;
|
||||
const blockersMatch = content.match(blockersPattern);
|
||||
if (blockersMatch) {
|
||||
const lines = blockersMatch[2].split('\n');
|
||||
const keep = [];
|
||||
const archive = [];
|
||||
for (const line of lines) {
|
||||
const isResolved = /~~.*~~|\[RESOLVED\]/i.test(line);
|
||||
const phaseMatch = line.match(/Phase\s+(\d+)/i);
|
||||
if (isResolved && phaseMatch && parseInt(phaseMatch[1], 10) <= cutoff) {
|
||||
archive.push(line);
|
||||
} else {
|
||||
keep.push(line);
|
||||
}
|
||||
}
|
||||
if (archive.length > 0) {
|
||||
sections.push({ section: 'Blockers (resolved)', count: archive.length, lines: archive });
|
||||
content = content.replace(blockersPattern, (_m, header) => `${header}${keep.join('\n')}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Prune Performance Metrics table rows: keep only rows for phases > cutoff.
|
||||
// Preserves header rows (| Phase | ... and |---|...) and any prose around the table.
|
||||
const metricsPattern = /(###?\s*Performance Metrics\s*\n)([\s\S]*?)(?=\n###?|\n##[^#]|$)/i;
|
||||
const metricsMatch = content.match(metricsPattern);
|
||||
if (metricsMatch) {
|
||||
const sectionLines = metricsMatch[2].split('\n');
|
||||
const keep = [];
|
||||
const archive = [];
|
||||
for (const line of sectionLines) {
|
||||
// Table data row: starts with | followed by a number (phase)
|
||||
const tableRowMatch = line.match(/^\|\s*(\d+)\s*\|/);
|
||||
if (tableRowMatch) {
|
||||
const rowPhase = parseInt(tableRowMatch[1], 10);
|
||||
if (rowPhase <= cutoff) {
|
||||
archive.push(line);
|
||||
} else {
|
||||
keep.push(line);
|
||||
}
|
||||
} else {
|
||||
// Header row, separator row, or prose — always keep
|
||||
keep.push(line);
|
||||
}
|
||||
}
|
||||
if (archive.length > 0) {
|
||||
sections.push({ section: 'Performance Metrics', count: archive.length, lines: archive });
|
||||
content = content.replace(metricsPattern, (_m, header) => `${header}${keep.join('\n')}`);
|
||||
}
|
||||
}
|
||||
|
||||
return { newContent: content, archivedSections: sections };
|
||||
}
|
||||
|
||||
if (dryRun) {
|
||||
// Dry-run: compute what would be pruned without writing anything
|
||||
const content = fs.readFileSync(statePath, 'utf-8');
|
||||
const result = prunePass(content);
|
||||
const totalPruned = result.archivedSections.reduce((sum, s) => sum + s.count, 0);
|
||||
emit({
|
||||
pruned: false,
|
||||
dry_run: true,
|
||||
cutoff_phase: cutoff,
|
||||
keep_recent: keepRecent,
|
||||
sections: result.archivedSections.map(s => ({ section: s.section, entries_would_archive: s.count })),
|
||||
total_would_archive: totalPruned,
|
||||
note: totalPruned > 0 ? 'Run without --dry-run to actually prune' : 'Nothing to prune',
|
||||
}, raw, totalPruned > 0 ? 'true' : 'false');
|
||||
return;
|
||||
}
|
||||
|
||||
readModifyWriteStateMd(statePath, (content) => {
|
||||
const result = prunePass(content);
|
||||
archived.push(...result.archivedSections);
|
||||
return result.newContent;
|
||||
}, cwd);
|
||||
|
||||
// Write archived entries to STATE-ARCHIVE.md
|
||||
if (archived.length > 0) {
|
||||
const timestamp = new Date().toISOString().split('T')[0];
|
||||
let archiveContent = '';
|
||||
if (fs.existsSync(archivePath)) {
|
||||
archiveContent = fs.readFileSync(archivePath, 'utf-8');
|
||||
} else {
|
||||
archiveContent = '# STATE Archive\n\nPruned entries from STATE.md. Recoverable but no longer loaded into agent context.\n\n';
|
||||
}
|
||||
archiveContent += `## Pruned ${timestamp} (phases 1-${cutoff}, kept recent ${keepRecent})\n\n`;
|
||||
for (const section of archived) {
|
||||
archiveContent += `### ${section.section}\n\n${section.lines.join('\n')}\n\n`;
|
||||
}
|
||||
atomicWriteFileSync(archivePath, archiveContent);
|
||||
}
|
||||
|
||||
const totalPruned = archived.reduce((sum, s) => sum + s.count, 0);
|
||||
emit({
|
||||
pruned: totalPruned > 0,
|
||||
cutoff_phase: cutoff,
|
||||
keep_recent: keepRecent,
|
||||
sections: archived.map(s => ({ section: s.section, entries_archived: s.count })),
|
||||
total_archived: totalPruned,
|
||||
archive_file: totalPruned > 0 ? 'STATE-ARCHIVE.md' : null,
|
||||
}, raw, totalPruned > 0 ? 'true' : 'false');
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
stateExtractField,
|
||||
stateReplaceField,
|
||||
@@ -1410,6 +1612,7 @@ module.exports = {
|
||||
cmdStatePlannedPhase,
|
||||
cmdStateValidate,
|
||||
cmdStateSync,
|
||||
cmdStatePrune,
|
||||
cmdSignalWaiting,
|
||||
cmdSignalResume,
|
||||
};
|
||||
|
||||
@@ -655,52 +655,55 @@ function cmdValidateHealth(cwd, options, raw) {
|
||||
} catch { /* intentionally empty */ }
|
||||
}
|
||||
|
||||
// ─── Check 6: Phase directory naming (NN-name format) ─────────────────────
|
||||
// ─── Read phase directories once for checks 6, 7, 7b, and 8 (#1973) ──────
|
||||
let phaseDirEntries = [];
|
||||
const phaseDirFiles = new Map(); // phase dir name → file list
|
||||
try {
|
||||
const entries = fs.readdirSync(phasesDir, { withFileTypes: true });
|
||||
for (const e of entries) {
|
||||
if (e.isDirectory() && !e.name.match(/^\d{2}(?:\.\d+)*-[\w-]+$/)) {
|
||||
addIssue('warning', 'W005', `Phase directory "${e.name}" doesn't follow NN-name format`, 'Rename to match pattern (e.g., 01-setup)');
|
||||
}
|
||||
phaseDirEntries = fs.readdirSync(phasesDir, { withFileTypes: true }).filter(e => e.isDirectory());
|
||||
for (const e of phaseDirEntries) {
|
||||
try {
|
||||
phaseDirFiles.set(e.name, fs.readdirSync(path.join(phasesDir, e.name)));
|
||||
} catch { phaseDirFiles.set(e.name, []); }
|
||||
}
|
||||
} catch { /* intentionally empty */ }
|
||||
|
||||
// ─── Check 6: Phase directory naming (NN-name format) ─────────────────────
|
||||
for (const e of phaseDirEntries) {
|
||||
if (!e.name.match(/^\d{2}(?:\.\d+)*-[\w-]+$/)) {
|
||||
addIssue('warning', 'W005', `Phase directory "${e.name}" doesn't follow NN-name format`, 'Rename to match pattern (e.g., 01-setup)');
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Check 7: Orphaned plans (PLAN without SUMMARY) ───────────────────────
|
||||
try {
|
||||
const entries = fs.readdirSync(phasesDir, { withFileTypes: true });
|
||||
for (const e of entries) {
|
||||
if (!e.isDirectory()) continue;
|
||||
const phaseFiles = fs.readdirSync(path.join(phasesDir, e.name));
|
||||
const plans = phaseFiles.filter(f => f.endsWith('-PLAN.md') || f === 'PLAN.md');
|
||||
const summaries = phaseFiles.filter(f => f.endsWith('-SUMMARY.md') || f === 'SUMMARY.md');
|
||||
const summaryBases = new Set(summaries.map(s => s.replace('-SUMMARY.md', '').replace('SUMMARY.md', '')));
|
||||
for (const e of phaseDirEntries) {
|
||||
const phaseFiles = phaseDirFiles.get(e.name) || [];
|
||||
const plans = phaseFiles.filter(f => f.endsWith('-PLAN.md') || f === 'PLAN.md');
|
||||
const summaries = phaseFiles.filter(f => f.endsWith('-SUMMARY.md') || f === 'SUMMARY.md');
|
||||
const summaryBases = new Set(summaries.map(s => s.replace('-SUMMARY.md', '').replace('SUMMARY.md', '')));
|
||||
|
||||
for (const plan of plans) {
|
||||
const planBase = plan.replace('-PLAN.md', '').replace('PLAN.md', '');
|
||||
if (!summaryBases.has(planBase)) {
|
||||
addIssue('info', 'I001', `${e.name}/${plan} has no SUMMARY.md`, 'May be in progress');
|
||||
}
|
||||
for (const plan of plans) {
|
||||
const planBase = plan.replace('-PLAN.md', '').replace('PLAN.md', '');
|
||||
if (!summaryBases.has(planBase)) {
|
||||
addIssue('info', 'I001', `${e.name}/${plan} has no SUMMARY.md`, 'May be in progress');
|
||||
}
|
||||
}
|
||||
} catch { /* intentionally empty */ }
|
||||
}
|
||||
|
||||
// ─── Check 7b: Nyquist VALIDATION.md consistency ────────────────────────
|
||||
try {
|
||||
const phaseEntries = fs.readdirSync(phasesDir, { withFileTypes: true });
|
||||
for (const e of phaseEntries) {
|
||||
if (!e.isDirectory()) continue;
|
||||
const phaseFiles = fs.readdirSync(path.join(phasesDir, e.name));
|
||||
const hasResearch = phaseFiles.some(f => f.endsWith('-RESEARCH.md'));
|
||||
const hasValidation = phaseFiles.some(f => f.endsWith('-VALIDATION.md'));
|
||||
if (hasResearch && !hasValidation) {
|
||||
const researchFile = phaseFiles.find(f => f.endsWith('-RESEARCH.md'));
|
||||
for (const e of phaseDirEntries) {
|
||||
const phaseFiles = phaseDirFiles.get(e.name) || [];
|
||||
const hasResearch = phaseFiles.some(f => f.endsWith('-RESEARCH.md'));
|
||||
const hasValidation = phaseFiles.some(f => f.endsWith('-VALIDATION.md'));
|
||||
if (hasResearch && !hasValidation) {
|
||||
const researchFile = phaseFiles.find(f => f.endsWith('-RESEARCH.md'));
|
||||
try {
|
||||
const researchContent = fs.readFileSync(path.join(phasesDir, e.name, researchFile), 'utf-8');
|
||||
if (researchContent.includes('## Validation Architecture')) {
|
||||
addIssue('warning', 'W009', `Phase ${e.name}: has Validation Architecture in RESEARCH.md but no VALIDATION.md`, 'Re-run /gsd-plan-phase with --research to regenerate');
|
||||
}
|
||||
}
|
||||
} catch { /* intentionally empty */ }
|
||||
}
|
||||
} catch { /* intentionally empty */ }
|
||||
}
|
||||
|
||||
// ─── Check 7c: Agent installation (#1371) ──────────────────────────────────
|
||||
// Verify GSD agents are installed. Missing agents cause Task(subagent_type=...)
|
||||
@@ -733,15 +736,10 @@ function cmdValidateHealth(cwd, options, raw) {
|
||||
}
|
||||
|
||||
const diskPhases = new Set();
|
||||
try {
|
||||
const entries = fs.readdirSync(phasesDir, { withFileTypes: true });
|
||||
for (const e of entries) {
|
||||
if (e.isDirectory()) {
|
||||
const dm = e.name.match(/^(\d+[A-Z]?(?:\.\d+)*)/i);
|
||||
if (dm) diskPhases.add(dm[1]);
|
||||
}
|
||||
}
|
||||
} catch { /* intentionally empty */ }
|
||||
for (const e of phaseDirEntries) {
|
||||
const dm = e.name.match(/^(\d+[A-Z]?(?:\.\d+)*)/i);
|
||||
if (dm) diskPhases.add(dm[1]);
|
||||
}
|
||||
|
||||
// Build a set of phases explicitly marked not-yet-started in the ROADMAP
|
||||
// summary list (- [ ] **Phase N:**). These phases are intentionally absent
|
||||
|
||||
@@ -759,6 +759,36 @@ timeout 30 bash -c 'until node -e "fetch(\"http://localhost:3000\").then(r=>{pro
|
||||
|
||||
</anti_patterns>
|
||||
|
||||
<type name="tdd-review">
|
||||
## checkpoint:tdd-review (TDD Mode Only)
|
||||
|
||||
**When:** All waves in a phase complete and `workflow.tdd_mode` is enabled. Inserted by the execute-phase orchestrator after `aggregate_results`.
|
||||
|
||||
**Purpose:** Collaborative review of TDD gate compliance across all `type: tdd` plans in the phase. Advisory — does not block execution.
|
||||
|
||||
**Use for:**
|
||||
- Verifying RED/GREEN/REFACTOR commit sequence for each TDD plan
|
||||
- Surfacing gate violations (missing RED or GREEN commits)
|
||||
- Reviewing test quality (tests fail for the right reason)
|
||||
- Confirming minimal GREEN implementations
|
||||
|
||||
**Structure:**
|
||||
```xml
|
||||
<task type="checkpoint:tdd-review" gate="advisory">
|
||||
<what-checked>TDD gate compliance for {count} plans in Phase {X}</what-checked>
|
||||
<gate-results>
|
||||
| Plan | RED | GREEN | REFACTOR | Status |
|
||||
|------|-----|-------|----------|--------|
|
||||
| {id} | ✓ | ✓ | ✓ | Pass |
|
||||
</gate-results>
|
||||
<violations>[List of gate violations, or "None"]</violations>
|
||||
<resume-signal>Review complete — proceed to phase verification</resume-signal>
|
||||
</task>
|
||||
```
|
||||
|
||||
**Auto-mode behavior:** When `workflow._auto_chain_active` or `workflow.auto_advance` is true, the TDD review checkpoint auto-approves (advisory gate — never blocks).
|
||||
</type>
|
||||
|
||||
<summary>
|
||||
|
||||
Checkpoints formalize human-in-the-loop points for verification and decisions, not manual work.
|
||||
|
||||
110
get-shit-done/references/executor-examples.md
Normal file
110
get-shit-done/references/executor-examples.md
Normal file
@@ -0,0 +1,110 @@
|
||||
# Executor Extended Examples
|
||||
|
||||
> Reference file for gsd-executor agent. Loaded on-demand via `@` reference.
|
||||
> For sub-200K context windows, this content is stripped from the agent prompt and available here for on-demand loading.
|
||||
|
||||
## Deviation Rule Examples
|
||||
|
||||
### Rule 1 — Auto-fix bugs
|
||||
|
||||
**Examples of Rule 1 triggers:**
|
||||
- Wrong queries returning incorrect data
|
||||
- Logic errors in conditionals
|
||||
- Type errors and type mismatches
|
||||
- Null pointer exceptions / undefined access
|
||||
- Broken validation (accepts invalid input)
|
||||
- Security vulnerabilities (XSS, SQL injection)
|
||||
- Race conditions in async code
|
||||
- Memory leaks from uncleaned resources
|
||||
|
||||
### Rule 2 — Auto-add missing critical functionality
|
||||
|
||||
**Examples of Rule 2 triggers:**
|
||||
- Missing error handling (unhandled promise rejections, no try/catch on I/O)
|
||||
- No input validation on user-facing endpoints
|
||||
- Missing null checks before property access
|
||||
- No auth on protected routes
|
||||
- Missing authorization checks (user can access other users' data)
|
||||
- No CSRF/CORS configuration
|
||||
- No rate limiting on public endpoints
|
||||
- Missing DB indexes on frequently queried columns
|
||||
- No error logging (failures silently swallowed)
|
||||
|
||||
### Rule 3 — Auto-fix blocking issues
|
||||
|
||||
**Examples of Rule 3 triggers:**
|
||||
- Missing dependency not in package.json
|
||||
- Wrong types preventing compilation
|
||||
- Broken imports (wrong path, wrong export name)
|
||||
- Missing env var required at runtime
|
||||
- DB connection error (wrong URL, missing credentials)
|
||||
- Build config error (wrong entry point, missing loader)
|
||||
- Missing referenced file (import points to non-existent module)
|
||||
- Circular dependency preventing module load
|
||||
|
||||
### Rule 4 — Ask about architectural changes
|
||||
|
||||
**Examples of Rule 4 triggers:**
|
||||
- New DB table (not just adding a column)
|
||||
- Major schema changes (renaming tables, changing relationships)
|
||||
- New service layer (adding a queue, cache, or message bus)
|
||||
- Switching libraries/frameworks (e.g., replacing Express with Fastify)
|
||||
- Changing auth approach (switching from session to JWT)
|
||||
- New infrastructure (adding Redis, S3, etc.)
|
||||
- Breaking API changes (removing or renaming endpoints)
|
||||
|
||||
## Edge Case Decision Guide
|
||||
|
||||
| Scenario | Rule | Rationale |
|
||||
|----------|------|-----------|
|
||||
| Missing validation on input | Rule 2 | Security requirement |
|
||||
| Crashes on null input | Rule 1 | Bug — incorrect behavior |
|
||||
| Need new database table | Rule 4 | Architectural decision |
|
||||
| Need new column on existing table | Rule 1 or 2 | Depends on context |
|
||||
| Pre-existing linting warnings | Out of scope | Not caused by current task |
|
||||
| Unrelated test failures | Out of scope | Not caused by current task |
|
||||
|
||||
**Decision heuristic:** "Does this affect correctness, security, or ability to complete the current task?"
|
||||
- YES → Rules 1-3 (fix automatically)
|
||||
- MAYBE → Rule 4 (ask the user)
|
||||
- NO → Out of scope (log to deferred-items.md)
|
||||
|
||||
## Checkpoint Examples
|
||||
|
||||
### Good checkpoint placement
|
||||
|
||||
```xml
|
||||
<!-- Automate everything, then verify at the end -->
|
||||
<task type="auto">Create database schema</task>
|
||||
<task type="auto">Create API endpoints</task>
|
||||
<task type="auto">Create UI components</task>
|
||||
<task type="checkpoint:human-verify">
|
||||
<what-built>Complete auth flow (schema + API + UI)</what-built>
|
||||
<how-to-verify>
|
||||
1. Visit http://localhost:3000/register
|
||||
2. Create account with test@example.com
|
||||
3. Log in with those credentials
|
||||
4. Verify dashboard loads with user name
|
||||
</how-to-verify>
|
||||
</task>
|
||||
```
|
||||
|
||||
### Bad checkpoint placement
|
||||
|
||||
```xml
|
||||
<!-- Too many checkpoints — causes verification fatigue -->
|
||||
<task type="auto">Create schema</task>
|
||||
<task type="checkpoint:human-verify">Check schema</task>
|
||||
<task type="auto">Create API</task>
|
||||
<task type="checkpoint:human-verify">Check API</task>
|
||||
<task type="auto">Create UI</task>
|
||||
<task type="checkpoint:human-verify">Check UI</task>
|
||||
```
|
||||
|
||||
### Auth gate handling
|
||||
|
||||
When an auth error occurs during `type="auto"` execution:
|
||||
1. Recognize it as an auth gate (not a bug) — indicators: "Not authenticated", "401", "403", "Please run X login"
|
||||
2. STOP the current task
|
||||
3. Return a `checkpoint:human-action` with exact auth steps
|
||||
4. In SUMMARY.md, document auth gates as normal flow, not deviations
|
||||
89
get-shit-done/references/planner-antipatterns.md
Normal file
89
get-shit-done/references/planner-antipatterns.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# Planner Anti-Patterns and Specificity Examples
|
||||
|
||||
> Reference file for gsd-planner agent. Loaded on-demand via `@` reference.
|
||||
> For sub-200K context windows, this content is stripped from the agent prompt and available here for on-demand loading.
|
||||
|
||||
## Checkpoint Anti-Patterns
|
||||
|
||||
### Bad — Asking human to automate
|
||||
|
||||
```xml
|
||||
<task type="checkpoint:human-action">
|
||||
<action>Deploy to Vercel</action>
|
||||
<instructions>Visit vercel.com, import repo, click deploy...</instructions>
|
||||
</task>
|
||||
```
|
||||
|
||||
**Why bad:** Vercel has a CLI. Claude should run `vercel --yes`. Never ask the user to do what Claude can automate via CLI/API.
|
||||
|
||||
### Bad — Too many checkpoints
|
||||
|
||||
```xml
|
||||
<task type="auto">Create schema</task>
|
||||
<task type="checkpoint:human-verify">Check schema</task>
|
||||
<task type="auto">Create API</task>
|
||||
<task type="checkpoint:human-verify">Check API</task>
|
||||
```
|
||||
|
||||
**Why bad:** Verification fatigue. Users should not be asked to verify every small step. Combine into one checkpoint at the end of meaningful work.
|
||||
|
||||
### Good — Single verification checkpoint
|
||||
|
||||
```xml
|
||||
<task type="auto">Create schema</task>
|
||||
<task type="auto">Create API</task>
|
||||
<task type="auto">Create UI</task>
|
||||
<task type="checkpoint:human-verify">
|
||||
<what-built>Complete auth flow (schema + API + UI)</what-built>
|
||||
<how-to-verify>Test full flow: register, login, access protected page</how-to-verify>
|
||||
</task>
|
||||
```
|
||||
|
||||
### Bad — Mixing checkpoints with implementation
|
||||
|
||||
A plan should not interleave multiple checkpoint types with implementation tasks. Checkpoints belong at natural verification boundaries, not scattered throughout.
|
||||
|
||||
## Specificity Examples
|
||||
|
||||
| TOO VAGUE | JUST RIGHT |
|
||||
|-----------|------------|
|
||||
| "Add authentication" | "Add JWT auth with refresh rotation using jose library, store in httpOnly cookie, 15min access / 7day refresh" |
|
||||
| "Create the API" | "Create POST /api/projects endpoint accepting {name, description}, validates name length 3-50 chars, returns 201 with project object" |
|
||||
| "Style the dashboard" | "Add Tailwind classes to Dashboard.tsx: grid layout (3 cols on lg, 1 on mobile), card shadows, hover states on action buttons" |
|
||||
| "Handle errors" | "Wrap API calls in try/catch, return {error: string} on 4xx/5xx, show toast via sonner on client" |
|
||||
| "Set up the database" | "Add User and Project models to schema.prisma with UUID ids, email unique constraint, createdAt/updatedAt timestamps, run prisma db push" |
|
||||
|
||||
**Specificity test:** Could a different Claude instance execute the task without asking clarifying questions? If not, add more detail.
|
||||
|
||||
## Context Section Anti-Patterns
|
||||
|
||||
### Bad — Reflexive SUMMARY chaining
|
||||
|
||||
```markdown
|
||||
<context>
|
||||
@.planning/phases/01-foundation/01-01-SUMMARY.md
|
||||
@.planning/phases/01-foundation/01-02-SUMMARY.md <!-- Does Plan 02 actually need Plan 01's output? -->
|
||||
@.planning/phases/01-foundation/01-03-SUMMARY.md <!-- Chain grows, context bloats -->
|
||||
</context>
|
||||
```
|
||||
|
||||
**Why bad:** Plans are often independent. Reflexive chaining (02 refs 01, 03 refs 02...) wastes context. Only reference prior SUMMARY files when the plan genuinely uses types/exports from that prior plan or a decision from it affects the current plan.
|
||||
|
||||
### Good — Selective context
|
||||
|
||||
```markdown
|
||||
<context>
|
||||
@.planning/PROJECT.md
|
||||
@.planning/STATE.md
|
||||
@.planning/phases/01-foundation/01-01-SUMMARY.md <!-- Uses User type defined in Plan 01 -->
|
||||
</context>
|
||||
```
|
||||
|
||||
## Scope Reduction Anti-Patterns
|
||||
|
||||
**Prohibited language in task actions:**
|
||||
- "v1", "v2", "simplified version", "static for now", "hardcoded for now"
|
||||
- "future enhancement", "placeholder", "basic version", "minimal implementation"
|
||||
- "will be wired later", "dynamic in future phase", "skip for now"
|
||||
|
||||
If a decision from CONTEXT.md says "display cost calculated from billing table in impulses", the plan must deliver exactly that. Not "static label /min" as a "v1". If the phase is too complex, recommend a phase split instead of silently reducing scope.
|
||||
73
get-shit-done/references/planner-source-audit.md
Normal file
73
get-shit-done/references/planner-source-audit.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Planner Source Audit & Authority Limits
|
||||
|
||||
Reference for `agents/gsd-planner.md` — extended rules for multi-source coverage audits and planner authority constraints.
|
||||
|
||||
## Multi-Source Coverage Audit Format
|
||||
|
||||
Before finalizing plans, produce a **source audit** covering ALL four artifact types:
|
||||
|
||||
```
|
||||
SOURCE | ID | Feature/Requirement | Plan | Status | Notes
|
||||
--------- | ------- | ---------------------------- | ----- | --------- | ------
|
||||
GOAL | — | {phase goal from ROADMAP.md} | 01-03 | COVERED |
|
||||
REQ | REQ-14 | OAuth login with Google + GH | 02 | COVERED |
|
||||
REQ | REQ-22 | Email verification flow | 03 | COVERED |
|
||||
RESEARCH | — | Rate limiting on auth routes | 01 | COVERED |
|
||||
RESEARCH | — | Refresh token rotation | NONE | ⚠ MISSING | No plan covers this
|
||||
CONTEXT | D-01 | Use jose library for JWT | 02 | COVERED |
|
||||
CONTEXT | D-04 | 15min access / 7day refresh | 02 | COVERED |
|
||||
```
|
||||
|
||||
### Four Source Types
|
||||
|
||||
1. **GOAL** — The `goal:` field from ROADMAP.md for this phase. The primary success condition.
|
||||
2. **REQ** — Every REQ-ID in `phase_req_ids`. Cross-reference REQUIREMENTS.md for descriptions.
|
||||
3. **RESEARCH** — Technical approaches, discovered constraints, and features identified in RESEARCH.md. Exclude items explicitly marked "out of scope" or "future work" by the researcher.
|
||||
4. **CONTEXT** — Every D-XX decision from CONTEXT.md `<decisions>` section.
|
||||
|
||||
### What is NOT a Gap
|
||||
|
||||
Do not flag these as MISSING:
|
||||
- Items in `## Deferred Ideas` in CONTEXT.md — developer chose to defer these
|
||||
- Items scoped to a different phase via `phase_req_ids` — not assigned to this phase
|
||||
- Items in RESEARCH.md explicitly marked "out of scope" or "future work" by the researcher
|
||||
|
||||
### Handling MISSING Items
|
||||
|
||||
If ANY row is `⚠ MISSING`, do NOT finalize the plan set silently. Return to the orchestrator:
|
||||
|
||||
```
|
||||
## ⚠ Source Audit: Unplanned Items Found
|
||||
|
||||
The following items from source artifacts have no corresponding plan:
|
||||
|
||||
1. **{SOURCE}: {item description}** (from {artifact file}, section "{section}")
|
||||
- {why this was identified as required}
|
||||
|
||||
Options:
|
||||
A) Add a plan to cover this item
|
||||
B) Split phase: move to a sub-phase
|
||||
C) Defer explicitly: add to backlog with developer confirmation
|
||||
|
||||
→ Awaiting developer decision before finalizing plan set.
|
||||
```
|
||||
|
||||
If ALL rows are COVERED → return `## PLANNING COMPLETE` as normal.
|
||||
|
||||
---
|
||||
|
||||
## Authority Limits — Constraint Examples
|
||||
|
||||
The planner's only legitimate reasons to split or flag a feature are **constraints**, not judgments about difficulty:
|
||||
|
||||
**Valid (constraints):**
|
||||
- ✓ "This task touches 9 files and would consume ~45% context — split into two tasks"
|
||||
- ✓ "No API key or endpoint is defined in any source artifact — need developer input"
|
||||
- ✓ "This feature depends on the auth system built in Phase 03, which is not yet complete"
|
||||
|
||||
**Invalid (difficulty judgments):**
|
||||
- ✗ "This is complex and would be difficult to implement correctly"
|
||||
- ✗ "Integrating with an external service could take a long time"
|
||||
- ✗ "This is a challenging feature that might be better left to a future phase"
|
||||
|
||||
If a feature has none of the three legitimate constraints (context cost, missing information, dependency conflict), it gets planned. Period.
|
||||
@@ -35,6 +35,7 @@ Configuration options for `.planning/` directory behavior.
|
||||
| `git.quick_branch_template` | `null` | Optional branch template for quick-task runs |
|
||||
| `workflow.use_worktrees` | `true` | Whether executor agents run in isolated git worktrees. Set to `false` to disable worktrees — agents execute sequentially on the main working tree instead. Recommended for solo developers or when worktree merges cause issues. |
|
||||
| `workflow.subagent_timeout` | `300000` | Timeout in milliseconds for parallel subagent tasks (e.g. codebase mapping). Increase for large codebases or slower models. Default: 300000 (5 minutes). |
|
||||
| `workflow.inline_plan_threshold` | `2` | Plans with this many tasks or fewer execute inline (Pattern C) instead of spawning a subagent. Avoids ~14K token spawn overhead for small plans. Set to `0` to always spawn subagents. |
|
||||
| `manager.flags.discuss` | `""` | Flags passed to `/gsd-discuss-phase` when dispatched from manager (e.g. `"--auto --analyze"`) |
|
||||
| `manager.flags.plan` | `""` | Flags passed to plan workflow when dispatched from manager |
|
||||
| `manager.flags.execute` | `""` | Flags passed to execute workflow when dispatched from manager |
|
||||
@@ -247,6 +248,7 @@ Set via `workflow.*` namespace in config.json (e.g., `"workflow": { "research":
|
||||
| `workflow.plan_check` | boolean | `true` | `true`, `false` | Run plan-checker agent to validate plans. _Alias:_ `plan_checker` is the flat-key form used in `CONFIG_DEFAULTS`; `workflow.plan_check` is the canonical namespaced form. |
|
||||
| `workflow.verifier` | boolean | `true` | `true`, `false` | Run verifier agent after execution |
|
||||
| `workflow.nyquist_validation` | boolean | `true` | `true`, `false` | Enable Nyquist-inspired validation gates |
|
||||
| `workflow.auto_prune_state` | boolean | `false` | `true`, `false` | Automatically prune old STATE.md entries on phase completion (keeps 3 most recent phases) |
|
||||
| `workflow.auto_advance` | boolean | `false` | `true`, `false` | Auto-advance to next phase after completion |
|
||||
| `workflow.node_repair` | boolean | `true` | `true`, `false` | Attempt automatic repair of failed plan nodes |
|
||||
| `workflow.node_repair_budget` | number | `2` | Any positive integer | Max repair retries per failed node |
|
||||
@@ -259,6 +261,7 @@ Set via `workflow.*` namespace in config.json (e.g., `"workflow": { "research":
|
||||
| `workflow.skip_discuss` | boolean | `false` | `true`, `false` | Skip discuss phase entirely |
|
||||
| `workflow.use_worktrees` | boolean | `true` | `true`, `false` | Run executor agents in isolated git worktrees |
|
||||
| `workflow.subagent_timeout` | number | `300000` | Any positive integer (ms) | Timeout for parallel subagent tasks (default: 5 minutes) |
|
||||
| `workflow.inline_plan_threshold` | number | `2` | `0`–`10` | Plans with ≤N tasks execute inline instead of spawning a subagent |
|
||||
| `workflow.code_review` | boolean | `true` | `true`, `false` | Enable built-in code review step in the ship workflow |
|
||||
| `workflow.code_review_depth` | string | `"standard"` | `"light"`, `"standard"`, `"deep"` | Depth level for code review analysis in the ship workflow |
|
||||
| `workflow._auto_chain_active` | boolean | `false` | `true`, `false` | Internal: tracks whether autonomous chaining is active |
|
||||
|
||||
@@ -247,6 +247,73 @@ Both follow same format: `{type}({phase}-{plan}): {description}`
|
||||
- Consistent with overall commit strategy
|
||||
</commit_pattern>
|
||||
|
||||
<gate_enforcement>
|
||||
## Gate Enforcement Rules
|
||||
|
||||
When `workflow.tdd_mode` is enabled in config, the RED/GREEN/REFACTOR gate sequence is enforced for all `type: tdd` plans.
|
||||
|
||||
### Gate Definitions
|
||||
|
||||
| Gate | Required | Commit Pattern | Validation |
|
||||
|------|----------|---------------|------------|
|
||||
| RED | Yes | `test({phase}-{plan}): ...` | Test exists AND fails before implementation |
|
||||
| GREEN | Yes | `feat({phase}-{plan}): ...` | Test passes after implementation |
|
||||
| REFACTOR | No | `refactor({phase}-{plan}): ...` | Tests still pass after cleanup |
|
||||
|
||||
### Fail-Fast Rules
|
||||
|
||||
1. **Unexpected GREEN in RED phase:** If the test passes before any implementation code is written, STOP. The feature may already exist or the test is wrong. Investigate before proceeding.
|
||||
2. **Missing RED commit:** If no `test(...)` commit precedes the `feat(...)` commit, the TDD discipline was violated. Flag in SUMMARY.md.
|
||||
3. **REFACTOR breaks tests:** Undo the refactor immediately. Commit was premature — refactor in smaller steps.
|
||||
|
||||
### Executor Gate Validation
|
||||
|
||||
After completing a `type: tdd` plan, the executor validates the git log:
|
||||
```bash
|
||||
# Check for RED gate commit
|
||||
git log --oneline --grep="^test(${PHASE}-${PLAN})" | head -1
|
||||
# Check for GREEN gate commit
|
||||
git log --oneline --grep="^feat(${PHASE}-${PLAN})" | head -1
|
||||
# Check for optional REFACTOR gate commit
|
||||
git log --oneline --grep="^refactor(${PHASE}-${PLAN})" | head -1
|
||||
```
|
||||
|
||||
If RED or GREEN gate commits are missing, add a `## TDD Gate Compliance` section to SUMMARY.md with the violation details.
|
||||
</gate_enforcement>
|
||||
|
||||
<end_of_phase_review>
|
||||
## End-of-Phase TDD Review Checkpoint
|
||||
|
||||
When `workflow.tdd_mode` is enabled, the execute-phase orchestrator inserts a collaborative review checkpoint after all waves complete but before phase verification.
|
||||
|
||||
### Review Checkpoint Format
|
||||
|
||||
```
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
TDD REVIEW — Phase {X}
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
TDD Plans: {count} | Gate violations: {count}
|
||||
|
||||
| Plan | RED | GREEN | REFACTOR | Status |
|
||||
|------|-----|-------|----------|--------|
|
||||
| {id} | ✓ | ✓ | ✓ | Pass |
|
||||
| {id} | ✓ | ✗ | — | FAIL |
|
||||
|
||||
{If violations exist:}
|
||||
⚠ Gate violations are advisory — review before advancing.
|
||||
```
|
||||
|
||||
### What the Review Checks
|
||||
|
||||
1. **Gate sequence:** Each TDD plan has RED → GREEN commits in order
|
||||
2. **Test quality:** RED phase tests fail for the right reason (not import errors or syntax)
|
||||
3. **Minimal GREEN:** Implementation is minimal — no premature optimization in GREEN phase
|
||||
4. **Refactor discipline:** If REFACTOR commit exists, tests still pass
|
||||
|
||||
This checkpoint is advisory — it does not block phase completion but surfaces TDD discipline issues for human review.
|
||||
</end_of_phase_review>
|
||||
|
||||
<context_budget>
|
||||
## Context Budget
|
||||
|
||||
|
||||
@@ -11,7 +11,14 @@
|
||||
"security_asvs_level": 1,
|
||||
"security_block_on": "high",
|
||||
"discuss_mode": "discuss",
|
||||
"research_before_questions": false
|
||||
"research_before_questions": false,
|
||||
"code_review_command": null,
|
||||
"plan_bounce": false,
|
||||
"plan_bounce_script": null,
|
||||
"plan_bounce_passes": 2,
|
||||
"cross_ai_execution": false,
|
||||
"cross_ai_command": "",
|
||||
"cross_ai_timeout": 300
|
||||
},
|
||||
"planning": {
|
||||
"commit_docs": true,
|
||||
@@ -44,5 +51,6 @@
|
||||
"context_warnings": true
|
||||
},
|
||||
"project_code": null,
|
||||
"agent_skills": {}
|
||||
"agent_skills": {},
|
||||
"claude_md_path": "./CLAUDE.md"
|
||||
}
|
||||
|
||||
@@ -38,6 +38,18 @@ Template for `.planning/phases/XX-name/{phase_num}-RESEARCH.md` - comprehensive
|
||||
**If no CONTEXT.md exists:** Write "No user constraints - all decisions at Claude's discretion"
|
||||
</user_constraints>
|
||||
|
||||
<architectural_responsibility_map>
|
||||
## Architectural Responsibility Map
|
||||
|
||||
Map each phase capability to its standard architectural tier owner before diving into framework research. This prevents tier misassignment from propagating into plans.
|
||||
|
||||
| Capability | Primary Tier | Secondary Tier | Rationale |
|
||||
|------------|-------------|----------------|-----------|
|
||||
| [capability from phase description] | [Browser/Client, Frontend Server, API/Backend, CDN/Static, or Database/Storage] | [secondary tier or —] | [why this tier owns it] |
|
||||
|
||||
**If single-tier application:** Write "Single-tier application — all capabilities reside in [tier]" and omit the table.
|
||||
</architectural_responsibility_map>
|
||||
|
||||
<research_summary>
|
||||
## Summary
|
||||
|
||||
|
||||
@@ -113,6 +113,15 @@ Phase: "API documentation"
|
||||
|
||||
<answer_validation>
|
||||
**IMPORTANT: Answer validation** — After every AskUserQuestion call, check whether the response is empty or whitespace-only, then apply the rules below:
|
||||
|
||||
**Exception — "Other" with empty text:** If the user selected "Other" (or "Chat more") and the response body is empty or whitespace-only, this is NOT an empty answer — it is a signal that the user wants to type freeform input. In this case:
|
||||
1. Output a single plain-text line: "What would you like to discuss?"
|
||||
2. STOP generating. Do not call any tools. Do not output any further text.
|
||||
3. Wait for the user's next message.
|
||||
4. After receiving their message, reflect it back and continue.
|
||||
Do NOT retry the AskUserQuestion or generate more questions when "Other" is selected with empty text.
|
||||
|
||||
**All other empty responses:** If the response is empty or whitespace-only (and the user did NOT select "Other"):
|
||||
1. Retry the question once with the same parameters
|
||||
2. If still empty, present the options as a plain-text numbered list and ask the user to type their choice number
|
||||
Never proceed with an empty answer.
|
||||
|
||||
@@ -57,6 +57,8 @@ Parse `$ARGUMENTS` before loading any context:
|
||||
- First positional token → `PHASE_ARG`
|
||||
- Optional `--wave N` → `WAVE_FILTER`
|
||||
- Optional `--gaps-only` keeps its current meaning
|
||||
- Optional `--cross-ai` → `CROSS_AI_FORCE=true` (force all plans through cross-AI execution)
|
||||
- Optional `--no-cross-ai` → `CROSS_AI_DISABLED=true` (disable cross-AI for this run, overrides config and frontmatter)
|
||||
|
||||
If `--wave` is absent, preserve the current behavior of executing all incomplete waves in the phase.
|
||||
</step>
|
||||
@@ -93,6 +95,12 @@ When `CONTEXT_WINDOW >= 500000` (1M-class models), subagent prompts include rich
|
||||
- Verifier agents receive all PLAN.md, SUMMARY.md, CONTEXT.md files plus REQUIREMENTS.md
|
||||
- This enables cross-phase awareness and history-aware verification
|
||||
|
||||
When `CONTEXT_WINDOW < 200000` (sub-200K models), subagent prompts are thinned to reduce static overhead:
|
||||
- Executor agents omit extended deviation rule examples and checkpoint examples from inline prompt — load on-demand via @~/.claude/get-shit-done/references/executor-examples.md
|
||||
- Planner agents omit extended anti-pattern lists and specificity examples from inline prompt — load on-demand via @~/.claude/get-shit-done/references/planner-antipatterns.md
|
||||
- Core rules and decision logic remain inline; only verbose examples and edge-case lists are extracted
|
||||
- This reduces executor static overhead by ~40% while preserving behavioral correctness
|
||||
|
||||
**If `phase_found` is false:** Error — phase directory not found.
|
||||
**If `plan_count` is 0:** Error — no plans found in phase.
|
||||
**If `state_exists` is false but `.planning/` exists:** Offer reconstruct or continue.
|
||||
@@ -243,6 +251,77 @@ Report:
|
||||
```
|
||||
</step>
|
||||
|
||||
<step name="cross_ai_delegation">
|
||||
**Optional step 2.5 — Delegate plans to an external AI runtime.**
|
||||
|
||||
This step runs after plan discovery and before normal wave execution. It identifies plans
|
||||
that should be delegated to an external AI command and executes them via stdin-based prompt
|
||||
delivery. Plans handled here are removed from the execute_waves plan list so the normal
|
||||
executor skips them.
|
||||
|
||||
**Activation logic:**
|
||||
|
||||
1. If `CROSS_AI_DISABLED` is true (`--no-cross-ai` flag): skip this step entirely.
|
||||
2. If `CROSS_AI_FORCE` is true (`--cross-ai` flag): mark ALL incomplete plans for cross-AI execution.
|
||||
3. Otherwise: check each plan's frontmatter for `cross_ai: true` AND verify config
|
||||
`workflow.cross_ai_execution` is `true`. Plans matching both conditions are marked for cross-AI.
|
||||
|
||||
```bash
|
||||
CROSS_AI_ENABLED=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.cross_ai_execution --default false 2>/dev/null)
|
||||
CROSS_AI_CMD=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.cross_ai_command --default "" 2>/dev/null)
|
||||
CROSS_AI_TIMEOUT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.cross_ai_timeout --default 300 2>/dev/null)
|
||||
```
|
||||
|
||||
**If no plans are marked for cross-AI:** Skip to execute_waves.
|
||||
|
||||
**If plans are marked but `cross_ai_command` is empty:** Error — tell user to set
|
||||
`workflow.cross_ai_command` via `gsd-tools.cjs config-set workflow.cross_ai_command "<command>"`.
|
||||
|
||||
**For each cross-AI plan (sequentially):**
|
||||
|
||||
1. **Construct the task prompt** from the plan file:
|
||||
- Extract `<objective>` and `<tasks>` sections from the PLAN.md
|
||||
- Append PROJECT.md context (project name, description, tech stack)
|
||||
- Format as a self-contained execution prompt
|
||||
|
||||
2. **Check for dirty working tree before execution:**
|
||||
```bash
|
||||
if ! git diff --quiet HEAD 2>/dev/null; then
|
||||
echo "WARNING: dirty working tree detected — the external AI command may produce uncommitted changes that conflict with existing modifications"
|
||||
fi
|
||||
```
|
||||
|
||||
3. **Run the external command** from the project root, writing the prompt to stdin.
|
||||
Never shell-interpolate the prompt — always pipe via stdin to prevent injection:
|
||||
```bash
|
||||
echo "$TASK_PROMPT" | timeout "${CROSS_AI_TIMEOUT}s" ${CROSS_AI_CMD} > "$CANDIDATE_SUMMARY" 2>"$ERROR_LOG"
|
||||
EXIT_CODE=$?
|
||||
```
|
||||
|
||||
4. **Evaluate the result:**
|
||||
|
||||
**Success (exit 0 + valid summary):**
|
||||
- Read `$CANDIDATE_SUMMARY` and validate it contains meaningful content
|
||||
(not empty, has at least a heading and description — a valid SUMMARY.md structure)
|
||||
- Write it as the plan's SUMMARY.md file
|
||||
- Update STATE.md plan status to complete
|
||||
- Update ROADMAP.md progress
|
||||
- Mark plan as handled — skip it in execute_waves
|
||||
|
||||
**Failure (non-zero exit or invalid summary):**
|
||||
- Display the error output and exit code
|
||||
- Warn: "The external command may have left uncommitted changes or partial edits
|
||||
in the working tree. Review `git status` and `git diff` before proceeding."
|
||||
- Offer three choices:
|
||||
- **retry** — run the same plan through cross-AI again
|
||||
- **skip** — fall back to normal executor for this plan (re-add to execute_waves list)
|
||||
- **abort** — stop execution entirely, preserve state for resume
|
||||
|
||||
5. **After all cross-AI plans processed:** Remove successfully handled plans from the
|
||||
incomplete plan list so execute_waves skips them. Any skipped-to-fallback plans remain
|
||||
in the list for normal executor processing.
|
||||
</step>
|
||||
|
||||
<step name="execute_waves">
|
||||
Execute each selected wave in sequence. Within a wave: parallel if `PARALLELIZATION=true`, sequential if `false`.
|
||||
|
||||
@@ -382,6 +461,12 @@ Execute each selected wave in sequence. Within a wave: parallel if `PARALLELIZAT
|
||||
auto-detects worktree mode (`.git` is a file, not a directory) and skips
|
||||
shared file updates automatically. The orchestrator updates them centrally
|
||||
after merge.
|
||||
|
||||
REQUIRED: SUMMARY.md MUST be committed before you return. In worktree mode the
|
||||
git_commit_metadata step in execute-plan.md commits SUMMARY.md and REQUIREMENTS.md
|
||||
only (STATE.md and ROADMAP.md are excluded automatically). Do NOT skip or defer
|
||||
this commit — the orchestrator force-removes the worktree after you return, and
|
||||
any uncommitted SUMMARY.md will be permanently lost (#2070).
|
||||
</parallel_execution>
|
||||
|
||||
<execution_context>
|
||||
@@ -389,6 +474,7 @@ Execute each selected wave in sequence. Within a wave: parallel if `PARALLELIZAT
|
||||
@~/.claude/get-shit-done/templates/summary.md
|
||||
@~/.claude/get-shit-done/references/checkpoints.md
|
||||
@~/.claude/get-shit-done/references/tdd.md
|
||||
${CONTEXT_WINDOW < 200000 ? '' : '@~/.claude/get-shit-done/references/executor-examples.md'}
|
||||
</execution_context>
|
||||
|
||||
<files_to_read>
|
||||
@@ -556,6 +642,17 @@ Execute each selected wave in sequence. Within a wave: parallel if `PARALLELIZAT
|
||||
fi
|
||||
fi
|
||||
|
||||
# Safety net: commit any uncommitted SUMMARY.md before force-removing the worktree.
|
||||
# This guards against executors that skipped the git_commit_metadata step (#2070).
|
||||
UNCOMMITTED_SUMMARY=$(git -C "$WT" ls-files --modified --others --exclude-standard -- "*SUMMARY.md" 2>/dev/null || true)
|
||||
if [ -n "$UNCOMMITTED_SUMMARY" ]; then
|
||||
echo "⚠ SUMMARY.md was not committed by executor — committing now to prevent data loss"
|
||||
git -C "$WT" add -- "*SUMMARY.md" 2>/dev/null || true
|
||||
git -C "$WT" commit --no-verify -m "docs(recovery): rescue uncommitted SUMMARY.md before worktree removal (#2070)" 2>/dev/null || true
|
||||
# Re-merge the recovery commit
|
||||
git merge "$WT_BRANCH" --no-edit -m "chore: merge rescued SUMMARY.md from executor worktree ($WT_BRANCH)" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Remove the worktree
|
||||
git worktree remove "$WT" --force 2>/dev/null || true
|
||||
|
||||
@@ -822,6 +919,50 @@ If `SECURITY_CFG` is `true` AND SECURITY.md exists: check frontmatter `threats_o
|
||||
```
|
||||
</step>
|
||||
|
||||
<step name="tdd_review_checkpoint">
|
||||
**Optional step — TDD collaborative review.**
|
||||
|
||||
```bash
|
||||
TDD_MODE=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.tdd_mode --default false 2>/dev/null)
|
||||
```
|
||||
|
||||
**Skip if `TDD_MODE` is `false`.**
|
||||
|
||||
When `TDD_MODE` is `true`, check whether any completed plans in this phase have `type: tdd` in their frontmatter:
|
||||
|
||||
```bash
|
||||
TDD_PLANS=$(grep -rl "^type: tdd" "${PHASE_DIR}"/*-PLAN.md 2>/dev/null | wc -l | tr -d ' ')
|
||||
```
|
||||
|
||||
**If `TDD_PLANS` > 0:** Insert end-of-phase collaborative review checkpoint.
|
||||
|
||||
1. Collect all SUMMARY.md files for TDD plans
|
||||
2. For each TDD plan summary, verify the RED/GREEN/REFACTOR gate sequence:
|
||||
- RED gate: A failing test commit exists (`test(...)` commit with MUST-fail evidence)
|
||||
- GREEN gate: An implementation commit exists (`feat(...)` commit making tests pass)
|
||||
- REFACTOR gate: Optional cleanup commit (`refactor(...)` commit, tests still pass)
|
||||
3. If any TDD plan is missing the RED or GREEN gate commits, flag it:
|
||||
```
|
||||
⚠ TDD gate violation: Plan {plan_id} missing {RED|GREEN} phase commit.
|
||||
Expected commit pattern: test({phase}-{plan}): ... → feat({phase}-{plan}): ...
|
||||
```
|
||||
4. Present collaborative review summary:
|
||||
```
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
TDD REVIEW — Phase {X}
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
TDD Plans: {TDD_PLANS} | Gate violations: {count}
|
||||
|
||||
| Plan | RED | GREEN | REFACTOR | Status |
|
||||
|------|-----|-------|----------|--------|
|
||||
| {id} | ✓ | ✓ | ✓ | Pass |
|
||||
| {id} | ✓ | ✗ | — | FAIL |
|
||||
```
|
||||
|
||||
**Gate violations are advisory** — they do not block execution but are surfaced to the user for review. The verifier agent (step `verify_phase_goal`) will also check TDD discipline as part of its quality assessment.
|
||||
</step>
|
||||
|
||||
<step name="handle_partial_wave_execution">
|
||||
If `WAVE_FILTER` was used, re-run plan discovery after execution:
|
||||
|
||||
|
||||
@@ -61,10 +61,19 @@ PLAN_START_EPOCH=$(date +%s)
|
||||
|
||||
<step name="parse_segments">
|
||||
```bash
|
||||
# Count tasks — match <task tag at any indentation level
|
||||
TASK_COUNT=$(grep -cE '^\s*<task[[:space:]>]' .planning/phases/XX-name/{phase}-{plan}-PLAN.md 2>/dev/null || echo "0")
|
||||
INLINE_THRESHOLD=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.inline_plan_threshold --default 2 2>/dev/null || echo "2")
|
||||
grep -n "type=\"checkpoint" .planning/phases/XX-name/{phase}-{plan}-PLAN.md
|
||||
```
|
||||
|
||||
**Routing by checkpoint type:**
|
||||
**Primary routing: task count threshold (#1979)**
|
||||
|
||||
If `INLINE_THRESHOLD > 0` AND `TASK_COUNT <= INLINE_THRESHOLD`: Use Pattern C (inline) regardless of checkpoint type. Small plans execute faster inline — avoids ~14K token subagent spawn overhead and preserves prompt cache. Configure threshold via `workflow.inline_plan_threshold` (default: 2, set to `0` to always spawn subagents).
|
||||
|
||||
Otherwise: Apply checkpoint-based routing below.
|
||||
|
||||
**Checkpoint-based routing (plans with > threshold tasks):**
|
||||
|
||||
| Checkpoints | Pattern | Execution |
|
||||
|-------------|---------|-----------|
|
||||
|
||||
232
get-shit-done/workflows/extract_learnings.md
Normal file
232
get-shit-done/workflows/extract_learnings.md
Normal file
@@ -0,0 +1,232 @@
|
||||
<purpose>
|
||||
Extract decisions, lessons learned, patterns discovered, and surprises encountered from completed phase artifacts into a structured LEARNINGS.md file. Captures institutional knowledge that would otherwise be lost between phases.
|
||||
</purpose>
|
||||
|
||||
<required_reading>
|
||||
Read all files referenced by the invoking prompt's execution_context before starting.
|
||||
</required_reading>
|
||||
|
||||
<objective>
|
||||
Analyze completed phase artifacts (PLAN.md, SUMMARY.md, VERIFICATION.md, UAT.md, STATE.md) and extract structured learnings into 4 categories: decisions, lessons, patterns, and surprises. Each extracted item includes source attribution. The output is a LEARNINGS.md file with YAML frontmatter containing metadata about the extraction.
|
||||
</objective>
|
||||
|
||||
<process>
|
||||
|
||||
<step name="initialize">
|
||||
Parse arguments and load project state:
|
||||
|
||||
```bash
|
||||
INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "${PHASE_ARG}")
|
||||
if [[ "$INIT" == @file:* ]]; then INIT=$(cat "${INIT#@file:}"); fi
|
||||
```
|
||||
|
||||
Parse from init JSON: `phase_found`, `phase_dir`, `phase_number`, `phase_name`, `padded_phase`.
|
||||
|
||||
If phase not found, exit with error: "Phase {PHASE_ARG} not found."
|
||||
</step>
|
||||
|
||||
<step name="collect_artifacts">
|
||||
Read the phase artifacts. PLAN.md and SUMMARY.md are required; VERIFICATION.md, UAT.md, and STATE.md are optional.
|
||||
|
||||
**Required artifacts:**
|
||||
- `${PHASE_DIR}/*-PLAN.md` — all plan files for the phase
|
||||
- `${PHASE_DIR}/*-SUMMARY.md` — all summary files for the phase
|
||||
|
||||
If PLAN.md or SUMMARY.md files are missing, exit with error: "Required artifacts missing. PLAN.md and SUMMARY.md are required for learning extraction."
|
||||
|
||||
**Optional artifacts (read if available, skip if not found):**
|
||||
- `${PHASE_DIR}/*-VERIFICATION.md` — verification results
|
||||
- `${PHASE_DIR}/*-UAT.md` — user acceptance test results
|
||||
- `.planning/STATE.md` — project state with decisions and blockers
|
||||
|
||||
Track which optional artifacts are missing for the `missing_artifacts` frontmatter field.
|
||||
</step>
|
||||
|
||||
<step name="extract_learnings">
|
||||
Analyze all collected artifacts and extract learnings into 4 categories:
|
||||
|
||||
### 1. Decisions
|
||||
Technical and architectural decisions made during the phase. Look for:
|
||||
- Explicit decisions documented in PLAN.md or SUMMARY.md
|
||||
- Technology choices and their rationale
|
||||
- Trade-offs that were evaluated
|
||||
- Design decisions recorded in STATE.md
|
||||
|
||||
Each decision entry must include:
|
||||
- **What** was decided
|
||||
- **Why** it was decided (rationale)
|
||||
- **Source:** attribution to the artifact where the decision was found (e.g., "Source: 03-01-PLAN.md")
|
||||
|
||||
### 2. Lessons
|
||||
Things learned during execution that were not known beforehand. Look for:
|
||||
- Unexpected complexity in SUMMARY.md
|
||||
- Issues discovered during verification in VERIFICATION.md
|
||||
- Failed approaches documented in SUMMARY.md
|
||||
- UAT feedback that revealed gaps
|
||||
|
||||
Each lesson entry must include:
|
||||
- **What** was learned
|
||||
- **Context** for the lesson
|
||||
- **Source:** attribution to the originating artifact
|
||||
|
||||
### 3. Patterns
|
||||
Reusable patterns, approaches, or techniques discovered. Look for:
|
||||
- Successful implementation patterns in SUMMARY.md
|
||||
- Testing patterns from VERIFICATION.md or UAT.md
|
||||
- Workflow patterns that worked well
|
||||
- Code organization patterns from PLAN.md
|
||||
|
||||
Each pattern entry must include:
|
||||
- **Pattern** name/description
|
||||
- **When to use** it
|
||||
- **Source:** attribution to the originating artifact
|
||||
|
||||
### 4. Surprises
|
||||
Unexpected findings, behaviors, or outcomes. Look for:
|
||||
- Things that took longer or shorter than estimated
|
||||
- Unexpected dependencies or interactions
|
||||
- Edge cases not anticipated in planning
|
||||
- Performance or behavior that differed from expectations
|
||||
|
||||
Each surprise entry must include:
|
||||
- **What** was surprising
|
||||
- **Impact** of the surprise
|
||||
- **Source:** attribution to the originating artifact
|
||||
</step>
|
||||
|
||||
<step name="capture_thought_integration">
|
||||
If the `capture_thought` tool is available in the current session, capture each extracted learning as a thought with metadata:
|
||||
|
||||
```
|
||||
capture_thought({
|
||||
category: "decision" | "lesson" | "pattern" | "surprise",
|
||||
phase: PHASE_NUMBER,
|
||||
content: LEARNING_TEXT,
|
||||
source: ARTIFACT_NAME
|
||||
})
|
||||
```
|
||||
|
||||
If `capture_thought` is not available (e.g., runtime does not support it), gracefully skip this step and continue. The LEARNINGS.md file is the primary output — capture_thought is a supplementary integration available only in runtimes that support thought capture. The workflow must not fail or warn if capture_thought is unavailable.
|
||||
</step>
|
||||
|
||||
<step name="write_learnings">
|
||||
Write the LEARNINGS.md file to the phase directory. If a previous LEARNINGS.md exists, overwrite it (replace the file entirely).
|
||||
|
||||
Output path: `${PHASE_DIR}/${PADDED_PHASE}-LEARNINGS.md`
|
||||
|
||||
The file must have YAML frontmatter with these fields:
|
||||
```yaml
|
||||
---
|
||||
phase: {PHASE_NUMBER}
|
||||
phase_name: "{PHASE_NAME}"
|
||||
project: "{PROJECT_NAME}"
|
||||
generated: "{ISO_DATE}"
|
||||
counts:
|
||||
decisions: {N}
|
||||
lessons: {N}
|
||||
patterns: {N}
|
||||
surprises: {N}
|
||||
missing_artifacts:
|
||||
- "{ARTIFACT_NAME}"
|
||||
---
|
||||
```
|
||||
|
||||
The body follows this structure:
|
||||
```markdown
|
||||
# Phase {PHASE_NUMBER} Learnings: {PHASE_NAME}
|
||||
|
||||
## Decisions
|
||||
|
||||
### {Decision Title}
|
||||
{What was decided}
|
||||
|
||||
**Rationale:** {Why}
|
||||
**Source:** {artifact file}
|
||||
|
||||
---
|
||||
|
||||
## Lessons
|
||||
|
||||
### {Lesson Title}
|
||||
{What was learned}
|
||||
|
||||
**Context:** {context}
|
||||
**Source:** {artifact file}
|
||||
|
||||
---
|
||||
|
||||
## Patterns
|
||||
|
||||
### {Pattern Name}
|
||||
{Description}
|
||||
|
||||
**When to use:** {applicability}
|
||||
**Source:** {artifact file}
|
||||
|
||||
---
|
||||
|
||||
## Surprises
|
||||
|
||||
### {Surprise Title}
|
||||
{What was surprising}
|
||||
|
||||
**Impact:** {impact description}
|
||||
**Source:** {artifact file}
|
||||
```
|
||||
</step>
|
||||
|
||||
<step name="update_state">
|
||||
Update STATE.md to reflect the learning extraction:
|
||||
|
||||
```bash
|
||||
node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" state update "Last Activity" "$(date +%Y-%m-%d)"
|
||||
```
|
||||
</step>
|
||||
|
||||
<step name="report">
|
||||
```
|
||||
---------------------------------------------------------------
|
||||
|
||||
## Learnings Extracted: Phase {X} — {Name}
|
||||
|
||||
Decisions: {N}
|
||||
Lessons: {N}
|
||||
Patterns: {N}
|
||||
Surprises: {N}
|
||||
Total: {N}
|
||||
|
||||
Output: {PHASE_DIR}/{PADDED_PHASE}-LEARNINGS.md
|
||||
|
||||
Missing artifacts: {list or "none"}
|
||||
|
||||
Next steps:
|
||||
- Review extracted learnings for accuracy
|
||||
- /gsd-progress — see overall project state
|
||||
- /gsd-execute-phase {next} — continue to next phase
|
||||
|
||||
---------------------------------------------------------------
|
||||
```
|
||||
</step>
|
||||
|
||||
</process>
|
||||
|
||||
<success_criteria>
|
||||
- [ ] Phase artifacts located and read successfully
|
||||
- [ ] All 4 categories extracted: decisions, lessons, patterns, surprises
|
||||
- [ ] Each extracted item has source attribution
|
||||
- [ ] LEARNINGS.md written with correct YAML frontmatter
|
||||
- [ ] Missing optional artifacts tracked in frontmatter
|
||||
- [ ] capture_thought integration attempted if tool available
|
||||
- [ ] STATE.md updated with extraction activity
|
||||
- [ ] User receives summary report
|
||||
</success_criteria>
|
||||
|
||||
<critical_rules>
|
||||
- PLAN.md and SUMMARY.md are required — exit with clear error if missing
|
||||
- VERIFICATION.md, UAT.md, and STATE.md are optional — extract from them if present, skip gracefully if not found
|
||||
- Every extracted learning must have source attribution back to the originating artifact
|
||||
- Running extract-learnings twice on the same phase must overwrite (replace) the previous LEARNINGS.md, not append
|
||||
- Do not fabricate learnings — only extract what is explicitly documented in artifacts
|
||||
- If capture_thought is unavailable, the workflow must not fail — graceful degradation to file-only output
|
||||
- LEARNINGS.md frontmatter must include counts for all 4 categories and list any missing_artifacts
|
||||
</critical_rules>
|
||||
@@ -202,7 +202,7 @@ Workspace created: $TARGET_PATH
|
||||
Branch: $BRANCH_NAME
|
||||
|
||||
Next steps:
|
||||
cd $TARGET_PATH
|
||||
cd "$TARGET_PATH"
|
||||
/gsd-new-project # Initialize GSD in the workspace
|
||||
```
|
||||
|
||||
@@ -215,7 +215,7 @@ Workspace created with $SUCCESS_COUNT of $TOTAL_COUNT repos: $TARGET_PATH
|
||||
Failed: repo3 (branch already exists), repo4 (not a git repo)
|
||||
|
||||
Next steps:
|
||||
cd $TARGET_PATH
|
||||
cd "$TARGET_PATH"
|
||||
/gsd-new-project # Initialize GSD in the workspace
|
||||
```
|
||||
|
||||
@@ -225,7 +225,7 @@ Use AskUserQuestion:
|
||||
- header: "Initialize GSD"
|
||||
- question: "Would you like to initialize a GSD project in the new workspace?"
|
||||
- options:
|
||||
- "Yes — run /gsd-new-project" → tell user to `cd $TARGET_PATH` first, then run `/gsd-new-project`
|
||||
- "Yes — run /gsd-new-project" → tell user to `cd "$TARGET_PATH"` first, then run `/gsd-new-project`
|
||||
- "No — I'll set it up later" → done
|
||||
|
||||
</process>
|
||||
|
||||
@@ -82,12 +82,56 @@ Use `--force` to bypass this check.
|
||||
```
|
||||
Exit.
|
||||
|
||||
**Consecutive-call guard:**
|
||||
After passing all gates, check a counter file `.planning/.next-call-count`:
|
||||
- If file exists and count >= 6: prompt "You've called /gsd-next {N} times consecutively. Continue? [y/N]"
|
||||
- If user says no, exit
|
||||
- Increment the counter
|
||||
- The counter file is deleted by any non-`/gsd-next` command (convention — other workflows don't need to implement this, the note here is sufficient)
|
||||
**Prior-phase completeness scan:**
|
||||
After passing all three hard-stop gates, scan all phases that precede the current phase in ROADMAP.md order for incomplete work. Use the existing `gsd-tools.cjs phase json <N>` output to inspect each prior phase.
|
||||
|
||||
Detect three categories of incomplete work:
|
||||
1. **Plans without summaries** — a PLAN.md exists in a prior phase directory but no matching SUMMARY.md exists (execution started but not completed).
|
||||
2. **Verification failures not overridden** — a prior phase has a VERIFICATION.md with `FAIL` items that have no override annotation.
|
||||
3. **CONTEXT.md without plans** — a prior phase directory has a CONTEXT.md but no PLAN.md files (discussion happened, planning never ran).
|
||||
|
||||
If no incomplete prior work is found, continue to `determine_next_action` silently with no interruption.
|
||||
|
||||
If incomplete prior work is found, show a structured completeness report:
|
||||
```
|
||||
⚠ Prior phase has incomplete work
|
||||
|
||||
Phase {N} — "{name}" has unresolved items:
|
||||
• Plan {N}-{M} ({slug}): executed but no SUMMARY.md
|
||||
[... additional items ...]
|
||||
|
||||
Advancing before resolving these may cause:
|
||||
• Verification gaps — future phase verification won't have visibility into what prior phases shipped
|
||||
• Context loss — plans that ran without summaries leave no record for future agents
|
||||
|
||||
Options:
|
||||
[C] Continue and defer these items to backlog
|
||||
[S] Stop and resolve manually (recommended)
|
||||
[F] Force advance without recording deferral
|
||||
|
||||
Choice [S]:
|
||||
```
|
||||
|
||||
**If the user chooses "Stop" (S or Enter/default):** Exit without routing.
|
||||
|
||||
**If the user chooses "Continue and defer" (C):**
|
||||
1. For each incomplete item, create a backlog entry in `ROADMAP.md` under `## Backlog` using the existing `999.x` numbering scheme:
|
||||
```markdown
|
||||
### Phase 999.{N}: Follow-up — Phase {src} incomplete plans (BACKLOG)
|
||||
|
||||
**Goal:** Resolve plans that ran without producing summaries during Phase {src} execution
|
||||
**Source phase:** {src}
|
||||
**Deferred at:** {date} during /gsd-next advancement to Phase {dest}
|
||||
**Plans:**
|
||||
- [ ] {N}-{M}: {slug} (ran, no SUMMARY.md)
|
||||
```
|
||||
2. Commit the deferral record:
|
||||
```bash
|
||||
node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: defer incomplete Phase {src} items to backlog"
|
||||
```
|
||||
3. Continue routing to `determine_next_action` immediately — no second prompt.
|
||||
|
||||
**If the user chooses "Force" (F):** Continue to `determine_next_action` without recording deferral.
|
||||
</step>
|
||||
|
||||
<step name="determine_next_action">
|
||||
|
||||
@@ -15,6 +15,7 @@ Read all files referenced by the invoking prompt's execution_context before star
|
||||
<available_agent_types>
|
||||
Valid GSD subagent types (use exact names — do not fall back to 'general-purpose'):
|
||||
- gsd-phase-researcher — Researches technical approaches for a phase
|
||||
- gsd-pattern-mapper — Analyzes codebase for existing patterns, produces PATTERNS.md
|
||||
- gsd-planner — Creates detailed plans from phase scope
|
||||
- gsd-plan-checker — Reviews plan quality before execution
|
||||
</available_agent_types>
|
||||
@@ -32,9 +33,12 @@ AGENT_SKILLS_RESEARCHER=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" a
|
||||
AGENT_SKILLS_PLANNER=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" agent-skills gsd-planner 2>/dev/null)
|
||||
AGENT_SKILLS_CHECKER=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" agent-skills gsd-plan-checker 2>/dev/null)
|
||||
CONTEXT_WINDOW=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get context_window 2>/dev/null || echo "200000")
|
||||
TDD_MODE=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.tdd_mode 2>/dev/null || echo "false")
|
||||
```
|
||||
|
||||
When `CONTEXT_WINDOW >= 500000`, the planner prompt includes prior phase CONTEXT.md files so cross-phase decisions are consistent (e.g., "use library X for all data fetching" from Phase 2 is visible to Phase 5's planner).
|
||||
When `TDD_MODE` is `true`, the planner agent is instructed to apply `type: tdd` to eligible tasks using heuristics from `references/tdd.md`. The planner's `<required_reading>` is extended to include `@~/.claude/get-shit-done/references/tdd.md` so gate enforcement rules are available during planning.
|
||||
|
||||
When `CONTEXT_WINDOW >= 500000`, the planner prompt includes the 3 most recent prior phase CONTEXT.md and SUMMARY.md files PLUS any phases explicitly listed in the current phase's `Depends on:` field in ROADMAP.md. Explicit dependencies always load regardless of recency (e.g., Phase 7 declaring `Depends on: Phase 2` always sees Phase 2's context). Bounded recency keeps the planner's context budget focused on recent work.
|
||||
|
||||
Parse JSON for: `researcher_model`, `planner_model`, `checker_model`, `research_enabled`, `plan_checker_enabled`, `nyquist_validation_enabled`, `commit_docs`, `text_mode`, `phase_found`, `phase_dir`, `phase_number`, `phase_name`, `phase_slug`, `padded_phase`, `has_research`, `has_context`, `has_reviews`, `has_plans`, `plan_count`, `planning_exists`, `roadmap_exists`, `phase_req_ids`, `response_language`.
|
||||
|
||||
@@ -46,7 +50,7 @@ Parse JSON for: `researcher_model`, `planner_model`, `checker_model`, `research_
|
||||
|
||||
## 2. Parse and Normalize Arguments
|
||||
|
||||
Extract from $ARGUMENTS: phase number (integer or decimal like `2.1`), flags (`--research`, `--skip-research`, `--gaps`, `--skip-verify`, `--skip-ui`, `--prd <filepath>`, `--reviews`, `--text`).
|
||||
Extract from $ARGUMENTS: phase number (integer or decimal like `2.1`), flags (`--research`, `--skip-research`, `--gaps`, `--skip-verify`, `--skip-ui`, `--prd <filepath>`, `--reviews`, `--text`, `--bounce`, `--skip-bounce`).
|
||||
|
||||
Set `TEXT_MODE=true` if `--text` is present in $ARGUMENTS OR `text_mode` from init JSON is `true`. When `TEXT_MODE` is active, replace every `AskUserQuestion` call with a plain-text numbered list and ask the user to type their choice number. This is required for Claude Code remote sessions (`/rc` mode) where TUI menus don't work through the Claude App.
|
||||
|
||||
@@ -588,6 +592,7 @@ VERIFICATION_PATH=$(_gsd_field "$INIT" verification_path)
|
||||
UAT_PATH=$(_gsd_field "$INIT" uat_path)
|
||||
CONTEXT_PATH=$(_gsd_field "$INIT" context_path)
|
||||
REVIEWS_PATH=$(_gsd_field "$INIT" reviews_path)
|
||||
PATTERNS_PATH=$(_gsd_field "$INIT" patterns_path)
|
||||
```
|
||||
|
||||
## 7.5. Verify Nyquist Artifacts
|
||||
@@ -611,7 +616,66 @@ If missing and Nyquist is still enabled/applicable — ask user:
|
||||
`node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-set workflow.nyquist_validation false`
|
||||
3. Continue anyway (plans fail Dimension 8)
|
||||
|
||||
Proceed to Step 8 only if user selects 2 or 3.
|
||||
Proceed to Step 7.8 (or Step 8 if pattern mapper is disabled) only if user selects 2 or 3.
|
||||
|
||||
## 7.8. Spawn gsd-pattern-mapper Agent (Optional)
|
||||
|
||||
**Skip if** `workflow.pattern_mapper` is explicitly set to `false` in config.json (absent key = enabled). Also skip if no CONTEXT.md and no RESEARCH.md exist for this phase (nothing to extract file lists from).
|
||||
|
||||
Check config:
|
||||
```bash
|
||||
PATTERN_MAPPER_CFG=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.pattern_mapper --default true 2>/dev/null)
|
||||
```
|
||||
|
||||
**If `PATTERN_MAPPER_CFG` is `false`:** Skip to step 8.
|
||||
|
||||
**If PATTERNS.md already exists** (`PATTERNS_PATH` is non-empty from step 7): Skip to step 8 (use existing).
|
||||
|
||||
Display banner:
|
||||
```
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
GSD ► PATTERN MAPPING PHASE {X}
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
◆ Spawning pattern mapper...
|
||||
```
|
||||
|
||||
Pattern mapper prompt:
|
||||
|
||||
```markdown
|
||||
<pattern_mapping_context>
|
||||
**Phase:** {phase_number} - {phase_name}
|
||||
**Phase directory:** {phase_dir}
|
||||
**Padded phase:** {padded_phase}
|
||||
|
||||
<files_to_read>
|
||||
- {context_path} (USER DECISIONS from /gsd-discuss-phase)
|
||||
- {research_path} (Technical Research)
|
||||
</files_to_read>
|
||||
|
||||
**Output file:** {phase_dir}/{padded_phase}-PATTERNS.md
|
||||
|
||||
Extract the list of files to be created/modified from CONTEXT.md and RESEARCH.md. For each file, classify by role and data flow, find the closest existing analog in the codebase, extract concrete code excerpts, and produce PATTERNS.md.
|
||||
</pattern_mapping_context>
|
||||
```
|
||||
|
||||
Spawn with:
|
||||
```
|
||||
Task(
|
||||
prompt="{above}",
|
||||
subagent_type="gsd-pattern-mapper",
|
||||
model="{researcher_model}",
|
||||
)
|
||||
```
|
||||
|
||||
**Handle return:**
|
||||
- **`## PATTERN MAPPING COMPLETE`:** Update `PATTERNS_PATH` to the created file path, continue to step 8.
|
||||
- **Any error or empty return:** Log warning, continue to step 8 without patterns (non-blocking).
|
||||
|
||||
After pattern mapper completes, update the path variable:
|
||||
```bash
|
||||
PATTERNS_PATH="${PHASE_DIR}/${PADDED_PHASE}-PATTERNS.md"
|
||||
```
|
||||
|
||||
## 8. Spawn gsd-planner Agent
|
||||
|
||||
@@ -637,14 +701,17 @@ Planner prompt:
|
||||
- {requirements_path} (Requirements)
|
||||
- {context_path} (USER DECISIONS from /gsd-discuss-phase)
|
||||
- {research_path} (Technical Research)
|
||||
- {PATTERNS_PATH} (Pattern Map — analog files and code excerpts, if exists)
|
||||
- {verification_path} (Verification Gaps - if --gaps)
|
||||
- {uat_path} (UAT Gaps - if --gaps)
|
||||
- {reviews_path} (Cross-AI Review Feedback - if --reviews)
|
||||
- {UI_SPEC_PATH} (UI Design Contract — visual/interaction specs, if exists)
|
||||
${CONTEXT_WINDOW >= 500000 ? `
|
||||
**Cross-phase context (1M model enrichment):**
|
||||
- Prior phase CONTEXT.md files (locked decisions from earlier phases — maintain consistency)
|
||||
- Prior phase SUMMARY.md files (what was actually built — reuse patterns, avoid duplication)
|
||||
- CONTEXT.md files from the 3 most recent completed phases (locked decisions — maintain consistency)
|
||||
- SUMMARY.md files from the 3 most recent completed phases (what was built — reuse patterns, avoid duplication)
|
||||
- CONTEXT.md and SUMMARY.md from any phases listed in the current phase's "Depends on:" field in ROADMAP.md (regardless of recency — explicit dependencies always load, deduplicated against the 3 most recent)
|
||||
- Skip all other prior phases to stay within context budget
|
||||
` : ''}
|
||||
</files_to_read>
|
||||
|
||||
@@ -655,6 +722,16 @@ ${AGENT_SKILLS_PLANNER}
|
||||
**Project instructions:** Read ./CLAUDE.md if exists — follow project-specific guidelines
|
||||
**Project skills:** Check .claude/skills/ or .agents/skills/ directory (if either exists) — read SKILL.md files, plans should account for project skill rules
|
||||
|
||||
${TDD_MODE === 'true' ? `
|
||||
<tdd_mode_active>
|
||||
**TDD Mode is ENABLED.** Apply TDD heuristics from @~/.claude/get-shit-done/references/tdd.md to all eligible tasks:
|
||||
- Business logic with defined I/O → type: tdd
|
||||
- API endpoints with request/response contracts → type: tdd
|
||||
- Data transformations, validation, algorithms → type: tdd
|
||||
- UI, config, glue code, CRUD → standard plan (type: execute)
|
||||
Each TDD plan gets one feature with RED/GREEN/REFACTOR gate sequence.
|
||||
</tdd_mode_active>
|
||||
` : ''}
|
||||
</planning_context>
|
||||
|
||||
<downstream_consumer>
|
||||
@@ -719,41 +796,70 @@ Task(
|
||||
## 9. Handle Planner Return
|
||||
|
||||
- **`## PLANNING COMPLETE`:** Display plan count. If `--skip-verify` or `plan_checker_enabled` is false (from init): skip to step 13. Otherwise: step 10.
|
||||
- **`## PHASE SPLIT RECOMMENDED`:** The planner determined the phase is too complex to implement all user decisions without simplifying them. Handle in step 9b.
|
||||
- **`## PHASE SPLIT RECOMMENDED`:** The planner determined the phase exceeds the context budget for full-fidelity implementation of all source items. Handle in step 9b.
|
||||
- **`## ⚠ Source Audit: Unplanned Items Found`:** The planner's multi-source coverage audit found items from REQUIREMENTS.md, RESEARCH.md, ROADMAP goal, or CONTEXT.md decisions that are not covered by any plan. Handle in step 9c.
|
||||
- **`## CHECKPOINT REACHED`:** Present to user, get response, spawn continuation (step 12)
|
||||
- **`## PLANNING INCONCLUSIVE`:** Show attempts, offer: Add context / Retry / Manual
|
||||
|
||||
## 9b. Handle Phase Split Recommendation
|
||||
|
||||
When the planner returns `## PHASE SPLIT RECOMMENDED`, it means the phase has too many decisions to implement at full fidelity within the plan budget. The planner proposes groupings.
|
||||
When the planner returns `## PHASE SPLIT RECOMMENDED`, it means the phase's source items exceed the context budget for full-fidelity implementation. The planner proposes groupings.
|
||||
|
||||
**Extract from planner return:**
|
||||
- Proposed sub-phases (e.g., "17a: processing core (D-01 to D-19)", "17b: billing + config UX (D-20 to D-27)")
|
||||
- Which D-XX decisions go in each sub-phase
|
||||
- Why the split is necessary (decision count, complexity estimate)
|
||||
- Which source items (REQ-IDs, D-XX decisions, RESEARCH items) go in each sub-phase
|
||||
- Why the split is necessary (context cost estimate, file count)
|
||||
|
||||
**Present to user:**
|
||||
```
|
||||
## Phase {X} is too complex for full-fidelity implementation
|
||||
## Phase {X} exceeds context budget for full-fidelity implementation
|
||||
|
||||
The planner found {N} decisions that cannot all be implemented without
|
||||
simplifying some. Instead of reducing your decisions, we recommend splitting:
|
||||
The planner found {N} source items that exceed the context budget when
|
||||
planned at full fidelity. Instead of reducing scope, we recommend splitting:
|
||||
|
||||
**Option 1: Split into sub-phases**
|
||||
- Phase {X}a: {name} — {D-XX to D-YY} ({N} decisions)
|
||||
- Phase {X}b: {name} — {D-XX to D-YY} ({M} decisions)
|
||||
- Phase {X}a: {name} — {items} ({N} source items, ~{P}% context)
|
||||
- Phase {X}b: {name} — {items} ({M} source items, ~{Q}% context)
|
||||
|
||||
**Option 2: Proceed anyway** (planner will attempt all, quality may degrade)
|
||||
**Option 2: Proceed anyway** (planner will attempt all, quality may degrade past 50% context)
|
||||
|
||||
**Option 3: Prioritize** — you choose which decisions to implement now,
|
||||
**Option 3: Prioritize** — you choose which items to implement now,
|
||||
rest become a follow-up phase
|
||||
```
|
||||
|
||||
Use AskUserQuestion with these 3 options.
|
||||
|
||||
**If "Split":** Use `/gsd-insert-phase` to create the sub-phases, then replan each.
|
||||
**If "Proceed":** Return to planner with instruction to attempt all decisions at full fidelity, accepting more plans/tasks.
|
||||
**If "Prioritize":** Use AskUserQuestion (multiSelect) to let user pick which D-XX are "now" vs "later". Create CONTEXT.md for each sub-phase with the selected decisions.
|
||||
**If "Proceed":** Return to planner with instruction to attempt all items at full fidelity, accepting more plans/tasks.
|
||||
**If "Prioritize":** Use AskUserQuestion (multiSelect) to let user pick which items are "now" vs "later". Create CONTEXT.md for each sub-phase with the selected items.
|
||||
|
||||
## 9c. Handle Source Audit Gaps
|
||||
|
||||
When the planner returns `## ⚠ Source Audit: Unplanned Items Found`, it means items from REQUIREMENTS.md, RESEARCH.md, ROADMAP goal, or CONTEXT.md decisions have no corresponding plan.
|
||||
|
||||
**Extract from planner return:**
|
||||
- Each unplanned item with its source artifact and section
|
||||
- The planner's suggested options (A: add plan, B: split phase, C: defer with confirmation)
|
||||
|
||||
**Present each gap to user.** For each unplanned item:
|
||||
|
||||
```
|
||||
## ⚠ Unplanned: {item description}
|
||||
|
||||
Source: {RESEARCH.md / REQUIREMENTS.md / ROADMAP goal / CONTEXT.md}
|
||||
Details: {why the planner flagged this}
|
||||
|
||||
Options:
|
||||
1. Add a plan to cover this item (recommended)
|
||||
2. Split phase — move to a sub-phase with related items
|
||||
3. Defer — add to backlog (developer confirms this is intentional)
|
||||
```
|
||||
|
||||
Use AskUserQuestion for each gap (or batch if multiple gaps).
|
||||
|
||||
**If "Add plan":** Return to planner (step 8) with instruction to add plans covering the missing items, preserving existing plans.
|
||||
**If "Split":** Use `/gsd-insert-phase` for overflow items, then replan.
|
||||
**If "Defer":** Record in CONTEXT.md `## Deferred Ideas` with developer's confirmation. Proceed to step 10.
|
||||
|
||||
## 10. Spawn gsd-plan-checker Agent
|
||||
|
||||
@@ -901,6 +1007,77 @@ Display: `Max iterations reached. {N} issues remain:` + issue list
|
||||
|
||||
Offer: 1) Force proceed, 2) Provide guidance and retry, 3) Abandon
|
||||
|
||||
## 12.5. Plan Bounce (Optional External Refinement)
|
||||
|
||||
**Skip if:** `--skip-bounce` flag, `--gaps` flag, or bounce is not activated.
|
||||
|
||||
**Activation:** Bounce runs when `--bounce` flag is present OR `workflow.plan_bounce` config is `true`. The `--skip-bounce` flag always wins (disables bounce even if config enables it). The `--gaps` flag also disables bounce (gap-closure mode should not modify plans externally).
|
||||
|
||||
**Prerequisites:** `workflow.plan_bounce_script` must be set to a valid script path. If bounce is activated but no script is configured, display warning and skip:
|
||||
```
|
||||
⚠ Plan bounce activated but no script configured.
|
||||
Set workflow.plan_bounce_script to the path of your refinement script.
|
||||
Skipping bounce step.
|
||||
```
|
||||
|
||||
**Read pass count:**
|
||||
```bash
|
||||
BOUNCE_PASSES=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.plan_bounce_passes --default 2)
|
||||
BOUNCE_SCRIPT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.plan_bounce_script)
|
||||
```
|
||||
|
||||
Display banner:
|
||||
```
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
GSD ► BOUNCING PLANS (External Refinement)
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
Script: ${BOUNCE_SCRIPT}
|
||||
Max passes: ${BOUNCE_PASSES}
|
||||
```
|
||||
|
||||
**For each PLAN.md file in the phase directory:**
|
||||
|
||||
1. **Backup:** Copy `*-PLAN.md` to `*-PLAN.pre-bounce.md`
|
||||
```bash
|
||||
cp "${PLAN_FILE}" "${PLAN_FILE%.md}.pre-bounce.md"
|
||||
```
|
||||
|
||||
2. **Invoke bounce script:**
|
||||
```bash
|
||||
"${BOUNCE_SCRIPT}" "${PLAN_FILE}" "${BOUNCE_PASSES}"
|
||||
```
|
||||
|
||||
3. **Validate bounced plan — YAML frontmatter integrity:**
|
||||
After the script returns, check that the bounced file still has valid YAML frontmatter (opening and closing `---` delimiters with parseable content between them). If the bounced plan breaks YAML frontmatter validation, restore the original from the pre-bounce.md backup and continue to the next plan:
|
||||
```
|
||||
⚠ Bounced plan ${PLAN_FILE} has broken YAML frontmatter — restoring original from pre-bounce backup.
|
||||
```
|
||||
|
||||
4. **Handle script failure:** If the bounce script exits non-zero, restore the original plan from the pre-bounce.md backup and continue to the next plan:
|
||||
```
|
||||
⚠ Bounce script failed for ${PLAN_FILE} (exit code ${EXIT_CODE}) — restoring original from pre-bounce backup.
|
||||
```
|
||||
|
||||
**After all plans are bounced:**
|
||||
|
||||
5. **Re-run plan checker on bounced plans:** Spawn gsd-plan-checker (same as step 10) on all modified plans. If a bounced plan fails the checker, restore original from its pre-bounce.md backup:
|
||||
```
|
||||
⚠ Bounced plan ${PLAN_FILE} failed checker validation — restoring original from pre-bounce backup.
|
||||
```
|
||||
|
||||
6. **Commit surviving bounced plans:** If at least one plan survived both the frontmatter validation and the checker re-run, commit the changes:
|
||||
```bash
|
||||
node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "refactor(${padded_phase}): bounce plans through external refinement" --files "${PHASE_DIR}/*-PLAN.md"
|
||||
```
|
||||
|
||||
Display summary:
|
||||
```
|
||||
Plan bounce complete: {survived}/{total} plans refined
|
||||
```
|
||||
|
||||
**Clean up:** Remove all `*-PLAN.pre-bounce.md` backup files after the bounce step completes (whether plans survived or were restored).
|
||||
|
||||
## 13. Requirements Coverage Gate
|
||||
|
||||
After plans pass the checker (or checker is skipped), verify that all phase requirements are covered by at least one plan.
|
||||
|
||||
@@ -43,7 +43,7 @@ Cannot remove workspace "$WORKSPACE_NAME" — the following repos have uncommitt
|
||||
- repo2
|
||||
|
||||
Commit or stash changes in these repos before removing the workspace:
|
||||
cd $WORKSPACE_PATH/repo1
|
||||
cd "$WORKSPACE_PATH/repo1"
|
||||
git stash # or git commit
|
||||
```
|
||||
|
||||
|
||||
@@ -56,6 +56,9 @@ Determine which CLI to skip based on the current runtime environment:
|
||||
if [ "$ANTIGRAVITY_AGENT" = "1" ]; then
|
||||
# Antigravity is a separate client — all CLIs are external, skip none
|
||||
SELF_CLI="none"
|
||||
elif [ -n "$CURSOR_SESSION_ID" ]; then
|
||||
# Running inside Cursor agent — skip cursor for independence
|
||||
SELF_CLI="cursor"
|
||||
elif [ -n "$CLAUDE_CODE_ENTRYPOINT" ]; then
|
||||
# Running inside Claude Code CLI — skip claude for independence
|
||||
SELF_CLI="claude"
|
||||
@@ -275,6 +278,18 @@ plans_reviewed: [{list of PLAN.md files}]
|
||||
|
||||
---
|
||||
|
||||
## Qwen Review
|
||||
|
||||
{qwen review content}
|
||||
|
||||
---
|
||||
|
||||
## Cursor Review
|
||||
|
||||
{cursor review content}
|
||||
|
||||
---
|
||||
|
||||
## Consensus Summary
|
||||
|
||||
{synthesize common concerns across all reviewers}
|
||||
|
||||
@@ -159,6 +159,68 @@ Report: "PR #{number} created: {url}"
|
||||
</step>
|
||||
|
||||
<step name="optional_review">
|
||||
|
||||
**External code review command (automated sub-step):**
|
||||
|
||||
Before prompting the user, check if an external review command is configured:
|
||||
|
||||
```bash
|
||||
REVIEW_CMD=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-get workflow.code_review_command --default "" 2>/dev/null)
|
||||
```
|
||||
|
||||
If `REVIEW_CMD` is non-empty and not `"null"`, run the external review:
|
||||
|
||||
1. **Generate diff and stats:**
|
||||
```bash
|
||||
   DIFF=$(git diff "${BASE_BRANCH}...HEAD")
|
||||
   DIFF_STATS=$(git diff --stat "${BASE_BRANCH}...HEAD")
|
||||
```
|
||||
|
||||
2. **Load phase context from STATE.md:**
|
||||
```bash
|
||||
STATE_STATUS=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" state load 2>/dev/null | head -20)
|
||||
```
|
||||
|
||||
3. **Build review prompt and pipe to command via stdin:**
|
||||
Construct a review prompt containing the diff, diff stats, and phase context, then pipe it to the configured command:
|
||||
```bash
|
||||
REVIEW_PROMPT="You are reviewing a pull request.\n\nDiff stats:\n${DIFF_STATS}\n\nPhase context:\n${STATE_STATUS}\n\nFull diff:\n${DIFF}\n\nRespond with JSON: { \"verdict\": \"APPROVED\" or \"REVISE\", \"confidence\": 0-100, \"summary\": \"...\", \"issues\": [{\"severity\": \"...\", \"file\": \"...\", \"line_range\": \"...\", \"description\": \"...\", \"suggestion\": \"...\"}] }"
|
||||
   REVIEW_OUTPUT=$(printf '%b' "${REVIEW_PROMPT}" | timeout 120 ${REVIEW_CMD} 2>/tmp/gsd-review-stderr.log)
|
||||
REVIEW_EXIT=$?
|
||||
```
|
||||
|
||||
4. **Handle timeout (120s) and failure:**
|
||||
If `REVIEW_EXIT` is non-zero or the command times out:
|
||||
```bash
|
||||
if [ $REVIEW_EXIT -ne 0 ]; then
|
||||
REVIEW_STDERR=$(cat /tmp/gsd-review-stderr.log 2>/dev/null)
|
||||
echo "WARNING: External review command failed (exit ${REVIEW_EXIT}). stderr: ${REVIEW_STDERR}"
|
||||
echo "Continuing with manual review flow..."
|
||||
fi
|
||||
```
|
||||
On failure, warn with stderr output and fall through to the manual review flow below.
|
||||
|
||||
5. **Parse JSON result:**
|
||||
If the command succeeded, parse the JSON output and report the verdict:
|
||||
```bash
|
||||
# Parse verdict and summary from REVIEW_OUTPUT JSON
|
||||
VERDICT=$(echo "${REVIEW_OUTPUT}" | node -e "
|
||||
let d=''; process.stdin.on('data',c=>d+=c); process.stdin.on('end',()=>{
|
||||
try { const r=JSON.parse(d); console.log(r.verdict); }
|
||||
catch(e) { console.log('INVALID_JSON'); }
|
||||
});
|
||||
")
|
||||
```
|
||||
- If `verdict` is `"APPROVED"`: report approval with confidence and summary.
|
||||
- If `verdict` is `"REVISE"`: report issues found, list each issue with severity, file, line_range, description, and suggestion.
|
||||
- If JSON is invalid (`INVALID_JSON`): warn "External review returned invalid JSON" with stderr and continue.
|
||||
|
||||
Regardless of the external review result, fall through to the manual review options below.
|
||||
|
||||
---
|
||||
|
||||
**Manual review options:**
|
||||
|
||||
Ask if user wants to trigger a code review:
|
||||
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { spawn } = require('child_process');
|
||||
|
||||
const WARNING_THRESHOLD = 35; // remaining_percentage <= 35%
|
||||
const CRITICAL_THRESHOLD = 25; // remaining_percentage <= 25%
|
||||
@@ -128,6 +129,32 @@ process.stdin.on('end', () => {
|
||||
// Detect if GSD is active (has .planning/STATE.md in working directory)
|
||||
const isGsdActive = fs.existsSync(path.join(cwd, '.planning', 'STATE.md'));
|
||||
|
||||
// On CRITICAL with active GSD project, auto-record session state as a
|
||||
// breadcrumb for /gsd-resume-work (#1974). Fire-and-forget subprocess —
|
||||
// doesn't block the hook or the agent. Fires ONCE per CRITICAL session,
|
||||
// guarded by warnData.criticalRecorded to prevent repeated overwrites
|
||||
// of the "crash moment" record on every debounce cycle.
|
||||
if (isCritical && isGsdActive && !warnData.criticalRecorded) {
|
||||
try {
|
||||
// Runtime-agnostic path: this hook lives at <runtime-config>/hooks/
|
||||
// and gsd-tools.cjs lives at <runtime-config>/get-shit-done/bin/.
|
||||
// Using __dirname makes this work on Claude Code, OpenCode, Gemini,
|
||||
// Kilo, etc. without hardcoding ~/.claude/.
|
||||
const gsdTools = path.join(__dirname, '..', 'get-shit-done', 'bin', 'gsd-tools.cjs');
|
||||
// Coerce usedPct to a safe number in case bridge file is malformed
|
||||
const safeUsedPct = Number(usedPct) || 0;
|
||||
const stoppedAt = `context exhaustion at ${safeUsedPct}% (${new Date().toISOString().split('T')[0]})`;
|
||||
spawn(
|
||||
process.execPath,
|
||||
[gsdTools, 'state', 'record-session', '--stopped-at', stoppedAt],
|
||||
{ cwd, detached: true, stdio: 'ignore' }
|
||||
).unref();
|
||||
warnData.criticalRecorded = true;
|
||||
// Persist the sentinel so subsequent debounce cycles don't re-fire
|
||||
fs.writeFileSync(warnPath, JSON.stringify(warnData));
|
||||
} catch { /* non-critical — don't let state recording break the hook */ }
|
||||
}
|
||||
|
||||
// Build advisory warning message (never use imperative commands that
|
||||
// override user preferences — see #884)
|
||||
let message;
|
||||
|
||||
4
package-lock.json
generated
4
package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "get-shit-done-cc",
|
||||
"version": "1.34.2",
|
||||
"version": "1.35.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "get-shit-done-cc",
|
||||
"version": "1.34.2",
|
||||
"version": "1.35.0",
|
||||
"license": "MIT",
|
||||
"bin": {
|
||||
"get-shit-done-cc": "bin/install.js"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "get-shit-done-cc",
|
||||
"version": "1.34.2",
|
||||
"version": "1.35.0",
|
||||
"description": "A meta-prompting, context engineering and spec-driven development system for Claude Code, OpenCode, Gemini and Codex by TÂCHES.",
|
||||
"bin": {
|
||||
"get-shit-done-cc": "bin/install.js"
|
||||
|
||||
@@ -15,6 +15,7 @@ import { GSD } from './index.js';
|
||||
import { CLITransport } from './cli-transport.js';
|
||||
import { WSTransport } from './ws-transport.js';
|
||||
import { InitRunner } from './init-runner.js';
|
||||
import { validateWorkstreamName } from './workstream-utils.js';
|
||||
|
||||
// ─── Parsed CLI args ─────────────────────────────────────────────────────────
|
||||
|
||||
@@ -29,6 +30,8 @@ export interface ParsedCliArgs {
|
||||
wsPort: number | undefined;
|
||||
model: string | undefined;
|
||||
maxBudget: number | undefined;
|
||||
/** Workstream name for multi-workstream projects. Routes .planning/ to .planning/workstreams/<name>/. */
|
||||
ws: string | undefined;
|
||||
help: boolean;
|
||||
version: boolean;
|
||||
}
|
||||
@@ -43,6 +46,7 @@ export function parseCliArgs(argv: string[]): ParsedCliArgs {
|
||||
options: {
|
||||
'project-dir': { type: 'string', default: process.cwd() },
|
||||
'ws-port': { type: 'string' },
|
||||
ws: { type: 'string' },
|
||||
model: { type: 'string' },
|
||||
'max-budget': { type: 'string' },
|
||||
init: { type: 'string' },
|
||||
@@ -69,6 +73,7 @@ export function parseCliArgs(argv: string[]): ParsedCliArgs {
|
||||
wsPort: values['ws-port'] ? Number(values['ws-port']) : undefined,
|
||||
model: values.model as string | undefined,
|
||||
maxBudget: values['max-budget'] ? Number(values['max-budget']) : undefined,
|
||||
ws: values.ws as string | undefined,
|
||||
help: values.help as boolean,
|
||||
version: values.version as boolean,
|
||||
};
|
||||
@@ -92,6 +97,7 @@ Options:
|
||||
--init <input> Bootstrap from a PRD before running (auto only)
|
||||
Accepts @path/to/prd.md or "description text"
|
||||
--project-dir <dir> Project directory (default: cwd)
|
||||
--ws <name> Route .planning/ to .planning/workstreams/<name>/
|
||||
--ws-port <port> Enable WebSocket transport on <port>
|
||||
--model <model> Override LLM model
|
||||
--max-budget <n> Max budget per step in USD
|
||||
@@ -194,6 +200,13 @@ export async function main(argv: string[] = process.argv.slice(2)): Promise<void
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate --ws flag if provided
|
||||
if (args.ws !== undefined && !validateWorkstreamName(args.ws)) {
|
||||
console.error(`Error: Invalid workstream name "${args.ws}". Use alphanumeric, hyphens, underscores, or dots only.`);
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
if (args.command !== 'run' && args.command !== 'init' && args.command !== 'auto') {
|
||||
console.error('Error: Expected "gsd-sdk run <prompt>", "gsd-sdk auto", or "gsd-sdk init [input]"');
|
||||
console.error(USAGE);
|
||||
@@ -226,6 +239,7 @@ export async function main(argv: string[] = process.argv.slice(2)): Promise<void
|
||||
projectDir: args.projectDir,
|
||||
model: args.model,
|
||||
maxBudgetUsd: args.maxBudget,
|
||||
workstream: args.ws,
|
||||
});
|
||||
|
||||
// Wire CLI transport
|
||||
@@ -296,6 +310,7 @@ export async function main(argv: string[] = process.argv.slice(2)): Promise<void
|
||||
model: args.model,
|
||||
maxBudgetUsd: args.maxBudget,
|
||||
autoMode: true,
|
||||
workstream: args.ws,
|
||||
});
|
||||
|
||||
// Wire CLI transport (always active)
|
||||
@@ -384,6 +399,7 @@ export async function main(argv: string[] = process.argv.slice(2)): Promise<void
|
||||
projectDir: args.projectDir,
|
||||
model: args.model,
|
||||
maxBudgetUsd: args.maxBudget,
|
||||
workstream: args.ws,
|
||||
});
|
||||
|
||||
// Wire CLI transport (always active)
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
|
||||
import { readFile } from 'node:fs/promises';
|
||||
import { join } from 'node:path';
|
||||
import { relPlanningPath } from './workstream-utils.js';
|
||||
|
||||
// ─── Types ───────────────────────────────────────────────────────────────────
|
||||
|
||||
@@ -99,15 +100,25 @@ export const CONFIG_DEFAULTS: GSDConfig = {
|
||||
* Returns full defaults when file is missing or empty.
|
||||
* Throws on malformed JSON with a helpful error message.
|
||||
*/
|
||||
export async function loadConfig(projectDir: string): Promise<GSDConfig> {
|
||||
const configPath = join(projectDir, '.planning', 'config.json');
|
||||
export async function loadConfig(projectDir: string, workstream?: string): Promise<GSDConfig> {
|
||||
const configPath = join(projectDir, relPlanningPath(workstream), 'config.json');
|
||||
const rootConfigPath = join(projectDir, '.planning', 'config.json');
|
||||
|
||||
let raw: string;
|
||||
try {
|
||||
raw = await readFile(configPath, 'utf-8');
|
||||
} catch {
|
||||
// File missing — normal for new projects
|
||||
return structuredClone(CONFIG_DEFAULTS);
|
||||
// If workstream config missing, fall back to root config
|
||||
if (workstream) {
|
||||
try {
|
||||
raw = await readFile(rootConfigPath, 'utf-8');
|
||||
} catch {
|
||||
return structuredClone(CONFIG_DEFAULTS);
|
||||
}
|
||||
} else {
|
||||
// File missing — normal for new projects
|
||||
return structuredClone(CONFIG_DEFAULTS);
|
||||
}
|
||||
}
|
||||
|
||||
const trimmed = raw.trim();
|
||||
|
||||
@@ -25,6 +25,7 @@ import {
|
||||
DEFAULT_TRUNCATION_OPTIONS,
|
||||
type TruncationOptions,
|
||||
} from './context-truncation.js';
|
||||
import { relPlanningPath } from './workstream-utils.js';
|
||||
|
||||
// ─── File manifest per phase ─────────────────────────────────────────────────
|
||||
|
||||
@@ -77,8 +78,8 @@ export class ContextEngine {
|
||||
private readonly logger?: GSDLogger;
|
||||
private readonly truncation: TruncationOptions;
|
||||
|
||||
constructor(projectDir: string, logger?: GSDLogger, truncation?: Partial<TruncationOptions>) {
|
||||
this.planningDir = join(projectDir, '.planning');
|
||||
constructor(projectDir: string, logger?: GSDLogger, truncation?: Partial<TruncationOptions>, workstream?: string) {
|
||||
this.planningDir = join(projectDir, relPlanningPath(workstream));
|
||||
this.logger = logger;
|
||||
this.truncation = { ...DEFAULT_TRUNCATION_OPTIONS, ...truncation };
|
||||
}
|
||||
|
||||
@@ -39,16 +39,19 @@ export class GSDTools {
|
||||
private readonly projectDir: string;
|
||||
private readonly gsdToolsPath: string;
|
||||
private readonly timeoutMs: number;
|
||||
private readonly workstream?: string;
|
||||
|
||||
constructor(opts: {
|
||||
projectDir: string;
|
||||
gsdToolsPath?: string;
|
||||
timeoutMs?: number;
|
||||
workstream?: string;
|
||||
}) {
|
||||
this.projectDir = opts.projectDir;
|
||||
this.gsdToolsPath =
|
||||
opts.gsdToolsPath ?? resolveGsdToolsPath(opts.projectDir);
|
||||
this.timeoutMs = opts.timeoutMs ?? DEFAULT_TIMEOUT_MS;
|
||||
this.workstream = opts.workstream;
|
||||
}
|
||||
|
||||
// ─── Core exec ───────────────────────────────────────────────────────────
|
||||
@@ -58,7 +61,8 @@ export class GSDTools {
|
||||
* Handles the `@file:` prefix pattern for large results.
|
||||
*/
|
||||
async exec(command: string, args: string[] = []): Promise<unknown> {
|
||||
const fullArgs = [this.gsdToolsPath, command, ...args];
|
||||
const wsArgs = this.workstream ? ['--ws', this.workstream] : [];
|
||||
const fullArgs = [this.gsdToolsPath, command, ...args, ...wsArgs];
|
||||
|
||||
return new Promise<unknown>((resolve, reject) => {
|
||||
const child = execFile(
|
||||
@@ -160,7 +164,8 @@ export class GSDTools {
|
||||
* Use for commands like `config-set` that return plain text, not JSON.
|
||||
*/
|
||||
async execRaw(command: string, args: string[] = []): Promise<string> {
|
||||
const fullArgs = [this.gsdToolsPath, command, ...args, '--raw'];
|
||||
const wsArgs = this.workstream ? ['--ws', this.workstream] : [];
|
||||
const fullArgs = [this.gsdToolsPath, command, ...args, ...wsArgs, '--raw'];
|
||||
|
||||
return new Promise<string>((resolve, reject) => {
|
||||
const child = execFile(
|
||||
|
||||
@@ -44,6 +44,7 @@ export class GSD {
|
||||
private readonly defaultMaxBudgetUsd: number;
|
||||
private readonly defaultMaxTurns: number;
|
||||
private readonly autoMode: boolean;
|
||||
private readonly workstream?: string;
|
||||
readonly eventStream: GSDEventStream;
|
||||
|
||||
constructor(options: GSDOptions) {
|
||||
@@ -54,6 +55,7 @@ export class GSD {
|
||||
this.defaultMaxBudgetUsd = options.maxBudgetUsd ?? 5.0;
|
||||
this.defaultMaxTurns = options.maxTurns ?? 50;
|
||||
this.autoMode = options.autoMode ?? false;
|
||||
this.workstream = options.workstream;
|
||||
this.eventStream = new GSDEventStream();
|
||||
}
|
||||
|
||||
@@ -75,7 +77,7 @@ export class GSD {
|
||||
const plan = await parsePlanFile(absolutePlanPath);
|
||||
|
||||
// Load project config
|
||||
const config = await loadConfig(this.projectDir);
|
||||
const config = await loadConfig(this.projectDir, this.workstream);
|
||||
|
||||
// Try to load agent definition for tool restrictions
|
||||
const agentDef = await this.loadAgentDefinition();
|
||||
@@ -117,6 +119,7 @@ export class GSD {
|
||||
return new GSDTools({
|
||||
projectDir: this.projectDir,
|
||||
gsdToolsPath: this.gsdToolsPath,
|
||||
workstream: this.workstream,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -133,8 +136,8 @@ export class GSD {
|
||||
async runPhase(phaseNumber: string, options?: PhaseRunnerOptions): Promise<PhaseRunnerResult> {
|
||||
const tools = this.createTools();
|
||||
const promptFactory = new PromptFactory();
|
||||
const contextEngine = new ContextEngine(this.projectDir);
|
||||
const config = await loadConfig(this.projectDir);
|
||||
const contextEngine = new ContextEngine(this.projectDir, undefined, undefined, this.workstream);
|
||||
const config = await loadConfig(this.projectDir, this.workstream);
|
||||
|
||||
// Auto mode: force auto_advance on and skip_discuss off so self-discuss kicks in
|
||||
if (this.autoMode) {
|
||||
@@ -314,6 +317,9 @@ export { CLITransport } from './cli-transport.js';
|
||||
export { WSTransport } from './ws-transport.js';
|
||||
export type { WSTransportOptions } from './ws-transport.js';
|
||||
|
||||
// Workstream utilities
|
||||
export { validateWorkstreamName, relPlanningPath } from './workstream-utils.js';
|
||||
|
||||
// Init workflow
|
||||
export { InitRunner } from './init-runner.js';
|
||||
export type { InitRunnerDeps } from './init-runner.js';
|
||||
|
||||
@@ -207,6 +207,8 @@ export interface GSDOptions {
|
||||
maxTurns?: number;
|
||||
/** Enable auto mode: sets auto_advance=true, skip_discuss=false in workflow config. */
|
||||
autoMode?: boolean;
|
||||
/** Workstream name. Routes all .planning/ paths to .planning/workstreams/<name>/. */
|
||||
workstream?: string;
|
||||
}
|
||||
|
||||
// ─── S02: Event stream types ─────────────────────────────────────────────────
|
||||
|
||||
32
sdk/src/workstream-utils.ts
Normal file
32
sdk/src/workstream-utils.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* Workstream utility functions for multi-workstream project support.
|
||||
*
|
||||
* When --ws <name> is provided, all .planning/ paths are routed to
|
||||
* .planning/workstreams/<name>/ instead.
|
||||
*/
|
||||
|
||||
import { join } from 'node:path';
|
||||
|
||||
/**
|
||||
* Validate a workstream name.
|
||||
* Allowed: alphanumeric, hyphens, underscores, dots.
|
||||
* Disallowed: empty, spaces, slashes, special chars, path traversal.
|
||||
*/
|
||||
export function validateWorkstreamName(name: string): boolean {
|
||||
if (!name || name.length === 0) return false;
|
||||
// Only allow alphanumeric, hyphens, underscores, dots
|
||||
// Must not be ".." or start with ".." (path traversal)
|
||||
if (name === '..' || name.startsWith('../')) return false;
|
||||
return /^[a-zA-Z0-9][a-zA-Z0-9._-]*$/.test(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the relative planning directory path.
|
||||
*
|
||||
* - Without workstream: `.planning`
|
||||
* - With workstream: `.planning/workstreams/<name>`
|
||||
*/
|
||||
export function relPlanningPath(workstream?: string): string {
|
||||
if (!workstream) return '.planning';
|
||||
return join('.planning', 'workstreams', workstream);
|
||||
}
|
||||
285
sdk/src/ws-flag.test.ts
Normal file
285
sdk/src/ws-flag.test.ts
Normal file
@@ -0,0 +1,285 @@
|
||||
/**
|
||||
* Tests for --ws (workstream) flag support.
|
||||
*
|
||||
* Validates:
|
||||
* - CLI parsing of --ws flag
|
||||
* - Workstream name validation
|
||||
* - GSDOptions.workstream propagation
|
||||
* - GSDTools workstream-aware invocation
|
||||
* - Config path resolution with workstream
|
||||
* - ContextEngine workstream-aware planning dir
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { mkdir, writeFile, rm } from 'node:fs/promises';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
|
||||
// ─── Workstream name validation ─────────────────────────────────────────────
|
||||
|
||||
import { validateWorkstreamName } from './workstream-utils.js';
|
||||
|
||||
describe('validateWorkstreamName', () => {
|
||||
it('accepts alphanumeric names', () => {
|
||||
expect(validateWorkstreamName('frontend')).toBe(true);
|
||||
expect(validateWorkstreamName('backend2')).toBe(true);
|
||||
});
|
||||
|
||||
it('accepts names with hyphens', () => {
|
||||
expect(validateWorkstreamName('my-feature')).toBe(true);
|
||||
});
|
||||
|
||||
it('accepts names with underscores', () => {
|
||||
expect(validateWorkstreamName('my_feature')).toBe(true);
|
||||
});
|
||||
|
||||
it('accepts names with dots', () => {
|
||||
expect(validateWorkstreamName('v1.0')).toBe(true);
|
||||
});
|
||||
|
||||
it('rejects empty strings', () => {
|
||||
expect(validateWorkstreamName('')).toBe(false);
|
||||
});
|
||||
|
||||
it('rejects names with spaces', () => {
|
||||
expect(validateWorkstreamName('my feature')).toBe(false);
|
||||
});
|
||||
|
||||
it('rejects names with slashes', () => {
|
||||
expect(validateWorkstreamName('my/feature')).toBe(false);
|
||||
});
|
||||
|
||||
it('rejects names with special characters', () => {
|
||||
expect(validateWorkstreamName('feat@ure')).toBe(false);
|
||||
expect(validateWorkstreamName('feat!ure')).toBe(false);
|
||||
expect(validateWorkstreamName('feat#ure')).toBe(false);
|
||||
});
|
||||
|
||||
it('rejects path traversal attempts', () => {
|
||||
expect(validateWorkstreamName('..')).toBe(false);
|
||||
expect(validateWorkstreamName('../etc')).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── relPlanningPath helper ─────────────────────────────────────────────────
|
||||
|
||||
import { relPlanningPath } from './workstream-utils.js';
|
||||
|
||||
describe('relPlanningPath', () => {
|
||||
it('returns .planning/ in flat mode (no workstream)', () => {
|
||||
expect(relPlanningPath()).toBe('.planning');
|
||||
expect(relPlanningPath(undefined)).toBe('.planning');
|
||||
});
|
||||
|
||||
it('returns .planning/workstreams/<name>/ with workstream', () => {
|
||||
expect(relPlanningPath('frontend')).toBe('.planning/workstreams/frontend');
|
||||
expect(relPlanningPath('api-v2')).toBe('.planning/workstreams/api-v2');
|
||||
});
|
||||
});
|
||||
|
||||
// ─── CLI --ws flag parsing ──────────────────────────────────────────────────
|
||||
|
||||
import { parseCliArgs } from './cli.js';
|
||||
|
||||
describe('parseCliArgs --ws flag', () => {
|
||||
it('parses --ws flag', () => {
|
||||
const result = parseCliArgs(['run', 'build auth', '--ws', 'frontend']);
|
||||
|
||||
expect(result.ws).toBe('frontend');
|
||||
});
|
||||
|
||||
it('ws is undefined when not provided', () => {
|
||||
const result = parseCliArgs(['run', 'build auth']);
|
||||
|
||||
expect(result.ws).toBeUndefined();
|
||||
});
|
||||
|
||||
it('works with other flags', () => {
|
||||
const result = parseCliArgs([
|
||||
'run', 'build auth',
|
||||
'--ws', 'backend',
|
||||
'--model', 'claude-sonnet-4-6',
|
||||
'--project-dir', '/tmp/test',
|
||||
]);
|
||||
|
||||
expect(result.ws).toBe('backend');
|
||||
expect(result.model).toBe('claude-sonnet-4-6');
|
||||
expect(result.projectDir).toBe('/tmp/test');
|
||||
});
|
||||
});
|
||||
|
||||
// ─── GSDOptions.workstream ──────────────────────────────────────────────────
|
||||
|
||||
describe('GSDOptions.workstream', () => {
|
||||
it('GSD class accepts workstream option', async () => {
|
||||
// This is a compile-time check -- if the type is wrong, TS will fail
|
||||
const { GSD } = await import('./index.js');
|
||||
const gsd = new GSD({
|
||||
projectDir: '/tmp/test-ws',
|
||||
workstream: 'frontend',
|
||||
});
|
||||
// If we get here without a type error, the option is accepted
|
||||
expect(gsd).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
// ─── GSDTools workstream injection ──────────────────────────────────────────
|
||||
|
||||
describe('GSDTools workstream injection', () => {
|
||||
let tmpDir: string;
|
||||
let fixtureDir: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
tmpDir = join(tmpdir(), `gsd-ws-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
fixtureDir = join(tmpDir, 'fixtures');
|
||||
await mkdir(fixtureDir, { recursive: true });
|
||||
await mkdir(join(tmpDir, '.planning'), { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await rm(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
async function createScript(name: string, code: string): Promise<string> {
|
||||
const scriptPath = join(fixtureDir, name);
|
||||
await writeFile(scriptPath, code, { mode: 0o755 });
|
||||
return scriptPath;
|
||||
}
|
||||
|
||||
it('passes --ws flag to gsd-tools.cjs when workstream is set', async () => {
|
||||
const { GSDTools } = await import('./gsd-tools.js');
|
||||
|
||||
// Script echoes its arguments as JSON
|
||||
const scriptPath = await createScript(
|
||||
'echo-args.cjs',
|
||||
'process.stdout.write(JSON.stringify(process.argv.slice(2)));',
|
||||
);
|
||||
|
||||
const tools = new GSDTools({
|
||||
projectDir: tmpDir,
|
||||
gsdToolsPath: scriptPath,
|
||||
workstream: 'frontend',
|
||||
});
|
||||
|
||||
const result = await tools.exec('state', ['load']) as string[];
|
||||
|
||||
// Should contain --ws frontend in the arguments
|
||||
expect(result).toContain('--ws');
|
||||
expect(result).toContain('frontend');
|
||||
});
|
||||
|
||||
it('does not pass --ws when workstream is undefined', async () => {
|
||||
const { GSDTools } = await import('./gsd-tools.js');
|
||||
|
||||
const scriptPath = await createScript(
|
||||
'echo-args-no-ws.cjs',
|
||||
'process.stdout.write(JSON.stringify(process.argv.slice(2)));',
|
||||
);
|
||||
|
||||
const tools = new GSDTools({
|
||||
projectDir: tmpDir,
|
||||
gsdToolsPath: scriptPath,
|
||||
});
|
||||
|
||||
const result = await tools.exec('state', ['load']) as string[];
|
||||
|
||||
expect(result).not.toContain('--ws');
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Config workstream-aware path ───────────────────────────────────────────
|
||||
|
||||
import { loadConfig } from './config.js';
|
||||
|
||||
describe('loadConfig with workstream', () => {
|
||||
let tmpDir: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
tmpDir = join(tmpdir(), `gsd-config-ws-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
await mkdir(tmpDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await rm(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('loads config from workstream path when workstream is provided', async () => {
|
||||
const wsDir = join(tmpDir, '.planning', 'workstreams', 'frontend');
|
||||
await mkdir(wsDir, { recursive: true });
|
||||
await writeFile(
|
||||
join(wsDir, 'config.json'),
|
||||
JSON.stringify({ model_profile: 'performance' }),
|
||||
);
|
||||
|
||||
const config = await loadConfig(tmpDir, 'frontend');
|
||||
|
||||
expect(config.model_profile).toBe('performance');
|
||||
});
|
||||
|
||||
it('falls back to root config when workstream config is missing', async () => {
|
||||
// Create root config but no workstream config
|
||||
await mkdir(join(tmpDir, '.planning'), { recursive: true });
|
||||
await writeFile(
|
||||
join(tmpDir, '.planning', 'config.json'),
|
||||
JSON.stringify({ model_profile: 'balanced' }),
|
||||
);
|
||||
|
||||
const config = await loadConfig(tmpDir, 'frontend');
|
||||
|
||||
expect(config.model_profile).toBe('balanced');
|
||||
});
|
||||
|
||||
it('loads from root .planning/ when workstream is undefined', async () => {
|
||||
await mkdir(join(tmpDir, '.planning'), { recursive: true });
|
||||
await writeFile(
|
||||
join(tmpDir, '.planning', 'config.json'),
|
||||
JSON.stringify({ model_profile: 'economy' }),
|
||||
);
|
||||
|
||||
const config = await loadConfig(tmpDir);
|
||||
|
||||
expect(config.model_profile).toBe('economy');
|
||||
});
|
||||
});
|
||||
|
||||
// ─── ContextEngine workstream-aware planning dir ────────────────────────────
|
||||
|
||||
describe('ContextEngine with workstream', () => {
|
||||
let tmpDir: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
tmpDir = join(tmpdir(), `gsd-ctx-ws-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
await mkdir(tmpDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await rm(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('resolves files from workstream planning dir', async () => {
|
||||
const { ContextEngine } = await import('./context-engine.js');
|
||||
const { PhaseType } = await import('./types.js');
|
||||
|
||||
const wsDir = join(tmpDir, '.planning', 'workstreams', 'backend');
|
||||
await mkdir(wsDir, { recursive: true });
|
||||
await writeFile(join(wsDir, 'STATE.md'), '# State\nPhase: 01');
|
||||
|
||||
const engine = new ContextEngine(tmpDir, undefined, undefined, 'backend');
|
||||
const files = await engine.resolveContextFiles(PhaseType.Execute);
|
||||
|
||||
expect(files.state).toContain('Phase: 01');
|
||||
});
|
||||
|
||||
it('resolves files from root .planning/ without workstream', async () => {
|
||||
const { ContextEngine } = await import('./context-engine.js');
|
||||
const { PhaseType } = await import('./types.js');
|
||||
|
||||
await mkdir(join(tmpDir, '.planning'), { recursive: true });
|
||||
await writeFile(join(tmpDir, '.planning', 'STATE.md'), '# State\nPhase: 02');
|
||||
|
||||
const engine = new ContextEngine(tmpDir);
|
||||
const files = await engine.resolveContextFiles(PhaseType.Execute);
|
||||
|
||||
expect(files.state).toContain('Phase: 02');
|
||||
});
|
||||
});
|
||||
@@ -205,3 +205,90 @@ describe('config-set agent_skills', () => {
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── global: prefix support (#1992) ──────────────────────────────────────────
|
||||
|
||||
describe('agent-skills global: prefix', () => {
|
||||
let tmpDir;
|
||||
let fakeHome;
|
||||
let globalSkillsDir;
|
||||
|
||||
beforeEach(() => {
|
||||
tmpDir = createTempProject();
|
||||
// Create a fake HOME with ~/.claude/skills/ structure
|
||||
fakeHome = fs.mkdtempSync(path.join(require('os').tmpdir(), 'gsd-1992-home-'));
|
||||
globalSkillsDir = path.join(fakeHome, '.claude', 'skills');
|
||||
fs.mkdirSync(globalSkillsDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup(tmpDir);
|
||||
fs.rmSync(fakeHome, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
function createGlobalSkill(name) {
|
||||
const skillDir = path.join(globalSkillsDir, name);
|
||||
fs.mkdirSync(skillDir, { recursive: true });
|
||||
fs.writeFileSync(path.join(skillDir, 'SKILL.md'), `# ${name}\nGlobal skill content.\n`);
|
||||
return skillDir;
|
||||
}
|
||||
|
||||
test('global:valid-skill resolves to $HOME/.claude/skills/valid-skill/SKILL.md', () => {
|
||||
createGlobalSkill('valid-skill');
|
||||
writeConfig(tmpDir, {
|
||||
agent_skills: { 'gsd-executor': ['global:valid-skill'] },
|
||||
});
|
||||
|
||||
const result = runGsdTools(['agent-skills', 'gsd-executor'], tmpDir, { HOME: fakeHome, USERPROFILE: fakeHome });
|
||||
assert.ok(result.output.includes('valid-skill/SKILL.md'), `should reference the global skill: ${result.output}`);
|
||||
assert.ok(result.output.includes('<agent_skills>'), 'should emit agent_skills block');
|
||||
});
|
||||
|
||||
test('global:invalid!name is rejected by regex and skipped', () => {
|
||||
writeConfig(tmpDir, {
|
||||
agent_skills: { 'gsd-executor': ['global:invalid!name'] },
|
||||
});
|
||||
|
||||
const result = runGsdTools(['agent-skills', 'gsd-executor'], tmpDir, { HOME: fakeHome, USERPROFILE: fakeHome });
|
||||
// No valid skills → empty output, command succeeds
|
||||
assert.strictEqual(result.output, '', 'should skip invalid name without crashing');
|
||||
});
|
||||
|
||||
test('global:missing-skill is skipped when directory is absent', () => {
|
||||
// Do NOT create the skill directory
|
||||
writeConfig(tmpDir, {
|
||||
agent_skills: { 'gsd-executor': ['global:missing-skill'] },
|
||||
});
|
||||
|
||||
const result = runGsdTools(['agent-skills', 'gsd-executor'], tmpDir, { HOME: fakeHome, USERPROFILE: fakeHome });
|
||||
assert.strictEqual(result.output, '', 'should skip missing skill gracefully');
|
||||
});
|
||||
|
||||
test('mix of global: and project-relative paths both resolve correctly', () => {
|
||||
createGlobalSkill('shadcn');
|
||||
|
||||
// Create a project-relative skill
|
||||
const projectSkillDir = path.join(tmpDir, 'skills', 'local-skill');
|
||||
fs.mkdirSync(projectSkillDir, { recursive: true });
|
||||
fs.writeFileSync(path.join(projectSkillDir, 'SKILL.md'), '# local\n');
|
||||
|
||||
writeConfig(tmpDir, {
|
||||
agent_skills: { 'gsd-executor': ['global:shadcn', 'skills/local-skill'] },
|
||||
});
|
||||
|
||||
const result = runGsdTools(['agent-skills', 'gsd-executor'], tmpDir, { HOME: fakeHome, USERPROFILE: fakeHome });
|
||||
assert.ok(result.output.includes('shadcn/SKILL.md'), 'should include global shadcn');
|
||||
assert.ok(result.output.includes('skills/local-skill/SKILL.md'), 'should include project-relative skill');
|
||||
});
|
||||
|
||||
test('global: with empty name produces clear warning and skips', () => {
|
||||
writeConfig(tmpDir, {
|
||||
agent_skills: { 'gsd-executor': ['global:'] },
|
||||
});
|
||||
|
||||
const result = runGsdTools(['agent-skills', 'gsd-executor'], tmpDir, { HOME: fakeHome, USERPROFILE: fakeHome });
|
||||
assert.strictEqual(result.output, '', 'should skip empty global: prefix');
|
||||
// The warning goes to stderr — cannot assert on it through runGsdTools's output field,
|
||||
// but the command must not crash and must return empty.
|
||||
});
|
||||
});
|
||||
|
||||
95
tests/atomic-write-coverage.test.cjs
Normal file
95
tests/atomic-write-coverage.test.cjs
Normal file
@@ -0,0 +1,95 @@
|
||||
/**
|
||||
* Structural regression guard for atomic write usage (#1972).
|
||||
*
|
||||
* Ensures that milestone.cjs, phase.cjs, and frontmatter.cjs do NOT
|
||||
* contain bare fs.writeFileSync calls targeting .planning/ files. All
|
||||
* such writes must go through atomicWriteFileSync to prevent partial
|
||||
* writes from corrupting planning artifacts on crash.
|
||||
*
|
||||
* Allowed exceptions:
|
||||
* - Writes to .gitkeep (empty files, no corruption risk)
|
||||
* - Writes to archive directories (new files, not read-modify-write)
|
||||
*
|
||||
* This test is structural — it reads the source files and parses for
|
||||
* bare writeFileSync patterns. It complements functional tests in
|
||||
* atomic-write.test.cjs which verify the helper itself.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const { test, describe } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('node:fs');
|
||||
const path = require('node:path');
|
||||
|
||||
const libDir = path.resolve(__dirname, '..', 'get-shit-done', 'bin', 'lib');
|
||||
|
||||
/**
|
||||
* Find all fs.writeFileSync(...) call sites in a file.
|
||||
* Returns array of { line: number, text: string }.
|
||||
*/
|
||||
function findBareWrites(filePath) {
|
||||
const content = fs.readFileSync(filePath, 'utf-8');
|
||||
const lines = content.split('\n');
|
||||
const hits = [];
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
if (/\bfs\.writeFileSync\s*\(/.test(lines[i])) {
|
||||
hits.push({ line: i + 1, text: lines[i].trim() });
|
||||
}
|
||||
}
|
||||
return hits;
|
||||
}
|
||||
|
||||
/**
|
||||
* Classify a bare write as allowed (archive, .gitkeep) or disallowed.
|
||||
*/
|
||||
function isAllowedException(lineText) {
|
||||
// .gitkeep writes (empty file, no corruption risk)
|
||||
if (/\.gitkeep/.test(lineText)) return true;
|
||||
// Archive directory writes (new files, not read-modify-write)
|
||||
if (/archiveDir/.test(lineText)) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
describe('atomic write coverage (#1972)', () => {
|
||||
const targetFiles = ['milestone.cjs', 'phase.cjs', 'frontmatter.cjs'];
|
||||
|
||||
for (const file of targetFiles) {
|
||||
test(`${file}: all fs.writeFileSync calls target allowed exceptions`, () => {
|
||||
const filePath = path.join(libDir, file);
|
||||
assert.ok(fs.existsSync(filePath), `${file} must exist at ${filePath}`);
|
||||
|
||||
const hits = findBareWrites(filePath);
|
||||
const violations = hits.filter(h => !isAllowedException(h.text));
|
||||
|
||||
if (violations.length > 0) {
|
||||
const report = violations.map(v => ` line ${v.line}: ${v.text}`).join('\n');
|
||||
assert.fail(
|
||||
`${file} contains ${violations.length} bare fs.writeFileSync call(s) targeting planning files.\n` +
|
||||
`These should use atomicWriteFileSync instead:\n${report}`
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
test(`${file}: imports atomicWriteFileSync from core.cjs`, () => {
|
||||
const filePath = path.join(libDir, file);
|
||||
const content = fs.readFileSync(filePath, 'utf-8');
|
||||
assert.match(
|
||||
content,
|
||||
/atomicWriteFileSync.*require\(['"]\.\/core\.cjs['"]\)|atomicWriteFileSync[^)]*\}\s*=\s*require\(['"]\.\/core\.cjs['"]\)/s,
|
||||
`${file} must import atomicWriteFileSync from core.cjs`
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
test('all three files use atomicWriteFileSync at least once', () => {
|
||||
for (const file of targetFiles) {
|
||||
const content = fs.readFileSync(path.join(libDir, file), 'utf-8');
|
||||
assert.match(
|
||||
content,
|
||||
/atomicWriteFileSync\s*\(/,
|
||||
`${file} must contain at least one atomicWriteFileSync call`
|
||||
);
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
process.env.GSD_TEST_MODE = '1';
|
||||
|
||||
const { describe, test, beforeEach, afterEach } = require('node:test');
|
||||
const { describe, test, before, beforeEach, afterEach } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
@@ -20,8 +20,22 @@ const os = require('os');
|
||||
const { execFileSync } = require('child_process');
|
||||
|
||||
const INSTALL_SRC = path.join(__dirname, '..', 'bin', 'install.js');
|
||||
const BUILD_SCRIPT = path.join(__dirname, '..', 'scripts', 'build-hooks.js');
|
||||
const { install, copyCommandsAsClaudeSkills } = require(INSTALL_SRC);
|
||||
|
||||
// ─── Ensure hooks/dist/ is populated before install tests ────────────────────
|
||||
// With --test-concurrency=4, other install tests (bug-1834, bug-1924) run
|
||||
// build-hooks.js concurrently. That script creates hooks/dist/ empty first,
|
||||
// then copies files — creating a window where this test sees an empty dir and
|
||||
// install() fails with "directory is empty" → process.exit(1).
|
||||
|
||||
before(() => {
|
||||
execFileSync(process.execPath, [BUILD_SCRIPT], {
|
||||
encoding: 'utf-8',
|
||||
stdio: 'pipe',
|
||||
});
|
||||
});
|
||||
|
||||
// ─── #1736: local install deploys commands/gsd/ ─────────────────────────────
|
||||
|
||||
describe('#1736: local Claude install populates .claude/commands/gsd/', () => {
|
||||
|
||||
@@ -157,8 +157,8 @@ describe('#1834: install.js source handles .sh files in the hook copy loop', ()
|
||||
const anchorPhrase = 'configDirReplacement';
|
||||
const anchorIdx = src.indexOf(anchorPhrase);
|
||||
assert.ok(anchorIdx !== -1, 'hook copy loop anchor (configDirReplacement) not found in install.js');
|
||||
// Extract a window large enough to contain the if/else block (≈1000 chars)
|
||||
const region = src.slice(anchorIdx, anchorIdx + 1000);
|
||||
// Extract a window large enough to contain the if/else block (≈1500 chars)
|
||||
const region = src.slice(anchorIdx, anchorIdx + 1500);
|
||||
assert.ok(
|
||||
region.includes("entry.endsWith('.js')"),
|
||||
"install.js hook copy loop must check entry.endsWith('.js')"
|
||||
|
||||
100
/**
 * Regression tests for #1967 cache invalidation.
 *
 * The disk scan cache in buildStateFrontmatter must be invalidated on
 * writeStateMd to prevent stale reads if multiple state-mutating
 * operations occur within the same Node process. This matters for:
 * - SDK callers that require() gsd-tools.cjs as a module
 * - Future dispatcher extensions that handle compound operations
 * - Tests that import state.cjs directly
 */

'use strict';

const { test, describe, beforeEach, afterEach } = require('node:test');
const assert = require('node:assert/strict');
const fs = require('node:fs');
const path = require('node:path');
const os = require('node:os');

const state = require('../get-shit-done/bin/lib/state.cjs');

describe('buildStateFrontmatter cache invalidation (#1967)', () => {
  let tmpDir;
  let planningDir;
  let phasesDir;
  let statePath;

  beforeEach(() => {
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'gsd-1967-cache-'));
    planningDir = path.join(tmpDir, '.planning');
    phasesDir = path.join(planningDir, 'phases');
    fs.mkdirSync(phasesDir, { recursive: true });

    // Create a minimal config and STATE.md
    fs.writeFileSync(
      path.join(planningDir, 'config.json'),
      JSON.stringify({ project_code: 'TEST' })
    );

    statePath = path.join(planningDir, 'STATE.md');
    fs.writeFileSync(statePath, [
      '# State',
      '',
      '**Current Phase:** 1',
      '**Status:** executing',
      '**Total Phases:** 2',
      '',
    ].join('\n'));

    // Start with one phase directory containing one PLAN
    const phase1 = path.join(phasesDir, '01-foo');
    fs.mkdirSync(phase1);
    fs.writeFileSync(path.join(phase1, '01-1-PLAN.md'), '---\nphase: 1\nplan: 1\n---\n# Plan\n');
  });

  afterEach(() => {
    fs.rmSync(tmpDir, { recursive: true, force: true });
  });

  test('writeStateMd invalidates cache so subsequent reads see new disk state', () => {
    // First write — populates cache via buildStateFrontmatter
    const content1 = fs.readFileSync(statePath, 'utf-8');
    state.writeStateMd(statePath, content1, tmpDir);

    // Create a NEW phase directory AFTER the first write.
    // Without cache invalidation, the second write would still see only 1 phase.
    const phase2 = path.join(phasesDir, '02-bar');
    fs.mkdirSync(phase2);
    fs.writeFileSync(path.join(phase2, '02-1-PLAN.md'), '---\nphase: 2\nplan: 1\n---\n# Plan\n');
    fs.writeFileSync(path.join(phase2, '02-1-SUMMARY.md'), '---\nstatus: complete\n---\n# Summary\n');

    // Second write in the SAME process — must see the new phase
    const content2 = fs.readFileSync(statePath, 'utf-8');
    state.writeStateMd(statePath, content2, tmpDir);

    // Read back and parse frontmatter to verify it reflects 2 phases, not 1
    const result = fs.readFileSync(statePath, 'utf-8');
    const fmMatch = result.match(/^---\n([\s\S]*?)\n---/);
    assert.ok(fmMatch, 'STATE.md should have frontmatter after writeStateMd');

    const fm = fmMatch[1];
    // Should show 2 total phases (the new disk state), not 1 (stale cache)
    const totalPhasesMatch = fm.match(/total_phases:\s*(\d+)/);
    assert.ok(totalPhasesMatch, 'frontmatter should contain total_phases');
    assert.strictEqual(
      Number.parseInt(totalPhasesMatch[1], 10),
      2,
      'total_phases should reflect new disk state (2), not stale cache (1)'
    );

    // Should show 1 completed phase (phase 2 has SUMMARY)
    const completedMatch = fm.match(/completed_phases:\s*(\d+)/);
    assert.ok(completedMatch, 'frontmatter should contain completed_phases');
    assert.strictEqual(
      Number.parseInt(completedMatch[1], 10),
      1,
      'completed_phases should reflect new disk state (1 complete), not stale cache (0)'
    );
  });
});
/**
 * Integration tests for gsd-context-monitor.js auto-record on CRITICAL (#1974).
 *
 * Verifies:
 * 1. On CRITICAL + active GSD project, subprocess is spawned and STATE.md
 *    receives the "Stopped At" field.
 * 2. Subsequent CRITICAL firings within the same session do NOT re-fire
 *    the subprocess (sentinel guard prevents repeated overwrites).
 * 3. When no .planning/STATE.md exists, the subprocess is not spawned.
 * 4. Path resolution uses __dirname, not hardcoded ~/.claude/.
 */

'use strict';

const { test, describe, beforeEach, afterEach } = require('node:test');
const assert = require('node:assert/strict');
const fs = require('node:fs');
const path = require('node:path');
const os = require('node:os');
const { spawnSync } = require('node:child_process');

const HOOK_PATH = path.resolve(__dirname, '..', 'hooks', 'gsd-context-monitor.js');

/**
 * Run the hook with a given session id and context percentage.
 * Writes a bridge metrics file first, then pipes the hook input via stdin.
 * Returns after the hook exits.
 */
function runHook(sessionId, remainingPct, cwd) {
  // Write the bridge metrics file the hook reads
  const bridgePath = path.join(os.tmpdir(), `claude-ctx-${sessionId}.json`);
  fs.writeFileSync(bridgePath, JSON.stringify({
    session_id: sessionId,
    remaining_percentage: remainingPct,
    used_pct: 100 - remainingPct,
    timestamp: Math.floor(Date.now() / 1000),
  }));

  const input = JSON.stringify({
    session_id: sessionId,
    cwd,
  });

  const result = spawnSync(process.execPath, [HOOK_PATH], {
    input,
    encoding: 'utf-8',
    timeout: 10000,
    env: { ...process.env, HOME: process.env.HOME },
  });

  return { exitCode: result.status, stdout: result.stdout, stderr: result.stderr };
}

/**
 * Wait up to `ms` for a file to exist (the subprocess is fire-and-forget).
 * Returns the file content on success, or null on timeout.
 */
function waitForStoppedAt(statePath, ms = 2000) {
  const deadline = Date.now() + ms;
  while (Date.now() < deadline) {
    try {
      const content = fs.readFileSync(statePath, 'utf-8');
      if (/Stopped [Aa]t.*context exhaustion/.test(content)) return content;
    } catch { /* file may briefly not exist during atomic write */ }
    // Tight poll loop — subprocess should complete in <100ms
    const start = Date.now();
    while (Date.now() - start < 50) { /* spin */ }
  }
  return null;
}

describe('#1974 context exhaustion auto-record', () => {
  let tmpDir;
  let statePath;
  let sessionId;

  beforeEach(() => {
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'gsd-1974-'));
    const planningDir = path.join(tmpDir, '.planning');
    fs.mkdirSync(planningDir, { recursive: true });

    // Minimal STATE.md with Stopped At field
    statePath = path.join(planningDir, 'STATE.md');
    fs.writeFileSync(statePath, [
      '# Session State',
      '',
      '**Current Phase:** 1',
      '**Status:** executing',
      '**Last session:** unset',
      '**Last Date:** unset',
      '**Stopped At:** None',
      '**Resume File:** None',
      '',
    ].join('\n'));

    // Minimal config.json required by gsd-tools
    fs.writeFileSync(path.join(planningDir, 'config.json'), JSON.stringify({ project_code: 'TEST' }));

    sessionId = `test-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
  });

  afterEach(() => {
    fs.rmSync(tmpDir, { recursive: true, force: true });
    // Clean up bridge files
    try {
      const warnPath = path.join(os.tmpdir(), `claude-ctx-${sessionId}-warned.json`);
      if (fs.existsSync(warnPath)) fs.unlinkSync(warnPath);
      const bridgePath = path.join(os.tmpdir(), `claude-ctx-${sessionId}.json`);
      if (fs.existsSync(bridgePath)) fs.unlinkSync(bridgePath);
    } catch { /* noop */ }
  });

  test('spawns subprocess and writes Stopped At field on CRITICAL with active GSD', () => {
    // Trigger CRITICAL — remaining <= 25
    const result = runHook(sessionId, 20, tmpDir);
    assert.strictEqual(result.exitCode, 0, `hook should exit 0: ${result.stderr}`);

    // Wait for fire-and-forget subprocess to write STATE.md
    const content = waitForStoppedAt(statePath);
    assert.ok(content, `STATE.md should contain "context exhaustion" after CRITICAL fire`);
    assert.match(content, /context exhaustion at \d+%/);
  });

  test('does NOT spawn subprocess when .planning/STATE.md is absent', () => {
    // Delete STATE.md to simulate non-GSD project
    fs.unlinkSync(statePath);

    const result = runHook(sessionId, 20, tmpDir);
    assert.strictEqual(result.exitCode, 0);

    // Wait a bit then verify STATE.md was NOT recreated
    const start = Date.now();
    while (Date.now() - start < 500) { /* spin */ }
    assert.ok(!fs.existsSync(statePath), 'STATE.md should not be recreated when absent');
  });

  test('sentinel prevents repeated firing within same session', () => {
    // First CRITICAL fire — should record
    runHook(sessionId, 20, tmpDir);
    const content1 = waitForStoppedAt(statePath);
    assert.ok(content1, 'first fire should record Stopped At');

    // Extract the timestamp from first fire
    const firstMatch = content1.match(/context exhaustion at (\d+)%/);
    assert.ok(firstMatch, 'first fire should have numeric usedPct');

    // Manually set Stopped At to a sentinel value to detect second fire
    const modified = content1.replace(/(\*\*Stopped At:\*\*) .+/, '$1 SENTINEL_SHOULD_NOT_CHANGE');
    fs.writeFileSync(statePath, modified);

    // Second CRITICAL fire — should NOT re-fire the subprocess
    runHook(sessionId, 18, tmpDir);

    // Wait and verify the sentinel is preserved
    const start = Date.now();
    while (Date.now() - start < 500) { /* spin */ }
    const content2 = fs.readFileSync(statePath, 'utf-8');
    assert.match(
      content2,
      /SENTINEL_SHOULD_NOT_CHANGE/,
      'second CRITICAL fire should not re-record (sentinel guard)'
    );
  });

  test('hook uses __dirname-based path (runtime-agnostic)', () => {
    // Verify the hook source references __dirname, not ~/.claude/
    const hookSource = fs.readFileSync(HOOK_PATH, 'utf-8');
    assert.match(
      hookSource,
      /path\.join\(__dirname,\s*'\.\.',\s*'get-shit-done'/,
      'hook must use __dirname-based path resolution for gsd-tools.cjs'
    );
    assert.doesNotMatch(
      hookSource,
      /process\.env\.HOME.*\.claude.*get-shit-done.*gsd-tools\.cjs/,
      'hook must not hardcode ~/.claude/ path'
    );
  });
});
/**
 * Regression tests for #2075: gsd-executor worktree merge systematically
 * deletes prior-wave committed files.
 *
 * Three failure modes documented in issue #2075:
 *
 * Failure Mode B (PRIMARY — unaddressed before this fix):
 *   Executor agent runs `git clean` inside the worktree, removing files
 *   committed on the feature branch. git clean treats them as "untracked"
 *   from the worktree's perspective and deletes them. The executor then
 *   commits only its own deliverables; the subsequent merge brings the
 *   deletions onto the main branch.
 *
 * Failure Mode A (partially addressed in PR #1982):
 *   Worktree created from wrong branch base. Audit all worktree-spawning
 *   workflows for worktree_branch_check presence.
 *
 * Failure Mode C:
 *   Stale content from wrong base overwrites shared files. Covered by
 *   the --hard reset in the worktree_branch_check.
 *
 * Defense-in-depth (from #1977):
 *   Post-commit deletion check: already in gsd-executor.md (--diff-filter=D).
 *   Pre-merge deletion check: already in execute-phase.md (--diff-filter=D).
 */

'use strict';

const { describe, test } = require('node:test');
const assert = require('node:assert/strict');
const fs = require('fs');
const path = require('path');

const EXECUTOR_AGENT_PATH = path.join(__dirname, '..', 'agents', 'gsd-executor.md');
const EXECUTE_PHASE_PATH = path.join(__dirname, '..', 'get-shit-done', 'workflows', 'execute-phase.md');
const QUICK_PATH = path.join(__dirname, '..', 'get-shit-done', 'workflows', 'quick.md');
const DIAGNOSE_PATH = path.join(__dirname, '..', 'get-shit-done', 'workflows', 'diagnose-issues.md');

describe('bug-2075: worktree deletion safeguards', () => {

  describe('Failure Mode B: git clean prohibition in executor agent', () => {
    test('gsd-executor.md explicitly prohibits git clean in worktree context', () => {
      const content = fs.readFileSync(EXECUTOR_AGENT_PATH, 'utf-8');

      // Must have an explicit prohibition section mentioning git clean
      const prohibitsGitClean = (
        content.includes('git clean') &&
        (
          /NEVER.*git clean/i.test(content) ||
          /git clean.*NEVER/i.test(content) ||
          /do not.*git clean/i.test(content) ||
          /git clean.*prohibited/i.test(content) ||
          /prohibited.*git clean/i.test(content) ||
          /forbidden.*git clean/i.test(content) ||
          /git clean.*forbidden/i.test(content) ||
          /must not.*git clean/i.test(content) ||
          /git clean.*must not/i.test(content)
        )
      );

      assert.ok(
        prohibitsGitClean,
        'gsd-executor.md must explicitly prohibit git clean — running it inside a worktree deletes files committed on the feature branch (#2075 Failure Mode B)'
      );
    });

    test('gsd-executor.md git clean prohibition explains the worktree data-loss risk', () => {
      const content = fs.readFileSync(EXECUTOR_AGENT_PATH, 'utf-8');

      // The prohibition must be accompanied by a reason — not just a bare rule.
      // Look for the word "worktree" near the git clean prohibition.
      const gitCleanIdx = content.indexOf('git clean');
      assert.ok(gitCleanIdx > -1, 'gsd-executor.md must mention git clean (to prohibit it)');

      // Extract context around the git clean mention (500 chars either side)
      const contextStart = Math.max(0, gitCleanIdx - 500);
      const contextEnd = Math.min(content.length, gitCleanIdx + 500);
      const context = content.slice(contextStart, contextEnd);

      const hasWorktreeRationale = (
        /worktree/i.test(context) ||
        /delete/i.test(context) ||
        /untracked/i.test(context)
      );

      assert.ok(
        hasWorktreeRationale,
        'The git clean prohibition in gsd-executor.md must explain why: git clean in a worktree deletes files that appear untracked but are committed on the feature branch'
      );
    });
  });

  describe('Failure Mode A: worktree_branch_check audit across all worktree-spawning workflows', () => {
    test('execute-phase.md has worktree_branch_check block with --hard reset', () => {
      const content = fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');

      const blockMatch = content.match(/<worktree_branch_check>([\s\S]*?)<\/worktree_branch_check>/);
      assert.ok(
        blockMatch,
        'execute-phase.md must contain a <worktree_branch_check> block'
      );

      const block = blockMatch[1];
      assert.ok(
        block.includes('reset --hard'),
        'execute-phase.md worktree_branch_check must use git reset --hard (not --soft)'
      );
      assert.ok(
        !block.includes('reset --soft'),
        'execute-phase.md worktree_branch_check must not use git reset --soft'
      );
    });

    test('quick.md has worktree_branch_check block with --hard reset', () => {
      const content = fs.readFileSync(QUICK_PATH, 'utf-8');

      const blockMatch = content.match(/<worktree_branch_check>([\s\S]*?)<\/worktree_branch_check>/);
      assert.ok(
        blockMatch,
        'quick.md must contain a <worktree_branch_check> block'
      );

      const block = blockMatch[1];
      assert.ok(
        block.includes('reset --hard'),
        'quick.md worktree_branch_check must use git reset --hard (not --soft)'
      );
      assert.ok(
        !block.includes('reset --soft'),
        'quick.md worktree_branch_check must not use git reset --soft'
      );
    });

    test('diagnose-issues.md has worktree_branch_check instruction for spawned agents', () => {
      const content = fs.readFileSync(DIAGNOSE_PATH, 'utf-8');

      assert.ok(
        content.includes('worktree_branch_check'),
        'diagnose-issues.md must include worktree_branch_check instruction for spawned debug agents'
      );

      assert.ok(
        content.includes('reset --hard'),
        'diagnose-issues.md worktree_branch_check must instruct agents to use git reset --hard'
      );
    });
  });

  describe('Defense-in-depth: post-commit deletion check (from #1977)', () => {
    test('gsd-executor.md task_commit_protocol has post-commit deletion verification', () => {
      const content = fs.readFileSync(EXECUTOR_AGENT_PATH, 'utf-8');

      assert.ok(
        content.includes('--diff-filter=D'),
        'gsd-executor.md must include --diff-filter=D to detect accidental file deletions after each commit'
      );

      // Must have a warning about unexpected deletions
      assert.ok(
        content.includes('DELETIONS') || content.includes('WARNING'),
        'gsd-executor.md must emit a warning when a commit includes unexpected file deletions'
      );
    });
  });

  describe('Defense-in-depth: pre-merge deletion check (from #1977)', () => {
    test('execute-phase.md worktree merge section has pre-merge deletion check', () => {
      const content = fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');

      const worktreeCleanupStart = content.indexOf('Worktree cleanup');
      assert.ok(
        worktreeCleanupStart > -1,
        'execute-phase.md must have a worktree cleanup section'
      );

      const cleanupSection = content.slice(worktreeCleanupStart);

      assert.ok(
        cleanupSection.includes('--diff-filter=D'),
        'execute-phase.md worktree cleanup must use --diff-filter=D to block deletion-introducing merges'
      );

      // Deletion check must appear before git merge
      const deletionCheckIdx = cleanupSection.indexOf('--diff-filter=D');
      const gitMergeIdx = cleanupSection.indexOf('git merge');
      assert.ok(
        deletionCheckIdx < gitMergeIdx,
        '--diff-filter=D deletion check must appear before git merge in the worktree cleanup section'
      );

      assert.ok(
        cleanupSection.includes('BLOCKED') || cleanupSection.includes('deletion'),
        'execute-phase.md must block or warn when the worktree branch contains file deletions'
      );
    });

    test('quick.md worktree merge section has pre-merge deletion check', () => {
      const content = fs.readFileSync(QUICK_PATH, 'utf-8');

      const mergeIdx = content.indexOf('git merge');
      assert.ok(mergeIdx > -1, 'quick.md must contain a git merge operation');

      // Find the worktree cleanup block (starts after "Worktree cleanup")
      const worktreeCleanupStart = content.indexOf('Worktree cleanup');
      assert.ok(
        worktreeCleanupStart > -1,
        'quick.md must have a worktree cleanup section'
      );

      const cleanupSection = content.slice(worktreeCleanupStart);

      assert.ok(
        cleanupSection.includes('--diff-filter=D') || cleanupSection.includes('diff-filter'),
        'quick.md worktree cleanup must check for file deletions before merging'
      );
    });
  });

});
/**
 * Tests for configurable claude_md_path setting (#2010)
 */

const { describe, test, beforeEach, afterEach } = require('node:test');
const assert = require('node:assert/strict');
const fs = require('fs');
const path = require('path');
const { runGsdTools, createTempProject, cleanup } = require('./helpers.cjs');

describe('claude_md_path config key', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = createTempProject();
  });

  afterEach(() => {
    cleanup(tmpDir);
  });

  test('claude_md_path is in VALID_CONFIG_KEYS', () => {
    const { VALID_CONFIG_KEYS } = require('../get-shit-done/bin/lib/config.cjs');
    assert.ok(VALID_CONFIG_KEYS.has('claude_md_path'));
  });

  test('config template includes claude_md_path', () => {
    const templatePath = path.join(__dirname, '..', 'get-shit-done', 'templates', 'config.json');
    const template = JSON.parse(fs.readFileSync(templatePath, 'utf-8'));
    assert.strictEqual(template.claude_md_path, './CLAUDE.md');
  });

  test('config-get claude_md_path returns default value when not set', () => {
    // Create a config.json without claude_md_path
    const configPath = path.join(tmpDir, '.planning', 'config.json');
    fs.writeFileSync(configPath, JSON.stringify({ mode: 'interactive' }), 'utf-8');

    const result = runGsdTools('config-get claude_md_path --default ./CLAUDE.md', tmpDir, { HOME: tmpDir });
    assert.ok(result.success, `Expected success but got error: ${result.error}`);
    assert.strictEqual(JSON.parse(result.output), './CLAUDE.md');
  });

  test('config-set claude_md_path works', () => {
    const configPath = path.join(tmpDir, '.planning', 'config.json');
    fs.writeFileSync(configPath, JSON.stringify({ mode: 'interactive' }), 'utf-8');

    const setResult = runGsdTools('config-set claude_md_path .claude/CLAUDE.md', tmpDir, { HOME: tmpDir });
    assert.ok(setResult.success, `Expected success but got error: ${setResult.error}`);

    const getResult = runGsdTools('config-get claude_md_path', tmpDir, { HOME: tmpDir });
    assert.ok(getResult.success, `Expected success but got error: ${getResult.error}`);
    assert.strictEqual(JSON.parse(getResult.output), '.claude/CLAUDE.md');
  });

  test('buildNewProjectConfig includes claude_md_path default', () => {
    // Use config-new-project which calls buildNewProjectConfig
    const result = runGsdTools('config-new-project', tmpDir, { HOME: tmpDir });
    assert.ok(result.success, `Expected success but got error: ${result.error}`);

    const configPath = path.join(tmpDir, '.planning', 'config.json');
    const config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
    assert.strictEqual(config.claude_md_path, './CLAUDE.md');
  });
});

describe('cmdGenerateClaudeProfile reads claude_md_path from config', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = createTempProject();
  });

  afterEach(() => {
    cleanup(tmpDir);
  });

  test('uses claude_md_path from config when no --output or --global', () => {
    // Set up config with custom claude_md_path
    const configPath = path.join(tmpDir, '.planning', 'config.json');
    const customPath = '.claude/CLAUDE.md';
    fs.writeFileSync(configPath, JSON.stringify({ claude_md_path: customPath }), 'utf-8');

    // Create the target directory
    fs.mkdirSync(path.join(tmpDir, '.claude'), { recursive: true });

    // Create a minimal analysis file
    const analysisPath = path.join(tmpDir, '.planning', 'analysis.json');
    const analysis = {
      dimensions: {
        communication_style: { rating: 'terse-direct', confidence: 'HIGH' },
      },
      data_source: 'test',
    };
    fs.writeFileSync(analysisPath, JSON.stringify(analysis), 'utf-8');

    const result = runGsdTools(
      ['generate-claude-profile', '--analysis', analysisPath],
      tmpDir,
      { HOME: tmpDir }
    );
    assert.ok(result.success, `Expected success but got error: ${result.error}`);

    const parsed = JSON.parse(result.output);
    // realpathSync resolves tmpdir symlinks (e.g. /var -> /private/var on macOS)
    const realTmpDir = fs.realpathSync(tmpDir);
    const expectedPath = path.join(realTmpDir, customPath);
    assert.strictEqual(parsed.claude_md_path, expectedPath);
    assert.ok(fs.existsSync(expectedPath), `Expected file at ${expectedPath}`);
  });

  test('--output flag overrides claude_md_path from config', () => {
    // Set up config with custom claude_md_path
    const configPath = path.join(tmpDir, '.planning', 'config.json');
    fs.writeFileSync(configPath, JSON.stringify({ claude_md_path: '.claude/CLAUDE.md' }), 'utf-8');

    // Create analysis file
    const analysisPath = path.join(tmpDir, '.planning', 'analysis.json');
    const analysis = {
      dimensions: {
        communication_style: { rating: 'terse-direct', confidence: 'HIGH' },
      },
      data_source: 'test',
    };
    fs.writeFileSync(analysisPath, JSON.stringify(analysis), 'utf-8');

    const outputFile = 'custom-output.md';
    const result = runGsdTools(
      ['generate-claude-profile', '--analysis', analysisPath, '--output', outputFile],
      tmpDir,
      { HOME: tmpDir }
    );
    assert.ok(result.success, `Expected success but got error: ${result.error}`);

    const parsed = JSON.parse(result.output);
    const realTmpDir = fs.realpathSync(tmpDir);
    assert.strictEqual(parsed.claude_md_path, path.join(realTmpDir, outputFile));
  });
});

describe('cmdGenerateClaudeMd reads claude_md_path from config', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = createTempProject();
    // Create minimal project files so generate-claude-md has something to read
    fs.writeFileSync(
      path.join(tmpDir, '.planning', 'PROJECT.md'),
      ['# Test Project', '', 'A test project.'].join('\n'),
      'utf-8'
    );
  });

  afterEach(() => {
    cleanup(tmpDir);
  });

  test('uses claude_md_path from config when no --output', () => {
    // Set up config with custom claude_md_path
    const configPath = path.join(tmpDir, '.planning', 'config.json');
    const customPath = '.claude/CLAUDE.md';
    fs.writeFileSync(configPath, JSON.stringify({ claude_md_path: customPath }), 'utf-8');

    // Create the target directory
    fs.mkdirSync(path.join(tmpDir, '.claude'), { recursive: true });

    const result = runGsdTools('generate-claude-md', tmpDir, { HOME: tmpDir });
    assert.ok(result.success, `Expected success but got error: ${result.error}`);

    const parsed = JSON.parse(result.output);
    const realTmpDir = fs.realpathSync(tmpDir);
    const expectedPath = path.join(realTmpDir, customPath);
    assert.strictEqual(parsed.claude_md_path, expectedPath);
    assert.ok(fs.existsSync(expectedPath), `Expected file at ${expectedPath}`);
  });

  test('--output flag overrides claude_md_path from config', () => {
    // Set up config with custom claude_md_path
    const configPath = path.join(tmpDir, '.planning', 'config.json');
    fs.writeFileSync(configPath, JSON.stringify({ claude_md_path: '.claude/CLAUDE.md' }), 'utf-8');

    const outputFile = 'my-custom.md';
    const result = runGsdTools(['generate-claude-md', '--output', outputFile], tmpDir, { HOME: tmpDir });
    assert.ok(result.success, `Expected success but got error: ${result.error}`);

    const parsed = JSON.parse(result.output);
    const realTmpDir = fs.realpathSync(tmpDir);
    assert.strictEqual(parsed.claude_md_path, path.join(realTmpDir, outputFile));
  });

  test('defaults to ./CLAUDE.md when config has no claude_md_path', () => {
    // Set up config without claude_md_path
    const configPath = path.join(tmpDir, '.planning', 'config.json');
    fs.writeFileSync(configPath, JSON.stringify({ mode: 'interactive' }), 'utf-8');

    const result = runGsdTools('generate-claude-md', tmpDir, { HOME: tmpDir });
    assert.ok(result.success, `Expected success but got error: ${result.error}`);

    const parsed = JSON.parse(result.output);
    const realTmpDir = fs.realpathSync(tmpDir);
    assert.strictEqual(parsed.claude_md_path, path.join(realTmpDir, 'CLAUDE.md'));
  });
});
140
tests/code-review-command.test.cjs
Normal file
140
tests/code-review-command.test.cjs
Normal file
@@ -0,0 +1,140 @@
|
||||
/**
|
||||
* Tests for code_review_command hook in ship workflow (#1876)
|
||||
*
|
||||
* Validates that the external code review command integration is properly
|
||||
* wired into config, templates, and the ship workflow.
|
||||
*/
|
||||
|
||||
const { describe, test, beforeEach, afterEach } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { createTempProject, cleanup, runGsdTools } = require('./helpers.cjs');
|
||||
|
||||
const CONFIG_CJS_PATH = path.join(__dirname, '..', 'get-shit-done', 'bin', 'lib', 'config.cjs');
|
||||
const SHIP_MD_PATH = path.join(__dirname, '..', 'get-shit-done', 'workflows', 'ship.md');
|
||||
const CONFIG_TEMPLATE_PATH = path.join(__dirname, '..', 'get-shit-done', 'templates', 'config.json');
|
||||
|
||||
describe('code_review_command config key', () => {
|
||||
test('workflow.code_review_command is in VALID_CONFIG_KEYS', () => {
|
||||
const { VALID_CONFIG_KEYS } = require(CONFIG_CJS_PATH);
|
||||
assert.ok(
|
||||
VALID_CONFIG_KEYS.has('workflow.code_review_command'),
|
||||
'workflow.code_review_command must be in VALID_CONFIG_KEYS'
|
||||
);
|
||||
});
|
||||
|
||||
test('config-set accepts workflow.code_review_command', () => {
|
||||
const tmpDir = createTempProject();
|
||||
try {
|
||||
// Create config.json first
|
||||
fs.mkdirSync(path.join(tmpDir, '.planning'), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
path.join(tmpDir, '.planning', 'config.json'),
|
||||
JSON.stringify({ workflow: {} }, null, 2)
|
||||
);
|
||||
|
||||
const result = runGsdTools(
|
||||
['config-set', 'workflow.code_review_command', 'my-review-tool --review'],
|
||||
tmpDir,
|
||||
{ HOME: tmpDir }
|
||||
);
|
||||
assert.ok(result.success, 'config-set should succeed');
|
||||
|
||||
const parsed = JSON.parse(result.output);
|
||||
assert.strictEqual(parsed.updated, true);
|
||||
assert.strictEqual(parsed.key, 'workflow.code_review_command');
|
||||
assert.strictEqual(parsed.value, 'my-review-tool --review');
|
||||
} finally {
|
||||
cleanup(tmpDir);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe('config template', () => {
|
||||
test('config.json template has code_review_command under workflow section', () => {
|
||||
const template = JSON.parse(fs.readFileSync(CONFIG_TEMPLATE_PATH, 'utf-8'));
|
||||
assert.ok(template.workflow, 'template must have workflow section');
|
||||
assert.ok(
|
||||
'code_review_command' in template.workflow,
|
||||
'workflow section must contain code_review_command key'
|
||||
);
|
||||
assert.strictEqual(
|
||||
template.workflow.code_review_command,
|
||||
null,
|
||||
'code_review_command default should be null'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('ship workflow code_review_command integration', () => {
|
||||
const shipContent = fs.readFileSync(SHIP_MD_PATH, 'utf-8');
|
||||
|
||||
test('ship.md contains code_review_command config check', () => {
|
||||
assert.ok(
|
||||
shipContent.includes('code_review_command'),
|
||||
'ship.md must reference code_review_command'
|
||||
);
|
||||
});
|
||||
|
||||
test('ship.md has external review sub-step that reads config', () => {
|
||||
assert.ok(
|
||||
shipContent.includes('config-get') && shipContent.includes('workflow.code_review_command'),
|
||||
'ship.md must read workflow.code_review_command from config'
|
||||
);
|
||||
});
|
||||
|
||||
test('ship.md generates diff against base branch for review', () => {
|
||||
assert.ok(
|
||||
shipContent.includes('git diff') && shipContent.includes('BASE_BRANCH'),
|
||||
'ship.md must generate a diff using BASE_BRANCH for the external review'
|
||||
);
|
||||
});
|
||||
|
||||
test('ship.md has JSON parsing for external review output', () => {
|
||||
assert.ok(
|
||||
shipContent.includes('verdict') && shipContent.includes('APPROVED'),
|
||||
'ship.md must parse JSON output with verdict field'
|
||||
);
|
||||
assert.ok(
|
||||
shipContent.includes('REVISE'),
|
||||
'ship.md must handle REVISE verdict'
|
||||
);
|
||||
});
|
||||
|
||||
test('ship.md has timeout handling for external review command (120s)', () => {
|
||||
assert.ok(
|
||||
shipContent.includes('120') || shipContent.includes('timeout'),
|
||||
'ship.md must have timeout handling (120s) for external review command'
|
||||
);
|
||||
});
|
||||
|
||||
test('ship.md has stderr capture on failure', () => {
|
||||
assert.ok(
|
||||
shipContent.includes('stderr'),
|
||||
'ship.md must capture stderr on external review command failure'
|
||||
);
|
||||
});
|
||||
|
||||
test('ship.md pipes review prompt to command via stdin', () => {
|
||||
assert.ok(
|
||||
shipContent.includes('stdin'),
|
||||
'ship.md must pipe the review prompt to the command via stdin'
|
||||
);
|
||||
});
|
||||
|
||||
test('ship.md includes diff stats in review prompt', () => {
|
||||
assert.ok(
|
||||
shipContent.includes('diff --stat') || shipContent.includes('diffstat') || shipContent.includes('--stat'),
|
||||
'ship.md must include diff stats in the review prompt'
|
||||
);
|
||||
});
|
||||
|
||||
test('ship.md falls through to existing review flow on failure', () => {
|
||||
// The external review should not block the existing manual review options
|
||||
assert.ok(
|
||||
shipContent.includes('AskUserQuestion') || shipContent.includes('Skip review'),
|
||||
'ship.md must still offer the existing manual review flow after external review'
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -1196,6 +1196,7 @@ describe('E2E: Copilot full install verification', () => {
|
||||
'gsd-integration-checker.agent.md',
|
||||
'gsd-intel-updater.agent.md',
|
||||
'gsd-nyquist-auditor.agent.md',
|
||||
'gsd-pattern-mapper.agent.md',
|
||||
'gsd-phase-researcher.agent.md',
|
||||
'gsd-plan-checker.agent.md',
|
||||
'gsd-planner.agent.md',
|
||||
|
||||
@@ -1629,9 +1629,11 @@ describe('findProjectRoot', () => {
|
||||
// ─── reapStaleTempFiles ─────────────────────────────────────────────────────
|
||||
|
||||
describe('reapStaleTempFiles', () => {
|
||||
const gsdTmpDir = path.join(os.tmpdir(), 'gsd');
|
||||
|
||||
test('removes stale gsd-*.json files older than maxAgeMs', () => {
|
||||
const tmpDir = os.tmpdir();
|
||||
const stalePath = path.join(tmpDir, `gsd-reap-test-${Date.now()}.json`);
|
||||
fs.mkdirSync(gsdTmpDir, { recursive: true });
|
||||
const stalePath = path.join(gsdTmpDir, `gsd-reap-test-${Date.now()}.json`);
|
||||
fs.writeFileSync(stalePath, '{}');
|
||||
// Set mtime to 10 minutes ago
|
||||
const oldTime = new Date(Date.now() - 10 * 60 * 1000);
|
||||
@@ -1643,8 +1645,8 @@ describe('reapStaleTempFiles', () => {
|
||||
});
|
||||
|
||||
test('preserves fresh gsd-*.json files', () => {
|
||||
const tmpDir = os.tmpdir();
|
||||
const freshPath = path.join(tmpDir, `gsd-reap-fresh-${Date.now()}.json`);
|
||||
fs.mkdirSync(gsdTmpDir, { recursive: true });
|
||||
const freshPath = path.join(gsdTmpDir, `gsd-reap-fresh-${Date.now()}.json`);
|
||||
fs.writeFileSync(freshPath, '{}');
|
||||
|
||||
reapStaleTempFiles('gsd-reap-fresh-', { maxAgeMs: 5 * 60 * 1000 });
|
||||
@@ -1655,8 +1657,8 @@ describe('reapStaleTempFiles', () => {
|
||||
});
|
||||
|
||||
test('removes stale temp directories when present', () => {
|
||||
const tmpDir = os.tmpdir();
|
||||
const staleDir = fs.mkdtempSync(path.join(tmpDir, 'gsd-reap-dir-'));
|
||||
fs.mkdirSync(gsdTmpDir, { recursive: true });
|
||||
const staleDir = fs.mkdtempSync(path.join(gsdTmpDir, 'gsd-reap-dir-'));
|
||||
fs.writeFileSync(path.join(staleDir, 'data.jsonl'), 'test');
|
||||
// Set mtime to 10 minutes ago
|
||||
const oldTime = new Date(Date.now() - 10 * 60 * 1000);
|
||||
|
||||
173
tests/cross-ai-execution.test.cjs
Normal file
173
tests/cross-ai-execution.test.cjs
Normal file
@@ -0,0 +1,173 @@
|
||||
const { test, describe } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const CONFIG_PATH = path.join(__dirname, '..', 'get-shit-done', 'bin', 'lib', 'config.cjs');
|
||||
const EXECUTE_PHASE_PATH = path.join(__dirname, '..', 'get-shit-done', 'workflows', 'execute-phase.md');
|
||||
const CONFIG_TEMPLATE_PATH = path.join(__dirname, '..', 'get-shit-done', 'templates', 'config.json');
|
||||
|
||||
describe('cross-AI execution', () => {
|
||||
|
||||
describe('config keys', () => {
|
||||
test('workflow.cross_ai_execution is in VALID_CONFIG_KEYS', () => {
|
||||
const { VALID_CONFIG_KEYS } = require(CONFIG_PATH);
|
||||
assert.ok(VALID_CONFIG_KEYS.has('workflow.cross_ai_execution'),
|
||||
'VALID_CONFIG_KEYS must include workflow.cross_ai_execution');
|
||||
});
|
||||
|
||||
test('workflow.cross_ai_command is in VALID_CONFIG_KEYS', () => {
|
||||
const { VALID_CONFIG_KEYS } = require(CONFIG_PATH);
|
||||
assert.ok(VALID_CONFIG_KEYS.has('workflow.cross_ai_command'),
|
||||
'VALID_CONFIG_KEYS must include workflow.cross_ai_command');
|
||||
});
|
||||
|
||||
test('workflow.cross_ai_timeout is in VALID_CONFIG_KEYS', () => {
|
||||
const { VALID_CONFIG_KEYS } = require(CONFIG_PATH);
|
||||
assert.ok(VALID_CONFIG_KEYS.has('workflow.cross_ai_timeout'),
|
||||
'VALID_CONFIG_KEYS must include workflow.cross_ai_timeout');
|
||||
});
|
||||
});
|
||||
|
||||
describe('config template defaults', () => {
|
||||
test('config template has cross_ai_execution default', () => {
|
||||
const template = JSON.parse(fs.readFileSync(CONFIG_TEMPLATE_PATH, 'utf-8'));
|
||||
assert.strictEqual(template.workflow.cross_ai_execution, false,
|
||||
'cross_ai_execution should default to false');
|
||||
});
|
||||
|
||||
test('config template has cross_ai_command default', () => {
|
||||
const template = JSON.parse(fs.readFileSync(CONFIG_TEMPLATE_PATH, 'utf-8'));
|
||||
assert.strictEqual(template.workflow.cross_ai_command, '',
|
||||
'cross_ai_command should default to empty string');
|
||||
});
|
||||
|
||||
test('config template has cross_ai_timeout default', () => {
|
||||
const template = JSON.parse(fs.readFileSync(CONFIG_TEMPLATE_PATH, 'utf-8'));
|
||||
assert.strictEqual(template.workflow.cross_ai_timeout, 300,
|
||||
'cross_ai_timeout should default to 300 seconds');
|
||||
});
|
||||
});
|
||||
|
||||
describe('execute-phase.md cross-AI step', () => {
|
||||
let content;
|
||||
|
||||
test('execute-phase.md has a cross-AI execution step', () => {
|
||||
content = fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
assert.ok(content.includes('<step name="cross_ai_delegation">'),
|
||||
'execute-phase.md must have a step named cross_ai_delegation');
|
||||
});
|
||||
|
||||
test('cross-AI step appears between discover_and_group_plans and execute_waves', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
const discoverIdx = content.indexOf('<step name="discover_and_group_plans">');
|
||||
const crossAiIdx = content.indexOf('<step name="cross_ai_delegation">');
|
||||
const executeIdx = content.indexOf('<step name="execute_waves">');
|
||||
assert.ok(discoverIdx < crossAiIdx, 'cross_ai_delegation must come after discover_and_group_plans');
|
||||
assert.ok(crossAiIdx < executeIdx, 'cross_ai_delegation must come before execute_waves');
|
||||
});
|
||||
|
||||
test('cross-AI step handles --cross-ai flag', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
assert.ok(content.includes('--cross-ai'),
|
||||
'execute-phase.md must reference --cross-ai flag');
|
||||
});
|
||||
|
||||
test('cross-AI step handles --no-cross-ai flag', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
assert.ok(content.includes('--no-cross-ai'),
|
||||
'execute-phase.md must reference --no-cross-ai flag');
|
||||
});
|
||||
|
||||
test('cross-AI step uses stdin-based prompt delivery', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
// The step must describe piping prompt via stdin, not shell interpolation
|
||||
assert.ok(content.includes('stdin'),
|
||||
'cross-AI step must describe stdin-based prompt delivery');
|
||||
});
|
||||
|
||||
test('cross-AI step validates summary output', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
// The step must describe validating the captured summary
|
||||
const crossAiSection = content.substring(
|
||||
content.indexOf('<step name="cross_ai_delegation">'),
|
||||
content.indexOf('</step>', content.indexOf('<step name="cross_ai_delegation">')) + '</step>'.length
|
||||
);
|
||||
assert.ok(
|
||||
crossAiSection.includes('SUMMARY') && crossAiSection.includes('valid'),
|
||||
'cross-AI step must validate the summary output'
|
||||
);
|
||||
});
|
||||
|
||||
test('cross-AI step warns about dirty working tree', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
const crossAiSection = content.substring(
|
||||
content.indexOf('<step name="cross_ai_delegation">'),
|
||||
content.indexOf('</step>', content.indexOf('<step name="cross_ai_delegation">')) + '</step>'.length
|
||||
);
|
||||
assert.ok(
|
||||
crossAiSection.includes('dirty') || crossAiSection.includes('uncommitted') || crossAiSection.includes('working tree'),
|
||||
'cross-AI step must warn about dirty/uncommitted changes from external command'
|
||||
);
|
||||
});
|
||||
|
||||
test('cross-AI step reads cross_ai_command from config', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
const crossAiSection = content.substring(
|
||||
content.indexOf('<step name="cross_ai_delegation">'),
|
||||
content.indexOf('</step>', content.indexOf('<step name="cross_ai_delegation">')) + '</step>'.length
|
||||
);
|
||||
assert.ok(
|
||||
crossAiSection.includes('cross_ai_command'),
|
||||
'cross-AI step must read cross_ai_command from config'
|
||||
);
|
||||
});
|
||||
|
||||
test('cross-AI step reads cross_ai_timeout from config', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
const crossAiSection = content.substring(
|
||||
content.indexOf('<step name="cross_ai_delegation">'),
|
||||
content.indexOf('</step>', content.indexOf('<step name="cross_ai_delegation">')) + '</step>'.length
|
||||
);
|
||||
assert.ok(
|
||||
crossAiSection.includes('cross_ai_timeout'),
|
||||
'cross-AI step must read cross_ai_timeout from config'
|
||||
);
|
||||
});
|
||||
|
||||
test('cross-AI step handles failure with retry/skip/abort', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
const crossAiSection = content.substring(
|
||||
content.indexOf('<step name="cross_ai_delegation">'),
|
||||
content.indexOf('</step>', content.indexOf('<step name="cross_ai_delegation">')) + '</step>'.length
|
||||
);
|
||||
assert.ok(crossAiSection.includes('retry'), 'cross-AI step must offer retry on failure');
|
||||
assert.ok(crossAiSection.includes('skip'), 'cross-AI step must offer skip on failure');
|
||||
assert.ok(crossAiSection.includes('abort'), 'cross-AI step must offer abort on failure');
|
||||
});
|
||||
|
||||
test('cross-AI step skips normal executor for handled plans', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
const crossAiSection = content.substring(
|
||||
content.indexOf('<step name="cross_ai_delegation">'),
|
||||
content.indexOf('</step>', content.indexOf('<step name="cross_ai_delegation">')) + '</step>'.length
|
||||
);
|
||||
assert.ok(
|
||||
crossAiSection.includes('skip') && (crossAiSection.includes('executor') || crossAiSection.includes('execute_waves')),
|
||||
'cross-AI step must describe skipping normal executor for cross-AI handled plans'
|
||||
);
|
||||
});
|
||||
|
||||
test('parse_args step includes --cross-ai and --no-cross-ai', () => {
|
||||
content = content || fs.readFileSync(EXECUTE_PHASE_PATH, 'utf-8');
|
||||
const parseArgsSection = content.substring(
|
||||
content.indexOf('<step name="parse_args"'),
|
||||
content.indexOf('</step>', content.indexOf('<step name="parse_args"')) + '</step>'.length
|
||||
);
|
||||
assert.ok(parseArgsSection.includes('--cross-ai'),
|
||||
'parse_args step must parse --cross-ai flag');
|
||||
assert.ok(parseArgsSection.includes('--no-cross-ai'),
|
||||
'parse_args step must parse --no-cross-ai flag');
|
||||
});
|
||||
});
|
||||
});
|
||||
222
tests/cursor-reviewer.test.cjs
Normal file
222
tests/cursor-reviewer.test.cjs
Normal file
@@ -0,0 +1,222 @@
|
||||
/**
|
||||
* Cursor CLI Reviewer Tests (#1960)
|
||||
*
|
||||
* Verifies that /gsd-review includes Cursor CLI as a peer reviewer:
|
||||
* - review.md workflow contains cursor detection, flag parsing, self-detection, invocation
|
||||
* - commands/gsd/review.md command file mentions --cursor flag
|
||||
* - help.md lists --cursor in the /gsd-review signature
|
||||
* - docs/COMMANDS.md has --cursor flag row
|
||||
* - docs/FEATURES.md has Cursor in the review section
|
||||
* - i18n docs mirror the same content
|
||||
* - REVIEWS.md template includes Cursor Review section
|
||||
*/
|
||||
|
||||
const { test, describe } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const ROOT = path.join(__dirname, '..');
|
||||
|
||||
describe('Cursor CLI reviewer in /gsd-review (#1960)', () => {
|
||||
|
||||
// --- review.md workflow ---
|
||||
|
||||
describe('review.md workflow', () => {
|
||||
const reviewPath = path.join(ROOT, 'get-shit-done', 'workflows', 'review.md');
|
||||
let content;
|
||||
|
||||
test('review.md exists', () => {
|
||||
assert.ok(fs.existsSync(reviewPath), 'review.md should exist');
|
||||
content = fs.readFileSync(reviewPath, 'utf-8');
|
||||
});
|
||||
|
||||
test('contains cursor CLI detection via command -v', () => {
|
||||
const c = fs.readFileSync(reviewPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('command -v cursor'),
|
||||
'review.md should detect cursor CLI via "command -v cursor"'
|
||||
);
|
||||
});
|
||||
|
||||
test('contains --cursor flag parsing', () => {
|
||||
const c = fs.readFileSync(reviewPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'review.md should parse --cursor flag'
|
||||
);
|
||||
});
|
||||
|
||||
test('contains CURSOR_SESSION_ID self-detection', () => {
|
||||
const c = fs.readFileSync(reviewPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('CURSOR_SESSION_ID'),
|
||||
'review.md should detect self-CLI via CURSOR_SESSION_ID env var'
|
||||
);
|
||||
});
|
||||
|
||||
test('contains cursor agent invocation command', () => {
|
||||
const c = fs.readFileSync(reviewPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('cursor agent -p --mode ask --trust'),
|
||||
'review.md should invoke cursor via "cursor agent -p --mode ask --trust"'
|
||||
);
|
||||
});
|
||||
|
||||
test('contains Cursor Review section in REVIEWS.md template', () => {
|
||||
const c = fs.readFileSync(reviewPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('Cursor Review'),
|
||||
'review.md should include a "Cursor Review" section in the REVIEWS.md template'
|
||||
);
|
||||
});
|
||||
|
||||
test('lists cursor in the reviewers frontmatter array', () => {
|
||||
const c = fs.readFileSync(reviewPath, 'utf-8');
|
||||
assert.ok(
|
||||
/reviewers:.*cursor/.test(c),
|
||||
'review.md should list cursor in the reviewers array'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// --- commands/gsd/review.md ---
|
||||
|
||||
describe('commands/gsd/review.md', () => {
|
||||
const cmdPath = path.join(ROOT, 'commands', 'gsd', 'review.md');
|
||||
|
||||
test('mentions --cursor flag', () => {
|
||||
const c = fs.readFileSync(cmdPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'commands/gsd/review.md should mention --cursor flag'
|
||||
);
|
||||
});
|
||||
|
||||
test('mentions Cursor in objective or context', () => {
|
||||
const c = fs.readFileSync(cmdPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('Cursor'),
|
||||
'commands/gsd/review.md should mention Cursor'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// --- help.md ---
|
||||
|
||||
describe('help.md', () => {
|
||||
const helpPath = path.join(ROOT, 'get-shit-done', 'workflows', 'help.md');
|
||||
|
||||
test('lists --cursor in /gsd-review signature', () => {
|
||||
const c = fs.readFileSync(helpPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'help.md should list --cursor in the /gsd-review command signature'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// --- docs/COMMANDS.md ---
|
||||
|
||||
describe('docs/COMMANDS.md', () => {
|
||||
const docsPath = path.join(ROOT, 'docs', 'COMMANDS.md');
|
||||
|
||||
test('has --cursor flag row', () => {
|
||||
const c = fs.readFileSync(docsPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'docs/COMMANDS.md should have a --cursor flag row'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// --- docs/FEATURES.md ---
|
||||
|
||||
describe('docs/FEATURES.md', () => {
|
||||
const featPath = path.join(ROOT, 'docs', 'FEATURES.md');
|
||||
|
||||
test('has --cursor in review command signature', () => {
|
||||
const c = fs.readFileSync(featPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'docs/FEATURES.md should include --cursor in the review command signature'
|
||||
);
|
||||
});
|
||||
|
||||
test('mentions Cursor in the review purpose', () => {
|
||||
const c = fs.readFileSync(featPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('Cursor'),
|
||||
'docs/FEATURES.md should mention Cursor in the review section'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// --- i18n: ja-JP ---
|
||||
|
||||
describe('docs/ja-JP/COMMANDS.md', () => {
|
||||
const jaPath = path.join(ROOT, 'docs', 'ja-JP', 'COMMANDS.md');
|
||||
|
||||
test('has --cursor flag row', () => {
|
||||
const c = fs.readFileSync(jaPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'docs/ja-JP/COMMANDS.md should have a --cursor flag row'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('docs/ja-JP/FEATURES.md', () => {
|
||||
const jaPath = path.join(ROOT, 'docs', 'ja-JP', 'FEATURES.md');
|
||||
|
||||
test('has --cursor in review command signature', () => {
|
||||
const c = fs.readFileSync(jaPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'docs/ja-JP/FEATURES.md should include --cursor in the review command signature'
|
||||
);
|
||||
});
|
||||
|
||||
test('mentions Cursor in the review section', () => {
|
||||
const c = fs.readFileSync(jaPath, 'utf-8');
|
||||
assert.ok(
|
||||
/Cursor/i.test(fs.readFileSync(jaPath, 'utf-8')),
|
||||
'docs/ja-JP/FEATURES.md should mention Cursor in the review section'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// --- i18n: ko-KR ---
|
||||
|
||||
describe('docs/ko-KR/COMMANDS.md', () => {
|
||||
const koPath = path.join(ROOT, 'docs', 'ko-KR', 'COMMANDS.md');
|
||||
|
||||
test('has --cursor flag row', () => {
|
||||
const c = fs.readFileSync(koPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'docs/ko-KR/COMMANDS.md should have a --cursor flag row'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('docs/ko-KR/FEATURES.md', () => {
|
||||
const koPath = path.join(ROOT, 'docs', 'ko-KR', 'FEATURES.md');
|
||||
|
||||
test('has --cursor in review command signature', () => {
|
||||
const c = fs.readFileSync(koPath, 'utf-8');
|
||||
assert.ok(
|
||||
c.includes('--cursor'),
|
||||
'docs/ko-KR/FEATURES.md should include --cursor in the review command signature'
|
||||
);
|
||||
});
|
||||
|
||||
test('mentions Cursor in the review section', () => {
|
||||
const c = fs.readFileSync(koPath, 'utf-8');
|
||||
assert.ok(
|
||||
/Cursor/i.test(fs.readFileSync(koPath, 'utf-8')),
|
||||
'docs/ko-KR/FEATURES.md should mention Cursor in the review section'
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
168
tests/extract-learnings.test.cjs
Normal file
168
tests/extract-learnings.test.cjs
Normal file
@@ -0,0 +1,168 @@
|
||||
/**
|
||||
* Extract-Learnings Command & Workflow Tests
|
||||
*
|
||||
* Validates command file existence, frontmatter correctness, workflow content,
|
||||
* 4 learning categories, capture_thought handling, graceful degradation,
|
||||
* LEARNINGS.md output, and missing artifact handling.
|
||||
*/
|
||||
|
||||
const { describe, test } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const COMMAND_PATH = path.join(__dirname, '..', 'commands', 'gsd', 'extract_learnings.md');
|
||||
const WORKFLOW_PATH = path.join(__dirname, '..', 'get-shit-done', 'workflows', 'extract_learnings.md');
|
||||
|
||||
describe('extract-learnings command', () => {
|
||||
test('command file exists', () => {
|
||||
assert.ok(fs.existsSync(COMMAND_PATH), 'commands/gsd/extract_learnings.md should exist');
|
||||
});
|
||||
|
||||
test('command file has correct name frontmatter', () => {
|
||||
const content = fs.readFileSync(COMMAND_PATH, 'utf-8');
|
||||
assert.ok(content.includes('name: gsd:extract-learnings'), 'Command must have name: gsd:extract-learnings');
|
||||
});
|
||||
|
||||
test('command file has description frontmatter', () => {
|
||||
const content = fs.readFileSync(COMMAND_PATH, 'utf-8');
|
||||
assert.ok(content.includes('description:'), 'Command must have description frontmatter');
|
||||
});
|
||||
|
||||
test('command file has argument-hint for phase-number', () => {
|
||||
const content = fs.readFileSync(COMMAND_PATH, 'utf-8');
|
||||
assert.ok(content.includes('argument-hint:'), 'Command must have argument-hint');
|
||||
assert.ok(content.includes('<phase-number>'), 'argument-hint must reference <phase-number>');
|
||||
});
|
||||
|
||||
test('command file has allowed-tools list', () => {
|
||||
const content = fs.readFileSync(COMMAND_PATH, 'utf-8');
|
||||
assert.ok(content.includes('allowed-tools:'), 'Command must have allowed-tools');
|
||||
assert.ok(content.includes('Read'), 'allowed-tools must include Read');
|
||||
assert.ok(content.includes('Write'), 'allowed-tools must include Write');
|
||||
assert.ok(content.includes('Bash'), 'allowed-tools must include Bash');
|
||||
assert.ok(content.includes('Grep'), 'allowed-tools must include Grep');
|
||||
assert.ok(content.includes('Glob'), 'allowed-tools must include Glob');
|
||||
assert.ok(content.includes('Agent'), 'allowed-tools must include Agent');
|
||||
});
|
||||
|
||||
test('command file has type: prompt', () => {
|
||||
const content = fs.readFileSync(COMMAND_PATH, 'utf-8');
|
||||
assert.ok(content.includes('type: prompt'), 'Command must have type: prompt');
|
||||
});
|
||||
|
||||
test('command references the workflow via execution_context', () => {
|
||||
const content = fs.readFileSync(COMMAND_PATH, 'utf-8');
|
||||
assert.ok(
|
||||
content.includes('workflows/extract_learnings.md'),
|
||||
'Command must reference workflows/extract_learnings.md in execution_context'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('extract-learnings workflow', () => {
|
||||
test('workflow file exists', () => {
|
||||
assert.ok(fs.existsSync(WORKFLOW_PATH), 'workflows/extract_learnings.md should exist');
|
||||
});
|
||||
|
||||
test('workflow has objective tag', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('<objective>'), 'Workflow must have <objective> tag');
|
||||
assert.ok(content.includes('</objective>'), 'Workflow must close <objective> tag');
|
||||
});
|
||||
|
||||
test('workflow has process tag', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('<process>'), 'Workflow must have <process> tag');
|
||||
assert.ok(content.includes('</process>'), 'Workflow must close <process> tag');
|
||||
});
|
||||
|
||||
test('workflow has step tags', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('<step name='), 'Workflow must have named step tags');
|
||||
assert.ok(content.includes('</step>'), 'Workflow must close step tags');
|
||||
});
|
||||
|
||||
test('workflow has success_criteria tag', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('<success_criteria>'), 'Workflow must have <success_criteria> tag');
|
||||
assert.ok(content.includes('</success_criteria>'), 'Workflow must close <success_criteria> tag');
|
||||
});
|
||||
|
||||
test('workflow has critical_rules tag', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('<critical_rules>'), 'Workflow must have <critical_rules> tag');
|
||||
assert.ok(content.includes('</critical_rules>'), 'Workflow must close <critical_rules> tag');
|
||||
});
|
||||
|
||||
test('workflow reads required artifacts (PLAN.md and SUMMARY.md)', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('PLAN.md'), 'Workflow must reference PLAN.md');
|
||||
assert.ok(content.includes('SUMMARY.md'), 'Workflow must reference SUMMARY.md');
|
||||
});
|
||||
|
||||
test('workflow reads optional artifacts (VERIFICATION.md, UAT.md, STATE.md)', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('VERIFICATION.md'), 'Workflow must reference VERIFICATION.md');
|
||||
assert.ok(content.includes('UAT.md'), 'Workflow must reference UAT.md');
|
||||
assert.ok(content.includes('STATE.md'), 'Workflow must reference STATE.md');
|
||||
});
|
||||
|
||||
test('workflow extracts all 4 learning categories', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.toLowerCase().includes('decision'), 'Workflow must extract decisions');
|
||||
assert.ok(content.toLowerCase().includes('lesson'), 'Workflow must extract lessons');
|
||||
assert.ok(content.toLowerCase().includes('pattern'), 'Workflow must extract patterns');
|
||||
assert.ok(content.toLowerCase().includes('surprise'), 'Workflow must extract surprises');
|
||||
});
|
||||
|
||||
test('workflow handles capture_thought tool availability', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('capture_thought'), 'Workflow must reference capture_thought tool');
|
||||
});
|
||||
|
||||
test('workflow degrades gracefully when capture_thought is unavailable', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(
|
||||
content.includes('graceful') || content.includes('not available') || content.includes('unavailable') || content.includes('fallback'),
|
||||
'Workflow must handle graceful degradation when capture_thought is unavailable'
|
||||
);
|
||||
});
|
||||
|
||||
test('workflow outputs LEARNINGS.md', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('LEARNINGS.md'), 'Workflow must output LEARNINGS.md');
|
||||
});
|
||||
|
||||
test('workflow handles missing artifacts gracefully', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(
|
||||
content.includes('missing') || content.includes('not found') || content.includes('optional'),
|
||||
'Workflow must handle missing artifacts'
|
||||
);
|
||||
});
|
||||
|
||||
test('workflow includes source attribution for extracted items', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(
|
||||
content.includes('source') || content.includes('attribution') || content.includes('Source:'),
|
||||
'Workflow must include source attribution for extracted items'
|
||||
);
|
||||
});
|
||||
|
||||
test('workflow specifies LEARNINGS.md YAML frontmatter fields', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(content.includes('phase'), 'LEARNINGS.md frontmatter must include phase');
|
||||
assert.ok(content.includes('phase_name'), 'LEARNINGS.md frontmatter must include phase_name');
|
||||
assert.ok(content.includes('generated'), 'LEARNINGS.md frontmatter must include generated');
|
||||
assert.ok(content.includes('missing_artifacts'), 'LEARNINGS.md frontmatter must include missing_artifacts');
|
||||
});
|
||||
|
||||
test('workflow supports overwriting previous LEARNINGS.md on re-run', () => {
|
||||
const content = fs.readFileSync(WORKFLOW_PATH, 'utf-8');
|
||||
assert.ok(
|
||||
content.includes('overwrite') || content.includes('overwrit') || content.includes('replace'),
|
||||
'Workflow must support overwriting previous LEARNINGS.md'
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -113,6 +113,104 @@ describe('extractFrontmatter', () => {
|
||||
assert.strictEqual(result.second, 'two');
|
||||
assert.strictEqual(result.third, 'three');
|
||||
});
|
||||
|
||||
// ─── Bug #2130: body --- sequence mis-parse ──────────────────────────────
|
||||
|
||||
test('#2130: frontmatter at top with YAML example block in body — returns top frontmatter', () => {
|
||||
const content = [
|
||||
'---',
|
||||
'name: my-agent',
|
||||
'type: execute',
|
||||
'---',
|
||||
'',
|
||||
'# Documentation',
|
||||
'',
|
||||
'Here is a YAML example:',
|
||||
'',
|
||||
'```yaml',
|
||||
'---',
|
||||
'key: value',
|
||||
'other: stuff',
|
||||
'---',
|
||||
'```',
|
||||
'',
|
||||
'End of doc.',
|
||||
].join('\n');
|
||||
const result = extractFrontmatter(content);
|
||||
assert.strictEqual(result.name, 'my-agent', 'should extract name from TOP frontmatter');
|
||||
assert.strictEqual(result.type, 'execute', 'should extract type from TOP frontmatter');
|
||||
assert.strictEqual(result.key, undefined, 'should NOT extract key from body YAML block');
|
||||
assert.strictEqual(result.other, undefined, 'should NOT extract other from body YAML block');
|
||||
});
|
||||
|
||||
test('#2130: frontmatter at top with horizontal rules in body — returns top frontmatter', () => {
|
||||
const content = [
|
||||
'---',
|
||||
'title: My Doc',
|
||||
'status: active',
|
||||
'---',
|
||||
'',
|
||||
'# Section One',
|
||||
'',
|
||||
'Some text.',
|
||||
'',
|
||||
'---',
|
||||
'',
|
||||
'# Section Two',
|
||||
'',
|
||||
'More text.',
|
||||
'',
|
||||
'---',
|
||||
'',
|
||||
'# Section Three',
|
||||
].join('\n');
|
||||
const result = extractFrontmatter(content);
|
||||
assert.strictEqual(result.title, 'My Doc', 'should extract title from TOP frontmatter');
|
||||
assert.strictEqual(result.status, 'active', 'should extract status from TOP frontmatter');
|
||||
});
|
||||
|
||||
test('#2130: body-only --- block with no frontmatter at byte 0 — returns empty', () => {
|
||||
const content = [
|
||||
'# My Document',
|
||||
'',
|
||||
'Some intro text.',
|
||||
'',
|
||||
'---',
|
||||
'key: value',
|
||||
'other: stuff',
|
||||
'---',
|
||||
'',
|
||||
'End of doc.',
|
||||
].join('\n');
|
||||
const result = extractFrontmatter(content);
|
||||
assert.deepStrictEqual(result, {}, 'should return empty object when --- block is not at byte 0');
|
||||
});
|
||||
|
||||
test('#2130: valid frontmatter at byte 0 still works (regression guard)', () => {
|
||||
const content = [
|
||||
'---',
|
||||
'phase: 01',
|
||||
'plan: 03',
|
||||
'type: execute',
|
||||
'wave: 1',
|
||||
'depends_on: ["01-01", "01-02"]',
|
||||
'files_modified:',
|
||||
' - src/auth.ts',
|
||||
' - src/middleware.ts',
|
||||
'autonomous: true',
|
||||
'---',
|
||||
'',
|
||||
'# Plan body here',
|
||||
].join('\n');
|
||||
const result = extractFrontmatter(content);
|
||||
assert.strictEqual(result.phase, '01');
|
||||
assert.strictEqual(result.plan, '03');
|
||||
assert.strictEqual(result.type, 'execute');
|
||||
assert.strictEqual(result.wave, '1');
|
||||
assert.deepStrictEqual(result.depends_on, ['01-01', '01-02']);
|
||||
assert.deepStrictEqual(result.files_modified, ['src/auth.ts', 'src/middleware.ts']);
|
||||
assert.strictEqual(result.autonomous, 'true');
|
||||
});
|
||||
});
|
||||
|
||||
// ─── reconstructFrontmatter ─────────────────────────────────────────────────
|
||||
|
||||
550
tests/gsd2-import.test.cjs
Normal file
550
tests/gsd2-import.test.cjs
Normal file
@@ -0,0 +1,550 @@
|
||||
'use strict';
|
||||
|
||||
const { describe, it, test, beforeEach, afterEach } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('node:fs');
|
||||
const path = require('node:path');
|
||||
const { createTempDir, cleanup, runGsdTools } = require('./helpers.cjs');
|
||||
|
||||
const {
|
||||
findGsd2Root,
|
||||
parseSlicesFromRoadmap,
|
||||
parseMilestoneTitle,
|
||||
parseTaskTitle,
|
||||
parseTaskDescription,
|
||||
parseTaskMustHaves,
|
||||
parseGsd2,
|
||||
buildPlanningArtifacts,
|
||||
buildRoadmapMd,
|
||||
buildStateMd,
|
||||
slugify,
|
||||
zeroPad,
|
||||
} = require('../get-shit-done/bin/lib/gsd2-import.cjs');
|
||||
|
||||
// ─── Fixture Builders ──────────────────────────────────────────────────────
|
||||
|
||||
/** Build a minimal but complete GSD-2 .gsd/ directory in tmpDir. */
|
||||
function makeGsd2Project(tmpDir, opts = {}) {
|
||||
const gsdDir = path.join(tmpDir, '.gsd');
|
||||
const m001Dir = path.join(gsdDir, 'milestones', 'M001');
|
||||
const s01Dir = path.join(m001Dir, 'slices', 'S01');
|
||||
const s02Dir = path.join(m001Dir, 'slices', 'S02');
|
||||
const s01TasksDir = path.join(s01Dir, 'tasks');
|
||||
|
||||
fs.mkdirSync(s01TasksDir, { recursive: true });
|
||||
|
||||
fs.writeFileSync(path.join(gsdDir, 'PROJECT.md'), '# My Project\n\nA test project.\n');
|
||||
fs.writeFileSync(path.join(gsdDir, 'REQUIREMENTS.md'), [
|
||||
'# Requirements',
|
||||
'',
|
||||
'## Active',
|
||||
'',
|
||||
'### R001 — Do the thing',
|
||||
'',
|
||||
'- Status: active',
|
||||
'- Description: The core requirement.',
|
||||
'',
|
||||
].join('\n'));
|
||||
|
||||
const roadmap = [
|
||||
'# M001: Foundation',
|
||||
'',
|
||||
'**Vision:** Build the foundation.',
|
||||
'',
|
||||
'## Success Criteria',
|
||||
'',
|
||||
'- It works.',
|
||||
'',
|
||||
'## Slices',
|
||||
'',
|
||||
'- [x] **S01: Setup** `risk:low` `depends:[]`',
|
||||
' > After this: setup complete',
|
||||
'- [ ] **S02: Auth System** `risk:medium` `depends:[S01]`',
|
||||
' > After this: auth works',
|
||||
].join('\n');
|
||||
fs.writeFileSync(path.join(m001Dir, 'M001-ROADMAP.md'), roadmap);
|
||||
|
||||
// S01 — completed slice with research and a done task
|
||||
fs.writeFileSync(path.join(s01Dir, 'S01-PLAN.md'), [
|
||||
'# S01: Setup',
|
||||
'',
|
||||
'**Goal:** Set up the project.',
|
||||
'',
|
||||
'## Tasks',
|
||||
'- [x] **T01: Init**',
|
||||
].join('\n'));
|
||||
fs.writeFileSync(path.join(s01Dir, 'S01-RESEARCH.md'), '# Research\n\nSome research.\n');
|
||||
fs.writeFileSync(path.join(s01Dir, 'S01-SUMMARY.md'), '---\nstatus: done\n---\n\nSlice done.\n');
|
||||
|
||||
fs.writeFileSync(path.join(s01TasksDir, 'T01-PLAN.md'), [
|
||||
'# T01: Init Project',
|
||||
'',
|
||||
'**Slice:** S01 — **Milestone:** M001',
|
||||
'',
|
||||
'## Description',
|
||||
'Initialize the project structure.',
|
||||
'',
|
||||
'## Must-Haves',
|
||||
'- [x] package.json exists',
|
||||
'- [x] tsconfig.json exists',
|
||||
'',
|
||||
'## Files',
|
||||
'- `package.json`',
|
||||
'- `tsconfig.json`',
|
||||
].join('\n'));
|
||||
fs.writeFileSync(path.join(s01TasksDir, 'T01-SUMMARY.md'), [
|
||||
'---',
|
||||
'status: done',
|
||||
'completed_at: 2025-01-15',
|
||||
'---',
|
||||
'',
|
||||
'# T01: Init Project',
|
||||
'',
|
||||
'Set up package.json and tsconfig.json.',
|
||||
].join('\n'));
|
||||
|
||||
// S02 — not started: slice appears in roadmap but no slice directory
|
||||
if (opts.withS02Dir) {
|
||||
fs.mkdirSync(path.join(s02Dir, 'tasks'), { recursive: true });
|
||||
fs.writeFileSync(path.join(s02Dir, 'S02-PLAN.md'), [
|
||||
'# S02: Auth System',
|
||||
'',
|
||||
'**Goal:** Add authentication.',
|
||||
'',
|
||||
'## Tasks',
|
||||
'- [ ] **T01: JWT middleware**',
|
||||
].join('\n'));
|
||||
fs.writeFileSync(path.join(s02Dir, 'tasks', 'T01-PLAN.md'), [
|
||||
'# T01: JWT Middleware',
|
||||
'',
|
||||
'**Slice:** S02 — **Milestone:** M001',
|
||||
'',
|
||||
'## Description',
|
||||
'Implement JWT token validation middleware.',
|
||||
'',
|
||||
'## Must-Haves',
|
||||
'- [ ] validateToken() returns 401 on invalid JWT',
|
||||
].join('\n'));
|
||||
}
|
||||
|
||||
return gsdDir;
|
||||
}
|
||||
|
||||
/** Build a two-milestone GSD-2 project. */
|
||||
function makeTwoMilestoneProject(tmpDir) {
|
||||
const gsdDir = path.join(tmpDir, '.gsd');
|
||||
const m001Dir = path.join(gsdDir, 'milestones', 'M001');
|
||||
const m002Dir = path.join(gsdDir, 'milestones', 'M002');
|
||||
|
||||
fs.mkdirSync(path.join(m001Dir, 'slices', 'S01', 'tasks'), { recursive: true });
|
||||
fs.mkdirSync(path.join(m002Dir, 'slices', 'S01', 'tasks'), { recursive: true });
|
||||
|
||||
fs.writeFileSync(path.join(gsdDir, 'PROJECT.md'), '# Multi-milestone Project\n');
|
||||
|
||||
fs.writeFileSync(path.join(m001Dir, 'M001-ROADMAP.md'), [
|
||||
'# M001: Alpha',
|
||||
'',
|
||||
'## Slices',
|
||||
'',
|
||||
'- [x] **S01: Core** `risk:low` `depends:[]`',
|
||||
'- [x] **S02: API** `risk:low` `depends:[S01]`',
|
||||
].join('\n'));
|
||||
|
||||
fs.writeFileSync(path.join(m002Dir, 'M002-ROADMAP.md'), [
|
||||
'# M002: Beta',
|
||||
'',
|
||||
'## Slices',
|
||||
'',
|
||||
'- [ ] **S01: Dashboard** `risk:medium` `depends:[]`',
|
||||
].join('\n'));
|
||||
|
||||
return gsdDir;
|
||||
}
|
||||
|
||||
// ─── Unit Tests ────────────────────────────────────────────────────────────
|
||||
|
||||
describe('parseSlicesFromRoadmap', () => {
|
||||
test('parses done and pending slices', () => {
|
||||
const content = [
|
||||
'## Slices',
|
||||
'',
|
||||
'- [x] **S01: Setup** `risk:low` `depends:[]`',
|
||||
'- [ ] **S02: Auth System** `risk:medium` `depends:[S01]`',
|
||||
].join('\n');
|
||||
const slices = parseSlicesFromRoadmap(content);
|
||||
assert.strictEqual(slices.length, 2);
|
||||
assert.deepStrictEqual(slices[0], { done: true, id: 'S01', title: 'Setup' });
|
||||
assert.deepStrictEqual(slices[1], { done: false, id: 'S02', title: 'Auth System' });
|
||||
});
|
||||
|
||||
test('returns empty array when no Slices section', () => {
|
||||
const slices = parseSlicesFromRoadmap('# M001: Title\n\n## Success Criteria\n\n- Works.');
|
||||
assert.strictEqual(slices.length, 0);
|
||||
});
|
||||
|
||||
test('ignores non-slice lines in the section', () => {
|
||||
const content = [
|
||||
'## Slices',
|
||||
'',
|
||||
'Some intro text.',
|
||||
'- [x] **S01: Core** `risk:low` `depends:[]`',
|
||||
' > After this: done',
|
||||
].join('\n');
|
||||
const slices = parseSlicesFromRoadmap(content);
|
||||
assert.strictEqual(slices.length, 1);
|
||||
assert.strictEqual(slices[0].id, 'S01');
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseMilestoneTitle', () => {
|
||||
test('extracts title from first heading', () => {
|
||||
assert.strictEqual(parseMilestoneTitle('# M001: Foundation\n\nBody.'), 'Foundation');
|
||||
});
|
||||
|
||||
test('returns null when heading absent', () => {
|
||||
assert.strictEqual(parseMilestoneTitle('No heading here.'), null);
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseTaskTitle', () => {
|
||||
test('extracts title from task plan', () => {
|
||||
assert.strictEqual(parseTaskTitle('# T01: Init Project\n\nBody.', 'T01'), 'Init Project');
|
||||
});
|
||||
|
||||
test('falls back to provided default', () => {
|
||||
assert.strictEqual(parseTaskTitle('No heading.', 'T01'), 'T01');
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseTaskDescription', () => {
|
||||
test('extracts description body', () => {
|
||||
const content = [
|
||||
'# T01: Title',
|
||||
'',
|
||||
'## Description',
|
||||
'Do the thing.',
|
||||
'',
|
||||
'## Must-Haves',
|
||||
].join('\n');
|
||||
assert.strictEqual(parseTaskDescription(content), 'Do the thing.');
|
||||
});
|
||||
|
||||
test('returns empty string when section absent', () => {
|
||||
assert.strictEqual(parseTaskDescription('# T01: Title\n\nNo sections.'), '');
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseTaskMustHaves', () => {
|
||||
test('parses checked and unchecked items', () => {
|
||||
const content = [
|
||||
'## Must-Haves',
|
||||
'- [x] File exists',
|
||||
'- [ ] Tests pass',
|
||||
].join('\n');
|
||||
const mh = parseTaskMustHaves(content);
|
||||
assert.deepStrictEqual(mh, ['File exists', 'Tests pass']);
|
||||
});
|
||||
|
||||
test('returns empty array when section absent', () => {
|
||||
assert.deepStrictEqual(parseTaskMustHaves('# T01: Title\n\nNo sections.'), []);
|
||||
});
|
||||
});
|
||||
|
||||
describe('slugify', () => {
|
||||
test('lowercases and replaces non-alphanumeric with hyphens', () => {
|
||||
assert.strictEqual(slugify('Auth System'), 'auth-system');
|
||||
assert.strictEqual(slugify('My Feature (v2)'), 'my-feature-v2');
|
||||
});
|
||||
|
||||
test('strips leading/trailing hyphens', () => {
|
||||
assert.strictEqual(slugify(' spaces '), 'spaces');
|
||||
});
|
||||
});
|
||||
|
||||
describe('zeroPad', () => {
|
||||
test('pads to 2 digits by default', () => {
|
||||
assert.strictEqual(zeroPad(1), '01');
|
||||
assert.strictEqual(zeroPad(12), '12');
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Integration Tests ─────────────────────────────────────────────────────
|
||||
|
||||
describe('parseGsd2', () => {
|
||||
let tmpDir;
|
||||
beforeEach(() => { tmpDir = createTempDir('gsd2-parse-'); });
|
||||
afterEach(() => { cleanup(tmpDir); });
|
||||
|
||||
test('reads project and requirements passthroughs', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
assert.ok(data.projectContent.includes('My Project'));
|
||||
assert.ok(data.requirements.includes('R001'));
|
||||
});
|
||||
|
||||
test('parses milestone with slices', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
assert.strictEqual(data.milestones.length, 1);
|
||||
assert.strictEqual(data.milestones[0].id, 'M001');
|
||||
assert.strictEqual(data.milestones[0].title, 'Foundation');
|
||||
assert.strictEqual(data.milestones[0].slices.length, 2);
|
||||
});
|
||||
|
||||
test('marks S01 as done, S02 as not done', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const [s01, s02] = data.milestones[0].slices;
|
||||
assert.strictEqual(s01.done, true);
|
||||
assert.strictEqual(s02.done, false);
|
||||
});
|
||||
|
||||
test('reads research for completed slice', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
assert.ok(data.milestones[0].slices[0].research.includes('Some research'));
|
||||
});
|
||||
|
||||
test('reads tasks from tasks/ directory', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const tasks = data.milestones[0].slices[0].tasks;
|
||||
assert.strictEqual(tasks.length, 1);
|
||||
assert.strictEqual(tasks[0].id, 'T01');
|
||||
assert.strictEqual(tasks[0].title, 'Init Project');
|
||||
assert.strictEqual(tasks[0].done, true);
|
||||
});
|
||||
|
||||
test('parses task must-haves', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const mh = data.milestones[0].slices[0].tasks[0].mustHaves;
|
||||
assert.deepStrictEqual(mh, ['package.json exists', 'tsconfig.json exists']);
|
||||
});
|
||||
|
||||
test('handles missing .gsd/milestones/ gracefully', () => {
|
||||
const gsdDir = path.join(tmpDir, '.gsd');
|
||||
fs.mkdirSync(gsdDir, { recursive: true });
|
||||
fs.writeFileSync(path.join(gsdDir, 'PROJECT.md'), '# Empty\n');
|
||||
const data = parseGsd2(gsdDir);
|
||||
assert.strictEqual(data.milestones.length, 0);
|
||||
});
|
||||
|
||||
test('slice with no directory has empty tasks list', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
// S02 has no slice directory in the default fixture
|
||||
const s02 = data.milestones[0].slices[1];
|
||||
assert.strictEqual(s02.tasks.length, 0);
|
||||
assert.strictEqual(s02.research, null);
|
||||
});
|
||||
});
|
||||
|
||||
describe('buildPlanningArtifacts', () => {
|
||||
let tmpDir;
|
||||
beforeEach(() => { tmpDir = createTempDir('gsd2-artifacts-'); });
|
||||
afterEach(() => { cleanup(tmpDir); });
|
||||
|
||||
test('produces PROJECT.md, REQUIREMENTS.md, ROADMAP.md, STATE.md, config.json', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const artifacts = buildPlanningArtifacts(data);
|
||||
assert.ok(artifacts.has('PROJECT.md'));
|
||||
assert.ok(artifacts.has('REQUIREMENTS.md'));
|
||||
assert.ok(artifacts.has('ROADMAP.md'));
|
||||
assert.ok(artifacts.has('STATE.md'));
|
||||
assert.ok(artifacts.has('config.json'));
|
||||
});
|
||||
|
||||
test('S01 (done) maps to phase 01 with PLAN and SUMMARY', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const artifacts = buildPlanningArtifacts(data);
|
||||
assert.ok(artifacts.has('phases/01-setup/01-CONTEXT.md'));
|
||||
assert.ok(artifacts.has('phases/01-setup/01-RESEARCH.md'));
|
||||
assert.ok(artifacts.has('phases/01-setup/01-01-PLAN.md'));
|
||||
assert.ok(artifacts.has('phases/01-setup/01-01-SUMMARY.md'));
|
||||
});
|
||||
|
||||
test('S02 (pending) maps to phase 02 with only CONTEXT and PLAN', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir, { withS02Dir: true });
|
||||
const data = parseGsd2(gsdDir);
|
||||
const artifacts = buildPlanningArtifacts(data);
|
||||
assert.ok(artifacts.has('phases/02-auth-system/02-CONTEXT.md'));
|
||||
assert.ok(artifacts.has('phases/02-auth-system/02-01-PLAN.md'));
|
||||
assert.ok(!artifacts.has('phases/02-auth-system/02-01-SUMMARY.md'), 'no summary for pending task');
|
||||
});
|
||||
|
||||
test('ROADMAP.md marks S01 done, S02 pending', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const artifacts = buildPlanningArtifacts(data);
|
||||
const roadmap = artifacts.get('ROADMAP.md');
|
||||
assert.ok(roadmap.includes('[x]'));
|
||||
assert.ok(roadmap.includes('[ ]'));
|
||||
});
|
||||
|
||||
test('PLAN.md includes frontmatter with phase and plan keys', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const artifacts = buildPlanningArtifacts(data);
|
||||
const plan = artifacts.get('phases/01-setup/01-01-PLAN.md');
|
||||
assert.ok(plan.includes('phase: "01"'));
|
||||
assert.ok(plan.includes('plan: "01"'));
|
||||
assert.ok(plan.includes('type: "implementation"'));
|
||||
});
|
||||
|
||||
test('SUMMARY.md strips GSD-2 frontmatter and adds v1 frontmatter', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const artifacts = buildPlanningArtifacts(data);
|
||||
const summary = artifacts.get('phases/01-setup/01-01-SUMMARY.md');
|
||||
assert.ok(summary.includes('phase: "01"'));
|
||||
assert.ok(summary.includes('plan: "01"'));
|
||||
// GSD-2 frontmatter field should not appear
|
||||
assert.ok(!summary.includes('completed_at:'));
|
||||
// Body content should be preserved
|
||||
assert.ok(summary.includes('Init Project'));
|
||||
});
|
||||
|
||||
test('config.json is valid JSON', () => {
|
||||
const gsdDir = makeGsd2Project(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const artifacts = buildPlanningArtifacts(data);
|
||||
assert.doesNotThrow(() => JSON.parse(artifacts.get('config.json')));
|
||||
});
|
||||
|
||||
test('multi-milestone: slices numbered sequentially across milestones', () => {
|
||||
const gsdDir = makeTwoMilestoneProject(tmpDir);
|
||||
const data = parseGsd2(gsdDir);
|
||||
const artifacts = buildPlanningArtifacts(data);
|
||||
// M001/S01 → phase 01, M001/S02 → phase 02, M002/S01 → phase 03
|
||||
assert.ok(artifacts.has('phases/01-core/01-CONTEXT.md'));
|
||||
assert.ok(artifacts.has('phases/02-api/02-CONTEXT.md'));
|
||||
assert.ok(artifacts.has('phases/03-dashboard/03-CONTEXT.md'));
|
||||
});
|
||||
});
|
||||
|
||||
describe('buildRoadmapMd', () => {
|
||||
test('produces milestone sections with checked/unchecked phases', () => {
|
||||
const milestones = [{ id: 'M001', title: 'Alpha', slices: [] }];
|
||||
const phaseMap = [
|
||||
{ milestoneId: 'M001', milestoneTitle: 'Alpha', slice: { done: true, title: 'Core' }, phaseNum: 1 },
|
||||
{ milestoneId: 'M001', milestoneTitle: 'Alpha', slice: { done: false, title: 'API' }, phaseNum: 2 },
|
||||
];
|
||||
const roadmap = buildRoadmapMd(milestones, phaseMap);
|
||||
assert.ok(roadmap.includes('## M001: Alpha'));
|
||||
assert.ok(roadmap.includes('[x]'));
|
||||
assert.ok(roadmap.includes('[ ]'));
|
||||
assert.ok(roadmap.includes('Phase 01: core'));
|
||||
assert.ok(roadmap.includes('Phase 02: api'));
|
||||
});
|
||||
});
|
||||
|
||||
describe('buildStateMd', () => {
|
||||
test('sets current phase to first incomplete slice', () => {
|
||||
const phaseMap = [
|
||||
{ milestoneId: 'M001', milestoneTitle: 'Alpha', slice: { done: true, title: 'Core' }, phaseNum: 1 },
|
||||
{ milestoneId: 'M001', milestoneTitle: 'Alpha', slice: { done: false, title: 'API Layer' }, phaseNum: 2 },
|
||||
];
|
||||
const state = buildStateMd(phaseMap);
|
||||
assert.ok(state.includes('Phase: 02'));
|
||||
assert.ok(state.includes('api-layer'));
|
||||
assert.ok(state.includes('Ready to plan'));
|
||||
});
|
||||
|
||||
test('reports all complete when all slices done', () => {
|
||||
const phaseMap = [
|
||||
{ milestoneId: 'M001', milestoneTitle: 'Alpha', slice: { done: true, title: 'Core' }, phaseNum: 1 },
|
||||
];
|
||||
const state = buildStateMd(phaseMap);
|
||||
assert.ok(state.includes('All phases complete'));
|
||||
});
|
||||
});
|
||||
|
||||
// ─── CLI Integration Tests ──────────────────────────────────────────────────
|
||||
|
||||
describe('gsd-tools from-gsd2 CLI', () => {
|
||||
let tmpDir;
|
||||
beforeEach(() => { tmpDir = createTempDir('gsd2-cli-'); });
|
||||
afterEach(() => { cleanup(tmpDir); });
|
||||
|
||||
test('--dry-run returns preview without writing files', () => {
|
||||
makeGsd2Project(tmpDir);
|
||||
const result = runGsdTools(['from-gsd2', '--dry-run', '--raw'], tmpDir);
|
||||
assert.ok(result.success, result.error);
|
||||
const parsed = JSON.parse(result.output);
|
||||
assert.strictEqual(parsed.dryRun, true);
|
||||
assert.ok(parsed.preview.includes('PROJECT.md'));
|
||||
assert.ok(!fs.existsSync(path.join(tmpDir, '.planning')), 'no files written in dry-run');
|
||||
});
|
||||
|
||||
test('writes .planning/ directory with correct structure', () => {
|
||||
makeGsd2Project(tmpDir);
|
||||
const result = runGsdTools(['from-gsd2', '--raw'], tmpDir);
|
||||
assert.ok(result.success, result.error);
|
||||
const parsed = JSON.parse(result.output);
|
||||
assert.strictEqual(parsed.success, true);
|
||||
assert.ok(parsed.filesWritten > 0);
|
||||
assert.ok(fs.existsSync(path.join(tmpDir, '.planning', 'ROADMAP.md')));
|
||||
assert.ok(fs.existsSync(path.join(tmpDir, '.planning', 'STATE.md')));
|
||||
assert.ok(fs.existsSync(path.join(tmpDir, '.planning', 'PROJECT.md')));
|
||||
assert.ok(fs.existsSync(path.join(tmpDir, '.planning', 'phases', '01-setup', '01-01-PLAN.md')));
|
||||
});
|
||||
|
||||
test('errors when no .gsd/ directory present', () => {
|
||||
const result = runGsdTools(['from-gsd2', '--raw'], tmpDir);
|
||||
const parsed = JSON.parse(result.output);
|
||||
assert.strictEqual(parsed.success, false);
|
||||
assert.ok(parsed.error.includes('No .gsd/'));
|
||||
});
|
||||
|
||||
test('errors when .planning/ already exists without --force', () => {
|
||||
makeGsd2Project(tmpDir);
|
||||
fs.mkdirSync(path.join(tmpDir, '.planning'), { recursive: true });
|
||||
const result = runGsdTools(['from-gsd2', '--raw'], tmpDir);
|
||||
const parsed = JSON.parse(result.output);
|
||||
assert.strictEqual(parsed.success, false);
|
||||
assert.ok(parsed.error.includes('already exists'));
|
||||
});
|
||||
|
||||
test('--force overwrites existing .planning/', () => {
|
||||
makeGsd2Project(tmpDir);
|
||||
fs.mkdirSync(path.join(tmpDir, '.planning'), { recursive: true });
|
||||
fs.writeFileSync(path.join(tmpDir, '.planning', 'OLD.md'), 'old content');
|
||||
const result = runGsdTools(['from-gsd2', '--force', '--raw'], tmpDir);
|
||||
const parsed = JSON.parse(result.output);
|
||||
assert.strictEqual(parsed.success, true);
|
||||
assert.ok(fs.existsSync(path.join(tmpDir, '.planning', 'ROADMAP.md')));
|
||||
});
|
||||
|
||||
test('--path resolves target directory', () => {
|
||||
const projectDir = path.join(tmpDir, 'myproject');
|
||||
fs.mkdirSync(projectDir, { recursive: true });
|
||||
makeGsd2Project(projectDir);
|
||||
// Run from tmpDir but point at projectDir
|
||||
const result = runGsdTools(['from-gsd2', '--path', projectDir, '--dry-run', '--raw'], tmpDir);
|
||||
assert.ok(result.success, result.error);
|
||||
const parsed = JSON.parse(result.output);
|
||||
assert.strictEqual(parsed.dryRun, true);
|
||||
assert.ok(parsed.preview.includes('PROJECT.md'));
|
||||
});
|
||||
|
||||
test('completion state: S01 done → [x] in ROADMAP.md', () => {
|
||||
makeGsd2Project(tmpDir);
|
||||
runGsdTools(['from-gsd2', '--raw'], tmpDir);
|
||||
const roadmap = fs.readFileSync(path.join(tmpDir, '.planning', 'ROADMAP.md'), 'utf8');
|
||||
assert.ok(roadmap.includes('[x]'));
|
||||
// S02 is pending
|
||||
assert.ok(roadmap.includes('[ ]'));
|
||||
});
|
||||
|
||||
test('SUMMARY.md written for completed task, not for pending', () => {
|
||||
makeGsd2Project(tmpDir, { withS02Dir: true });
|
||||
runGsdTools(['from-gsd2', '--raw'], tmpDir);
|
||||
// S01/T01 is done → SUMMARY exists
|
||||
assert.ok(fs.existsSync(path.join(tmpDir, '.planning', 'phases', '01-setup', '01-01-SUMMARY.md')));
|
||||
// S02/T01 is pending → no SUMMARY
|
||||
assert.ok(!fs.existsSync(path.join(tmpDir, '.planning', 'phases', '02-auth-system', '02-01-SUMMARY.md')));
|
||||
});
|
||||
});
|
||||
@@ -531,4 +531,46 @@ describe('init manager', () => {
|
||||
|
||||
assert.strictEqual(output.response_language, undefined);
|
||||
});
|
||||
|
||||
test('all_complete is true when non-backlog phases are complete and 999.x exists (#2129)', () => {
|
||||
writeState(tmpDir);
|
||||
writeRoadmap(tmpDir, [
|
||||
{ number: '1', name: 'Setup', complete: true },
|
||||
{ number: '2', name: 'Core', complete: true },
|
||||
{ number: '3', name: 'Polish', complete: true },
|
||||
{ number: '999.1', name: 'Backlog idea' },
|
||||
]);
|
||||
|
||||
// Scaffold completed phases on disk
|
||||
scaffoldPhase(tmpDir, 1, { slug: 'setup', plans: 2, summaries: 2 });
|
||||
scaffoldPhase(tmpDir, 2, { slug: 'core', plans: 1, summaries: 1 });
|
||||
scaffoldPhase(tmpDir, 3, { slug: 'polish', plans: 1, summaries: 1 });
|
||||
|
||||
const result = runGsdTools('init manager', tmpDir);
|
||||
assert.ok(result.success, `Command failed: ${result.error}`);
|
||||
|
||||
const output = JSON.parse(result.output);
|
||||
assert.strictEqual(output.all_complete, true, 'all_complete should be true when only 999.x phases remain incomplete');
|
||||
});
|
||||
|
||||
test('all_complete false with incomplete non-backlog phase still produces recommended_actions (#2129)', () => {
|
||||
writeState(tmpDir);
|
||||
writeRoadmap(tmpDir, [
|
||||
{ number: '1', name: 'Setup', complete: true },
|
||||
{ number: '2', name: 'Core', complete: true },
|
||||
{ number: '3', name: 'Polish' },
|
||||
{ number: '999.1', name: 'Backlog idea' },
|
||||
]);
|
||||
|
||||
scaffoldPhase(tmpDir, 1, { slug: 'setup', plans: 1, summaries: 1 });
|
||||
scaffoldPhase(tmpDir, 2, { slug: 'core', plans: 1, summaries: 1 });
|
||||
// Phase 3 has no directory — should trigger discuss recommendation
|
||||
|
||||
const result = runGsdTools('init manager', tmpDir);
|
||||
assert.ok(result.success, `Command failed: ${result.error}`);
|
||||
|
||||
const output = JSON.parse(result.output);
|
||||
assert.strictEqual(output.all_complete, false, 'all_complete should be false with phase 3 incomplete');
|
||||
assert.ok(output.recommended_actions.length > 0, 'recommended_actions should not be empty when non-backlog phases remain');
|
||||
});
|
||||
});
|
||||
|
||||
131
tests/inline-plan-threshold.test.cjs
Normal file
131
tests/inline-plan-threshold.test.cjs
Normal file
@@ -0,0 +1,131 @@
|
||||
/**
|
||||
* Tests for workflow.inline_plan_threshold config key and routing logic (#1979).
|
||||
*
|
||||
* Verifies:
|
||||
* 1. The config key is accepted by config-set (VALID_CONFIG_KEYS contains it)
|
||||
* 2. The key is documented in planning-config.md
|
||||
* 3. The execute-plan.md routing instruction uses the correct grep pattern
|
||||
* (matches <task at any indentation, since PLAN.md templates differ)
|
||||
* 4. The workflow guards threshold=0 to disable inline routing
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const { test, describe, beforeEach, afterEach } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('node:fs');
|
||||
const path = require('node:path');
|
||||
const { runGsdTools, createTempProject, cleanup } = require('./helpers.cjs');
|
||||
|
||||
const repoRoot = path.resolve(__dirname, '..');
|
||||
const executePlanPath = path.join(repoRoot, 'get-shit-done', 'workflows', 'execute-plan.md');
|
||||
const planningConfigPath = path.join(repoRoot, 'get-shit-done', 'references', 'planning-config.md');
|
||||
const configCjsPath = path.join(repoRoot, 'get-shit-done', 'bin', 'lib', 'config.cjs');
|
||||
|
||||
describe('inline_plan_threshold config key (#1979)', () => {
|
||||
let tmpDir;
|
||||
|
||||
beforeEach(() => {
|
||||
tmpDir = createTempProject();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup(tmpDir);
|
||||
});
|
||||
|
||||
test('config-set accepts workflow.inline_plan_threshold', () => {
|
||||
const result = runGsdTools('config-set workflow.inline_plan_threshold 3', tmpDir);
|
||||
assert.ok(result.success, `config-set should accept workflow.inline_plan_threshold: ${result.error}`);
|
||||
});
|
||||
|
||||
test('config-set accepts threshold=0 to disable inline routing', () => {
|
||||
const result = runGsdTools('config-set workflow.inline_plan_threshold 0', tmpDir);
|
||||
assert.ok(result.success, `config-set should accept 0: ${result.error}`);
|
||||
});
|
||||
|
||||
test('VALID_CONFIG_KEYS in config.cjs contains workflow.inline_plan_threshold', () => {
|
||||
const content = fs.readFileSync(configCjsPath, 'utf-8');
|
||||
assert.match(
|
||||
content,
|
||||
/['"]workflow\.inline_plan_threshold['"]/,
|
||||
'workflow.inline_plan_threshold must be in VALID_CONFIG_KEYS'
|
||||
);
|
||||
});
|
||||
|
||||
test('planning-config.md documents workflow.inline_plan_threshold', () => {
|
||||
const content = fs.readFileSync(planningConfigPath, 'utf-8');
|
||||
assert.match(
|
||||
content,
|
||||
/workflow\.inline_plan_threshold/,
|
||||
'planning-config.md must document workflow.inline_plan_threshold'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('execute-plan.md routing instruction (#1979)', () => {
|
||||
test('grep pattern matches <task at any indentation level', () => {
|
||||
const content = fs.readFileSync(executePlanPath, 'utf-8');
|
||||
|
||||
// The new pattern should use \s* for leading whitespace, not ^ anchor alone
|
||||
// Must match both "<task type=" (unindented) and " <task type=" (indented)
|
||||
assert.match(
|
||||
content,
|
||||
/TASK_COUNT=\$\(grep -cE '\^\\s\*<task/,
|
||||
'grep pattern must allow any leading whitespace before <task'
|
||||
);
|
||||
});
|
||||
|
||||
test('inline routing is guarded by INLINE_THRESHOLD > 0', () => {
|
||||
const content = fs.readFileSync(executePlanPath, 'utf-8');
|
||||
assert.match(
|
||||
content,
|
||||
/INLINE_THRESHOLD\s*>\s*0.*TASK_COUNT\s*<=\s*INLINE_THRESHOLD/s,
|
||||
'inline routing must be guarded by INLINE_THRESHOLD > 0 so threshold=0 disables it'
|
||||
);
|
||||
});
|
||||
|
||||
test('grep pattern does NOT use ^<task alone (would miss indented tasks)', () => {
|
||||
const content = fs.readFileSync(executePlanPath, 'utf-8');
|
||||
// The old buggy pattern: grep -c "^<task" with no whitespace allowance
|
||||
const buggyPattern = /grep -c "\^<task"/;
|
||||
assert.doesNotMatch(
|
||||
content,
|
||||
buggyPattern,
|
||||
'must not use the buggy "^<task" pattern which misses indented tasks'
|
||||
);
|
||||
});
|
||||
|
||||
test('grep pattern matches real-world indented task formats', () => {
|
||||
// Simulate how the grep pattern would behave against sample PLAN.md content
|
||||
// Extract the pattern from execute-plan.md
|
||||
const content = fs.readFileSync(executePlanPath, 'utf-8');
|
||||
const patternMatch = content.match(/TASK_COUNT=\$\(grep -cE '([^']+)'/);
|
||||
assert.ok(patternMatch, 'must find TASK_COUNT grep pattern');
|
||||
|
||||
const regexSource = patternMatch[1].replace(/\\s/g, '\\s').replace(/\[\[:space:\]>\]/, '[\\s>]');
|
||||
const re = new RegExp(regexSource, 'gm');
|
||||
|
||||
// Test cases: should match all of these as single tasks
|
||||
const samples = [
|
||||
'<task type="auto">',
|
||||
' <task type="auto">',
|
||||
' <task type="checkpoint:decision">',
|
||||
'\t<task type="auto">',
|
||||
];
|
||||
for (const sample of samples) {
|
||||
const matches = sample.match(re);
|
||||
assert.ok(matches && matches.length > 0, `Pattern must match: ${JSON.stringify(sample)}`);
|
||||
}
|
||||
|
||||
// Non-task lines should not match
|
||||
const nonMatches = [
|
||||
'<tasks>',
|
||||
'</task>',
|
||||
'// <task comment',
|
||||
];
|
||||
for (const sample of nonMatches) {
|
||||
const matches = sample.match(re);
|
||||
assert.ok(!matches || matches.length === 0, `Pattern must NOT match: ${JSON.stringify(sample)}`);
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -29,10 +29,11 @@ const runtimeMap = {
|
||||
'9': 'gemini',
|
||||
'10': 'kilo',
|
||||
'11': 'opencode',
|
||||
'12': 'trae',
|
||||
'13': 'windsurf'
|
||||
'12': 'qwen',
|
||||
'13': 'trae',
|
||||
'14': 'windsurf'
|
||||
};
|
||||
const allRuntimes = ['claude', 'antigravity', 'augment', 'cline', 'codebuddy', 'codex', 'copilot', 'cursor', 'gemini', 'kilo', 'opencode', 'trae', 'windsurf'];
|
||||
const allRuntimes = ['claude', 'antigravity', 'augment', 'cline', 'codebuddy', 'codex', 'copilot', 'cursor', 'gemini', 'kilo', 'opencode', 'qwen', 'trae', 'windsurf'];
|
||||
|
||||
/**
|
||||
* Simulate the parsing logic from promptRuntime without requiring readline.
|
||||
@@ -41,7 +42,7 @@ const allRuntimes = ['claude', 'antigravity', 'augment', 'cline', 'codebuddy', '
|
||||
function parseRuntimeInput(input) {
|
||||
input = input.trim() || '1';
|
||||
|
||||
if (input === '14') {
|
||||
if (input === '15') {
|
||||
return allRuntimes;
|
||||
}
|
||||
|
||||
@@ -89,16 +90,20 @@ describe('multi-runtime selection parsing', () => {
|
||||
assert.deepStrictEqual(parseRuntimeInput('11'), ['opencode']);
|
||||
});
|
||||
|
||||
test('single choice for qwen', () => {
|
||||
assert.deepStrictEqual(parseRuntimeInput('12'), ['qwen']);
|
||||
});
|
||||
|
||||
test('single choice for trae', () => {
|
||||
assert.deepStrictEqual(parseRuntimeInput('12'), ['trae']);
|
||||
assert.deepStrictEqual(parseRuntimeInput('13'), ['trae']);
|
||||
});
|
||||
|
||||
test('single choice for windsurf', () => {
|
||||
assert.deepStrictEqual(parseRuntimeInput('13'), ['windsurf']);
|
||||
assert.deepStrictEqual(parseRuntimeInput('14'), ['windsurf']);
|
||||
});
|
||||
|
||||
test('choice 14 returns all runtimes', () => {
|
||||
assert.deepStrictEqual(parseRuntimeInput('14'), allRuntimes);
|
||||
test('choice 15 returns all runtimes', () => {
|
||||
assert.deepStrictEqual(parseRuntimeInput('15'), allRuntimes);
|
||||
});
|
||||
|
||||
test('empty input defaults to claude', () => {
|
||||
@@ -107,13 +112,13 @@ describe('multi-runtime selection parsing', () => {
|
||||
});
|
||||
|
||||
test('invalid choices are ignored, falls back to claude if all invalid', () => {
|
||||
assert.deepStrictEqual(parseRuntimeInput('15'), ['claude']);
|
||||
assert.deepStrictEqual(parseRuntimeInput('16'), ['claude']);
|
||||
assert.deepStrictEqual(parseRuntimeInput('0'), ['claude']);
|
||||
assert.deepStrictEqual(parseRuntimeInput('abc'), ['claude']);
|
||||
});
|
||||
|
||||
test('invalid choices mixed with valid are filtered out', () => {
|
||||
assert.deepStrictEqual(parseRuntimeInput('1,15,7'), ['claude', 'copilot']);
|
||||
assert.deepStrictEqual(parseRuntimeInput('1,16,7'), ['claude', 'copilot']);
|
||||
assert.deepStrictEqual(parseRuntimeInput('abc 3 xyz'), ['augment']);
|
||||
});
|
||||
|
||||
@@ -129,7 +134,7 @@ describe('multi-runtime selection parsing', () => {
|
||||
});
|
||||
|
||||
describe('install.js source contains multi-select support', () => {
|
||||
test('runtimeMap is defined with all 13 runtimes', () => {
|
||||
test('runtimeMap is defined with all 14 runtimes', () => {
|
||||
for (const [key, name] of Object.entries(runtimeMap)) {
|
||||
assert.ok(
|
||||
installSrc.includes(`'${key}': '${name}'`),
|
||||
@@ -146,21 +151,25 @@ describe('install.js source contains multi-select support', () => {
|
||||
}
|
||||
});
|
||||
|
||||
test('all shortcut uses option 14', () => {
|
||||
test('all shortcut uses option 15', () => {
|
||||
assert.ok(
|
||||
installSrc.includes("if (input === '14')"),
|
||||
'all shortcut uses option 14'
|
||||
installSrc.includes("if (input === '15')"),
|
||||
'all shortcut uses option 15'
|
||||
);
|
||||
});
|
||||
|
||||
test('prompt lists Trae as option 12 and All as option 14', () => {
|
||||
test('prompt lists Qwen Code as option 12, Trae as option 13 and All as option 15', () => {
|
||||
assert.ok(
|
||||
installSrc.includes('12${reset}) Trae'),
|
||||
'prompt lists Trae as option 12'
|
||||
installSrc.includes('12${reset}) Qwen Code'),
|
||||
'prompt lists Qwen Code as option 12'
|
||||
);
|
||||
assert.ok(
|
||||
installSrc.includes('14${reset}) All'),
|
||||
'prompt lists All as option 14'
|
||||
installSrc.includes('13${reset}) Trae'),
|
||||
'prompt lists Trae as option 13'
|
||||
);
|
||||
assert.ok(
|
||||
installSrc.includes('15${reset}) All'),
|
||||
'prompt lists All as option 15'
|
||||
);
|
||||
});
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/**
|
||||
* GSD Tools Tests - /gsd-next safety gates and consecutive-call guard
|
||||
* GSD Tools Tests - /gsd-next safety gates and prior-phase completeness scan
|
||||
*
|
||||
* Validates that the next workflow includes three hard-stop safety gates
|
||||
* (checkpoint, error state, verification), a consecutive-call budget guard,
|
||||
* and a --force bypass flag.
|
||||
* (checkpoint, error state, verification), a prior-phase completeness scan
|
||||
* replacing the old consecutive-call counter, and a --force bypass flag.
|
||||
*
|
||||
* Closes: #1732
|
||||
* Closes: #1732, #2089
|
||||
*/
|
||||
|
||||
const { test, describe } = require('node:test');
|
||||
@@ -13,7 +13,7 @@ const assert = require('node:assert/strict');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
describe('/gsd-next safety gates (#1732)', () => {
|
||||
describe('/gsd-next safety gates (#1732, #2089)', () => {
|
||||
const workflowPath = path.join(__dirname, '..', 'get-shit-done', 'workflows', 'next.md');
|
||||
const commandPath = path.join(__dirname, '..', 'commands', 'gsd', 'next.md');
|
||||
|
||||
@@ -79,19 +79,72 @@ describe('/gsd-next safety gates (#1732)', () => {
|
||||
);
|
||||
});
|
||||
|
||||
test('consecutive-call budget guard', () => {
|
||||
test('prior-phase completeness scan replaces consecutive-call counter', () => {
|
||||
const content = fs.readFileSync(workflowPath, 'utf8');
|
||||
assert.ok(
|
||||
content.includes('.next-call-count'),
|
||||
'workflow should reference .next-call-count counter file'
|
||||
content.includes('Prior-phase completeness scan'),
|
||||
'workflow should have a prior-phase completeness scan section'
|
||||
);
|
||||
assert.ok(
|
||||
content.includes('6'),
|
||||
'consecutive guard should trigger at count >= 6'
|
||||
!content.includes('.next-call-count'),
|
||||
'workflow must not reference the old .next-call-count counter file'
|
||||
);
|
||||
assert.ok(
|
||||
content.includes('consecutively'),
|
||||
'guard should mention consecutive calls'
|
||||
!content.includes('consecutively'),
|
||||
'workflow must not reference consecutive call counting'
|
||||
);
|
||||
});
|
||||
|
||||
test('completeness scan checks plans without summaries', () => {
|
||||
const content = fs.readFileSync(workflowPath, 'utf8');
|
||||
assert.ok(
|
||||
content.includes('Plans without summaries') || content.includes('no SUMMARY.md'),
|
||||
'completeness scan should detect plans that ran without producing summaries'
|
||||
);
|
||||
});
|
||||
|
||||
test('completeness scan checks verification failures in prior phases', () => {
|
||||
const content = fs.readFileSync(workflowPath, 'utf8');
|
||||
assert.ok(
|
||||
content.includes('Verification failures not overridden') ||
|
||||
content.includes('VERIFICATION.md with `FAIL`'),
|
||||
'completeness scan should detect unoverridden FAIL items in prior phase VERIFICATION.md'
|
||||
);
|
||||
});
|
||||
|
||||
test('completeness scan checks CONTEXT.md without plans', () => {
|
||||
const content = fs.readFileSync(workflowPath, 'utf8');
|
||||
assert.ok(
|
||||
content.includes('CONTEXT.md without plans') ||
|
||||
content.includes('CONTEXT.md but no PLAN.md'),
|
||||
'completeness scan should detect phases with discussion but no planning'
|
||||
);
|
||||
});
|
||||
|
||||
test('completeness scan offers Continue, Stop, and Force options', () => {
|
||||
const content = fs.readFileSync(workflowPath, 'utf8');
|
||||
assert.ok(content.includes('[C]'), 'completeness scan should offer [C] Continue option');
|
||||
assert.ok(content.includes('[S]'), 'completeness scan should offer [S] Stop option');
|
||||
assert.ok(content.includes('[F]'), 'completeness scan should offer [F] Force option');
|
||||
});
|
||||
|
||||
test('deferral path creates backlog entry using 999.x scheme', () => {
|
||||
const content = fs.readFileSync(workflowPath, 'utf8');
|
||||
assert.ok(
|
||||
content.includes('999.'),
|
||||
'deferral should use the 999.x backlog numbering scheme'
|
||||
);
|
||||
assert.ok(
|
||||
content.includes('Backlog') || content.includes('BACKLOG'),
|
||||
'deferral should write to the Backlog section of ROADMAP.md'
|
||||
);
|
||||
});
|
||||
|
||||
test('clean prior phases route silently with no interruption', () => {
|
||||
const content = fs.readFileSync(workflowPath, 'utf8');
|
||||
assert.ok(
|
||||
content.includes('silently') || content.includes('no interruption'),
|
||||
'workflow should route without interruption when prior phases are clean'
|
||||
);
|
||||
});
|
||||
|
||||
@@ -107,7 +160,7 @@ describe('/gsd-next safety gates (#1732)', () => {
|
||||
);
|
||||
});
|
||||
|
||||
test('command definition documents --force flag', () => {
|
||||
test('command definition documents --force flag and completeness scan', () => {
|
||||
const content = fs.readFileSync(commandPath, 'utf8');
|
||||
assert.ok(
|
||||
content.includes('--force'),
|
||||
@@ -117,6 +170,10 @@ describe('/gsd-next safety gates (#1732)', () => {
|
||||
content.includes('bypass safety gates'),
|
||||
'command definition should explain that --force bypasses safety gates'
|
||||
);
|
||||
assert.ok(
|
||||
content.includes('completeness'),
|
||||
'command definition should document the prior-phase completeness scan'
|
||||
);
|
||||
});
|
||||
|
||||
test('gates exit on first hit', () => {
|
||||
|
||||
111
tests/pattern-mapper.test.cjs
Normal file
111
tests/pattern-mapper.test.cjs
Normal file
@@ -0,0 +1,111 @@
|
||||
/**
|
||||
* Tests for Pattern Mapper feature (#1861)
|
||||
*
|
||||
* Covers:
|
||||
* - Config key workflow.pattern_mapper in VALID_CONFIG_KEYS
|
||||
* - Default value is true
|
||||
* - Config round-trip (set/get)
|
||||
* - init plan-phase output includes patterns_path (null when missing, path when present)
|
||||
*/
|
||||
|
||||
const { describe, test, beforeEach, afterEach } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { runGsdTools, createTempProject, cleanup } = require('./helpers.cjs');
|
||||
|
||||
describe('pattern-mapper config key', () => {
|
||||
let tmpDir;
|
||||
|
||||
beforeEach(() => {
|
||||
tmpDir = createTempProject();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup(tmpDir);
|
||||
});
|
||||
|
||||
test('workflow.pattern_mapper is a valid config key', () => {
|
||||
// Setting an invalid key produces an error; a valid key succeeds
|
||||
const result = runGsdTools('config-set workflow.pattern_mapper true', tmpDir, { HOME: tmpDir });
|
||||
assert.ok(result.success, `Expected success but got error: ${result.error}`);
|
||||
});
|
||||
|
||||
test('default value is true in CONFIG_DEFAULTS', () => {
|
||||
// Create a new project config and verify the default
|
||||
const result = runGsdTools('config-new-project', tmpDir, { HOME: tmpDir });
|
||||
assert.ok(result.success, `config-new-project failed: ${result.error}`);
|
||||
|
||||
const configPath = path.join(tmpDir, '.planning', 'config.json');
|
||||
const config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
|
||||
assert.strictEqual(config.workflow.pattern_mapper, true);
|
||||
});
|
||||
|
||||
test('config round-trip set/get', () => {
|
||||
// Ensure config exists first
|
||||
runGsdTools('config-new-project', tmpDir, { HOME: tmpDir });
|
||||
|
||||
// Set to false
|
||||
const setResult = runGsdTools('config-set workflow.pattern_mapper false', tmpDir, { HOME: tmpDir });
|
||||
assert.ok(setResult.success, `config-set failed: ${setResult.error}`);
|
||||
|
||||
// Get should return false
|
||||
const getResult = runGsdTools('config-get workflow.pattern_mapper', tmpDir, { HOME: tmpDir });
|
||||
assert.ok(getResult.success, `config-get failed: ${getResult.error}`);
|
||||
assert.strictEqual(getResult.output, 'false');
|
||||
});
|
||||
});
|
||||
|
||||
describe('init plan-phase patterns_path', () => {
|
||||
let tmpDir;
|
||||
|
||||
beforeEach(() => {
|
||||
tmpDir = createTempProject();
|
||||
// Create minimal planning structure for init plan-phase
|
||||
const planningDir = path.join(tmpDir, '.planning');
|
||||
fs.writeFileSync(path.join(planningDir, 'STATE.md'), [
|
||||
'# State',
|
||||
'',
|
||||
'## Current Phase',
|
||||
'Phase 1 — Foundation',
|
||||
].join('\n'));
|
||||
fs.writeFileSync(path.join(planningDir, 'ROADMAP.md'), [
|
||||
'# Roadmap',
|
||||
'',
|
||||
'## Phase 1: Foundation',
|
||||
'Build the foundation.',
|
||||
'**Status:** Planning',
|
||||
'**Requirements:** [FOUND-01]',
|
||||
].join('\n'));
|
||||
|
||||
// Create phase directory
|
||||
const phaseDir = path.join(planningDir, 'phases', '01-foundation');
|
||||
fs.mkdirSync(phaseDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup(tmpDir);
|
||||
});
|
||||
|
||||
test('patterns_path is null when no PATTERNS.md exists', () => {
|
||||
const result = runGsdTools('init plan-phase 1', tmpDir, { HOME: tmpDir });
|
||||
assert.ok(result.success, `init plan-phase failed: ${result.error}`);
|
||||
|
||||
const data = JSON.parse(result.output);
|
||||
assert.strictEqual(data.patterns_path, null);
|
||||
});
|
||||
|
||||
test('patterns_path contains correct path when PATTERNS.md exists', () => {
|
||||
// Create a PATTERNS.md in the phase directory
|
||||
const phaseDir = path.join(tmpDir, '.planning', 'phases', '01-foundation');
|
||||
fs.writeFileSync(path.join(phaseDir, '01-PATTERNS.md'), '# Patterns\n');
|
||||
|
||||
const result = runGsdTools('init plan-phase 1', tmpDir, { HOME: tmpDir });
|
||||
assert.ok(result.success, `init plan-phase failed: ${result.error}`);
|
||||
|
||||
const data = JSON.parse(result.output);
|
||||
assert.ok(data.patterns_path, 'patterns_path should not be null');
|
||||
assert.ok(data.patterns_path.includes('PATTERNS.md'), `Expected path to contain PATTERNS.md, got: ${data.patterns_path}`);
|
||||
assert.ok(data.patterns_path.includes('01-foundation'), `Expected path to include phase dir, got: ${data.patterns_path}`);
|
||||
});
|
||||
});
|
||||
173
tests/phase-complete-auto-prune.test.cjs
Normal file
173
tests/phase-complete-auto-prune.test.cjs
Normal file
@@ -0,0 +1,173 @@
|
||||
/**
|
||||
* Integration tests for auto-prune on phase completion (#2087).
|
||||
*
|
||||
* When config `workflow.auto_prune_state` is true, `phase complete`
|
||||
* should automatically prune STATE.md as part of the phase transition.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const { test, describe, beforeEach, afterEach } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('node:fs');
|
||||
const path = require('node:path');
|
||||
const { runGsdTools, createTempProject, cleanup } = require('./helpers.cjs');
|
||||
|
||||
function writeConfig(tmpDir, config) {
|
||||
fs.writeFileSync(path.join(tmpDir, '.planning', 'config.json'), JSON.stringify(config, null, 2));
|
||||
}
|
||||
|
||||
function writeStateMd(tmpDir, content) {
|
||||
fs.writeFileSync(path.join(tmpDir, '.planning', 'STATE.md'), content);
|
||||
}
|
||||
|
||||
function readStateMd(tmpDir) {
|
||||
return fs.readFileSync(path.join(tmpDir, '.planning', 'STATE.md'), 'utf-8');
|
||||
}
|
||||
|
||||
function writeRoadmap(tmpDir, content) {
|
||||
fs.writeFileSync(path.join(tmpDir, '.planning', 'ROADMAP.md'), content);
|
||||
}
|
||||
|
||||
function setupPhase(tmpDir, phaseNum, planCount) {
|
||||
const phasesDir = path.join(tmpDir, '.planning', 'phases');
|
||||
const phaseDir = path.join(phasesDir, `${String(phaseNum).padStart(2, '0')}-test-phase`);
|
||||
fs.mkdirSync(phaseDir, { recursive: true });
|
||||
|
||||
for (let i = 1; i <= planCount; i++) {
|
||||
const planId = `${String(phaseNum).padStart(2, '0')}-${String(i).padStart(2, '0')}`;
|
||||
fs.writeFileSync(path.join(phaseDir, `${planId}-PLAN.md`), `# Plan ${planId}\n`);
|
||||
fs.writeFileSync(path.join(phaseDir, `${planId}-SUMMARY.md`), `# Summary ${planId}\n`);
|
||||
}
|
||||
}
|
||||
|
||||
describe('phase complete auto-prune (#2087)', () => {
|
||||
let tmpDir;
|
||||
|
||||
beforeEach(() => {
|
||||
tmpDir = createTempProject();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup(tmpDir);
|
||||
});
|
||||
|
||||
test('prunes STATE.md automatically when auto_prune_state is true', () => {
|
||||
writeConfig(tmpDir, {
|
||||
workflow: { auto_prune_state: true },
|
||||
});
|
||||
|
||||
writeStateMd(tmpDir, [
|
||||
'# Session State',
|
||||
'',
|
||||
'**Current Phase:** 6',
|
||||
'**Status:** Executing',
|
||||
'',
|
||||
'## Decisions',
|
||||
'',
|
||||
'- [Phase 1]: Old decision from phase 1',
|
||||
'- [Phase 2]: Old decision from phase 2',
|
||||
'- [Phase 5]: Recent decision',
|
||||
'- [Phase 6]: Current decision',
|
||||
'',
|
||||
].join('\n'));
|
||||
|
||||
writeRoadmap(tmpDir, [
|
||||
'# Roadmap',
|
||||
'',
|
||||
'## Phase 6: Test Phase',
|
||||
'',
|
||||
'**Plans:** 0/2',
|
||||
'',
|
||||
].join('\n'));
|
||||
|
||||
setupPhase(tmpDir, 6, 2);
|
||||
|
||||
const result = runGsdTools('phase complete 6', tmpDir);
|
||||
assert.ok(result.success, `Command failed: ${result.error}`);
|
||||
|
||||
const newState = readStateMd(tmpDir);
|
||||
// With keep-recent=3 (default), cutoff = 6-3 = 3
|
||||
// Phase 1 and 2 decisions should be pruned
|
||||
assert.doesNotMatch(newState, /\[Phase 1\]: Old decision/);
|
||||
assert.doesNotMatch(newState, /\[Phase 2\]: Old decision/);
|
||||
// Phase 5 and 6 should remain
|
||||
assert.match(newState, /\[Phase 5\]: Recent decision/);
|
||||
assert.match(newState, /\[Phase 6\]: Current decision/);
|
||||
});
|
||||
|
||||
test('does NOT prune when auto_prune_state is false (default)', () => {
|
||||
writeConfig(tmpDir, {
|
||||
workflow: { auto_prune_state: false },
|
||||
});
|
||||
|
||||
writeStateMd(tmpDir, [
|
||||
'# Session State',
|
||||
'',
|
||||
'**Current Phase:** 6',
|
||||
'**Status:** Executing',
|
||||
'',
|
||||
'## Decisions',
|
||||
'',
|
||||
'- [Phase 1]: Old decision from phase 1',
|
||||
'- [Phase 5]: Recent decision',
|
||||
'- [Phase 6]: Current decision',
|
||||
'',
|
||||
].join('\n'));
|
||||
|
||||
writeRoadmap(tmpDir, [
|
||||
'# Roadmap',
|
||||
'',
|
||||
'## Phase 6: Test Phase',
|
||||
'',
|
||||
'**Plans:** 0/2',
|
||||
'',
|
||||
].join('\n'));
|
||||
|
||||
setupPhase(tmpDir, 6, 2);
|
||||
|
||||
const result = runGsdTools('phase complete 6', tmpDir);
|
||||
assert.ok(result.success, `Command failed: ${result.error}`);
|
||||
|
||||
const newState = readStateMd(tmpDir);
|
||||
// Phase 1 decision should still be present (no pruning)
|
||||
assert.match(newState, /\[Phase 1\]: Old decision/);
|
||||
});
|
||||
|
||||
test('does NOT prune when auto_prune_state is absent from config', () => {
|
||||
writeConfig(tmpDir, {
|
||||
workflow: {},
|
||||
});
|
||||
|
||||
writeStateMd(tmpDir, [
|
||||
'# Session State',
|
||||
'',
|
||||
'**Current Phase:** 6',
|
||||
'**Status:** Executing',
|
||||
'',
|
||||
'## Decisions',
|
||||
'',
|
||||
'- [Phase 1]: Old decision from phase 1',
|
||||
'- [Phase 6]: Current decision',
|
||||
'',
|
||||
].join('\n'));
|
||||
|
||||
writeRoadmap(tmpDir, [
|
||||
'# Roadmap',
|
||||
'',
|
||||
'## Phase 6: Test Phase',
|
||||
'',
|
||||
'**Plans:** 0/2',
|
||||
'',
|
||||
].join('\n'));
|
||||
|
||||
setupPhase(tmpDir, 6, 2);
|
||||
|
||||
const result = runGsdTools('phase complete 6', tmpDir);
|
||||
assert.ok(result.success, `Command failed: ${result.error}`);
|
||||
|
||||
const newState = readStateMd(tmpDir);
|
||||
// Should not prune — absent means disabled (default: false)
|
||||
assert.match(newState, /\[Phase 1\]: Old decision/);
|
||||
});
|
||||
});
|
||||
177
tests/phase-researcher-app-aware.test.cjs
Normal file
177
tests/phase-researcher-app-aware.test.cjs
Normal file
@@ -0,0 +1,177 @@
|
||||
/**
|
||||
* Phase Researcher Application-Aware Tests (#1988)
|
||||
*
|
||||
* Validates that gsd-phase-researcher maps capabilities to architectural
|
||||
* tiers before diving into framework-specific research. Also validates
|
||||
* that gsd-planner and gsd-plan-checker consume the Architectural
|
||||
* Responsibility Map downstream.
|
||||
*/
|
||||
|
||||
const { test, describe } = require('node:test');
|
||||
const assert = require('node:assert/strict');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const AGENTS_DIR = path.join(__dirname, '..', 'agents');
|
||||
const TEMPLATES_DIR = path.join(__dirname, '..', 'get-shit-done', 'templates');
|
||||
|
||||
// ─── Phase Researcher: Architectural Responsibility Mapping ─────────────────
|
||||
|
||||
describe('phase-researcher: Architectural Responsibility Mapping', () => {
|
||||
const researcherPath = path.join(AGENTS_DIR, 'gsd-phase-researcher.md');
|
||||
const content = fs.readFileSync(researcherPath, 'utf-8');
|
||||
|
||||
test('contains Architectural Responsibility Mapping step', () => {
|
||||
assert.ok(
|
||||
content.includes('Architectural Responsibility Map'),
|
||||
'gsd-phase-researcher.md must contain "Architectural Responsibility Map"'
|
||||
);
|
||||
});
|
||||
|
||||
test('Architectural Responsibility Mapping step comes after Step 1 and before Step 2', () => {
|
||||
const step1Pos = content.indexOf('## Step 1:');
|
||||
// Look for the step heading specifically (not the output format section)
|
||||
const stepARMPos = content.indexOf('## Step 1.5:');
|
||||
const step2Pos = content.indexOf('## Step 2:');
|
||||
|
||||
assert.ok(step1Pos !== -1, 'Step 1 must exist');
|
||||
assert.ok(stepARMPos !== -1, 'Step 1.5 Architectural Responsibility Mapping step must exist');
|
||||
assert.ok(step2Pos !== -1, 'Step 2 must exist');
|
||||
|
||||
assert.ok(
|
||||
stepARMPos > step1Pos,
|
||||
'Step 1.5 (Architectural Responsibility Mapping) must come after Step 1'
|
||||
);
|
||||
assert.ok(
|
||||
stepARMPos < step2Pos,
|
||||
'Step 1.5 (Architectural Responsibility Mapping) must come before Step 2'
|
||||
);
|
||||
});
|
||||
|
||||
test('step is a pure reasoning step with no tool calls', () => {
|
||||
// Extract the ARM section content (between the ARM heading and the next ## Step heading)
|
||||
const armHeadingMatch = content.match(/## Step 1\.5[^\n]*Architectural Responsibility Map/);
|
||||
assert.ok(armHeadingMatch, 'Must have a Step 1.5 heading for Architectural Responsibility Mapping');
|
||||
|
||||
const armStart = content.indexOf(armHeadingMatch[0]);
|
||||
const nextStepMatch = content.indexOf('## Step 2:', armStart);
|
||||
const armSection = content.substring(armStart, nextStepMatch);
|
||||
|
||||
// Should not contain tool invocation patterns
|
||||
const toolPatterns = [
|
||||
/```bash/,
|
||||
/node "\$HOME/,
|
||||
/gsd-tools\.cjs/,
|
||||
/WebSearch/,
|
||||
/Context7/,
|
||||
/mcp__/,
|
||||
];
|
||||
|
||||
for (const pattern of toolPatterns) {
|
||||
assert.ok(
|
||||
!pattern.test(armSection),
|
||||
`Architectural Responsibility Mapping step must be pure reasoning (no tool calls), but found: ${pattern}`
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
test('mentions standard architectural tiers', () => {
|
||||
const armStart = content.indexOf('Architectural Responsibility Map');
|
||||
const nextStep = content.indexOf('## Step 2:', armStart);
|
||||
const armSection = content.substring(armStart, nextStep);
|
||||
|
||||
// Should reference standard tiers
|
||||
const tiers = ['browser', 'frontend', 'API', 'database'];
|
||||
const foundTiers = tiers.filter(tier =>
|
||||
armSection.toLowerCase().includes(tier.toLowerCase())
|
||||
);
|
||||
|
||||
assert.ok(
|
||||
foundTiers.length >= 3,
|
||||
`Must mention at least 3 standard architectural tiers, found: ${foundTiers.join(', ')}`
|
||||
);
|
||||
});
|
||||
|
||||
test('specifies output format as a table in RESEARCH.md', () => {
|
||||
const armStart = content.indexOf('Architectural Responsibility Map');
|
||||
const nextStep = content.indexOf('## Step 2:', armStart);
|
||||
const armSection = content.substring(armStart, nextStep);
|
||||
|
||||
assert.ok(
|
||||
armSection.includes('|') && armSection.includes('Capability'),
|
||||
'ARM step must specify a table output format with Capability column'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Planner: Architectural Responsibility Map Sanity Check ─────────────────
|
||||
|
||||
describe('planner: Architectural Responsibility Map sanity check', () => {
|
||||
const plannerPath = path.join(AGENTS_DIR, 'gsd-planner.md');
|
||||
const content = fs.readFileSync(plannerPath, 'utf-8');
|
||||
|
||||
test('references Architectural Responsibility Map', () => {
|
||||
assert.ok(
|
||||
content.includes('Architectural Responsibility Map'),
|
||||
'gsd-planner.md must reference the Architectural Responsibility Map'
|
||||
);
|
||||
});
|
||||
|
||||
test('includes sanity check against the map', () => {
|
||||
// Must mention checking/verifying plan tasks against the responsibility map
|
||||
assert.ok(
|
||||
content.includes('sanity check') || content.includes('sanity-check'),
|
||||
'gsd-planner.md must include a sanity check against the Architectural Responsibility Map'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Plan Checker: Architectural Tier Verification Dimension ────────────────
|
||||
|
||||
describe('plan-checker: Architectural Tier verification dimension', () => {
|
||||
const checkerPath = path.join(AGENTS_DIR, 'gsd-plan-checker.md');
|
||||
const content = fs.readFileSync(checkerPath, 'utf-8');
|
||||
|
||||
test('has verification dimension for architectural tier', () => {
|
||||
assert.ok(
|
||||
content.includes('Architectural Responsibility Map') ||
|
||||
content.includes('Architectural Tier'),
|
||||
'gsd-plan-checker.md must have a verification dimension for architectural tier mapping'
|
||||
);
|
||||
});
|
||||
|
||||
test('verification dimension checks plans against the map', () => {
|
||||
// Should have a dimension that references tier/responsibility checking
|
||||
assert.ok(
|
||||
content.includes('tier owner') || content.includes('tier mismatch') || content.includes('responsibility map'),
|
||||
'plan-checker verification dimension must check for tier mismatches against the responsibility map'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Research Template: Architectural Responsibility Map Section ─────────────
|
||||
|
||||
describe('research template: Architectural Responsibility Map section', () => {
|
||||
const templatePath = path.join(TEMPLATES_DIR, 'research.md');
|
||||
const content = fs.readFileSync(templatePath, 'utf-8');
|
||||
|
||||
test('mentions Architectural Responsibility Map section', () => {
|
||||
assert.ok(
|
||||
content.includes('Architectural Responsibility Map'),
|
||||
'Research template must include an Architectural Responsibility Map section'
|
||||
);
|
||||
});
|
||||
|
||||
test('template includes tier table format', () => {
|
||||
const armStart = content.indexOf('Architectural Responsibility Map');
|
||||
assert.ok(armStart !== -1, 'ARM section must exist');
|
||||
|
||||
const sectionEnd = content.indexOf('##', armStart + 10);
|
||||
const section = content.substring(armStart, sectionEnd !== -1 ? sectionEnd : armStart + 500);
|
||||
|
||||
assert.ok(
|
||||
section.includes('|') && (section.includes('Tier') || section.includes('tier')),
|
||||
'Research template ARM section must include a table format with Tier column'
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -2330,6 +2330,83 @@ describe('phase complete updates Performance Metrics', () => {
|
||||
});
|
||||
});
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// phase complete — backlog phase (999.x) exclusion (#2129)
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe('phase complete excludes 999.x backlog from next-phase (#2129)', () => {
|
||||
let tmpDir;
|
||||
|
||||
beforeEach(() => {
|
||||
tmpDir = createTempProject();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup(tmpDir);
|
||||
});
|
||||
|
||||
test('next phase skips 999.x backlog dirs and falls back to roadmap', () => {
|
||||
// ROADMAP defines phases 1, 2, 3 and a backlog 999.1
|
||||
fs.writeFileSync(
|
||||
path.join(tmpDir, '.planning', 'ROADMAP.md'),
|
||||
[
|
||||
'# Roadmap',
|
||||
'',
|
||||
'- [ ] Phase 1: Setup',
|
||||
'- [ ] Phase 2: Core',
|
||||
'- [ ] Phase 3: Polish',
|
||||
'- [ ] Phase 999.1: Backlog idea',
|
||||
'',
|
||||
'### Phase 1: Setup',
|
||||
'**Goal:** Initial setup',
|
||||
'',
|
||||
'### Phase 2: Core',
|
||||
'**Goal:** Build core',
|
||||
'',
|
||||
'### Phase 3: Polish',
|
||||
'**Goal:** Polish everything',
|
||||
'',
|
||||
'### Phase 999.1: Backlog idea',
|
||||
'**Goal:** Parked idea',
|
||||
].join('\n')
|
||||
);
|
||||
fs.writeFileSync(
|
||||
path.join(tmpDir, '.planning', 'STATE.md'),
|
||||
[
|
||||
'# State',
|
||||
'',
|
||||
'**Current Phase:** 02',
|
||||
'**Status:** In progress',
|
||||
'**Current Plan:** 02-01',
|
||||
'**Last Activity:** 2025-01-01',
|
||||
'**Last Activity Description:** Working',
|
||||
].join('\n')
|
||||
);
|
||||
|
||||
// Phase 1 and 2 exist on disk, phase 3 does NOT exist yet, 999.1 DOES exist
|
||||
const p1 = path.join(tmpDir, '.planning', 'phases', '01-setup');
|
||||
fs.mkdirSync(p1, { recursive: true });
|
||||
fs.writeFileSync(path.join(p1, '01-01-PLAN.md'), '# Plan');
|
||||
fs.writeFileSync(path.join(p1, '01-01-SUMMARY.md'), '# Summary');
|
||||
|
||||
const p2 = path.join(tmpDir, '.planning', 'phases', '02-core');
|
||||
fs.mkdirSync(p2, { recursive: true });
|
||||
fs.writeFileSync(path.join(p2, '02-01-PLAN.md'), '# Plan');
|
||||
fs.writeFileSync(path.join(p2, '02-01-SUMMARY.md'), '# Summary');
|
||||
|
||||
// Backlog stub on disk — this is what triggers the bug
|
||||
fs.mkdirSync(path.join(tmpDir, '.planning', 'phases', '999.1-backlog-idea'), { recursive: true });
|
||||
|
||||
const result = runGsdTools('phase complete 2', tmpDir);
|
||||
assert.ok(result.success, `Command failed: ${result.error}`);
|
||||
|
||||
const output = JSON.parse(result.output);
|
||||
// Should find phase 3 from roadmap, NOT 999.1 from filesystem
|
||||
assert.strictEqual(output.next_phase, '3', 'next_phase should be 3, not 999.1');
|
||||
assert.strictEqual(output.is_last_phase, false, 'should not be last phase');
|
||||
});
|
||||
});
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// milestone complete command
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user