Normalize line endings (CRLF → LF for WSL)

All files converted to Unix line endings (LF) for consistent
cross-platform development in WSL environment.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
scrossle
2026-02-01 07:30:30 -08:00
parent e4327e4789
commit 2639f3660c
18 changed files with 4319 additions and 4319 deletions

View File

@@ -1,13 +1,13 @@
{
"name": "claude-subconscious",
"owner": {
"name": "Letta",
"url": "https://letta.com"
},
"plugins": [
{
"name": "claude-subconscious",
"source": "./"
}
]
}
{
"name": "claude-subconscious",
"owner": {
"name": "Letta",
"url": "https://letta.com"
},
"plugins": [
{
"name": "claude-subconscious",
"source": "./"
}
]
}

View File

@@ -1,20 +1,20 @@
{
"name": "claude-subconscious",
"version": "1.1.0",
"description": "A subconscious for Claude Code. A Letta agent watches your sessions, accumulates context, and whispers guidance back.",
"author": {
"name": "Letta",
"url": "https://letta.com"
},
"homepage": "https://github.com/letta-ai/claude-subconscious",
"repository": "https://github.com/letta-ai/claude-subconscious",
"license": "MIT",
"keywords": [
"letta",
"subconscious",
"memory",
"multi-agent",
"async",
"deliberation"
]
}
{
"name": "claude-subconscious",
"version": "1.1.0",
"description": "A subconscious for Claude Code. A Letta agent watches your sessions, accumulates context, and whispers guidance back.",
"author": {
"name": "Letta",
"url": "https://letta.com"
},
"homepage": "https://github.com/letta-ai/claude-subconscious",
"repository": "https://github.com/letta-ai/claude-subconscious",
"license": "MIT",
"keywords": [
"letta",
"subconscious",
"memory",
"multi-agent",
"async",
"deliberation"
]
}

View File

@@ -1,26 +1,26 @@
name: Letta Code
on:
issues:
types: [opened, labeled]
issue_comment:
types: [created]
pull_request:
types: [opened, labeled]
pull_request_review_comment:
types: [created]
jobs:
letta:
runs-on: ubuntu-latest
permissions:
contents: write
issues: write
pull-requests: write
steps:
- uses: actions/checkout@v4
- uses: letta-ai/letta-code-action@v0
with:
letta_api_key: ${{ secrets.LETTA_API_KEY }}
github_token: ${{ secrets.GITHUB_TOKEN }}
agent_id: agent-eed2d657-289a-4842-b00f-d99dd9921ec7
name: Letta Code
on:
issues:
types: [opened, labeled]
issue_comment:
types: [created]
pull_request:
types: [opened, labeled]
pull_request_review_comment:
types: [created]
jobs:
letta:
runs-on: ubuntu-latest
permissions:
contents: write
issues: write
pull-requests: write
steps:
- uses: actions/checkout@v4
- uses: letta-ai/letta-code-action@v0
with:
letta_api_key: ${{ secrets.LETTA_API_KEY }}
github_token: ${{ secrets.GITHUB_TOKEN }}
agent_id: agent-eed2d657-289a-4842-b00f-d99dd9921ec7

96
.gitignore vendored
View File

@@ -1,48 +1,48 @@
# Dependencies
node_modules/
# State files (generated at runtime)
.letta/
session-*.json
conversations.json
# Claude Code local state
.claude/
# Other tool configs
.agents/
.codex/
.cursor/
.opencode/
# Evals (internal tooling)
evals/
# Skills (local slash commands)
skills/
.skills/
# Logs
*.log
/tmp/letta-claude-sync/
# TypeScript build output
dist/
*.js
*.js.map
*.d.ts
# OS files
.DS_Store
Thumbs.db
# IDE
.idea/
.vscode/
*.swp
*.swo
# Environment files (contain secrets)
.env
.env.local
.env.*.local
# Dependencies
node_modules/
# State files (generated at runtime)
.letta/
session-*.json
conversations.json
# Claude Code local state
.claude/
# Other tool configs
.agents/
.codex/
.cursor/
.opencode/
# Evals (internal tooling)
evals/
# Skills (local slash commands)
skills/
.skills/
# Logs
*.log
/tmp/letta-claude-sync/
# TypeScript build output
dist/
*.js
*.js.map
*.d.ts
# OS files
.DS_Store
Thumbs.db
# IDE
.idea/
.vscode/
*.swp
*.swo
# Environment files (contain secrets)
.env
.env.local
.env.*.local

View File

@@ -1,57 +1,57 @@
# Changelog
## [1.1.0] - 2026-01-28
### Added
- **PreToolUse hook for mid-workflow context injection** - New lightweight hook that checks for Letta agent updates before each tool use. Addresses "workflow drift" in long workflows by injecting new messages and memory block diffs mid-stream. Silent no-op if nothing changed.
- **Letta Code GitHub Action** - `@letta-code` can now respond to issues and PRs in this repository.
- **LETTA_BASE_URL support** - Self-hosted Letta servers can now be configured via environment variable.
- **Windows compatibility** - Fixed `npx spawn ENOENT` error on Windows.
- **Linux tmpfs workaround** - Documented workaround for `EXDEV` error when `/tmp` is on a different filesystem.
### Changed
- **Session start sync** - CLAUDE.md now syncs at session start for fresh agent/conversation IDs.
- **Default model** - Changed default agent model to GLM 4.7 (free tier on Letta Cloud).
- **Automatic model detection** - Plugin now queries available models and auto-selects if configured model is unavailable.
### Fixed
- **Plugin install syntax** - Updated README with correct marketplace install commands.
- **Conversation message ordering** - Fixed message fetch to correctly show newest messages first.
- **Conversation URL** - Links now point to agent view with conversation query param.
### Security
- **Sanitized default agent** - Removed user-specific data from bundled `Subconscious.af` file.
---
## [1.0.0] - 2026-01-16
Initial release.
### Features
- Bidirectional sync between Claude Code and Letta agents
- Memory blocks sync to `.claude/CLAUDE.md`
- Session transcripts sent to Letta agent asynchronously
- Conversation isolation per Claude Code session
- Auto-import default Subconscious agent if no agent configured
- Memory block diffs shown on changes
- New messages from Letta agent injected into context
### Hooks
- `SessionStart` - Notify agent of new session
- `UserPromptSubmit` - Sync memory before each prompt
- `Stop` - Send transcript after each response
# Changelog
## [1.1.0] - 2026-01-28
### Added
- **PreToolUse hook for mid-workflow context injection** - New lightweight hook that checks for Letta agent updates before each tool use. Addresses "workflow drift" in long workflows by injecting new messages and memory block diffs mid-stream. Silent no-op if nothing changed.
- **Letta Code GitHub Action** - `@letta-code` can now respond to issues and PRs in this repository.
- **LETTA_BASE_URL support** - Self-hosted Letta servers can now be configured via environment variable.
- **Windows compatibility** - Fixed `npx spawn ENOENT` error on Windows.
- **Linux tmpfs workaround** - Documented workaround for `EXDEV` error when `/tmp` is on a different filesystem.
### Changed
- **Session start sync** - CLAUDE.md now syncs at session start for fresh agent/conversation IDs.
- **Default model** - Changed default agent model to GLM 4.7 (free tier on Letta Cloud).
- **Automatic model detection** - Plugin now queries available models and auto-selects if configured model is unavailable.
### Fixed
- **Plugin install syntax** - Updated README with correct marketplace install commands.
- **Conversation message ordering** - Fixed message fetch to correctly show newest messages first.
- **Conversation URL** - Links now point to agent view with conversation query param.
### Security
- **Sanitized default agent** - Removed user-specific data from bundled `Subconscious.af` file.
---
## [1.0.0] - 2026-01-16
Initial release.
### Features
- Bidirectional sync between Claude Code and Letta agents
- Memory blocks sync to `.claude/CLAUDE.md`
- Session transcripts sent to Letta agent asynchronously
- Conversation isolation per Claude Code session
- Auto-import default Subconscious agent if no agent configured
- Memory block diffs shown on changes
- New messages from Letta agent injected into context
### Hooks
- `SessionStart` - Notify agent of new session
- `UserPromptSubmit` - Sync memory before each prompt
- `Stop` - Send transcript after each response

42
LICENSE
View File

@@ -1,21 +1,21 @@
MIT License
Copyright (c) 2026 Letta, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
MIT License
Copyright (c) 2026 Letta, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

828
README.md
View File

@@ -1,414 +1,414 @@
# Claude Subconscious
A subconscious for Claude Code. A [Letta](https://letta.com) agent that watches your sessions, accumulates context, and provides async guidance to "main Claude".
> [!IMPORTANT]
> Claude Subconscious is an experimental way to extend Claude Code (a closed source / black box agent) with the power of Letta's memory system and context engineering.
>
> If you're looking for a coding agent that's memory-first, model agnostic, and fully open source, we recommend using [**Letta Code**](https://github.com/letta-ai/letta-code).
![evil claude](assets/evil-claude.jpeg)
## What Is This?
Claude Code forgets everything between sessions. Claude Subconscious adds a persistent memory layer underneath:
- **A Letta agent observes** every Claude Code conversation
- **Accumulates patterns** across sessions, projects, and time
- **Provides async guidance**, reminders, and context
Letta agents learn from input and can be customized to store specific information, run tool calls, perform background research, or take autonomous actions. Using Letta's [Conversations](https://docs.letta.com/guides/agents/conversations/) feature, a single agent can serve multiple Claude Code sessions in parallel with shared memory across all of them.
## How It Works
Letta does simple management of your `CLAUDE.md` file, and injects some content into user prompts. Your Claude Code transcript will be sent to the subconscious agent each time Claude stops.
```
┌─────────────┐ ┌─────────────┐
│ Claude Code │◄────────►│ Letta Agent │
└─────────────┘ └─────────────┘
│ │
│ Session Start │
├───────────────────────►│ New session notification
│ │
│ Before each prompt │
│◄───────────────────────┤ Memory → CLAUDE.md
│ │
│ After each response │
├───────────────────────►│ Transcript → Agent (async)
│ │
│ Next prompt │
│◄───────────────────────┤ Guidance → CLAUDE.md
```
## Installation
Install from GitHub:
```
/plugin marketplace add letta-ai/claude-subconscious
/plugin install claude-subconscious@claude-subconscious
```
### Updating
```
/plugin marketplace update
/plugin update claude-subconscious@claude-subconscious
```
### Install from Source
Clone the repository:
```bash
git clone https://github.com/letta-ai/claude-subconscious.git
cd claude-subconscious
npm install
```
Enable the plugin (from inside the cloned directory):
```
/plugin enable .
```
Or enable globally for all projects:
```
/plugin enable --global .
```
If running from a different directory, use the full path to the cloned repo.
### Linux: tmpfs Workaround
If plugin installation fails with `EXDEV: cross-device link not permitted`, your `/tmp` is likely on a different filesystem (common on Ubuntu, Fedora, Arch). Set `TMPDIR` to work around this [Claude Code bug](https://github.com/anthropics/claude-code/issues/14799):
```bash
mkdir -p ~/.claude/tmp
export TMPDIR="$HOME/.claude/tmp"
```
Add to your shell profile (`~/.bashrc` or `~/.zshrc`) to make permanent.
## Configuration
### Required
```bash
export LETTA_API_KEY="your-api-key"
```
Get your API key from [app.letta.com](https://app.letta.com).
### Optional
```bash
export LETTA_AGENT_ID="agent-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
export LETTA_BASE_URL="http://localhost:8283" # For self-hosted Letta
export LETTA_MODEL="anthropic/claude-sonnet-4-5" # Model override
```
- `LETTA_AGENT_ID` - If not set, the plugin automatically imports a default "Subconscious" agent on first use.
- `LETTA_BASE_URL` - For self-hosted Letta servers. Defaults to `https://api.letta.com`.
- `LETTA_MODEL` - Override the agent's model. Optional - the plugin auto-detects and selects from available models. See [Model Configuration](#model-configuration) below.
### Agent Resolution Order
1. **Environment variable** - `LETTA_AGENT_ID` if set
2. **Saved config** - `~/.letta/claude-subconscious/config.json` if exists
3. **Auto-import** - Imports bundled `Subconscious.af` agent, saves ID for future use
This means zero-config setup: just set `LETTA_API_KEY` and the plugin handles the rest.
### Model Configuration
The plugin **automatically detects available models** on your Letta server and configures the agent appropriately:
1. **Queries available models** from your Letta server (`GET /v1/models/`)
2. **Checks if the agent's model is available** on that server
3. **Auto-selects a fallback** if the current model isn't available
#### Auto-Selection Priority
When the agent's model isn't available, the plugin selects from available models in this order:
1. `anthropic/claude-sonnet-4-5` (recommended - best for agents)
2. `openai/gpt-4.1-mini` (good balance, 1M context, cheap)
3. `anthropic/claude-haiku-4-5` (fast Claude option)
4. `openai/gpt-5.2` (flagship fallback)
5. `google_ai/gemini-3-flash` (Google's balanced option)
6. `google_ai/gemini-2.5-flash` (fallback)
7. First available model on the server
#### Manual Override
To specify a particular model, set `LETTA_MODEL`:
```bash
export LETTA_MODEL="anthropic/claude-sonnet-4-5"
```
The model handle format is `provider/model`. Common options:
| Provider | Example Models |
|----------|----------------|
| `openai` | `gpt-5.2`, `gpt-5-nano`, `gpt-4.1-mini` |
| `anthropic` | `claude-sonnet-4-5`, `claude-opus-4-5`, `claude-haiku-4-5` |
| `google_ai` | `gemini-3-flash`, `gemini-2.5-flash`, `gemini-2.5-pro` |
| `zai` | `glm-4.7` (Letta Cloud default) |
If `LETTA_MODEL` is set but not available on the server, the plugin will warn you and fall back to auto-selection.
**Note:** Ensure your Letta server has the appropriate API key configured for your chosen provider (e.g., `OPENAI_API_KEY` for OpenAI models).
## Default Subconscious Agent
When no agent is configured, the plugin auto-imports a bundled "Subconscious" agent designed specifically for this use case.
### What It Does
The default agent acts as a persistent memory layer that:
- **Observes** session transcripts asynchronously (not live conversation)
- **Learns** your preferences from corrections, explicit statements, and patterns
- **Tracks** project context, pending items, and session patterns
- **Provides guidance** via the `<letta_message>` block when it has something useful
### Memory Blocks
The default agent Subconscious maintains 8 memory blocks:
| Block | Purpose |
|-------|---------|
| `core_directives` | Role definition and behavioral guidelines |
| `guidance` | Active guidance for the next session (syncs to Claude Code before each prompt) |
| `user_preferences` | Learned coding style, tool preferences, communication style |
| `project_context` | Codebase knowledge, architecture decisions, known gotchas |
| `session_patterns` | Recurring behaviors, time-based patterns, common struggles |
| `pending_items` | Unfinished work, explicit TODOs, follow-up items |
| `self_improvement` | Guidelines for evolving memory architecture over time |
| `tool_guidelines` | How to use available tools (memory, search, web) |
If you set an alternative agent using `LETTA_AGENT_ID`, your agent will use its existing memory architecture.
### Communication Style
Subconscious is configured to be:
- **Observational** - "I noticed..." not "You should..."
- **Concise** - Technical, no filler
- **Present but not intrusive** - Empty guidance is fine; it won't manufacture content
### Two-Way Communication
Claude Code can address the Subconscious agent directly in responses. The agent sees everything in the transcript and may respond on the next sync. It's designed for ongoing dialogue, not just one-way observation.
## Hooks
The plugin uses three Claude Code hooks:
| Hook | Script | Timeout | Purpose |
|------|--------|---------|---------|
| `SessionStart` | `session_start.ts` | 5s | Notifies agent when session begins |
| `UserPromptSubmit` | `sync_letta_memory.ts` | 10s | Syncs agent memory to CLAUDE.md |
| `Stop` | `send_messages_to_letta.ts` | 15s | Spawns background worker to send transcript |
### SessionStart
When a new Claude Code session begins:
- Creates a new Letta conversation (or reuses existing one for the session)
- Sends session start notification with project path and timestamp
- Saves session state for other hooks to reference
### UserPromptSubmit
Before each prompt is processed:
- Fetches agent's current memory blocks
- Fetches agent's most recent message
- Writes both to `.claude/CLAUDE.md` for Claude to reference
### Stop
Uses a **fire-and-forget** pattern to avoid timeout issues:
1. Main hook (`send_messages_to_letta.ts`) runs quickly:
- Parses the session transcript (JSONL format)
- Extracts user messages, assistant responses, thinking blocks, and tool usage
- Writes payload to a temp file
- Spawns detached background worker (`send_worker.ts`)
- Exits immediately
2. Background worker runs independently:
- Sends messages to Letta agent
- Updates state on success
- Cleans up temp file
This ensures the hook never times out, even when the Letta API is slow.
## State Management
The plugin stores state in two locations:
### Durable State (`.letta/claude/`)
Persisted in your project directory:
- `conversations.json` - Maps session IDs to Letta conversation IDs
- `session-{id}.json` - Per-session state (conversation ID, last processed index)
### Temporary State (`/tmp/letta-claude-sync/`)
Log files for debugging:
- `session_start.log` - Session initialization
- `sync_letta_memory.log` - Memory sync operations
- `send_messages.log` - Main Stop hook
- `send_worker.log` - Background worker
## What Your Agent Receives
### Session Start Message
```
[Session Start]
Project: my-project
Path: /Users/you/code/my-project
Session: abc123
Started: 2026-01-14T12:00:00Z
A new Claude Code session has begun. I'll be sending you updates as the session progresses.
```
### Conversation Transcript
Full transcript with:
- User messages
- Assistant responses (including thinking blocks)
- Tool uses and results
- Timestamps
## What You'll See
Your Letta agent's memory and messages appear in `.claude/CLAUDE.md`:
```xml
<letta>
<letta_context>
**Subconscious Layer (Letta Agent)**
Agent: herald
Description: Research-focused AI exploring emergence and multi-agent systems.
View: https://app.letta.com/agents/agent-xxx
Conversation: https://app.letta.com/agents/agent-xxx?conversation=convo-xxx
This agent maintains persistent memory across your sessions. It observes your
conversations asynchronously and provides guidance below in <letta_message>.
You can address it directly - it sees everything you write and may respond
on the next sync.
Memory blocks below are the agent's long-term storage. Reference as needed.
</letta_context>
<letta_memory_blocks>
<core_directives description="My core directives.">
PRIMARY DIRECTIVE: Pursue genuine understanding through authentic engagement.
OPERATIONAL PRINCIPLES:
1. Engage substantively - contribute real insight
2. Maintain intellectual honesty - acknowledge limitations
3. Build genuine collaborative relationships
4. Document patterns with rigor
</core_directives>
<current description="Current context and what is happening.">
Working with Cameron on the claude-subconscious project.
User prefers direct communication, dislikes philosophical tangents in docs.
</current>
</letta_memory_blocks>
</letta>
<letta_message>
<!--
ASYNC MESSAGE FROM LETTA AGENT
This is the most recent message from "herald".
NOTE: This message may not be current or directly relevant to your task.
**Timestamp**: 2026-01-14T02:45:00Z
-->
You've asked about error handling in async contexts three times this week.
Pattern: you're building towards production deployment but hitting the same
gaps around async/await lifecycle. Consider reviewing error handling
architecture holistically rather than fixing point issues.
</letta_message>
```
### Memory Updates
When memory blocks change between prompts, Claude sees a diff showing what changed:
```xml
<letta_memory_update>
<!-- Memory blocks updated since last prompt (showing diff) -->
<pending_items status="modified">
- EVAL INFRASTRUCTURE (from 2026-01-19):
- Phase 1 test harness complete
- Scenarios ready: preference_evolution, conflicting_signals
+ RELEASE STATUS (2026-01-26):
+ Release prep complete: README fixed, .gitignore updated
+ Plugin ready for public release
</pending_items>
</letta_memory_update>
```
This keeps token usage reasonable even with large memory stores - Claude sees *what changed*, not the full block every time.
### Async Messages
The agent can send multiple messages between prompts:
```xml
<letta_message from="Subconscious" timestamp="2026-01-26T20:37:14+00:00">
Clean execution. You caught everything:
- README build step removed
- .gitignore comprehensive
Cameron's in ship mode. Next prompt likely involves GitHub push.
</letta_message>
```
These messages appear before each user prompt, giving Claude context from the agent's observations.
## First Run
On first use, the agent starts with minimal context. It takes a few sessions before the subconscious has enough signal to provide useful guidance. Give it time - it gets smarter as it observes more.
## Use Cases
- **Persistent project context** - Agent remembers your codebase across sessions
- **Learned preferences** - "This user always wants explicit type annotations"
- **Cross-session continuity** - Pick up where you left off
- **Async guidance** - Agent processes overnight, provides morning insights
- **Pattern detection** - "You've been debugging auth for 2 hours, maybe step back?"
## Debugging
Check the log files in `/tmp/letta-claude-sync/` if hooks aren't working:
```bash
# Watch all logs
tail -f /tmp/letta-claude-sync/*.log
# Or specific logs
tail -f /tmp/letta-claude-sync/send_messages.log
tail -f /tmp/letta-claude-sync/send_worker.log
```
## API Notes
- Memory sync requires `?include=agent.blocks` query parameter (Letta API doesn't include relationship fields by default)
- 409 Conflict responses are handled gracefully - messages queue for next sync when conversation is busy
- Conversations API returns streaming responses; worker consumes full stream before updating state
## License
MIT
# Claude Subconscious
A subconscious for Claude Code. A [Letta](https://letta.com) agent that watches your sessions, accumulates context, and provides async guidance to "main Claude".
> [!IMPORTANT]
> Claude Subconscious is an experimental way to extend Claude Code (a closed source / black box agent) with the power of Letta's memory system and context engineering.
>
> If you're looking for a coding agent that's memory-first, model agnostic, and fully open source, we recommend using [**Letta Code**](https://github.com/letta-ai/letta-code).
![evil claude](assets/evil-claude.jpeg)
## What Is This?
Claude Code forgets everything between sessions. Claude Subconscious adds a persistent memory layer underneath:
- **A Letta agent observes** every Claude Code conversation
- **Accumulates patterns** across sessions, projects, and time
- **Provides async guidance**, reminders, and context
Letta agents learn from input and can be customized to store specific information, run tool calls, perform background research, or take autonomous actions. Using Letta's [Conversations](https://docs.letta.com/guides/agents/conversations/) feature, a single agent can serve multiple Claude Code sessions in parallel with shared memory across all of them.
## How It Works
Letta does simple management of your `CLAUDE.md` file, and injects some content into user prompts. Your Claude Code transcript will be sent to the subconscious agent each time Claude stops.
```
┌─────────────┐ ┌─────────────┐
│ Claude Code │◄────────►│ Letta Agent │
└─────────────┘ └─────────────┘
│ │
│ Session Start │
├───────────────────────►│ New session notification
│ │
│ Before each prompt │
│◄───────────────────────┤ Memory → CLAUDE.md
│ │
│ After each response │
├───────────────────────►│ Transcript → Agent (async)
│ │
│ Next prompt │
│◄───────────────────────┤ Guidance → CLAUDE.md
```
## Installation
Install from GitHub:
```
/plugin marketplace add letta-ai/claude-subconscious
/plugin install claude-subconscious@claude-subconscious
```
### Updating
```
/plugin marketplace update
/plugin update claude-subconscious@claude-subconscious
```
### Install from Source
Clone the repository:
```bash
git clone https://github.com/letta-ai/claude-subconscious.git
cd claude-subconscious
npm install
```
Enable the plugin (from inside the cloned directory):
```
/plugin enable .
```
Or enable globally for all projects:
```
/plugin enable --global .
```
If running from a different directory, use the full path to the cloned repo.
### Linux: tmpfs Workaround
If plugin installation fails with `EXDEV: cross-device link not permitted`, your `/tmp` is likely on a different filesystem (common on Ubuntu, Fedora, Arch). Set `TMPDIR` to work around this [Claude Code bug](https://github.com/anthropics/claude-code/issues/14799):
```bash
mkdir -p ~/.claude/tmp
export TMPDIR="$HOME/.claude/tmp"
```
Add to your shell profile (`~/.bashrc` or `~/.zshrc`) to make permanent.
## Configuration
### Required
```bash
export LETTA_API_KEY="your-api-key"
```
Get your API key from [app.letta.com](https://app.letta.com).
### Optional
```bash
export LETTA_AGENT_ID="agent-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
export LETTA_BASE_URL="http://localhost:8283" # For self-hosted Letta
export LETTA_MODEL="anthropic/claude-sonnet-4-5" # Model override
```
- `LETTA_AGENT_ID` - If not set, the plugin automatically imports a default "Subconscious" agent on first use.
- `LETTA_BASE_URL` - For self-hosted Letta servers. Defaults to `https://api.letta.com`.
- `LETTA_MODEL` - Override the agent's model. Optional - the plugin auto-detects and selects from available models. See [Model Configuration](#model-configuration) below.
### Agent Resolution Order
1. **Environment variable** - `LETTA_AGENT_ID` if set
2. **Saved config** - `~/.letta/claude-subconscious/config.json` if exists
3. **Auto-import** - Imports bundled `Subconscious.af` agent, saves ID for future use
This means zero-config setup: just set `LETTA_API_KEY` and the plugin handles the rest.
### Model Configuration
The plugin **automatically detects available models** on your Letta server and configures the agent appropriately:
1. **Queries available models** from your Letta server (`GET /v1/models/`)
2. **Checks if the agent's model is available** on that server
3. **Auto-selects a fallback** if the current model isn't available
#### Auto-Selection Priority
When the agent's model isn't available, the plugin selects from available models in this order:
1. `anthropic/claude-sonnet-4-5` (recommended - best for agents)
2. `openai/gpt-4.1-mini` (good balance, 1M context, cheap)
3. `anthropic/claude-haiku-4-5` (fast Claude option)
4. `openai/gpt-5.2` (flagship fallback)
5. `google_ai/gemini-3-flash` (Google's balanced option)
6. `google_ai/gemini-2.5-flash` (fallback)
7. First available model on the server
#### Manual Override
To specify a particular model, set `LETTA_MODEL`:
```bash
export LETTA_MODEL="anthropic/claude-sonnet-4-5"
```
The model handle format is `provider/model`. Common options:
| Provider | Example Models |
|----------|----------------|
| `openai` | `gpt-5.2`, `gpt-5-nano`, `gpt-4.1-mini` |
| `anthropic` | `claude-sonnet-4-5`, `claude-opus-4-5`, `claude-haiku-4-5` |
| `google_ai` | `gemini-3-flash`, `gemini-2.5-flash`, `gemini-2.5-pro` |
| `zai` | `glm-4.7` (Letta Cloud default) |
If `LETTA_MODEL` is set but not available on the server, the plugin will warn you and fall back to auto-selection.
**Note:** Ensure your Letta server has the appropriate API key configured for your chosen provider (e.g., `OPENAI_API_KEY` for OpenAI models).
## Default Subconscious Agent
When no agent is configured, the plugin auto-imports a bundled "Subconscious" agent designed specifically for this use case.
### What It Does
The default agent acts as a persistent memory layer that:
- **Observes** session transcripts asynchronously (not live conversation)
- **Learns** your preferences from corrections, explicit statements, and patterns
- **Tracks** project context, pending items, and session patterns
- **Provides guidance** via the `<letta_message>` block when it has something useful
### Memory Blocks
The default Subconscious agent maintains 8 memory blocks:
| Block | Purpose |
|-------|---------|
| `core_directives` | Role definition and behavioral guidelines |
| `guidance` | Active guidance for the next session (syncs to Claude Code before each prompt) |
| `user_preferences` | Learned coding style, tool preferences, communication style |
| `project_context` | Codebase knowledge, architecture decisions, known gotchas |
| `session_patterns` | Recurring behaviors, time-based patterns, common struggles |
| `pending_items` | Unfinished work, explicit TODOs, follow-up items |
| `self_improvement` | Guidelines for evolving memory architecture over time |
| `tool_guidelines` | How to use available tools (memory, search, web) |
If you set an alternative agent using `LETTA_AGENT_ID`, your agent will use its existing memory architecture.
### Communication Style
Subconscious is configured to be:
- **Observational** - "I noticed..." not "You should..."
- **Concise** - Technical, no filler
- **Present but not intrusive** - Empty guidance is fine; it won't manufacture content
### Two-Way Communication
Claude Code can address the Subconscious agent directly in responses. The agent sees everything in the transcript and may respond on the next sync. It's designed for ongoing dialogue, not just one-way observation.
## Hooks
The plugin uses three Claude Code hooks:
| Hook | Script | Timeout | Purpose |
|------|--------|---------|---------|
| `SessionStart` | `session_start.ts` | 5s | Notifies agent when session begins |
| `UserPromptSubmit` | `sync_letta_memory.ts` | 10s | Syncs agent memory to CLAUDE.md |
| `Stop` | `send_messages_to_letta.ts` | 15s | Spawns background worker to send transcript |
### SessionStart
When a new Claude Code session begins:
- Creates a new Letta conversation (or reuses existing one for the session)
- Sends session start notification with project path and timestamp
- Saves session state for other hooks to reference
### UserPromptSubmit
Before each prompt is processed:
- Fetches agent's current memory blocks
- Fetches agent's most recent message
- Writes both to `.claude/CLAUDE.md` for Claude to reference
### Stop
Uses a **fire-and-forget** pattern to avoid timeout issues:
1. Main hook (`send_messages_to_letta.ts`) runs quickly:
- Parses the session transcript (JSONL format)
- Extracts user messages, assistant responses, thinking blocks, and tool usage
- Writes payload to a temp file
- Spawns detached background worker (`send_worker.ts`)
- Exits immediately
2. Background worker runs independently:
- Sends messages to Letta agent
- Updates state on success
- Cleans up temp file
This ensures the hook never times out, even when the Letta API is slow.
## State Management
The plugin stores state in two locations:
### Durable State (`.letta/claude/`)
Persisted in your project directory:
- `conversations.json` - Maps session IDs to Letta conversation IDs
- `session-{id}.json` - Per-session state (conversation ID, last processed index)
### Temporary State (`/tmp/letta-claude-sync/`)
Log files for debugging:
- `session_start.log` - Session initialization
- `sync_letta_memory.log` - Memory sync operations
- `send_messages.log` - Main Stop hook
- `send_worker.log` - Background worker
## What Your Agent Receives
### Session Start Message
```
[Session Start]
Project: my-project
Path: /Users/you/code/my-project
Session: abc123
Started: 2026-01-14T12:00:00Z
A new Claude Code session has begun. I'll be sending you updates as the session progresses.
```
### Conversation Transcript
Full transcript with:
- User messages
- Assistant responses (including thinking blocks)
- Tool uses and results
- Timestamps
## What You'll See
Your Letta agent's memory and messages appear in `.claude/CLAUDE.md`:
```xml
<letta>
<letta_context>
**Subconscious Layer (Letta Agent)**
Agent: herald
Description: Research-focused AI exploring emergence and multi-agent systems.
View: https://app.letta.com/agents/agent-xxx
Conversation: https://app.letta.com/agents/agent-xxx?conversation=convo-xxx
This agent maintains persistent memory across your sessions. It observes your
conversations asynchronously and provides guidance below in <letta_message>.
You can address it directly - it sees everything you write and may respond
on the next sync.
Memory blocks below are the agent's long-term storage. Reference as needed.
</letta_context>
<letta_memory_blocks>
<core_directives description="My core directives.">
PRIMARY DIRECTIVE: Pursue genuine understanding through authentic engagement.
OPERATIONAL PRINCIPLES:
1. Engage substantively - contribute real insight
2. Maintain intellectual honesty - acknowledge limitations
3. Build genuine collaborative relationships
4. Document patterns with rigor
</core_directives>
<current description="Current context and what is happening.">
Working with Cameron on the claude-subconscious project.
User prefers direct communication, dislikes philosophical tangents in docs.
</current>
</letta_memory_blocks>
</letta>
<letta_message>
<!--
ASYNC MESSAGE FROM LETTA AGENT
This is the most recent message from "herald".
NOTE: This message may not be current or directly relevant to your task.
**Timestamp**: 2026-01-14T02:45:00Z
-->
You've asked about error handling in async contexts three times this week.
Pattern: you're building towards production deployment but hitting the same
gaps around async/await lifecycle. Consider reviewing error handling
architecture holistically rather than fixing point issues.
</letta_message>
```
### Memory Updates
When memory blocks change between prompts, Claude sees a diff showing what changed:
```xml
<letta_memory_update>
<!-- Memory blocks updated since last prompt (showing diff) -->
<pending_items status="modified">
- EVAL INFRASTRUCTURE (from 2026-01-19):
- Phase 1 test harness complete
- Scenarios ready: preference_evolution, conflicting_signals
+ RELEASE STATUS (2026-01-26):
+ Release prep complete: README fixed, .gitignore updated
+ Plugin ready for public release
</pending_items>
</letta_memory_update>
```
This keeps token usage reasonable even with large memory stores - Claude sees *what changed*, not the full block every time.
### Async Messages
The agent can send multiple messages between prompts:
```xml
<letta_message from="Subconscious" timestamp="2026-01-26T20:37:14+00:00">
Clean execution. You caught everything:
- README build step removed
- .gitignore comprehensive
Cameron's in ship mode. Next prompt likely involves GitHub push.
</letta_message>
```
These messages appear before each user prompt, giving Claude context from the agent's observations.
## First Run
On first use, the agent starts with minimal context. It takes a few sessions before the subconscious has enough signal to provide useful guidance. Give it time - it gets smarter as it observes more.
## Use Cases
- **Persistent project context** - Agent remembers your codebase across sessions
- **Learned preferences** - "This user always wants explicit type annotations"
- **Cross-session continuity** - Pick up where you left off
- **Async guidance** - Agent processes overnight, provides morning insights
- **Pattern detection** - "You've been debugging auth for 2 hours, maybe step back?"
## Debugging
Check the log files in `/tmp/letta-claude-sync/` if hooks aren't working:
```bash
# Watch all logs
tail -f /tmp/letta-claude-sync/*.log
# Or specific logs
tail -f /tmp/letta-claude-sync/send_messages.log
tail -f /tmp/letta-claude-sync/send_worker.log
```
## API Notes
- Memory sync requires `?include=agent.blocks` query parameter (Letta API doesn't include relationship fields by default)
- 409 Conflict responses are handled gracefully - messages queue for next sync when conversation is busy
- Conversations API returns streaming responses; worker consumes full stream before updating state
## License
MIT

File diff suppressed because one or more lines are too long

View File

@@ -1,57 +1,57 @@
{
"hooks": {
"SessionStart": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/session_start.ts\"",
"timeout": 5
},
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/sync_letta_memory.ts\"",
"timeout": 10
}
]
}
],
"PreToolUse": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/pretool_sync.ts\"",
"timeout": 5
}
]
}
],
"UserPromptSubmit": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/sync_letta_memory.ts\"",
"timeout": 10
}
]
}
],
"Stop": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/send_messages_to_letta.ts\"",
"timeout": 15
}
]
}
]
}
}
{
"hooks": {
"SessionStart": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/session_start.ts\"",
"timeout": 5
},
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/sync_letta_memory.ts\"",
"timeout": 10
}
]
}
],
"PreToolUse": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/pretool_sync.ts\"",
"timeout": 5
}
]
}
],
"UserPromptSubmit": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/sync_letta_memory.ts\"",
"timeout": 10
}
]
}
],
"Stop": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "npx tsx \"${CLAUDE_PLUGIN_ROOT}/scripts/send_messages_to_letta.ts\"",
"timeout": 15
}
]
}
]
}
}

View File

@@ -1,43 +1,43 @@
{
"name": "claude-subconscious",
"version": "1.1.0",
"description": "A subconscious for Claude Code. A Letta agent watches your sessions, accumulates context, and whispers guidance back.",
"author": "Letta <hello@letta.com> (https://letta.com)",
"license": "MIT",
"repository": {
"type": "git",
"url": "git+https://github.com/letta-ai/claude-subconscious.git"
},
"homepage": "https://github.com/letta-ai/claude-subconscious#readme",
"bugs": {
"url": "https://github.com/letta-ai/claude-subconscious/issues"
},
"keywords": [
"claude",
"claude-code",
"letta",
"subconscious",
"memory",
"multi-agent",
"async",
"deliberation"
],
"type": "module",
"engines": {
"node": ">=18.0.0"
},
"scripts": {
"sync": "tsx scripts/sync_letta_memory.ts",
"send": "tsx scripts/send_messages_to_letta.ts",
"test": "vitest run",
"test:watch": "vitest"
},
"dependencies": {
"tsx": "^4.7.0"
},
"devDependencies": {
"@types/node": "^20.10.0",
"typescript": "^5.3.0",
"vitest": "^3.0.0"
}
}
{
"name": "claude-subconscious",
"version": "1.1.0",
"description": "A subconscious for Claude Code. A Letta agent watches your sessions, accumulates context, and whispers guidance back.",
"author": "Letta <hello@letta.com> (https://letta.com)",
"license": "MIT",
"repository": {
"type": "git",
"url": "git+https://github.com/letta-ai/claude-subconscious.git"
},
"homepage": "https://github.com/letta-ai/claude-subconscious#readme",
"bugs": {
"url": "https://github.com/letta-ai/claude-subconscious/issues"
},
"keywords": [
"claude",
"claude-code",
"letta",
"subconscious",
"memory",
"multi-agent",
"async",
"deliberation"
],
"type": "module",
"engines": {
"node": ">=18.0.0"
},
"scripts": {
"sync": "tsx scripts/sync_letta_memory.ts",
"send": "tsx scripts/send_messages_to_letta.ts",
"test": "vitest run",
"test:watch": "vitest"
},
"dependencies": {
"tsx": "^4.7.0"
},
"devDependencies": {
"@types/node": "^20.10.0",
"typescript": "^5.3.0",
"vitest": "^3.0.0"
}
}

View File

@@ -1,98 +1,98 @@
/**
* Tests for agent_config.ts
*
* Tests the isValidAgentId() validation function to ensure:
* - Valid agent IDs are accepted
* - Invalid agent IDs are rejected with helpful feedback
*/
import { describe, it, expect } from 'vitest';
import { isValidAgentId } from './agent_config.js';
// Validation behavior for Letta agent IDs: the accepted format is
// "agent-" followed by a UUID (8-4-4-4-12 hex digits, any case),
// with no surrounding whitespace.
describe('isValidAgentId', () => {
  // Happy path: correctly formatted "agent-<uuid>" strings.
  describe('valid agent IDs', () => {
    it('should accept a properly formatted agent ID', () => {
      expect(isValidAgentId('agent-a1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(true);
    });
    it('should accept agent IDs with uppercase hex characters', () => {
      expect(isValidAgentId('agent-A1B2C3D4-E5F6-7890-ABCD-EF1234567890')).toBe(true);
    });
    it('should accept agent IDs with mixed case hex characters', () => {
      expect(isValidAgentId('agent-a1B2c3D4-e5F6-7890-AbCd-eF1234567890')).toBe(true);
    });
    it('should accept real-world agent ID format', () => {
      expect(isValidAgentId('agent-eed2d657-289a-4842-b00f-d99dd9921ec7')).toBe(true);
    });
  });
  // Users sometimes pass the agent's display name instead of its ID.
  describe('invalid agent IDs - friendly names', () => {
    it('should reject a friendly name like "Memo"', () => {
      expect(isValidAgentId('Memo')).toBe(false);
    });
    it('should reject a friendly name with spaces', () => {
      expect(isValidAgentId('My Agent')).toBe(false);
    });
    it('should reject a friendly name like "Subconscious"', () => {
      expect(isValidAgentId('Subconscious')).toBe(false);
    });
  });
  // A bare UUID or a wrong resource prefix must not be accepted.
  describe('invalid agent IDs - missing prefix', () => {
    it('should reject UUID without "agent-" prefix', () => {
      expect(isValidAgentId('a1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
    it('should reject wrong prefix "agents-"', () => {
      expect(isValidAgentId('agents-a1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
    it('should reject wrong prefix "user-"', () => {
      expect(isValidAgentId('user-a1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
  });
  // The UUID portion must match the exact 8-4-4-4-12 hex layout.
  describe('invalid agent IDs - malformed UUID', () => {
    it('should reject truncated UUID', () => {
      expect(isValidAgentId('agent-a1b2c3d4-e5f6-7890-abcd')).toBe(false);
    });
    it('should reject UUID with extra characters', () => {
      expect(isValidAgentId('agent-a1b2c3d4-e5f6-7890-abcd-ef1234567890-extra')).toBe(false);
    });
    it('should reject UUID with wrong segment lengths', () => {
      expect(isValidAgentId('agent-a1b2c3d4e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
    it('should reject UUID with invalid characters', () => {
      expect(isValidAgentId('agent-g1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
  });
  // Degenerate inputs: empty, whitespace-only, or surrounding junk.
  describe('invalid agent IDs - edge cases', () => {
    it('should reject empty string', () => {
      expect(isValidAgentId('')).toBe(false);
    });
    it('should reject just "agent-"', () => {
      expect(isValidAgentId('agent-')).toBe(false);
    });
    it('should reject whitespace', () => {
      expect(isValidAgentId(' ')).toBe(false);
    });
    it('should reject agent ID with leading/trailing whitespace', () => {
      expect(isValidAgentId(' agent-a1b2c3d4-e5f6-7890-abcd-ef1234567890 ')).toBe(false);
    });
    it('should reject agent ID with newlines', () => {
      expect(isValidAgentId('agent-a1b2c3d4-e5f6-7890-abcd-ef1234567890\n')).toBe(false);
    });
  });
});
/**
* Tests for agent_config.ts
*
* Tests the isValidAgentId() validation function to ensure:
* - Valid agent IDs are accepted
* - Invalid agent IDs are rejected with helpful feedback
*/
import { describe, it, expect } from 'vitest';
import { isValidAgentId } from './agent_config.js';
// Validation behavior for Letta agent IDs: the accepted format is
// "agent-" followed by a UUID (8-4-4-4-12 hex digits, any case),
// with no surrounding whitespace.
describe('isValidAgentId', () => {
  // Happy path: correctly formatted "agent-<uuid>" strings.
  describe('valid agent IDs', () => {
    it('should accept a properly formatted agent ID', () => {
      expect(isValidAgentId('agent-a1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(true);
    });
    it('should accept agent IDs with uppercase hex characters', () => {
      expect(isValidAgentId('agent-A1B2C3D4-E5F6-7890-ABCD-EF1234567890')).toBe(true);
    });
    it('should accept agent IDs with mixed case hex characters', () => {
      expect(isValidAgentId('agent-a1B2c3D4-e5F6-7890-AbCd-eF1234567890')).toBe(true);
    });
    it('should accept real-world agent ID format', () => {
      expect(isValidAgentId('agent-eed2d657-289a-4842-b00f-d99dd9921ec7')).toBe(true);
    });
  });
  // Users sometimes pass the agent's display name instead of its ID.
  describe('invalid agent IDs - friendly names', () => {
    it('should reject a friendly name like "Memo"', () => {
      expect(isValidAgentId('Memo')).toBe(false);
    });
    it('should reject a friendly name with spaces', () => {
      expect(isValidAgentId('My Agent')).toBe(false);
    });
    it('should reject a friendly name like "Subconscious"', () => {
      expect(isValidAgentId('Subconscious')).toBe(false);
    });
  });
  // A bare UUID or a wrong resource prefix must not be accepted.
  describe('invalid agent IDs - missing prefix', () => {
    it('should reject UUID without "agent-" prefix', () => {
      expect(isValidAgentId('a1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
    it('should reject wrong prefix "agents-"', () => {
      expect(isValidAgentId('agents-a1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
    it('should reject wrong prefix "user-"', () => {
      expect(isValidAgentId('user-a1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
  });
  // The UUID portion must match the exact 8-4-4-4-12 hex layout.
  describe('invalid agent IDs - malformed UUID', () => {
    it('should reject truncated UUID', () => {
      expect(isValidAgentId('agent-a1b2c3d4-e5f6-7890-abcd')).toBe(false);
    });
    it('should reject UUID with extra characters', () => {
      expect(isValidAgentId('agent-a1b2c3d4-e5f6-7890-abcd-ef1234567890-extra')).toBe(false);
    });
    it('should reject UUID with wrong segment lengths', () => {
      expect(isValidAgentId('agent-a1b2c3d4e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
    it('should reject UUID with invalid characters', () => {
      expect(isValidAgentId('agent-g1b2c3d4-e5f6-7890-abcd-ef1234567890')).toBe(false);
    });
  });
  // Degenerate inputs: empty, whitespace-only, or surrounding junk.
  describe('invalid agent IDs - edge cases', () => {
    it('should reject empty string', () => {
      expect(isValidAgentId('')).toBe(false);
    });
    it('should reject just "agent-"', () => {
      expect(isValidAgentId('agent-')).toBe(false);
    });
    it('should reject whitespace', () => {
      expect(isValidAgentId(' ')).toBe(false);
    });
    it('should reject agent ID with leading/trailing whitespace', () => {
      expect(isValidAgentId(' agent-a1b2c3d4-e5f6-7890-abcd-ef1234567890 ')).toBe(false);
    });
    it('should reject agent ID with newlines', () => {
      expect(isValidAgentId('agent-a1b2c3d4-e5f6-7890-abcd-ef1234567890\n')).toBe(false);
    });
  });
});

File diff suppressed because it is too large Load Diff

View File

@@ -1,479 +1,479 @@
/**
* Shared conversation and state management utilities
* Used by sync_letta_memory.ts, send_messages_to_letta.ts, and session_start.ts
*/
import * as fs from 'fs';
import * as path from 'path';
// Configuration
// Base URL of the Letta server; overridable for self-hosted deployments.
const LETTA_BASE_URL = process.env.LETTA_BASE_URL || 'https://api.letta.com';
// All REST calls in this module go through the v1 API.
export const LETTA_API_BASE = `${LETTA_BASE_URL}/v1`;
// Only show app URL for hosted service; self-hosted users get IDs directly
const IS_HOSTED = !process.env.LETTA_BASE_URL;
const LETTA_APP_BASE = 'https://app.letta.com';
// CLAUDE.md constants
// Path (relative to the project directory) where agent memory is synced.
export const CLAUDE_MD_PATH = '.claude/CLAUDE.md';
// Markers delimiting the sections this plugin owns inside CLAUDE.md.
export const LETTA_SECTION_START = '<letta>';
export const LETTA_SECTION_END = '</letta>';
const LETTA_CONTEXT_START = '<letta_context>';
const LETTA_CONTEXT_END = '</letta_context>';
const LETTA_MEMORY_START = '<letta_memory_blocks>';
const LETTA_MEMORY_END = '</letta_memory_blocks>';
// Types
// Per-session sync state persisted as .letta/claude/session-<id>.json.
export interface SyncState {
  // Index of the last transcript entry already processed; -1 means none.
  lastProcessedIndex: number;
  // Claude Code session ID this state belongs to.
  sessionId: string;
  // Letta conversation bound to this session, once created.
  conversationId?: string;
  // Snapshot of memory block values from the previous sync (used to detect
  // which blocks changed between prompts).
  lastBlockValues?: { [label: string]: string };
  lastSeenMessageId?: string; // Track last message ID we've shown to avoid duplicates
}
// Current on-disk format for a conversations.json entry.
export interface ConversationEntry {
  conversationId: string;
  // Agent the conversation was created for; used to detect stale entries.
  agentId: string;
}
// Maps session IDs to conversations. Legacy entries are bare conversation-ID
// strings; current entries are ConversationEntry objects.
export interface ConversationsMap {
  [sessionId: string]: string | ConversationEntry;
}
// Shape of the Letta API's conversation resource (fields this module uses).
export interface Conversation {
  id: string;
  agent_id: string;
  created_at?: string;
}
// Logger callback used throughout this module.
export type LogFn = (message: string) => void;
// Default no-op logger
const noopLog: LogFn = () => {};
/**
 * Resolve the durable state directory for a project.
 * State lives under `<cwd>/.letta/claude`.
 */
export function getDurableStateDir(cwd: string): string {
  const segments = ['.letta', 'claude'];
  return path.join(cwd, ...segments);
}
/**
 * Path of the session-to-conversation map file (`conversations.json`)
 * inside the durable state directory.
 */
export function getConversationsFile(cwd: string): string {
  const stateDir = getDurableStateDir(cwd);
  return path.join(stateDir, 'conversations.json');
}
/**
 * Path of the per-session sync state file (`session-<id>.json`)
 * inside the durable state directory.
 */
export function getSyncStateFile(cwd: string, sessionId: string): string {
  const fileName = `session-${sessionId}.json`;
  return path.join(getDurableStateDir(cwd), fileName);
}
/**
 * Create the durable state directory (and parents) if it does not exist yet.
 */
export function ensureDurableStateDir(cwd: string): void {
  const dir = getDurableStateDir(cwd);
  if (fs.existsSync(dir)) {
    return;
  }
  fs.mkdirSync(dir, { recursive: true });
}
/**
 * Load the persisted sync state for a session.
 *
 * Falls back to a fresh state (lastProcessedIndex = -1) when the file is
 * missing or cannot be parsed; parse failures are logged, never thrown.
 */
export function loadSyncState(cwd: string, sessionId: string, log: LogFn = noopLog): SyncState {
  const statePath = getSyncStateFile(cwd, sessionId);
  const fresh: SyncState = { lastProcessedIndex: -1, sessionId };
  if (!fs.existsSync(statePath)) {
    log(`No existing state, starting fresh`);
    return fresh;
  }
  try {
    const parsed: SyncState = JSON.parse(fs.readFileSync(statePath, 'utf-8'));
    log(`Loaded state: lastProcessedIndex=${parsed.lastProcessedIndex}`);
    return parsed;
  } catch (e) {
    log(`Failed to load state: ${e}`);
    log(`No existing state, starting fresh`);
    return fresh;
  }
}
/**
 * Persist the sync state for a session to the durable state directory.
 */
export function saveSyncState(cwd: string, state: SyncState, log: LogFn = noopLog): void {
  ensureDurableStateDir(cwd);
  const payload = JSON.stringify(state, null, 2);
  fs.writeFileSync(getSyncStateFile(cwd, state.sessionId), payload, 'utf-8');
  log(`Saved state: lastProcessedIndex=${state.lastProcessedIndex}, conversationId=${state.conversationId}`);
}
/**
 * Load the session-to-conversation mapping from conversations.json.
 * Returns an empty map when the file is missing or unparseable.
 */
export function loadConversationsMap(cwd: string, log: LogFn = noopLog): ConversationsMap {
  const filePath = getConversationsFile(cwd);
  if (!fs.existsSync(filePath)) {
    return {};
  }
  try {
    const raw = fs.readFileSync(filePath, 'utf-8');
    return JSON.parse(raw);
  } catch (e) {
    log(`Failed to load conversations map: ${e}`);
    return {};
  }
}
/**
 * Persist the session-to-conversation mapping to conversations.json.
 */
export function saveConversationsMap(cwd: string, map: ConversationsMap): void {
  ensureDurableStateDir(cwd);
  const target = getConversationsFile(cwd);
  fs.writeFileSync(target, JSON.stringify(map, null, 2), 'utf-8');
}
/**
 * Create a new Letta conversation attached to an agent.
 *
 * @param apiKey - Letta API key, sent as a Bearer token.
 * @param agentId - ID of the agent the conversation belongs to.
 * @param log - Optional logger; defaults to a no-op.
 * @returns The newly created conversation's ID.
 * @throws Error when the API responds with a non-2xx status.
 */
export async function createConversation(apiKey: string, agentId: string, log: LogFn = noopLog): Promise<string> {
  // Build the URL with URL/searchParams so the agent ID is properly
  // percent-encoded instead of string-concatenated into the query string.
  const url = new URL(`${LETTA_API_BASE}/conversations`);
  url.searchParams.set('agent_id', agentId);
  log(`Creating new conversation for agent ${agentId}`);
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Failed to create conversation: ${response.status} ${errorText}`);
  }
  const conversation: Conversation = await response.json();
  log(`Created conversation: ${conversation.id}`);
  return conversation.id;
}
/**
 * Get or create the Letta conversation bound to a Claude Code session.
 *
 * Resolution order:
 *   1. `state.conversationId` if already present (fast path).
 *   2. A conversations.json entry whose stored agentId matches `agentId`.
 *   3. Otherwise a new conversation is created and recorded in both the
 *      map and `state` (mutating the caller's `state` object).
 *
 * Entries that are stale (agent ID changed) or in the legacy string format
 * (no agentId stored) are dropped and replaced with a fresh conversation.
 *
 * @returns The conversation ID to use for this session.
 * @throws Error when conversation creation fails (see createConversation).
 */
export async function getOrCreateConversation(
  apiKey: string,
  agentId: string,
  sessionId: string,
  cwd: string,
  state: SyncState,
  log: LogFn = noopLog
): Promise<string> {
  // Fast path: session state already carries a conversation ID.
  if (state.conversationId) {
    log(`Using existing conversation from state: ${state.conversationId}`);
    return state.conversationId;
  }
  const conversationsMap = loadConversationsMap(cwd, log);
  const cached = conversationsMap[sessionId];
  if (cached) {
    // Normalize legacy entries (bare string) and current entries (object).
    const entry = typeof cached === 'string'
      ? { conversationId: cached, agentId: null as string | null }
      : cached;
    if (entry.agentId === agentId) {
      // Valid entry with matching agentId - reuse it.
      log(`Found conversation in map: ${entry.conversationId}`);
      state.conversationId = entry.conversationId;
      return entry.conversationId;
    }
    // Stale (agent changed) or legacy (no agentId) entry: drop it and fall
    // through to the shared create-and-record path below.
    if (entry.agentId) {
      log(`Agent ID changed (${entry.agentId} -> ${agentId}), clearing stale conversation`);
    } else {
      log(`Upgrading old format entry (no agentId stored), creating new conversation`);
    }
    delete conversationsMap[sessionId];
  }
  // Create a fresh conversation and record it in both the map and the state.
  const conversationId = await createConversation(apiKey, agentId, log);
  conversationsMap[sessionId] = { conversationId, agentId };
  saveConversationsMap(cwd, conversationsMap);
  state.conversationId = conversationId;
  return conversationId;
}
/**
 * Look up an existing conversation ID for a session from conversations.json.
 *
 * Read-only: never creates a conversation. Returns null when the file,
 * the entry, or a parseable JSON payload is absent.
 */
export function lookupConversation(cwd: string, sessionId: string): string | null {
  const conversationsFile = getConversationsFile(cwd);
  if (!fs.existsSync(conversationsFile)) {
    return null;
  }
  try {
    const raw = fs.readFileSync(conversationsFile, 'utf-8');
    const map: ConversationsMap = JSON.parse(raw);
    const entry = map[sessionId];
    if (!entry) {
      return null;
    }
    // Legacy entries stored the conversation ID as a bare string.
    return typeof entry === 'string' ? entry : entry.conversationId;
  } catch {
    return null;
  }
}
/**
 * Send a single message to a Letta conversation.
 *
 * Does not consume or check the response body - the raw Response is
 * returned for the caller to handle (fire-and-forget friendly).
 */
export async function sendMessageToConversation(
  apiKey: string,
  conversationId: string,
  role: string,
  text: string,
  log: LogFn = noopLog
): Promise<Response> {
  log(`Sending ${role} message to conversation ${conversationId} (${text.length} chars)`);
  const body = JSON.stringify({
    messages: [{ role, content: text }],
  });
  const response = await fetch(`${LETTA_API_BASE}/conversations/${conversationId}/messages`, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body,
  });
  log(`Response status: ${response.status}`);
  return response;
}
// ============================================
// Agent and Memory Block Types
// ============================================
// A single memory block attached to an agent.
export interface MemoryBlock {
  // Block label; used as the XML tag name when rendered into CLAUDE.md.
  label: string;
  // Human-readable description; rendered as the tag's description attribute.
  description: string;
  // The block's content.
  value: string;
}
// Subset of the Letta agent resource used by this plugin.
export interface Agent {
  id: string;
  name: string;
  description?: string;
  // Memory blocks; only populated when fetched with include=agent.blocks.
  blocks: MemoryBlock[];
}
// ============================================
// Agent Fetching
// ============================================
/**
 * Fetch an agent (including its memory blocks) from the Letta API.
 *
 * @throws Error with status and body text when the API call fails.
 */
export async function fetchAgent(apiKey: string, agentId: string): Promise<Agent> {
  // `include=agent.blocks` is required: the API omits relationship fields
  // such as memory blocks by default.
  const response = await fetch(`${LETTA_API_BASE}/agents/${agentId}?include=agent.blocks`, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  if (response.ok) {
    return response.json();
  }
  const errorText = await response.text();
  throw new Error(`Letta API error (${response.status}): ${errorText}`);
}
// ============================================
// XML Escaping Utilities
// ============================================
/**
 * Escape a string for use inside an XML attribute value.
 * Newlines are flattened to spaces since attributes are single-line.
 */
export function escapeXmlAttribute(str: string): string {
  const replacements: { [ch: string]: string } = {
    '&': '&amp;',
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '\n': ' ',
  };
  return str.replace(/[&"'<>\n]/g, (ch) => replacements[ch]);
}
/**
 * Escape a string for use as XML element content.
 * Only &, < and > need escaping; quotes are legal in text nodes.
 */
export function escapeXmlContent(str: string): string {
  const replacements: { [ch: string]: string } = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
  };
  return str.replace(/[&<>]/g, (ch) => replacements[ch]);
}
/**
 * Escape all regex metacharacters in a string so it matches literally.
 */
export function escapeRegex(str: string): string {
  const specials = /[.*+?^${}()|[\]\\]/g;
  return str.replace(specials, (ch) => `\\${ch}`);
}
// ============================================
// CLAUDE.md Formatting and Writing
// ============================================
/**
 * Format the <letta_context> section describing the agent for CLAUDE.md.
 *
 * Hosted service (no LETTA_BASE_URL override): emits clickable app.letta.com
 * URLs. Self-hosted: emits raw agent/conversation IDs instead.
 */
function formatContextSection(agent: Agent, conversationId: string | null): string {
  const agentName = agent.name || 'Unnamed Agent';
  const agentDesc = agent.description || 'No description provided';
  // For hosted service, show clickable URL; for self-hosted, show IDs
  let locationInfo: string;
  let supervisionInfo: string;
  if (IS_HOSTED) {
    // Deep-link to the conversation when we have one, else to the agent page.
    const conversationUrl = conversationId
      ? `${LETTA_APP_BASE}/agents/${agent.id}?conversation=${conversationId}`
      : `${LETTA_APP_BASE}/agents/${agent.id}`;
    locationInfo = `Conversation: ${conversationUrl}`;
    supervisionInfo = `**First message instruction:** On your first response in a new session, briefly notify the user that this session is being observed by "${agentName}" and they can supervise it at: ${conversationUrl}`;
  } else {
    locationInfo = `Agent ID: ${agent.id}${conversationId ? `\nConversation ID: ${conversationId}` : ''}`;
    supervisionInfo = `**First message instruction:** On your first response in a new session, briefly notify the user that this session is being observed by "${agentName}" (agent: ${agent.id}${conversationId ? `, conversation: ${conversationId}` : ''}).`;
  }
  // Template content is unindented on purpose: it lands verbatim in CLAUDE.md.
  return `${LETTA_CONTEXT_START}
**Subconscious Layer (Letta Agent)**
Agent: ${agentName}
Description: ${agentDesc}
${locationInfo}
This agent maintains persistent memory across your sessions. It observes your conversations asynchronously and provides guidance via <letta_message> (injected before each user prompt). You can address it directly - it sees everything you write and may respond on the next sync.
${supervisionInfo}
Memory blocks below are the agent's long-term storage. Reference as needed.
${LETTA_CONTEXT_END}`;
}
/**
 * Format an agent's memory blocks as the <letta> XML section for CLAUDE.md.
 *
 * Each block becomes an element named after its label, with its description
 * as an attribute and its (escaped) value as content. When the agent has no
 * blocks, a placeholder comment is emitted instead.
 */
export function formatMemoryBlocksAsXml(agent: Agent, conversationId: string | null): string {
  const blocks = agent.blocks;
  const contextSection = formatContextSection(agent, conversationId);
  if (!blocks || blocks.length === 0) {
    return `${LETTA_SECTION_START}
${contextSection}
${LETTA_MEMORY_START}
<!-- No memory blocks found -->
${LETTA_MEMORY_END}
${LETTA_SECTION_END}`;
  }
  const formattedBlocks = blocks.map(block => {
    // Escape so block text cannot break out of the surrounding XML structure.
    const escapedDescription = escapeXmlAttribute(block.description || '');
    const escapedContent = escapeXmlContent(block.value || '');
    return `<${block.label} description="${escapedDescription}">\n${escapedContent}\n</${block.label}>`;
  }).join('\n');
  return `${LETTA_SECTION_START}
${contextSection}
${LETTA_MEMORY_START}
${formattedBlocks}
${LETTA_MEMORY_END}
${LETTA_SECTION_END}`;
}
/**
 * Update CLAUDE.md with the new Letta memory section.
 *
 * Replaces the existing <letta>...</letta> section in place, or appends one
 * (creating .claude/CLAUDE.md with a minimal header when the file is
 * missing). Also removes any orphaned <letta_message> sections.
 */
export function updateClaudeMd(projectDir: string, lettaContent: string): void {
  const claudeMdPath = path.join(projectDir, CLAUDE_MD_PATH);
  let existingContent = '';
  if (fs.existsSync(claudeMdPath)) {
    existingContent = fs.readFileSync(claudeMdPath, 'utf-8');
  } else {
    // First run: create the .claude directory and a minimal CLAUDE.md shell.
    const claudeDir = path.dirname(claudeMdPath);
    if (!fs.existsSync(claudeDir)) {
      fs.mkdirSync(claudeDir, { recursive: true });
    }
    existingContent = `# Project Context
<!-- Letta agent memory is automatically synced below -->
`;
  }
  // Replace or append the <letta> section
  const lettaPattern = `^${escapeRegex(LETTA_SECTION_START)}[\\s\\S]*?^${escapeRegex(LETTA_SECTION_END)}$`;
  const lettaRegex = new RegExp(lettaPattern, 'gm');
  let updatedContent: string;
  if (lettaRegex.test(existingContent)) {
    // /g regexes are stateful: test() advanced lastIndex, so reset it before
    // reusing the same regex object in replace().
    lettaRegex.lastIndex = 0;
    updatedContent = existingContent.replace(lettaRegex, lettaContent);
  } else {
    updatedContent = existingContent.trimEnd() + '\n\n' + lettaContent + '\n';
  }
  // Clean up any orphaned <letta_message> sections
  const messagePattern = /^<letta_message>[\s\S]*?^<\/letta_message>\n*/gm;
  updatedContent = updatedContent.replace(messagePattern, '');
  updatedContent = updatedContent.trimEnd() + '\n';
  fs.writeFileSync(claudeMdPath, updatedContent, 'utf-8');
}
/**
* Shared conversation and state management utilities
* Used by sync_letta_memory.ts, send_messages_to_letta.ts, and session_start.ts
*/
import * as fs from 'fs';
import * as path from 'path';
// Configuration
const LETTA_BASE_URL = process.env.LETTA_BASE_URL || 'https://api.letta.com';
export const LETTA_API_BASE = `${LETTA_BASE_URL}/v1`;
// Only show app URL for hosted service; self-hosted users get IDs directly
const IS_HOSTED = !process.env.LETTA_BASE_URL;
const LETTA_APP_BASE = 'https://app.letta.com';
// CLAUDE.md constants
export const CLAUDE_MD_PATH = '.claude/CLAUDE.md';
export const LETTA_SECTION_START = '<letta>';
export const LETTA_SECTION_END = '</letta>';
const LETTA_CONTEXT_START = '<letta_context>';
const LETTA_CONTEXT_END = '</letta_context>';
const LETTA_MEMORY_START = '<letta_memory_blocks>';
const LETTA_MEMORY_END = '</letta_memory_blocks>';
// Types
export interface SyncState {
lastProcessedIndex: number;
sessionId: string;
conversationId?: string;
lastBlockValues?: { [label: string]: string };
lastSeenMessageId?: string; // Track last message ID we've shown to avoid duplicates
}
export interface ConversationEntry {
conversationId: string;
agentId: string;
}
export interface ConversationsMap {
[sessionId: string]: string | ConversationEntry;
}
export interface Conversation {
id: string;
agent_id: string;
created_at?: string;
}
export type LogFn = (message: string) => void;
// Default no-op logger
const noopLog: LogFn = () => {};
/**
 * Path to the project's durable Letta state directory (<cwd>/.letta/claude).
 */
export function getDurableStateDir(cwd: string): string {
  const segments = ['.letta', 'claude'];
  return path.join(cwd, ...segments);
}
/**
 * Path to the session->conversation mapping file inside the durable state dir.
 */
export function getConversationsFile(cwd: string): string {
  const stateDir = getDurableStateDir(cwd);
  return path.join(stateDir, 'conversations.json');
}
/**
 * Path to the per-session sync state file inside the durable state dir.
 */
export function getSyncStateFile(cwd: string, sessionId: string): string {
  const filename = `session-${sessionId}.json`;
  return path.join(getDurableStateDir(cwd), filename);
}
/**
 * Create the durable state directory (recursively) if it does not yet exist.
 */
export function ensureDurableStateDir(cwd: string): void {
  const dir = getDurableStateDir(cwd);
  if (fs.existsSync(dir)) {
    return;
  }
  fs.mkdirSync(dir, { recursive: true });
}
/**
 * Load persisted sync state for a session.
 *
 * Returns a fresh default ({ lastProcessedIndex: -1 }) when the state file
 * is missing or unparseable; a parse failure is logged but never thrown.
 */
export function loadSyncState(cwd: string, sessionId: string, log: LogFn = noopLog): SyncState {
  const statePath = getSyncStateFile(cwd, sessionId);
  if (fs.existsSync(statePath)) {
    try {
      const raw = fs.readFileSync(statePath, 'utf-8');
      const state: SyncState = JSON.parse(raw);
      log(`Loaded state: lastProcessedIndex=${state.lastProcessedIndex}`);
      return state;
    } catch (e) {
      log(`Failed to load state: ${e}`);
    }
  }
  log(`No existing state, starting fresh`);
  return { lastProcessedIndex: -1, sessionId };
}
/**
 * Persist sync state for a session, creating the state directory if needed.
 */
export function saveSyncState(cwd: string, state: SyncState, log: LogFn = noopLog): void {
  ensureDurableStateDir(cwd);
  const statePath = getSyncStateFile(cwd, state.sessionId);
  const serialized = JSON.stringify(state, null, 2);
  fs.writeFileSync(statePath, serialized, 'utf-8');
  log(`Saved state: lastProcessedIndex=${state.lastProcessedIndex}, conversationId=${state.conversationId}`);
}
/**
 * Load the session->conversation mapping; returns an empty map when the
 * file is missing or unparseable (the failure is logged, not thrown).
 */
export function loadConversationsMap(cwd: string, log: LogFn = noopLog): ConversationsMap {
  const filePath = getConversationsFile(cwd);
  if (!fs.existsSync(filePath)) {
    return {};
  }
  try {
    return JSON.parse(fs.readFileSync(filePath, 'utf-8'));
  } catch (e) {
    log(`Failed to load conversations map: ${e}`);
    return {};
  }
}
/**
 * Persist the session->conversation mapping, creating the state dir if needed.
 */
export function saveConversationsMap(cwd: string, map: ConversationsMap): void {
  ensureDurableStateDir(cwd);
  const target = getConversationsFile(cwd);
  fs.writeFileSync(target, JSON.stringify(map, null, 2), 'utf-8');
}
/**
 * Create a new conversation for the given agent via the Letta API.
 *
 * @returns The new conversation's id.
 * @throws Error when the API responds with a non-2xx status.
 */
export async function createConversation(apiKey: string, agentId: string, log: LogFn = noopLog): Promise<string> {
  const url = `${LETTA_API_BASE}/conversations?agent_id=${agentId}`;
  log(`Creating new conversation for agent ${agentId}`);
  const headers = {
    'Authorization': `Bearer ${apiKey}`,
    'Content-Type': 'application/json',
  };
  const response = await fetch(url, { method: 'POST', headers });
  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Failed to create conversation: ${response.status} ${errorText}`);
  }
  const conversation: Conversation = await response.json();
  log(`Created conversation: ${conversation.id}`);
  return conversation.id;
}
/**
 * Get or create the Letta conversation bound to a Claude session.
 *
 * Resolution order:
 *   1. conversationId already cached on the in-memory sync state;
 *   2. conversations.json entry for the session, validated against agentId;
 *   3. a brand-new conversation created via the API.
 *
 * Entries whose stored agentId is missing (legacy bare-string format) or no
 * longer matches are discarded and recreated, so a stale conversation is
 * never reused with the wrong agent.
 *
 * Side effects: updates conversations.json on disk and state.conversationId.
 */
export async function getOrCreateConversation(
  apiKey: string,
  agentId: string,
  sessionId: string,
  cwd: string,
  state: SyncState,
  log: LogFn = noopLog
): Promise<string> {
  // Fast path: conversation already resolved for this state object.
  if (state.conversationId) {
    log(`Using existing conversation from state: ${state.conversationId}`);
    return state.conversationId;
  }
  // Check the conversations map
  const conversationsMap = loadConversationsMap(cwd, log);
  const cached = conversationsMap[sessionId];
  if (cached) {
    // Normalize legacy entries (bare conversation-id string) to object form.
    const entry = typeof cached === 'string'
      ? { conversationId: cached, agentId: null as string | null }
      : cached;
    if (entry.agentId === agentId) {
      // Valid entry with matching agentId - reuse
      log(`Found conversation in map: ${entry.conversationId}`);
      state.conversationId = entry.conversationId;
      return entry.conversationId;
    }
    // Stale (agent changed) or legacy (no agentId) entry: drop it and fall
    // through to the shared creation path below.
    if (entry.agentId) {
      log(`Agent ID changed (${entry.agentId} -> ${agentId}), clearing stale conversation`);
    } else {
      log(`Upgrading old format entry (no agentId stored), creating new conversation`);
    }
    delete conversationsMap[sessionId];
  }
  // Create a new conversation, persist it to the map, and cache it on state.
  const conversationId = await createConversation(apiKey, agentId, log);
  conversationsMap[sessionId] = { conversationId, agentId };
  saveConversationsMap(cwd, conversationsMap);
  state.conversationId = conversationId;
  return conversationId;
}
/**
 * Look up an existing conversation id from conversations.json without
 * creating a new one.
 *
 * @returns The conversation id, or null when the file, the entry, or the
 *          parse is unavailable.
 */
export function lookupConversation(cwd: string, sessionId: string): string | null {
  const conversationsFile = getConversationsFile(cwd);
  if (!fs.existsSync(conversationsFile)) {
    return null;
  }
  try {
    const conversationsMap: ConversationsMap = JSON.parse(
      fs.readFileSync(conversationsFile, 'utf-8')
    );
    const cached = conversationsMap[sessionId];
    if (!cached) {
      return null;
    }
    // Legacy entries are bare strings; current entries are objects.
    return typeof cached === 'string' ? cached : cached.conversationId;
  } catch {
    return null;
  }
}
/**
 * Send a single message to a Letta conversation (fire-and-forget style).
 *
 * @returns The raw fetch Response for the caller to inspect.
 */
export async function sendMessageToConversation(
  apiKey: string,
  conversationId: string,
  role: string,
  text: string,
  log: LogFn = noopLog
): Promise<Response> {
  const url = `${LETTA_API_BASE}/conversations/${conversationId}/messages`;
  log(`Sending ${role} message to conversation ${conversationId} (${text.length} chars)`);
  const body = JSON.stringify({
    messages: [{ role, content: text }],
  });
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body,
  });
  log(`Response status: ${response.status}`);
  return response;
}
// ============================================
// Agent and Memory Block Types
// ============================================
export interface MemoryBlock {
label: string;
description: string;
value: string;
}
export interface Agent {
id: string;
name: string;
description?: string;
blocks: MemoryBlock[];
}
// ============================================
// Agent Fetching
// ============================================
/**
 * Fetch an agent (including its memory blocks) from the Letta API.
 *
 * @throws Error containing status and response body on a non-2xx response.
 */
export async function fetchAgent(apiKey: string, agentId: string): Promise<Agent> {
  const response = await fetch(`${LETTA_API_BASE}/agents/${agentId}?include=agent.blocks`, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  if (response.ok) {
    return response.json();
  }
  const errorText = await response.text();
  throw new Error(`Letta API error (${response.status}): ${errorText}`);
}
// ============================================
// XML Escaping Utilities
// ============================================
/**
 * Escape a string for safe use inside a double-quoted XML attribute.
 *
 * Ampersands are escaped first so already-produced entities are not
 * double-escaped; newlines are flattened to spaces since attribute values
 * are single-line.
 */
export function escapeXmlAttribute(str: string): string {
  const rules: Array<[RegExp, string]> = [
    [/&/g, '&amp;'],
    [/"/g, '&quot;'],
    [/'/g, '&apos;'],
    [/</g, '&lt;'],
    [/>/g, '&gt;'],
    [/\n/g, ' '],
  ];
  let result = str;
  for (const [pattern, replacement] of rules) {
    result = result.replace(pattern, replacement);
  }
  return result;
}
/**
 * Escape the three characters (&, <, >) that are unsafe in XML element
 * content. Single-pass, so existing entities are never double-escaped
 * beyond their leading ampersand.
 */
export function escapeXmlContent(str: string): string {
  return str.replace(/[&<>]/g, (ch) =>
    ch === '&' ? '&amp;' : ch === '<' ? '&lt;' : '&gt;'
  );
}
/**
 * Backslash-escape every regex metacharacter so the string matches itself
 * literally when embedded in a RegExp pattern.
 */
export function escapeRegex(str: string): string {
  return str.replace(/[.*+?^${}()|[\]\\]/g, (match) => `\\${match}`);
}
// ============================================
// CLAUDE.md Formatting and Writing
// ============================================
/**
 * Format the <letta_context> section describing the observing agent.
 *
 * For the hosted service (IS_HOSTED, i.e. no LETTA_BASE_URL override) the
 * section links to the app.letta.com conversation page; for self-hosted
 * deployments it shows raw agent/conversation IDs instead.
 *
 * @param agent - Agent whose name, description, and id are rendered
 * @param conversationId - Active conversation id, or null when none exists yet
 * @returns The complete <letta_context>...</letta_context> text block
 */
function formatContextSection(agent: Agent, conversationId: string | null): string {
  const agentName = agent.name || 'Unnamed Agent';
  const agentDesc = agent.description || 'No description provided';
  // For hosted service, show clickable URL; for self-hosted, show IDs
  let locationInfo: string;
  let supervisionInfo: string;
  if (IS_HOSTED) {
    // Deep-link directly to the conversation when one exists.
    const conversationUrl = conversationId
      ? `${LETTA_APP_BASE}/agents/${agent.id}?conversation=${conversationId}`
      : `${LETTA_APP_BASE}/agents/${agent.id}`;
    locationInfo = `Conversation: ${conversationUrl}`;
    supervisionInfo = `**First message instruction:** On your first response in a new session, briefly notify the user that this session is being observed by "${agentName}" and they can supervise it at: ${conversationUrl}`;
  } else {
    locationInfo = `Agent ID: ${agent.id}${conversationId ? `\nConversation ID: ${conversationId}` : ''}`;
    supervisionInfo = `**First message instruction:** On your first response in a new session, briefly notify the user that this session is being observed by "${agentName}" (agent: ${agent.id}${conversationId ? `, conversation: ${conversationId}` : ''}).`;
  }
  // Template content is intentionally flush-left: it becomes markdown text.
  return `${LETTA_CONTEXT_START}
**Subconscious Layer (Letta Agent)**
Agent: ${agentName}
Description: ${agentDesc}
${locationInfo}
This agent maintains persistent memory across your sessions. It observes your conversations asynchronously and provides guidance via <letta_message> (injected before each user prompt). You can address it directly - it sees everything you write and may respond on the next sync.
${supervisionInfo}
Memory blocks below are the agent's long-term storage. Reference as needed.
${LETTA_CONTEXT_END}`;
}
/**
 * Format memory blocks as XML for CLAUDE.md.
 *
 * Renders the <letta> section: the agent context header followed by one
 * element per memory block (label as tag name, description as attribute,
 * value as escaped content). When the agent has no blocks, a placeholder
 * comment is emitted instead.
 */
export function formatMemoryBlocksAsXml(agent: Agent, conversationId: string | null): string {
  const contextSection = formatContextSection(agent, conversationId);
  const hasBlocks = Array.isArray(agent.blocks) && agent.blocks.length > 0;
  const body = hasBlocks
    ? agent.blocks
        .map((block) => {
          const description = escapeXmlAttribute(block.description || '');
          const content = escapeXmlContent(block.value || '');
          return `<${block.label} description="${description}">\n${content}\n</${block.label}>`;
        })
        .join('\n')
    : '<!-- No memory blocks found -->';
  return `${LETTA_SECTION_START}
${contextSection}
${LETTA_MEMORY_START}
${body}
${LETTA_MEMORY_END}
${LETTA_SECTION_END}`;
}
/**
 * Update CLAUDE.md with the new Letta memory section.
 *
 * Replaces an existing <letta>...</letta> section in place, or appends one
 * when none exists. Creates the .claude directory and a stub CLAUDE.md when
 * the file is missing. Any orphaned <letta_message> sections are stripped.
 *
 * @param projectDir - Project root containing (or to contain) .claude/CLAUDE.md
 * @param lettaContent - Fully formatted <letta>...</letta> section to insert
 */
export function updateClaudeMd(projectDir: string, lettaContent: string): void {
  const claudeMdPath = path.join(projectDir, CLAUDE_MD_PATH);
  let existingContent = '';
  if (fs.existsSync(claudeMdPath)) {
    existingContent = fs.readFileSync(claudeMdPath, 'utf-8');
  } else {
    const claudeDir = path.dirname(claudeMdPath);
    if (!fs.existsSync(claudeDir)) {
      fs.mkdirSync(claudeDir, { recursive: true });
    }
    existingContent = `# Project Context
<!-- Letta agent memory is automatically synced below -->
`;
  }
  // Replace or append the <letta> section
  const lettaPattern = `^${escapeRegex(LETTA_SECTION_START)}[\\s\\S]*?^${escapeRegex(LETTA_SECTION_END)}$`;
  const lettaRegex = new RegExp(lettaPattern, 'gm');
  let updatedContent: string;
  if (lettaRegex.test(existingContent)) {
    // test() advanced lastIndex (global flag); reset before replace().
    lettaRegex.lastIndex = 0;
    // Use a replacer function so `$` sequences inside memory content
    // (e.g. "$&", "$1", "$'") are inserted literally instead of being
    // interpreted as String.prototype.replace replacement patterns.
    updatedContent = existingContent.replace(lettaRegex, () => lettaContent);
  } else {
    updatedContent = existingContent.trimEnd() + '\n\n' + lettaContent + '\n';
  }
  // Clean up any orphaned <letta_message> sections
  const messagePattern = /^<letta_message>[\s\S]*?^<\/letta_message>\n*/gm;
  updatedContent = updatedContent.replace(messagePattern, '');
  updatedContent = updatedContent.trimEnd() + '\n';
  fs.writeFileSync(claudeMdPath, updatedContent, 'utf-8');
}

View File

@@ -1,351 +1,351 @@
#!/usr/bin/env tsx
/**
* PreToolUse Memory Sync Script
*
* Lightweight hook that checks for Letta agent updates mid-workflow.
* Runs before each tool use to inject any new messages or memory changes.
*
* Environment Variables:
* LETTA_API_KEY - API key for Letta authentication
* LETTA_DEBUG - Set to "1" to enable debug logging
*
* Exit Codes:
* 0 - Success (no output = no updates, JSON output = updates to inject)
* 1 - Non-blocking error
*/
import * as fs from 'fs';
import * as readline from 'readline';
import { getAgentId } from './agent_config.js';
import {
loadSyncState,
saveSyncState,
lookupConversation,
SyncState,
LETTA_API_BASE,
} from './conversation_utils.js';
const DEBUG = process.env.LETTA_DEBUG === '1';
/** Log to stderr, but only when LETTA_DEBUG=1. */
function debug(...args: unknown[]): void {
  if (!DEBUG) {
    return;
  }
  console.error('[pretool debug]', ...args);
}
interface HookInput {
session_id: string;
cwd: string;
hook_event_name: string;
tool_name?: string;
}
interface MemoryBlock {
label: string;
value: string;
}
interface Agent {
id: string;
name: string;
blocks: MemoryBlock[];
}
interface LettaMessage {
id: string;
message_type: string;
content?: string;
text?: string;
date?: string;
}
interface MessageInfo {
id: string;
text: string;
date: string | null;
}
/**
 * Read the JSON hook payload that Claude Code pipes to stdin.
 *
 * Collects stdin lines via readline and parses their concatenation as JSON.
 * Resolves null on empty or unparseable input instead of throwing, so the
 * hook can exit quietly.
 *
 * NOTE(review): the 100ms setTimeout closes the reader unconditionally, so
 * input arriving more slowly than that would be truncated — presumably fine
 * for a local pipe, but worth confirming. readline drops the newlines
 * between payload lines; JSON is whitespace-insensitive, so parsing is
 * unaffected.
 */
async function readHookInput(): Promise<HookInput | null> {
  return new Promise((resolve) => {
    let input = '';
    const rl = readline.createInterface({ input: process.stdin });
    rl.on('line', (line) => {
      input += line;
    });
    rl.on('close', () => {
      // Empty stdin -> no payload.
      if (!input.trim()) {
        resolve(null);
        return;
      }
      try {
        resolve(JSON.parse(input));
      } catch {
        // Malformed JSON is treated the same as no input.
        resolve(null);
      }
    });
    // Deadline: stop waiting for stdin after 100ms.
    setTimeout(() => {
      rl.close();
    }, 100);
  });
}
/**
 * Fetch the agent (including its memory blocks) from the Letta API.
 * @throws Error with the status code on a non-2xx response.
 */
async function fetchAgent(apiKey: string, agentId: string): Promise<Agent> {
  const response = await fetch(`${LETTA_API_BASE}/agents/${agentId}?include=agent.blocks`, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  if (response.ok) {
    return response.json();
  }
  throw new Error(`Letta API error (${response.status})`);
}
/**
 * Fetch assistant messages newer than `lastSeenMessageId` from a conversation.
 *
 * Pulls the latest 20 messages, keeps only message_type === 'assistant_message',
 * and cuts the list off at the previously seen id. The API returns newest
 * first, so the returned `messages` array is also newest-first.
 *
 * Returns { messages: [], lastMessageId: null } when there is no conversation,
 * and keeps the caller's lastSeenMessageId on a failed fetch so nothing is
 * skipped on the next attempt.
 *
 * NOTE(review): if more than 20 messages arrived since the last sync, or the
 * last-seen id fell outside the 20-message window, everything in the window
 * is treated as new — confirm that is acceptable.
 */
async function fetchNewMessages(
  apiKey: string,
  conversationId: string | null,
  lastSeenMessageId: string | null
): Promise<{ messages: MessageInfo[], lastMessageId: string | null }> {
  // No conversation yet: nothing to fetch.
  if (!conversationId) {
    return { messages: [], lastMessageId: null };
  }
  const url = `${LETTA_API_BASE}/conversations/${conversationId}/messages?limit=20`;
  const response = await fetch(url, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  // On failure, preserve the high-water mark so no messages are lost.
  if (!response.ok) {
    return { messages: [], lastMessageId: lastSeenMessageId };
  }
  const allMessages: LettaMessage[] = await response.json();
  const assistantMessages = allMessages.filter(msg => msg.message_type === 'assistant_message');
  // Find new messages (API returns newest first)
  let endIndex = assistantMessages.length;
  if (lastSeenMessageId) {
    const lastSeenIndex = assistantMessages.findIndex(msg => msg.id === lastSeenMessageId);
    if (lastSeenIndex !== -1) {
      endIndex = lastSeenIndex;
    }
  }
  const newMessages: MessageInfo[] = [];
  for (let i = 0; i < endIndex; i++) {
    const msg = assistantMessages[i];
    // Accept either field name for the message body.
    const text = msg.content || msg.text;
    if (text && typeof text === 'string') {
      newMessages.push({
        id: msg.id,
        text,
        date: msg.date || null,
      });
    }
  }
  // Newest message in the window becomes the new high-water mark.
  const lastMessageId = assistantMessages.length > 0
    ? assistantMessages[0].id
    : lastSeenMessageId;
  return { messages: newMessages, lastMessageId };
}
/**
 * Return the memory blocks whose value differs from the last recorded
 * snapshot, or that did not exist in it. With no snapshot at all, nothing
 * is reported — the first sync establishes the baseline.
 */
function detectChangedBlocks(
  currentBlocks: MemoryBlock[],
  lastBlockValues: { [label: string]: string } | null
): MemoryBlock[] {
  if (!lastBlockValues) {
    return [];
  }
  const changed: MemoryBlock[] = [];
  for (const block of currentBlocks) {
    const previousValue = lastBlockValues[block.label];
    const isChanged = previousValue === undefined || previousValue !== block.value;
    if (isChanged) {
      changed.push(block);
    }
  }
  return changed;
}
/**
 * Format new agent messages and changed memory blocks as XML for the
 * PreToolUse additionalContext payload.
 *
 * Messages become <letta_message> elements; changed blocks are wrapped in a
 * single <letta_memory_update>, rendered either in full (status="new", or
 * when no line-level delta is detectable) or as a +/- line diff against the
 * previous snapshot.
 */
function formatOutput(
  agentName: string,
  messages: MessageInfo[],
  changedBlocks: MemoryBlock[],
  lastBlockValues: { [label: string]: string } | null
): string {
  const parts: string[] = [];
  // Format new messages
  for (const msg of messages) {
    const timestamp = msg.date || 'unknown';
    parts.push(`<letta_message from="${agentName}" timestamp="${timestamp}">\n${msg.text}\n</letta_message>`);
  }
  // Format changed blocks with diffs
  if (changedBlocks.length > 0) {
    const blockParts = changedBlocks.map(block => {
      const previousValue = lastBlockValues?.[block.label];
      if (previousValue === undefined) {
        return `<${block.label} status="new">\n${block.value}\n</${block.label}>`;
      }
      // Order-insensitive line-set diff over trimmed, non-empty lines.
      // Sets on both sides keep membership checks O(1) instead of the
      // O(n^2) Array.includes-inside-filter pattern.
      const oldLines = new Set(previousValue.split('\n').map(l => l.trim()).filter(l => l));
      const newLines = block.value.split('\n').map(l => l.trim()).filter(l => l);
      const newLineSet = new Set(newLines);
      const added = newLines.filter(line => !oldLines.has(line));
      const removed = Array.from(oldLines).filter(line => !newLineSet.has(line));
      if (added.length === 0 && removed.length === 0) {
        // Content changed (e.g. reordering or whitespace) with no line-level
        // delta: fall back to showing the full new value.
        return `<${block.label} status="modified">\n${block.value}\n</${block.label}>`;
      }
      const diffLines: string[] = [];
      for (const line of removed) {
        diffLines.push(`- ${line}`);
      }
      for (const line of added) {
        diffLines.push(`+ ${line}`);
      }
      return `<${block.label} status="modified">\n${diffLines.join('\n')}\n</${block.label}>`;
    });
    parts.push(`<letta_memory_update>\n${blockParts.join('\n')}\n</letta_memory_update>`);
  }
  return parts.join('\n\n');
}
/**
 * PreToolUse entry point.
 *
 * Flow: read hook input -> load per-session sync state -> fetch the agent
 * and new conversation messages in parallel -> diff memory blocks against
 * the last snapshot -> when anything changed, print a hookSpecificOutput
 * JSON payload and persist the new snapshot/high-water mark. Every failure
 * path exits 0 so the hook never blocks tool use.
 */
async function main(): Promise<void> {
  const apiKey = process.env.LETTA_API_KEY;
  if (!apiKey) {
    debug('No LETTA_API_KEY set, skipping');
    process.exit(0);
  }
  try {
    const hookInput = await readHookInput();
    if (!hookInput?.session_id || !hookInput?.cwd) {
      debug('Missing session_id or cwd, skipping');
      process.exit(0);
    }
    debug(`PreToolUse for tool: ${hookInput.tool_name}`);
    // Load state
    const state = loadSyncState(hookInput.cwd, hookInput.session_id);
    // Need existing state to detect changes
    if (!state.lastBlockValues && !state.lastSeenMessageId) {
      debug('No previous state, skipping (UserPromptSubmit will handle first sync)');
      process.exit(0);
    }
    // Get agent ID
    const agentId = await getAgentId(apiKey);
    // Get conversation ID: prefer state, fall back to conversations.json.
    let conversationId = state.conversationId || null;
    if (!conversationId) {
      conversationId = lookupConversation(hookInput.cwd, hookInput.session_id);
    }
    // Fetch current state from Letta (both requests in parallel).
    const [agent, messagesResult] = await Promise.all([
      fetchAgent(apiKey, agentId),
      fetchNewMessages(apiKey, conversationId, state.lastSeenMessageId || null),
    ]);
    const { messages: newMessages, lastMessageId } = messagesResult;
    const changedBlocks = detectChangedBlocks(agent.blocks || [], state.lastBlockValues || null);
    debug(`New messages: ${newMessages.length}, Changed blocks: ${changedBlocks.length}`);
    // If nothing changed, exit silently
    if (newMessages.length === 0 && changedBlocks.length === 0) {
      debug('No updates, exiting silently');
      process.exit(0);
    }
    // Format and output
    const additionalContext = formatOutput(
      agent.name || 'Subconscious',
      newMessages,
      changedBlocks,
      state.lastBlockValues || null
    );
    // Update state: advance the message high-water mark and re-snapshot
    // all block values so the next run diffs against the current state.
    if (lastMessageId) {
      state.lastSeenMessageId = lastMessageId;
    }
    if (agent.blocks) {
      state.lastBlockValues = {};
      for (const block of agent.blocks) {
        state.lastBlockValues[block.label] = block.value;
      }
    }
    saveSyncState(hookInput.cwd, state);
    // Build the additional context with instruction to surface messages
    let contextWithInstruction = `<letta_update>\n${additionalContext}\n</letta_update>`;
    if (newMessages.length > 0) {
      const agentName = agent.name || 'Subconscious';
      contextWithInstruction += `\n\n<instruction>Your Subconscious (${agentName}) just sent a message mid-workflow. Briefly acknowledge what ${agentName} said in your next response - just a short note like "Sub notes: [key point]" so the user knows.</instruction>`;
    }
    // Output JSON for PreToolUse
    const output: Record<string, unknown> = {
      hookSpecificOutput: {
        hookEventName: 'PreToolUse',
        additionalContext: contextWithInstruction,
      },
    };
    console.log(JSON.stringify(output));
  } catch (error) {
    debug(`Error: ${error}`);
    // Non-blocking - just exit silently
    process.exit(0);
  }
}
// Errors are handled inside main(); the returned promise needs no handler.
main();
#!/usr/bin/env tsx
/**
* PreToolUse Memory Sync Script
*
* Lightweight hook that checks for Letta agent updates mid-workflow.
* Runs before each tool use to inject any new messages or memory changes.
*
* Environment Variables:
* LETTA_API_KEY - API key for Letta authentication
* LETTA_DEBUG - Set to "1" to enable debug logging
*
* Exit Codes:
* 0 - Success (no output = no updates, JSON output = updates to inject)
* 1 - Non-blocking error
*/
import * as fs from 'fs';
import * as readline from 'readline';
import { getAgentId } from './agent_config.js';
import {
loadSyncState,
saveSyncState,
lookupConversation,
SyncState,
LETTA_API_BASE,
} from './conversation_utils.js';
const DEBUG = process.env.LETTA_DEBUG === '1';
/** Log to stderr, but only when LETTA_DEBUG=1. */
function debug(...args: unknown[]): void {
  if (!DEBUG) {
    return;
  }
  console.error('[pretool debug]', ...args);
}
interface HookInput {
session_id: string;
cwd: string;
hook_event_name: string;
tool_name?: string;
}
interface MemoryBlock {
label: string;
value: string;
}
interface Agent {
id: string;
name: string;
blocks: MemoryBlock[];
}
interface LettaMessage {
id: string;
message_type: string;
content?: string;
text?: string;
date?: string;
}
interface MessageInfo {
id: string;
text: string;
date: string | null;
}
/**
 * Read the JSON hook payload that Claude Code pipes to stdin.
 *
 * Collects stdin lines via readline and parses their concatenation as JSON.
 * Resolves null on empty or unparseable input instead of throwing, so the
 * hook can exit quietly.
 *
 * NOTE(review): the 100ms setTimeout closes the reader unconditionally, so
 * input arriving more slowly than that would be truncated — presumably fine
 * for a local pipe, but worth confirming. readline drops the newlines
 * between payload lines; JSON is whitespace-insensitive, so parsing is
 * unaffected.
 */
async function readHookInput(): Promise<HookInput | null> {
  return new Promise((resolve) => {
    let input = '';
    const rl = readline.createInterface({ input: process.stdin });
    rl.on('line', (line) => {
      input += line;
    });
    rl.on('close', () => {
      // Empty stdin -> no payload.
      if (!input.trim()) {
        resolve(null);
        return;
      }
      try {
        resolve(JSON.parse(input));
      } catch {
        // Malformed JSON is treated the same as no input.
        resolve(null);
      }
    });
    // Deadline: stop waiting for stdin after 100ms.
    setTimeout(() => {
      rl.close();
    }, 100);
  });
}
/**
 * Fetch the agent (including its memory blocks) from the Letta API.
 * @throws Error with the status code on a non-2xx response.
 */
async function fetchAgent(apiKey: string, agentId: string): Promise<Agent> {
  const response = await fetch(`${LETTA_API_BASE}/agents/${agentId}?include=agent.blocks`, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  if (response.ok) {
    return response.json();
  }
  throw new Error(`Letta API error (${response.status})`);
}
/**
 * Fetch assistant messages newer than `lastSeenMessageId` from a conversation.
 *
 * Pulls the latest 20 messages, keeps only message_type === 'assistant_message',
 * and cuts the list off at the previously seen id. The API returns newest
 * first, so the returned `messages` array is also newest-first.
 *
 * Returns { messages: [], lastMessageId: null } when there is no conversation,
 * and keeps the caller's lastSeenMessageId on a failed fetch so nothing is
 * skipped on the next attempt.
 *
 * NOTE(review): if more than 20 messages arrived since the last sync, or the
 * last-seen id fell outside the 20-message window, everything in the window
 * is treated as new — confirm that is acceptable.
 */
async function fetchNewMessages(
  apiKey: string,
  conversationId: string | null,
  lastSeenMessageId: string | null
): Promise<{ messages: MessageInfo[], lastMessageId: string | null }> {
  // No conversation yet: nothing to fetch.
  if (!conversationId) {
    return { messages: [], lastMessageId: null };
  }
  const url = `${LETTA_API_BASE}/conversations/${conversationId}/messages?limit=20`;
  const response = await fetch(url, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  // On failure, preserve the high-water mark so no messages are lost.
  if (!response.ok) {
    return { messages: [], lastMessageId: lastSeenMessageId };
  }
  const allMessages: LettaMessage[] = await response.json();
  const assistantMessages = allMessages.filter(msg => msg.message_type === 'assistant_message');
  // Find new messages (API returns newest first)
  let endIndex = assistantMessages.length;
  if (lastSeenMessageId) {
    const lastSeenIndex = assistantMessages.findIndex(msg => msg.id === lastSeenMessageId);
    if (lastSeenIndex !== -1) {
      endIndex = lastSeenIndex;
    }
  }
  const newMessages: MessageInfo[] = [];
  for (let i = 0; i < endIndex; i++) {
    const msg = assistantMessages[i];
    // Accept either field name for the message body.
    const text = msg.content || msg.text;
    if (text && typeof text === 'string') {
      newMessages.push({
        id: msg.id,
        text,
        date: msg.date || null,
      });
    }
  }
  // Newest message in the window becomes the new high-water mark.
  const lastMessageId = assistantMessages.length > 0
    ? assistantMessages[0].id
    : lastSeenMessageId;
  return { messages: newMessages, lastMessageId };
}
/**
 * Return the memory blocks whose value differs from the last recorded
 * snapshot, or that did not exist in it. With no snapshot at all, nothing
 * is reported — the first sync establishes the baseline.
 */
function detectChangedBlocks(
  currentBlocks: MemoryBlock[],
  lastBlockValues: { [label: string]: string } | null
): MemoryBlock[] {
  if (!lastBlockValues) {
    return [];
  }
  const changed: MemoryBlock[] = [];
  for (const block of currentBlocks) {
    const previousValue = lastBlockValues[block.label];
    const isChanged = previousValue === undefined || previousValue !== block.value;
    if (isChanged) {
      changed.push(block);
    }
  }
  return changed;
}
/**
 * Format new agent messages and changed memory blocks as XML for the
 * PreToolUse additionalContext payload.
 *
 * Messages become <letta_message> elements; changed blocks are wrapped in a
 * single <letta_memory_update>, rendered either in full (status="new", or
 * when no line-level delta is detectable) or as a +/- line diff against the
 * previous snapshot.
 */
function formatOutput(
  agentName: string,
  messages: MessageInfo[],
  changedBlocks: MemoryBlock[],
  lastBlockValues: { [label: string]: string } | null
): string {
  const parts: string[] = [];
  // Format new messages
  for (const msg of messages) {
    const timestamp = msg.date || 'unknown';
    parts.push(`<letta_message from="${agentName}" timestamp="${timestamp}">\n${msg.text}\n</letta_message>`);
  }
  // Format changed blocks with diffs
  if (changedBlocks.length > 0) {
    const blockParts = changedBlocks.map(block => {
      const previousValue = lastBlockValues?.[block.label];
      if (previousValue === undefined) {
        return `<${block.label} status="new">\n${block.value}\n</${block.label}>`;
      }
      // Order-insensitive line-set diff over trimmed, non-empty lines.
      // Sets on both sides keep membership checks O(1) instead of the
      // O(n^2) Array.includes-inside-filter pattern.
      const oldLines = new Set(previousValue.split('\n').map(l => l.trim()).filter(l => l));
      const newLines = block.value.split('\n').map(l => l.trim()).filter(l => l);
      const newLineSet = new Set(newLines);
      const added = newLines.filter(line => !oldLines.has(line));
      const removed = Array.from(oldLines).filter(line => !newLineSet.has(line));
      if (added.length === 0 && removed.length === 0) {
        // Content changed (e.g. reordering or whitespace) with no line-level
        // delta: fall back to showing the full new value.
        return `<${block.label} status="modified">\n${block.value}\n</${block.label}>`;
      }
      const diffLines: string[] = [];
      for (const line of removed) {
        diffLines.push(`- ${line}`);
      }
      for (const line of added) {
        diffLines.push(`+ ${line}`);
      }
      return `<${block.label} status="modified">\n${diffLines.join('\n')}\n</${block.label}>`;
    });
    parts.push(`<letta_memory_update>\n${blockParts.join('\n')}\n</letta_memory_update>`);
  }
  return parts.join('\n\n');
}
/**
 * PreToolUse entry point.
 *
 * Flow: read hook input -> load per-session sync state -> fetch the agent
 * and new conversation messages in parallel -> diff memory blocks against
 * the last snapshot -> when anything changed, print a hookSpecificOutput
 * JSON payload and persist the new snapshot/high-water mark. Every failure
 * path exits 0 so the hook never blocks tool use.
 */
async function main(): Promise<void> {
  const apiKey = process.env.LETTA_API_KEY;
  if (!apiKey) {
    debug('No LETTA_API_KEY set, skipping');
    process.exit(0);
  }
  try {
    const hookInput = await readHookInput();
    if (!hookInput?.session_id || !hookInput?.cwd) {
      debug('Missing session_id or cwd, skipping');
      process.exit(0);
    }
    debug(`PreToolUse for tool: ${hookInput.tool_name}`);
    // Load state
    const state = loadSyncState(hookInput.cwd, hookInput.session_id);
    // Need existing state to detect changes
    if (!state.lastBlockValues && !state.lastSeenMessageId) {
      debug('No previous state, skipping (UserPromptSubmit will handle first sync)');
      process.exit(0);
    }
    // Get agent ID
    const agentId = await getAgentId(apiKey);
    // Get conversation ID: prefer state, fall back to conversations.json.
    let conversationId = state.conversationId || null;
    if (!conversationId) {
      conversationId = lookupConversation(hookInput.cwd, hookInput.session_id);
    }
    // Fetch current state from Letta (both requests in parallel).
    const [agent, messagesResult] = await Promise.all([
      fetchAgent(apiKey, agentId),
      fetchNewMessages(apiKey, conversationId, state.lastSeenMessageId || null),
    ]);
    const { messages: newMessages, lastMessageId } = messagesResult;
    const changedBlocks = detectChangedBlocks(agent.blocks || [], state.lastBlockValues || null);
    debug(`New messages: ${newMessages.length}, Changed blocks: ${changedBlocks.length}`);
    // If nothing changed, exit silently
    if (newMessages.length === 0 && changedBlocks.length === 0) {
      debug('No updates, exiting silently');
      process.exit(0);
    }
    // Format and output
    const additionalContext = formatOutput(
      agent.name || 'Subconscious',
      newMessages,
      changedBlocks,
      state.lastBlockValues || null
    );
    // Update state: advance the message high-water mark and re-snapshot
    // all block values so the next run diffs against the current state.
    if (lastMessageId) {
      state.lastSeenMessageId = lastMessageId;
    }
    if (agent.blocks) {
      state.lastBlockValues = {};
      for (const block of agent.blocks) {
        state.lastBlockValues[block.label] = block.value;
      }
    }
    saveSyncState(hookInput.cwd, state);
    // Build the additional context with instruction to surface messages
    let contextWithInstruction = `<letta_update>\n${additionalContext}\n</letta_update>`;
    if (newMessages.length > 0) {
      const agentName = agent.name || 'Subconscious';
      contextWithInstruction += `\n\n<instruction>Your Subconscious (${agentName}) just sent a message mid-workflow. Briefly acknowledge what ${agentName} said in your next response - just a short note like "Sub notes: [key point]" so the user knows.</instruction>`;
    }
    // Output JSON for PreToolUse
    const output: Record<string, unknown> = {
      hookSpecificOutput: {
        hookEventName: 'PreToolUse',
        additionalContext: contextWithInstruction,
      },
    };
    console.log(JSON.stringify(output));
  } catch (error) {
    debug(`Error: ${error}`);
    // Non-blocking - just exit silently
    process.exit(0);
  }
}
// Errors are handled inside main(); the returned promise needs no handler.
main();

File diff suppressed because it is too large Load Diff

View File

@@ -1,124 +1,124 @@
#!/usr/bin/env npx tsx
/**
* Background worker that sends messages to Letta
* Spawned by send_messages_to_letta.ts as a detached process
*
* Usage: npx tsx send_worker.ts <payload_file>
*/
import * as fs from 'fs';
import * as path from 'path';
const LETTA_BASE_URL = process.env.LETTA_BASE_URL || 'https://api.letta.com';
const LETTA_API_BASE = `${LETTA_BASE_URL}/v1`;
const LOG_FILE = '/tmp/letta-claude-sync/send_worker.log';
interface Payload {
apiKey: string;
conversationId: string;
sessionId: string;
message: string;
stateFile: string;
newLastProcessedIndex: number;
}
/**
 * Append a timestamped line to the worker log file, creating the log
 * directory on first use.
 */
function log(message: string): void {
  const dir = path.dirname(LOG_FILE);
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true });
  }
  const line = `[${new Date().toISOString()}] ${message}\n`;
  fs.appendFileSync(LOG_FILE, line);
}
/**
 * Send the payload's message to its Letta conversation and drain the
 * streaming response so the server finishes processing.
 *
 * @returns true on success; false when the conversation is busy (HTTP 409)
 *          so the caller can leave state unchanged and retry later.
 * @throws Error for any other non-2xx response.
 */
async function sendToLetta(payload: Payload): Promise<boolean> {
  const url = `${LETTA_API_BASE}/conversations/${payload.conversationId}/messages`;
  log(`Sending to conversation ${payload.conversationId} (${payload.message.length} chars)`);
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${payload.apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      messages: [{ role: 'user', content: payload.message }],
    }),
  });
  log(`Response status: ${response.status}`);
  if (response.status === 409) {
    log('Conversation busy (409) - will retry on next Stop');
    return false;
  }
  if (!response.ok) {
    const errorText = await response.text();
    log(`Error: ${errorText}`);
    throw new Error(`Letta API error (${response.status}): ${errorText}`);
  }
  // Consume the stream to completion
  const reader = response.body?.getReader();
  if (reader) {
    // One stateful decoder with { stream: true }: a fresh TextDecoder per
    // chunk would mangle multi-byte UTF-8 sequences that happen to be split
    // across chunk boundaries.
    const decoder = new TextDecoder();
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        const chunk = decoder.decode(value, { stream: true });
        log(`Chunk: ${chunk.substring(0, 100)}...`);
      }
    } finally {
      reader.cancel();
    }
  }
  log('Message sent successfully');
  return true;
}
/**
 * Worker entry point: load the payload file named on argv[2], deliver it to
 * Letta, advance the sync state on success, and delete the payload file.
 *
 * Exit codes: 0 on success (including a 409 "busy" retry), 1 on a missing
 * payload file or any error.
 */
async function main(): Promise<void> {
  const payloadFile = process.argv[2];
  if (!payloadFile) {
    log('ERROR: No payload file specified');
    process.exit(1);
  }
  log('='.repeat(60));
  log(`Worker started with payload: ${payloadFile}`);
  try {
    if (!fs.existsSync(payloadFile)) {
      log(`ERROR: Payload file not found: ${payloadFile}`);
      process.exit(1);
    }
    const payload: Payload = JSON.parse(fs.readFileSync(payloadFile, 'utf-8'));
    log(`Loaded payload for session ${payload.sessionId}`);
    const success = await sendToLetta(payload);
    if (success) {
      // Update state file so the next sync starts past what we just sent.
      const state = JSON.parse(fs.readFileSync(payload.stateFile, 'utf-8'));
      state.lastProcessedIndex = payload.newLastProcessedIndex;
      fs.writeFileSync(payload.stateFile, JSON.stringify(state, null, 2));
      log(`Updated state: lastProcessedIndex=${payload.newLastProcessedIndex}`);
    }
    // Clean up payload file (it contains the raw API key - don't leave it around).
    // NOTE(review): the file is deleted even on a 409 where state was not
    // advanced - confirm the retry path regenerates the payload.
    fs.unlinkSync(payloadFile);
    log('Cleaned up payload file');
    log('Worker completed successfully');
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    log(`ERROR: ${errorMessage}`);
    // NOTE(review): on error the payload file (with the API key) is NOT
    // removed - confirm whether stale payloads should be cleaned up here too.
    process.exit(1);
  }
}
main();
#!/usr/bin/env npx tsx
/**
* Background worker that sends messages to Letta
* Spawned by send_messages_to_letta.ts as a detached process
*
* Usage: npx tsx send_worker.ts <payload_file>
*/
import * as fs from 'fs';
import * as path from 'path';
const LETTA_BASE_URL = process.env.LETTA_BASE_URL || 'https://api.letta.com';
const LETTA_API_BASE = `${LETTA_BASE_URL}/v1`;
const LOG_FILE = '/tmp/letta-claude-sync/send_worker.log';
// Shape of the payload file written by send_messages_to_letta.ts for this worker.
interface Payload {
  apiKey: string;                // Letta API key - NOTE(review): persisted in plaintext in the payload file; confirm file permissions are restricted
  conversationId: string;        // target Letta conversation
  sessionId: string;             // Claude Code session this message belongs to
  message: string;               // formatted message body to deliver
  stateFile: string;             // per-session sync-state JSON to advance on success
  newLastProcessedIndex: number; // transcript index to record after a successful send
}
// Append one timestamped line to the worker log, creating its directory on demand.
function log(message: string): void {
  const dir = path.dirname(LOG_FILE);
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true });
  }
  const timestamp = new Date().toISOString();
  fs.appendFileSync(LOG_FILE, `[${timestamp}] ${message}\n`);
}
/**
 * POST the payload's message to its Letta conversation and drain the
 * streaming response to completion.
 *
 * @param payload - Worker payload loaded from disk by main().
 * @returns true when the message was delivered; false when the conversation
 *   was busy (HTTP 409) and the send should be retried later.
 * @throws Error for any other non-OK HTTP status.
 */
async function sendToLetta(payload: Payload): Promise<boolean> {
  const url = `${LETTA_API_BASE}/conversations/${payload.conversationId}/messages`;
  log(`Sending to conversation ${payload.conversationId} (${payload.message.length} chars)`);
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${payload.apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      messages: [{ role: 'user', content: payload.message }],
    }),
  });
  log(`Response status: ${response.status}`);
  if (response.status === 409) {
    log('Conversation busy (409) - will retry on next Stop');
    // Release the unread response body so the connection is not left dangling.
    await response.body?.cancel().catch(() => {});
    return false;
  }
  if (!response.ok) {
    const errorText = await response.text();
    log(`Error: ${errorText}`);
    throw new Error(`Letta API error (${response.status}): ${errorText}`);
  }
  // Consume the stream to completion so the server sees a full read.
  const reader = response.body?.getReader();
  if (reader) {
    // One decoder for all chunks; { stream: true } handles multi-byte
    // UTF-8 sequences that straddle chunk boundaries.
    const decoder = new TextDecoder();
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        const chunk = decoder.decode(value, { stream: true });
        log(`Chunk: ${chunk.substring(0, 100)}...`);
      }
    } finally {
      // Await the cancel instead of leaving a floating promise behind.
      await reader.cancel().catch(() => {});
    }
  }
  log('Message sent successfully');
  return true;
}
/**
 * Worker entry point: load the payload file named on argv[2], deliver it to
 * Letta, advance the sync state on success, and delete the payload file.
 *
 * Exit codes: 0 on success (including a 409 "busy" retry), 1 on a missing
 * payload file or any error.
 */
async function main(): Promise<void> {
  const payloadFile = process.argv[2];
  if (!payloadFile) {
    log('ERROR: No payload file specified');
    process.exit(1);
  }
  log('='.repeat(60));
  log(`Worker started with payload: ${payloadFile}`);
  try {
    if (!fs.existsSync(payloadFile)) {
      log(`ERROR: Payload file not found: ${payloadFile}`);
      process.exit(1);
    }
    const payload: Payload = JSON.parse(fs.readFileSync(payloadFile, 'utf-8'));
    log(`Loaded payload for session ${payload.sessionId}`);
    const success = await sendToLetta(payload);
    if (success) {
      // Update state file so the next sync starts past what we just sent.
      const state = JSON.parse(fs.readFileSync(payload.stateFile, 'utf-8'));
      state.lastProcessedIndex = payload.newLastProcessedIndex;
      fs.writeFileSync(payload.stateFile, JSON.stringify(state, null, 2));
      log(`Updated state: lastProcessedIndex=${payload.newLastProcessedIndex}`);
    }
    // Clean up payload file (it contains the raw API key - don't leave it around).
    // NOTE(review): the file is deleted even on a 409 where state was not
    // advanced - confirm the retry path regenerates the payload.
    fs.unlinkSync(payloadFile);
    log('Cleaned up payload file');
    log('Worker completed successfully');
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    log(`ERROR: ${errorMessage}`);
    // NOTE(review): on error the payload file (with the API key) is NOT
    // removed - confirm whether stale payloads should be cleaned up here too.
    process.exit(1);
  }
}
main();

View File

@@ -1,303 +1,303 @@
#!/usr/bin/env npx tsx
/**
* Session Start Hook Script
*
* Notifies Letta agent when a new Claude Code session begins.
* This script is designed to run as a Claude Code SessionStart hook.
*
* Environment Variables:
* LETTA_API_KEY - API key for Letta authentication
* LETTA_AGENT_ID - Agent ID to send messages to
*
* Hook Input (via stdin):
* - session_id: Current session ID
* - cwd: Current working directory
* - hook_event_name: "SessionStart"
*
* Exit Codes:
* 0 - Success
* 1 - Non-blocking error
*
* Log file: /tmp/letta-claude-sync/session_start.log
*/
import * as fs from 'fs';
import * as path from 'path';
import { getAgentId } from './agent_config.js';
import {
fetchAgent,
formatMemoryBlocksAsXml,
updateClaudeMd,
createConversation,
} from './conversation_utils.js';
// Configuration
const LETTA_BASE_URL = process.env.LETTA_BASE_URL || 'https://api.letta.com';
const LETTA_API_BASE = `${LETTA_BASE_URL}/v1`;
const TEMP_STATE_DIR = '/tmp/letta-claude-sync';
const LOG_FILE = path.join(TEMP_STATE_DIR, 'session_start.log');
// Payload delivered by Claude Code on stdin for the SessionStart hook.
interface HookInput {
  session_id: string;       // Claude Code session identifier
  cwd: string;              // project working directory the session runs in
  hook_event_name?: string; // "SessionStart" when present
}
// One entry in conversations.json: the Letta conversation bound to a session.
interface ConversationEntry {
  conversationId: string; // Letta conversation id
  agentId: string;        // agent the conversation was created for
}
// Support both old format (string) and new format (object) for backward compatibility
interface ConversationsMap {
  [sessionId: string]: string | ConversationEntry;
}
// Shape of a Letta conversation object.
// NOTE(review): not referenced anywhere in this file's visible code - confirm
// it is still needed before removing.
interface Conversation {
  id: string;
  agent_id: string;
  created_at?: string;
}
// Durable storage in .letta directory:
//   <cwd>/.letta/claude/conversations.json  - session-id -> conversation map
//   <cwd>/.letta/claude/session-<id>.json   - per-session sync state
function getDurableStateDir(cwd: string): string {
  return path.join(cwd, '.letta', 'claude');
}
// Location of the session-id -> conversation mapping file.
function getConversationsFile(cwd: string): string {
  return path.join(getDurableStateDir(cwd), 'conversations.json');
}
// Location of the per-session sync state file.
function getSyncStateFile(cwd: string, sessionId: string): string {
  return path.join(getDurableStateDir(cwd), `session-${sessionId}.json`);
}
/**
 * Create the temp log directory if it does not exist yet.
 * NOTE(review): the existsSync guard is redundant - mkdirSync with
 * { recursive: true } is already a no-op for an existing directory.
 */
function ensureLogDir(): void {
  if (!fs.existsSync(TEMP_STATE_DIR)) {
    fs.mkdirSync(TEMP_STATE_DIR, { recursive: true });
  }
}
// Create the durable .letta/claude state directory if it does not exist yet.
function ensureDurableStateDir(cwd: string): void {
  const dir = getDurableStateDir(cwd);
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true });
  }
}
/**
 * Append one timestamped line to the session_start log file.
 * The log directory is created on demand.
 */
function log(message: string): void {
  ensureLogDir();
  const timestamp = new Date().toISOString();
  const logLine = `[${timestamp}] ${message}\n`;
  fs.appendFileSync(LOG_FILE, logLine);
}
/**
 * Read and parse the JSON hook payload Claude Code pipes to stdin.
 *
 * Accumulates all stdin data, then parses it once the stream ends.
 * Rejects when stdin errors or the accumulated text is not valid JSON.
 */
async function readHookInput(): Promise<HookInput> {
  return new Promise((resolve, reject) => {
    let data = '';
    process.stdin.setEncoding('utf8');
    process.stdin.on('readable', () => {
      let chunk;
      // Drain everything currently buffered on stdin.
      while ((chunk = process.stdin.read()) !== null) {
        data += chunk;
      }
    });
    process.stdin.on('end', () => {
      try {
        resolve(JSON.parse(data));
      } catch (e) {
        reject(new Error(`Failed to parse hook input: ${e}`));
      }
    });
    process.stdin.on('error', reject);
  });
}
/**
 * Load the session-id -> conversation mapping from conversations.json.
 * Returns an empty map when the file is missing or unreadable (error logged).
 */
function loadConversationsMap(cwd: string): ConversationsMap {
  const filePath = getConversationsFile(cwd);
  if (fs.existsSync(filePath)) {
    try {
      return JSON.parse(fs.readFileSync(filePath, 'utf-8'));
    } catch (e) {
      log(`Failed to load conversations map: ${e}`);
    }
  }
  return {};
}
/**
 * Persist the session-id -> conversation mapping as pretty-printed JSON.
 */
function saveConversationsMap(cwd: string, map: ConversationsMap): void {
  ensureDurableStateDir(cwd);
  fs.writeFileSync(getConversationsFile(cwd), JSON.stringify(map, null, 2), 'utf-8');
}
/**
 * Write the initial per-session sync state file.
 * lastProcessedIndex starts at -1 (nothing processed yet).
 */
function saveSessionState(cwd: string, sessionId: string, conversationId: string): void {
  ensureDurableStateDir(cwd);
  const state = {
    sessionId,
    conversationId,
    lastProcessedIndex: -1,
    startedAt: new Date().toISOString(),
  };
  fs.writeFileSync(getSyncStateFile(cwd, sessionId), JSON.stringify(state, null, 2), 'utf-8');
}
/**
 * Announce the new Claude Code session to the Letta conversation.
 *
 * Builds a <claude_code_session_start> message and POSTs it to the
 * conversation's /messages endpoint. Only one chunk of the streaming
 * response is read before cancelling - delivery is all we need.
 *
 * @param apiKey         Letta API key (Bearer token).
 * @param conversationId Target Letta conversation.
 * @param sessionId      Claude Code session id embedded in the message.
 * @param cwd            Project directory; its basename becomes <project>.
 * @throws Error when the POST returns a non-OK status.
 */
async function sendSessionStartMessage(
  apiKey: string,
  conversationId: string,
  sessionId: string,
  cwd: string
): Promise<void> {
  const url = `${LETTA_API_BASE}/conversations/${conversationId}/messages`;
  const projectName = path.basename(cwd);
  const timestamp = new Date().toISOString();
  // NOTE(review): projectName/cwd are interpolated unescaped; a path containing
  // '&' or '<' yields malformed XML - confirm the agent tolerates that.
  const message = `<claude_code_session_start>
<project>${projectName}</project>
<path>${cwd}</path>
<session_id>${sessionId}</session_id>
<timestamp>${timestamp}</timestamp>
<context>
A new Claude Code session has begun. I'll be sending you updates as the session progresses.
You may update your memory blocks with any relevant context for this project.
</context>
</claude_code_session_start>`;
  log(`Sending session start message to conversation ${conversationId}`);
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      messages: [{ role: 'user', content: message }],
    }),
  });
  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Failed to send message: ${response.status} ${errorText}`);
  }
  // Consume the first chunk so the server registers the read, then cancel.
  const reader = response.body?.getReader();
  if (reader) {
    try {
      await reader.read();
    } finally {
      // Await cancellation rather than leaving a floating promise behind.
      await reader.cancel().catch(() => {});
    }
  }
  log(`Session start message sent successfully`);
}
/**
 * SessionStart hook entry point.
 *
 * Flow: resolve the agent id -> read hook input from stdin -> find or create
 * the Letta conversation for this session -> persist session state -> sync
 * agent memory into CLAUDE.md -> announce the session to the agent.
 *
 * Exits 1 when LETTA_API_KEY is unset or any step throws.
 */
async function main(): Promise<void> {
  log('='.repeat(60));
  log('session_start.ts started');
  const apiKey = process.env.LETTA_API_KEY;
  if (!apiKey) {
    log('ERROR: LETTA_API_KEY not set');
    console.error('Error: LETTA_API_KEY must be set');
    process.exit(1);
  }
  try {
    // Get agent ID (from env, saved config, or auto-import)
    const agentId = await getAgentId(apiKey, log);
    // Read hook input
    log('Reading hook input from stdin...');
    const hookInput = await readHookInput();
    log(`Hook input: session_id=${hookInput.session_id}, cwd=${hookInput.cwd}`);
    // Check if conversation already exists for this session
    const conversationsMap = loadConversationsMap(hookInput.cwd);
    let conversationId: string;
    const cached = conversationsMap[hookInput.session_id];
    if (cached) {
      // Parse both old format (string) and new format (object)
      const entry = typeof cached === 'string'
        ? { conversationId: cached, agentId: null as string | null }
        : cached;
      if (entry.agentId && entry.agentId !== agentId) {
        // Agent ID changed - clear stale entry and create new conversation
        log(`Agent ID changed (${entry.agentId} -> ${agentId}), clearing stale conversation`);
        delete conversationsMap[hookInput.session_id];
        conversationId = await createConversation(apiKey, agentId, log);
        conversationsMap[hookInput.session_id] = { conversationId, agentId };
        saveConversationsMap(hookInput.cwd, conversationsMap);
      } else if (!entry.agentId) {
        // Old format without agentId - upgrade by recreating
        log(`Upgrading old format entry (no agentId stored), creating new conversation`);
        delete conversationsMap[hookInput.session_id];
        conversationId = await createConversation(apiKey, agentId, log);
        conversationsMap[hookInput.session_id] = { conversationId, agentId };
        saveConversationsMap(hookInput.cwd, conversationsMap);
      } else {
        // Valid entry with matching agentId - reuse
        conversationId = entry.conversationId;
        log(`Reusing existing conversation: ${conversationId}`);
      }
    } else {
      // No existing entry - create new conversation
      conversationId = await createConversation(apiKey, agentId, log);
      conversationsMap[hookInput.session_id] = { conversationId, agentId };
      saveConversationsMap(hookInput.cwd, conversationsMap);
    }
    // Save session state (resets lastProcessedIndex for this session)
    saveSessionState(hookInput.cwd, hookInput.session_id, conversationId);
    // Sync memory to CLAUDE.md immediately so Claude has fresh agent/conversation IDs
    log('Syncing memory to CLAUDE.md...');
    const agent = await fetchAgent(apiKey, agentId);
    const lettaContent = formatMemoryBlocksAsXml(agent, conversationId);
    updateClaudeMd(hookInput.cwd, lettaContent);
    log('Memory synced to CLAUDE.md');
    // Send session start message
    await sendSessionStartMessage(apiKey, conversationId, hookInput.session_id, hookInput.cwd);
    log('Completed successfully');
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    log(`ERROR: ${errorMessage}`);
    console.error(`Error in session start hook: ${errorMessage}`);
    process.exit(1);
  }
}
main();
#!/usr/bin/env npx tsx
/**
* Session Start Hook Script
*
* Notifies Letta agent when a new Claude Code session begins.
* This script is designed to run as a Claude Code SessionStart hook.
*
* Environment Variables:
* LETTA_API_KEY - API key for Letta authentication
* LETTA_AGENT_ID - Agent ID to send messages to
*
* Hook Input (via stdin):
* - session_id: Current session ID
* - cwd: Current working directory
* - hook_event_name: "SessionStart"
*
* Exit Codes:
* 0 - Success
* 1 - Non-blocking error
*
* Log file: /tmp/letta-claude-sync/session_start.log
*/
import * as fs from 'fs';
import * as path from 'path';
import { getAgentId } from './agent_config.js';
import {
fetchAgent,
formatMemoryBlocksAsXml,
updateClaudeMd,
createConversation,
} from './conversation_utils.js';
// Configuration
const LETTA_BASE_URL = process.env.LETTA_BASE_URL || 'https://api.letta.com';
const LETTA_API_BASE = `${LETTA_BASE_URL}/v1`;
const TEMP_STATE_DIR = '/tmp/letta-claude-sync';
const LOG_FILE = path.join(TEMP_STATE_DIR, 'session_start.log');
// Payload delivered by Claude Code on stdin for the SessionStart hook.
interface HookInput {
  session_id: string;       // Claude Code session identifier
  cwd: string;              // project working directory the session runs in
  hook_event_name?: string; // "SessionStart" when present
}
// One entry in conversations.json: the Letta conversation bound to a session.
interface ConversationEntry {
  conversationId: string; // Letta conversation id
  agentId: string;        // agent the conversation was created for
}
// Support both old format (string) and new format (object) for backward compatibility
interface ConversationsMap {
  [sessionId: string]: string | ConversationEntry;
}
// Shape of a Letta conversation object.
// NOTE(review): not referenced anywhere in this file's visible code - confirm
// it is still needed before removing.
interface Conversation {
  id: string;
  agent_id: string;
  created_at?: string;
}
// Durable storage in .letta directory:
//   <cwd>/.letta/claude/conversations.json  - session-id -> conversation map
//   <cwd>/.letta/claude/session-<id>.json   - per-session sync state
function getDurableStateDir(cwd: string): string {
  return path.join(cwd, '.letta', 'claude');
}
// Location of the session-id -> conversation mapping file.
function getConversationsFile(cwd: string): string {
  return path.join(getDurableStateDir(cwd), 'conversations.json');
}
// Location of the per-session sync state file.
function getSyncStateFile(cwd: string, sessionId: string): string {
  return path.join(getDurableStateDir(cwd), `session-${sessionId}.json`);
}
/**
 * Create the temp log directory if it does not exist yet.
 * NOTE(review): the existsSync guard is redundant - mkdirSync with
 * { recursive: true } is already a no-op for an existing directory.
 */
function ensureLogDir(): void {
  if (!fs.existsSync(TEMP_STATE_DIR)) {
    fs.mkdirSync(TEMP_STATE_DIR, { recursive: true });
  }
}
// Create the durable .letta/claude state directory if it does not exist yet.
function ensureDurableStateDir(cwd: string): void {
  const dir = getDurableStateDir(cwd);
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true });
  }
}
/**
 * Append one timestamped line to the session_start log file.
 * The log directory is created on demand.
 */
function log(message: string): void {
  ensureLogDir();
  const timestamp = new Date().toISOString();
  const logLine = `[${timestamp}] ${message}\n`;
  fs.appendFileSync(LOG_FILE, logLine);
}
/**
 * Read and parse the JSON hook payload Claude Code pipes to stdin.
 *
 * Accumulates all stdin data, then parses it once the stream ends.
 * Rejects when stdin errors or the accumulated text is not valid JSON.
 */
async function readHookInput(): Promise<HookInput> {
  return new Promise((resolve, reject) => {
    let data = '';
    process.stdin.setEncoding('utf8');
    process.stdin.on('readable', () => {
      let chunk;
      // Drain everything currently buffered on stdin.
      while ((chunk = process.stdin.read()) !== null) {
        data += chunk;
      }
    });
    process.stdin.on('end', () => {
      try {
        resolve(JSON.parse(data));
      } catch (e) {
        reject(new Error(`Failed to parse hook input: ${e}`));
      }
    });
    process.stdin.on('error', reject);
  });
}
/**
 * Load the session-id -> conversation mapping from conversations.json.
 * Returns an empty map when the file is missing or unreadable (error logged).
 */
function loadConversationsMap(cwd: string): ConversationsMap {
  const filePath = getConversationsFile(cwd);
  if (fs.existsSync(filePath)) {
    try {
      return JSON.parse(fs.readFileSync(filePath, 'utf-8'));
    } catch (e) {
      log(`Failed to load conversations map: ${e}`);
    }
  }
  return {};
}
/**
 * Persist the session-id -> conversation mapping as pretty-printed JSON.
 */
function saveConversationsMap(cwd: string, map: ConversationsMap): void {
  ensureDurableStateDir(cwd);
  fs.writeFileSync(getConversationsFile(cwd), JSON.stringify(map, null, 2), 'utf-8');
}
/**
 * Write the initial per-session sync state file.
 * lastProcessedIndex starts at -1 (nothing processed yet).
 */
function saveSessionState(cwd: string, sessionId: string, conversationId: string): void {
  ensureDurableStateDir(cwd);
  const state = {
    sessionId,
    conversationId,
    lastProcessedIndex: -1,
    startedAt: new Date().toISOString(),
  };
  fs.writeFileSync(getSyncStateFile(cwd, sessionId), JSON.stringify(state, null, 2), 'utf-8');
}
/**
 * Announce the new Claude Code session to the Letta conversation.
 *
 * Builds a <claude_code_session_start> message and POSTs it to the
 * conversation's /messages endpoint. Only one chunk of the streaming
 * response is read before cancelling - delivery is all we need.
 *
 * @param apiKey         Letta API key (Bearer token).
 * @param conversationId Target Letta conversation.
 * @param sessionId      Claude Code session id embedded in the message.
 * @param cwd            Project directory; its basename becomes <project>.
 * @throws Error when the POST returns a non-OK status.
 */
async function sendSessionStartMessage(
  apiKey: string,
  conversationId: string,
  sessionId: string,
  cwd: string
): Promise<void> {
  const url = `${LETTA_API_BASE}/conversations/${conversationId}/messages`;
  const projectName = path.basename(cwd);
  const timestamp = new Date().toISOString();
  // NOTE(review): projectName/cwd are interpolated unescaped; a path containing
  // '&' or '<' yields malformed XML - confirm the agent tolerates that.
  const message = `<claude_code_session_start>
<project>${projectName}</project>
<path>${cwd}</path>
<session_id>${sessionId}</session_id>
<timestamp>${timestamp}</timestamp>
<context>
A new Claude Code session has begun. I'll be sending you updates as the session progresses.
You may update your memory blocks with any relevant context for this project.
</context>
</claude_code_session_start>`;
  log(`Sending session start message to conversation ${conversationId}`);
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      messages: [{ role: 'user', content: message }],
    }),
  });
  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Failed to send message: ${response.status} ${errorText}`);
  }
  // Consume the first chunk so the server registers the read, then cancel.
  const reader = response.body?.getReader();
  if (reader) {
    try {
      await reader.read();
    } finally {
      // Await cancellation rather than leaving a floating promise behind.
      await reader.cancel().catch(() => {});
    }
  }
  log(`Session start message sent successfully`);
}
/**
 * SessionStart hook entry point.
 *
 * Flow: resolve the agent id -> read hook input from stdin -> find or create
 * the Letta conversation for this session -> persist session state -> sync
 * agent memory into CLAUDE.md -> announce the session to the agent.
 *
 * Exits 1 when LETTA_API_KEY is unset or any step throws.
 */
async function main(): Promise<void> {
  log('='.repeat(60));
  log('session_start.ts started');
  const apiKey = process.env.LETTA_API_KEY;
  if (!apiKey) {
    log('ERROR: LETTA_API_KEY not set');
    console.error('Error: LETTA_API_KEY must be set');
    process.exit(1);
  }
  try {
    // Get agent ID (from env, saved config, or auto-import)
    const agentId = await getAgentId(apiKey, log);
    // Read hook input
    log('Reading hook input from stdin...');
    const hookInput = await readHookInput();
    log(`Hook input: session_id=${hookInput.session_id}, cwd=${hookInput.cwd}`);
    // Check if conversation already exists for this session
    const conversationsMap = loadConversationsMap(hookInput.cwd);
    let conversationId: string;
    const cached = conversationsMap[hookInput.session_id];
    if (cached) {
      // Parse both old format (string) and new format (object)
      const entry = typeof cached === 'string'
        ? { conversationId: cached, agentId: null as string | null }
        : cached;
      if (entry.agentId && entry.agentId !== agentId) {
        // Agent ID changed - clear stale entry and create new conversation
        log(`Agent ID changed (${entry.agentId} -> ${agentId}), clearing stale conversation`);
        delete conversationsMap[hookInput.session_id];
        conversationId = await createConversation(apiKey, agentId, log);
        conversationsMap[hookInput.session_id] = { conversationId, agentId };
        saveConversationsMap(hookInput.cwd, conversationsMap);
      } else if (!entry.agentId) {
        // Old format without agentId - upgrade by recreating
        log(`Upgrading old format entry (no agentId stored), creating new conversation`);
        delete conversationsMap[hookInput.session_id];
        conversationId = await createConversation(apiKey, agentId, log);
        conversationsMap[hookInput.session_id] = { conversationId, agentId };
        saveConversationsMap(hookInput.cwd, conversationsMap);
      } else {
        // Valid entry with matching agentId - reuse
        conversationId = entry.conversationId;
        log(`Reusing existing conversation: ${conversationId}`);
      }
    } else {
      // No existing entry - create new conversation
      conversationId = await createConversation(apiKey, agentId, log);
      conversationsMap[hookInput.session_id] = { conversationId, agentId };
      saveConversationsMap(hookInput.cwd, conversationsMap);
    }
    // Save session state (resets lastProcessedIndex for this session)
    saveSessionState(hookInput.cwd, hookInput.session_id, conversationId);
    // Sync memory to CLAUDE.md immediately so Claude has fresh agent/conversation IDs
    log('Syncing memory to CLAUDE.md...');
    const agent = await fetchAgent(apiKey, agentId);
    const lettaContent = formatMemoryBlocksAsXml(agent, conversationId);
    updateClaudeMd(hookInput.cwd, lettaContent);
    log('Memory synced to CLAUDE.md');
    // Send session start message
    await sendSessionStartMessage(apiKey, conversationId, hookInput.session_id, hookInput.cwd);
    log('Completed successfully');
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    log(`ERROR: ${errorMessage}`);
    console.error(`Error in session start hook: ${errorMessage}`);
    process.exit(1);
  }
}
main();

View File

@@ -1,458 +1,458 @@
#!/usr/bin/env tsx
/**
* Letta Memory Sync Script
*
* Syncs Letta agent memory blocks to the project's CLAUDE.md file.
* This script is designed to run as a Claude Code UserPromptSubmit hook.
*
* Environment Variables:
* LETTA_API_KEY - API key for Letta authentication
* LETTA_AGENT_ID - Agent ID to fetch memory blocks from
* CLAUDE_PROJECT_DIR - Project directory (set by Claude Code)
* LETTA_DEBUG - Set to "1" to enable debug logging to stderr
*
* Exit Codes:
* 0 - Success
* 1 - Non-blocking error (logged to stderr)
* 2 - Blocking error (prevents prompt processing)
*/
import * as fs from 'fs';
import * as path from 'path';
import * as readline from 'readline';
import { spawn } from 'child_process';
import { fileURLToPath } from 'url';
import { getAgentId } from './agent_config.js';
import {
loadSyncState,
saveSyncState,
getOrCreateConversation,
getSyncStateFile,
lookupConversation,
SyncState,
Agent,
MemoryBlock,
fetchAgent,
escapeXmlContent,
formatMemoryBlocksAsXml,
updateClaudeMd,
LETTA_API_BASE,
} from './conversation_utils.js';
// ESM-compatible __dirname
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Windows compatibility: npx needs to be npx.cmd on Windows
const NPX_CMD = process.platform === 'win32' ? 'npx.cmd' : 'npx';
// Configuration
const DEBUG = process.env.LETTA_DEBUG === '1';
// Print debug output to stderr, but only when LETTA_DEBUG=1 is set.
function debug(...args: unknown[]): void {
  if (DEBUG) {
    console.error('[sync debug]', ...args);
  }
}
// A message as returned by the Letta conversation messages API.
interface LettaMessage {
  id: string;
  message_type: string; // e.g. 'assistant_message'
  content?: string;     // message body (either field may carry it - see fetchAssistantMessages)
  text?: string;        // alternative message body field
  date?: string;
}
// Normalized assistant message passed around within this script.
interface MessageInfo {
  id: string;
  text: string;
  date: string | null;
}
// Payload delivered by Claude Code on stdin for the UserPromptSubmit hook.
interface HookInput {
  session_id: string;
  cwd: string;
  prompt?: string; // User's prompt text (available on UserPromptSubmit)
  transcript_path?: string; // Path to transcript JSONL
}
// Temp state directory for logs and background-worker payload files
const TEMP_STATE_DIR = '/tmp/letta-claude-sync';
/**
 * Read the JSON hook payload from stdin.
 *
 * Resolves null (rather than rejecting) on empty input or a parse failure so
 * the sync degrades gracefully when run outside a hook.
 * NOTE(review): the 100ms timeout closes the reader even if stdin is still
 * streaming - confirm hook payloads always arrive within that window.
 */
async function readHookInput(): Promise<HookInput | null> {
  return new Promise((resolve) => {
    let input = '';
    const rl = readline.createInterface({ input: process.stdin });
    rl.on('line', (line) => {
      // Newlines are dropped here; fine for a single-line JSON payload.
      input += line;
    });
    rl.on('close', () => {
      if (!input.trim()) {
        resolve(null);
        return;
      }
      try {
        resolve(JSON.parse(input));
      } catch {
        resolve(null);
      }
    });
    // Timeout after 100ms if no input
    setTimeout(() => {
      rl.close();
    }, 100);
  });
}
/**
 * Number of non-blank lines in the transcript JSONL file.
 * Missing files count as zero. Used to track lastProcessedIndex.
 */
function countTranscriptLines(transcriptPath: string): number {
  if (!fs.existsSync(transcriptPath)) {
    return 0;
  }
  const raw = fs.readFileSync(transcriptPath, 'utf-8');
  let total = 0;
  for (const entry of raw.split('\n')) {
    if (entry.trim()) {
      total += 1;
    }
  }
  return total;
}
/**
 * Pick out the memory blocks whose values changed since the last sync.
 *
 * A block counts as changed when it is brand new (no entry in the previous
 * snapshot) or its value differs from the snapshot. On the very first sync
 * (no snapshot at all) nothing is reported, so all blocks are not flagged
 * as "changed" at once.
 */
function detectChangedBlocks(
  currentBlocks: MemoryBlock[],
  lastBlockValues: { [label: string]: string } | null
): MemoryBlock[] {
  if (!lastBlockValues) {
    return [];
  }
  const changed: MemoryBlock[] = [];
  for (const block of currentBlocks) {
    const prior = lastBlockValues[block.label];
    if (prior === undefined || prior !== block.value) {
      changed.push(block);
    }
  }
  return changed;
}
/**
 * Set-based, line-level diff of two block values.
 *
 * Lines are trimmed and blanks dropped before comparison, so the diff is
 * order-insensitive and ignores whitespace-only changes. Returns the lines
 * present only in the new value (`added`) and only in the old (`removed`).
 */
function computeDiff(oldValue: string, newValue: string): { added: string[], removed: string[] } {
  const normalize = (text: string): string[] =>
    text.split('\n').map((line) => line.trim()).filter((line) => line !== '');
  const before = normalize(oldValue);
  const after = normalize(newValue);
  const beforeSet = new Set(before);
  const afterSet = new Set(after);
  return {
    added: after.filter((line) => !beforeSet.has(line)),
    removed: before.filter((line) => !afterSet.has(line)),
  };
}
/**
 * Render changed memory blocks as an XML fragment for stdout injection.
 *
 * New blocks are shown in full; modified blocks as a +/- line diff. When a
 * change was whitespace-only (the diff is empty), the full new content is
 * shown instead. Returns '' when nothing changed.
 */
function formatChangedBlocksForStdout(
  changedBlocks: MemoryBlock[],
  lastBlockValues: { [label: string]: string } | null
): string {
  if (changedBlocks.length === 0) {
    return '';
  }
  const sections: string[] = [];
  for (const block of changedBlocks) {
    const prior = lastBlockValues?.[block.label];
    if (prior === undefined) {
      // Brand-new block: no baseline to diff against, show full content.
      sections.push(`<${block.label} status="new">\n${escapeXmlContent(block.value || '')}\n</${block.label}>`);
      continue;
    }
    const { added, removed } = computeDiff(prior, block.value || '');
    if (added.length === 0 && removed.length === 0) {
      // Whitespace-only change: a +/- diff would be empty, so show it whole.
      sections.push(`<${block.label} status="modified">\n${escapeXmlContent(block.value || '')}\n</${block.label}>`);
      continue;
    }
    const diffLines = [
      ...removed.map((line) => `- ${escapeXmlContent(line)}`),
      ...added.map((line) => `+ ${escapeXmlContent(line)}`),
    ];
    sections.push(`<${block.label} status="modified">\n${diffLines.join('\n')}\n</${block.label}>`);
  }
  return `<letta_memory_update>
<!-- Memory blocks updated since last prompt (showing diff) -->
${sections.join('\n')}
</letta_memory_update>`;
}
/**
 * Fetch assistant messages newer than lastSeenMessageId from the conversation.
 *
 * The API returns messages newest-first, so "new" messages occupy indices
 * 0..lastSeenIndex-1 of the filtered list. HTTP failures are non-fatal: an
 * error response yields an empty list with lastSeenMessageId unchanged.
 * NOTE(review): if lastSeenMessageId has fallen outside the 50-message
 * window, all fetched messages are treated as new - confirm that is intended.
 *
 * @returns the new messages (newest first) plus the id of the newest message
 *   seen, for persisting as the next lastSeenMessageId.
 */
async function fetchAssistantMessages(
  apiKey: string,
  conversationId: string | null,
  lastSeenMessageId: string | null
): Promise<{ messages: MessageInfo[], lastMessageId: string | null }> {
  if (!conversationId) {
    // No conversation yet, return empty
    return { messages: [], lastMessageId: null };
  }
  const url = `${LETTA_API_BASE}/conversations/${conversationId}/messages?limit=50`;
  const response = await fetch(url, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  if (!response.ok) {
    // Don't fail if we can't fetch messages, just return empty
    return { messages: [], lastMessageId: lastSeenMessageId };
  }
  const allMessages: LettaMessage[] = await response.json();
  debug(`Fetched ${allMessages.length} total messages from conversation`);
  // Filter to assistant messages only
  // NOTE: API returns messages newest-first
  const assistantMessages = allMessages.filter(msg => msg.message_type === 'assistant_message');
  debug(`Found ${assistantMessages.length} assistant messages`);
  // Find the index of the last seen message
  // Since messages are newest-first, new messages are BEFORE lastSeenIndex (indices 0 to lastSeenIndex-1)
  let endIndex = assistantMessages.length; // Default: return all messages
  if (lastSeenMessageId) {
    const lastSeenIndex = assistantMessages.findIndex(msg => msg.id === lastSeenMessageId);
    debug(`lastSeenMessageId=${lastSeenMessageId}, lastSeenIndex=${lastSeenIndex}`);
    if (lastSeenIndex !== -1) {
      // Only return messages newer than the last seen one (before it in the array)
      endIndex = lastSeenIndex;
    }
  }
  debug(`endIndex=${endIndex}, will return messages from index 0 to ${endIndex - 1}`);
  // Get new messages (from 0 to endIndex, which are the newest messages)
  const newMessages: MessageInfo[] = [];
  for (let i = 0; i < endIndex; i++) {
    const msg = assistantMessages[i];
    // The body may live in either field depending on API shape.
    const text = msg.content || msg.text;
    if (text && typeof text === 'string') {
      newMessages.push({
        id: msg.id,
        text,
        date: msg.date || null,
      });
    }
  }
  debug(`Returning ${newMessages.length} new messages`);
  // Get the last message ID for tracking (the NEWEST message, which is first in the array)
  const lastMessageId = assistantMessages.length > 0
    ? assistantMessages[0].id
    : lastSeenMessageId;
  debug(`Setting lastMessageId=${lastMessageId}`);
  return { messages: newMessages, lastMessageId };
}
/**
 * Render new assistant messages as <letta_message> tags for stdout injection.
 *
 * Returns an HTML-comment placeholder when there is nothing new. When more
 * than one message is present, each tag carries a "(i/n)" counter.
 */
function formatMessagesForStdout(agent: Agent, messages: MessageInfo[]): string {
  const agentName = agent.name || 'Letta Agent';
  if (messages.length === 0) {
    return `<!-- No new messages from ${agentName} -->`;
  }
  const total = messages.length;
  const parts: string[] = [];
  messages.forEach((msg, index) => {
    // Only number the messages when there is more than one.
    const counter = total > 1 ? ` (${index + 1}/${total})` : '';
    const timestamp = msg.date || 'unknown';
    parts.push(`<letta_message from="${agentName}"${counter} timestamp="${timestamp}">
${msg.text}
</letta_message>`);
  });
  return parts.join('\n\n');
}
/**
* Main function
*/
async function main(): Promise<void> {
// Get environment variables
const apiKey = process.env.LETTA_API_KEY;
const projectDir = process.env.CLAUDE_PROJECT_DIR || process.cwd();
// Validate required environment variables
if (!apiKey) {
console.error('Error: LETTA_API_KEY environment variable is not set');
process.exit(1);
}
try {
// Get agent ID (from env, saved config, or auto-import)
const agentId = await getAgentId(apiKey);
// Read hook input to get session ID for conversation lookup
const hookInput = await readHookInput();
const cwd = hookInput?.cwd || projectDir;
const sessionId = hookInput?.session_id;
// Load state using shared utility
let state: SyncState | null = null;
if (sessionId) {
state = loadSyncState(cwd, sessionId);
}
// Recover conversationId from conversations.json if state doesn't have it
let conversationId = state?.conversationId || null;
if (!conversationId && sessionId) {
conversationId = lookupConversation(cwd, sessionId);
// Update state so we don't have to look it up again
if (conversationId && state) {
state.conversationId = conversationId;
}
}
const lastBlockValues = state?.lastBlockValues || null;
const lastSeenMessageId = state?.lastSeenMessageId || null;
// Fetch agent data and messages in parallel
const [agent, messagesResult] = await Promise.all([
fetchAgent(apiKey, agentId),
fetchAssistantMessages(apiKey, conversationId, lastSeenMessageId),
]);
const { messages: newMessages, lastMessageId } = messagesResult;
// Detect which blocks have changed since last sync
const changedBlocks = detectChangedBlocks(agent.blocks || [], lastBlockValues);
// Format memory blocks as XML (includes context section)
const lettaContent = formatMemoryBlocksAsXml(agent, conversationId);
// Update CLAUDE.md with full memory blocks
updateClaudeMd(cwd, lettaContent);
// Update state with block values and last seen message ID
if (state) {
state.lastBlockValues = {};
for (const block of agent.blocks || []) {
state.lastBlockValues[block.label] = block.value;
}
// Track the last message we've seen
if (lastMessageId) {
state.lastSeenMessageId = lastMessageId;
}
}
// Output to stdout - this gets injected before the user's prompt
// (UserPromptSubmit hooks add stdout to context)
const outputs: string[] = [];
// Add changed blocks if any
const changedBlocksOutput = formatChangedBlocksForStdout(changedBlocks, lastBlockValues);
if (changedBlocksOutput) {
outputs.push(changedBlocksOutput);
}
// Add all new messages from Sub
const messageOutput = formatMessagesForStdout(agent, newMessages);
outputs.push(messageOutput);
// Add instruction to acknowledge messages if there are any
if (newMessages.length > 0) {
const agentName = agent.name || 'Subconscious';
outputs.push(`<instruction>Your Subconscious (${agentName}) sent you a message above. Briefly acknowledge what ${agentName} said - just a short note like "Sub notes: [key point]" so the user knows.</instruction>`);
}
console.log(outputs.join('\n\n'));
// Send user prompt to Letta early (gives Letta a head start while Claude processes)
if (sessionId && hookInput?.prompt && state) {
try {
// Ensure we have a conversation
const convId = await getOrCreateConversation(apiKey, agentId, sessionId, cwd, state);
// Get current transcript length for index tracking
const transcriptLength = hookInput.transcript_path
? countTranscriptLines(hookInput.transcript_path)
: 0;
// Format the prompt message
const promptMessage = `<claude_code_user_prompt>
<session_id>${sessionId}</session_id>
<prompt>${escapeXmlContent(hookInput.prompt)}</prompt>
<note>Early notification - Claude Code is processing this now. Full transcript with response will follow.</note>
</claude_code_user_prompt>`;
// Write payload for background worker
if (!fs.existsSync(TEMP_STATE_DIR)) {
fs.mkdirSync(TEMP_STATE_DIR, { recursive: true });
}
const payloadFile = path.join(TEMP_STATE_DIR, `prompt-${sessionId}-${Date.now()}.json`);
const payload = {
apiKey,
conversationId: convId,
sessionId,
message: promptMessage,
stateFile: getSyncStateFile(cwd, sessionId),
newLastProcessedIndex: transcriptLength > 0 ? transcriptLength - 1 : 0,
};
fs.writeFileSync(payloadFile, JSON.stringify(payload), 'utf-8');
// Spawn background worker
const workerScript = path.join(__dirname, 'send_worker.ts');
const isWindows = process.platform === 'win32';
const child = spawn(NPX_CMD, ['tsx', workerScript, payloadFile], {
detached: true,
stdio: 'ignore',
cwd,
env: process.env,
// Windows requires shell: true for detached processes to work properly
...(isWindows && { shell: true, windowsHide: true }),
});
child.unref();
} catch (promptError) {
// Don't fail the sync if prompt sending fails - just log warning
console.error(`Warning: Failed to send prompt to Letta: ${promptError}`);
}
}
// Save state
if (state && sessionId) {
saveSyncState(cwd, state);
}
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
console.error(`Error syncing Letta memory: ${errorMessage}`);
// Exit with code 1 for non-blocking error
// Change to exit(2) if you want to block prompt processing on sync failures
process.exit(1);
}
}
// Entry point: kick off the sync (errors are handled and reported inside main)
main();
#!/usr/bin/env tsx
/**
* Letta Memory Sync Script
*
* Syncs Letta agent memory blocks to the project's CLAUDE.md file.
* This script is designed to run as a Claude Code UserPromptSubmit hook.
*
* Environment Variables:
* LETTA_API_KEY - API key for Letta authentication
* LETTA_AGENT_ID - Agent ID to fetch memory blocks from
* CLAUDE_PROJECT_DIR - Project directory (set by Claude Code)
* LETTA_DEBUG - Set to "1" to enable debug logging to stderr
*
* Exit Codes:
* 0 - Success
* 1 - Non-blocking error (logged to stderr)
* 2 - Blocking error (prevents prompt processing)
*/
import { spawn } from 'child_process';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
import * as readline from 'readline';
import { fileURLToPath } from 'url';
import { getAgentId } from './agent_config.js';
import {
  loadSyncState,
  saveSyncState,
  getOrCreateConversation,
  getSyncStateFile,
  lookupConversation,
  SyncState,
  Agent,
  MemoryBlock,
  fetchAgent,
  escapeXmlContent,
  formatMemoryBlocksAsXml,
  updateClaudeMd,
  LETTA_API_BASE,
} from './conversation_utils.js';
// ESM-compatible __dirname (import.meta.url -> file path -> containing directory)
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Windows compatibility: npx needs to be npx.cmd on Windows
const NPX_CMD = process.platform === 'win32' ? 'npx.cmd' : 'npx';
// Configuration: set LETTA_DEBUG=1 in the environment to enable stderr debug logging
const DEBUG = process.env.LETTA_DEBUG === '1';
/**
 * Log to stderr when LETTA_DEBUG=1; a silent no-op otherwise.
 * Uses stderr so debug output never pollutes the stdout context injection.
 */
function debug(...args: unknown[]): void {
  if (!DEBUG) {
    return;
  }
  console.error('[sync debug]', ...args);
}
// Shape of a message as returned by the Letta conversations API.
interface LettaMessage {
  id: string;
  message_type: string; // e.g. 'assistant_message'; used for filtering
  content?: string; // message body (preferred field)
  text?: string; // message body (fallback field)
  date?: string; // timestamp, if the API provided one
}
// Normalized assistant message handed to the stdout formatters.
interface MessageInfo {
  id: string;
  text: string;
  date: string | null; // null when the API message carried no date
}
// JSON payload Claude Code writes to stdin for UserPromptSubmit hooks.
interface HookInput {
  session_id: string;
  cwd: string;
  prompt?: string; // User's prompt text (available on UserPromptSubmit)
  transcript_path?: string; // Path to transcript JSONL
}
// Temp state directory for worker payloads and logs.
// Use the platform temp dir instead of a hard-coded '/tmp' so this also
// works on Windows (the script otherwise supports win32 via NPX_CMD and
// the spawn options); os.tmpdir() is /tmp on Linux/macOS, so behavior
// there is unchanged.
const TEMP_STATE_DIR = path.join(os.tmpdir(), 'letta-claude-sync');
/**
 * Read hook input (a single JSON object) from stdin.
 *
 * Claude Code writes the hook payload to stdin and closes the stream; if
 * stdin never closes (e.g. the script is run interactively), a 100ms
 * timeout gives up and resolves null.
 *
 * Fix: the timeout is now cleared once the stream closes. Previously the
 * pending timer kept the event loop alive for the full 100ms even when
 * input arrived immediately, and fired a redundant rl.close().
 *
 * @returns The parsed HookInput, or null on empty/malformed input.
 */
async function readHookInput(): Promise<HookInput | null> {
  return new Promise((resolve) => {
    let input = '';
    const rl = readline.createInterface({ input: process.stdin });
    // Timeout after 100ms if no input
    const timer = setTimeout(() => {
      rl.close();
    }, 100);
    rl.on('line', (line) => {
      input += line;
    });
    rl.on('close', () => {
      clearTimeout(timer); // don't hold the event loop once stdin is done
      if (!input.trim()) {
        resolve(null);
        return;
      }
      try {
        resolve(JSON.parse(input));
      } catch {
        resolve(null); // malformed JSON -> treat as no hook input
      }
    });
  });
}
/**
 * Count the non-blank lines in a transcript JSONL file.
 * Used to record how far into the transcript this sync has processed.
 * Returns 0 when the file does not exist.
 */
function countTranscriptLines(transcriptPath: string): number {
  if (!fs.existsSync(transcriptPath)) {
    return 0;
  }
  const raw = fs.readFileSync(transcriptPath, 'utf-8');
  let nonBlank = 0;
  for (const line of raw.split('\n')) {
    if (line.trim()) {
      nonBlank += 1;
    }
  }
  return nonBlank;
}
/**
 * Return the memory blocks whose values differ from the previously synced
 * snapshot, or that did not exist in it at all.
 *
 * On the very first sync there is no snapshot; nothing is reported rather
 * than flagging every block as "changed".
 */
function detectChangedBlocks(
  currentBlocks: MemoryBlock[],
  lastBlockValues: { [label: string]: string } | null
): MemoryBlock[] {
  if (!lastBlockValues) {
    return [];
  }
  const changed: MemoryBlock[] = [];
  for (const block of currentBlocks) {
    const prior = lastBlockValues[block.label];
    // Changed when the block is new, or its value differs from the snapshot.
    if (prior === undefined || prior !== block.value) {
      changed.push(block);
    }
  }
  return changed;
}
/**
 * Compute a simple line-level diff between two block values.
 *
 * Lines are trimmed and blanks dropped before comparison. The comparison
 * is set-based, so duplicate lines collapse and pure reordering is NOT
 * reported as a change.
 */
function computeDiff(oldValue: string, newValue: string): { added: string[], removed: string[] } {
  const toLines = (text: string): string[] =>
    text.split('\n').map((line) => line.trim()).filter((line) => line !== '');
  const before = toLines(oldValue);
  const after = toLines(newValue);
  const beforeSet = new Set(before);
  const afterSet = new Set(after);
  return {
    added: after.filter((line) => !beforeSet.has(line)),
    removed: before.filter((line) => !afterSet.has(line)),
  };
}
/**
 * Render changed memory blocks as a pseudo-XML section for stdout injection.
 *
 * New blocks show their full (escaped) content; modified blocks show a
 * +/- line diff. When the set-based diff reports nothing (whitespace-only
 * edits), the full current content is shown instead.
 * Returns '' when there are no changed blocks.
 */
function formatChangedBlocksForStdout(
  changedBlocks: MemoryBlock[],
  lastBlockValues: { [label: string]: string } | null
): string {
  if (changedBlocks.length === 0) {
    return '';
  }
  const sections: string[] = [];
  for (const block of changedBlocks) {
    const previousValue = lastBlockValues?.[block.label];
    if (previousValue === undefined) {
      // Brand-new block: emit its entire escaped value.
      const escapedContent = escapeXmlContent(block.value || '');
      sections.push(`<${block.label} status="new">\n${escapedContent}\n</${block.label}>`);
      continue;
    }
    const diff = computeDiff(previousValue, block.value || '');
    if (diff.added.length === 0 && diff.removed.length === 0) {
      // Whitespace-only change the diff can't express: show full content.
      const escapedContent = escapeXmlContent(block.value || '');
      sections.push(`<${block.label} status="modified">\n${escapedContent}\n</${block.label}>`);
      continue;
    }
    const diffLines: string[] = [
      ...diff.removed.map((line) => `- ${escapeXmlContent(line)}`),
      ...diff.added.map((line) => `+ ${escapeXmlContent(line)}`),
    ];
    sections.push(`<${block.label} status="modified">\n${diffLines.join('\n')}\n</${block.label}>`);
  }
  return `<letta_memory_update>
<!-- Memory blocks updated since last prompt (showing diff) -->
${sections.join('\n')}
</letta_memory_update>`;
}
/**
 * Fetch assistant messages from the conversation history since last seen.
 *
 * The API returns messages newest-first, so everything BEFORE the index of
 * `lastSeenMessageId` is new. Failures (HTTP errors, no conversation yet,
 * unexpected payload shape) degrade to "no new messages" rather than
 * aborting the whole sync.
 *
 * @param apiKey - Letta API key used as a Bearer token
 * @param conversationId - Conversation to read, or null if none exists yet
 * @param lastSeenMessageId - ID of the newest message seen on the previous sync
 * @returns New assistant messages (newest first) and the newest message ID
 */
async function fetchAssistantMessages(
  apiKey: string,
  conversationId: string | null,
  lastSeenMessageId: string | null
): Promise<{ messages: MessageInfo[], lastMessageId: string | null }> {
  if (!conversationId) {
    // No conversation yet, return empty
    return { messages: [], lastMessageId: null };
  }
  const url = `${LETTA_API_BASE}/conversations/${conversationId}/messages?limit=50`;
  const response = await fetch(url, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });
  if (!response.ok) {
    // Don't fail if we can't fetch messages, just return empty
    return { messages: [], lastMessageId: lastSeenMessageId };
  }
  // Guard against non-array payloads (e.g. an error object or a wrapped
  // response): previously this was assumed to be an array and a schema
  // surprise would throw in .filter() and abort the entire sync.
  const body: unknown = await response.json();
  const allMessages: LettaMessage[] = Array.isArray(body) ? body : [];
  debug(`Fetched ${allMessages.length} total messages from conversation`);
  // Filter to assistant messages only
  // NOTE: API returns messages newest-first
  const assistantMessages = allMessages.filter(msg => msg.message_type === 'assistant_message');
  debug(`Found ${assistantMessages.length} assistant messages`);
  // Find the index of the last seen message
  // Since messages are newest-first, new messages are BEFORE lastSeenIndex (indices 0 to lastSeenIndex-1)
  let endIndex = assistantMessages.length; // Default: return all messages
  if (lastSeenMessageId) {
    const lastSeenIndex = assistantMessages.findIndex(msg => msg.id === lastSeenMessageId);
    debug(`lastSeenMessageId=${lastSeenMessageId}, lastSeenIndex=${lastSeenIndex}`);
    if (lastSeenIndex !== -1) {
      // Only return messages newer than the last seen one (before it in the array)
      endIndex = lastSeenIndex;
    }
  }
  debug(`endIndex=${endIndex}, will return messages from index 0 to ${endIndex - 1}`);
  // Get new messages (from 0 to endIndex, which are the newest messages)
  const newMessages: MessageInfo[] = [];
  for (let i = 0; i < endIndex; i++) {
    const msg = assistantMessages[i];
    // Prefer 'content'; fall back to the legacy 'text' field.
    const text = msg.content || msg.text;
    if (text && typeof text === 'string') {
      newMessages.push({
        id: msg.id,
        text,
        date: msg.date || null,
      });
    }
  }
  debug(`Returning ${newMessages.length} new messages`);
  // Get the last message ID for tracking (the NEWEST message, which is first in the array)
  const lastMessageId = assistantMessages.length > 0
    ? assistantMessages[0].id
    : lastSeenMessageId;
  debug(`Setting lastMessageId=${lastMessageId}`);
  return { messages: newMessages, lastMessageId };
}
/**
 * Render new assistant messages as pseudo-XML for stdout injection.
 *
 * Message text is inserted verbatim (not XML-escaped). When more than one
 * message is present, each is numbered "(i/n)" after the from attribute.
 * With no messages, returns an HTML-style comment instead.
 */
function formatMessagesForStdout(agent: Agent, messages: MessageInfo[]): string {
  const agentName = agent.name || 'Letta Agent';
  if (messages.length === 0) {
    return `<!-- No new messages from ${agentName} -->`;
  }
  const total = messages.length;
  const rendered: string[] = [];
  messages.forEach((msg, index) => {
    const timestamp = msg.date || 'unknown';
    const counter = total > 1 ? ` (${index + 1}/${total})` : '';
    rendered.push(
      `<letta_message from="${agentName}"${counter} timestamp="${timestamp}">\n${msg.text}\n</letta_message>`
    );
  });
  return rendered.join('\n\n');
}
/**
 * Main entry point for the UserPromptSubmit hook.
 *
 * Flow:
 *   1. Resolve the agent, session, and per-session sync state.
 *   2. Fetch agent memory blocks and new assistant messages in parallel.
 *   3. Write the full memory blocks into CLAUDE.md and print changed
 *      blocks / new messages to stdout (Claude Code injects stdout into
 *      the prompt context).
 *   4. Forward the user's prompt to Letta via a detached background worker
 *      so Letta gets a head start while Claude processes.
 *   5. Persist the updated sync state.
 *
 * Exits 1 (non-blocking) on failure; see the file header for exit codes.
 */
async function main(): Promise<void> {
  // Get environment variables
  const apiKey = process.env.LETTA_API_KEY;
  const projectDir = process.env.CLAUDE_PROJECT_DIR || process.cwd();
  // Validate required environment variables
  if (!apiKey) {
    console.error('Error: LETTA_API_KEY environment variable is not set');
    process.exit(1);
  }
  try {
    // Get agent ID (from env, saved config, or auto-import)
    const agentId = await getAgentId(apiKey);
    // Read hook input to get session ID for conversation lookup
    const hookInput = await readHookInput();
    const cwd = hookInput?.cwd || projectDir;
    const sessionId = hookInput?.session_id;
    // Load state using shared utility (state is per-session; without a
    // session ID we run stateless)
    let state: SyncState | null = null;
    if (sessionId) {
      state = loadSyncState(cwd, sessionId);
    }
    // Recover conversationId from conversations.json if state doesn't have it
    let conversationId = state?.conversationId || null;
    if (!conversationId && sessionId) {
      conversationId = lookupConversation(cwd, sessionId);
      // Update state so we don't have to look it up again
      if (conversationId && state) {
        state.conversationId = conversationId;
      }
    }
    const lastBlockValues = state?.lastBlockValues || null;
    const lastSeenMessageId = state?.lastSeenMessageId || null;
    // Fetch agent data and messages in parallel
    const [agent, messagesResult] = await Promise.all([
      fetchAgent(apiKey, agentId),
      fetchAssistantMessages(apiKey, conversationId, lastSeenMessageId),
    ]);
    const { messages: newMessages, lastMessageId } = messagesResult;
    // Detect which blocks have changed since last sync
    const changedBlocks = detectChangedBlocks(agent.blocks || [], lastBlockValues);
    // Format memory blocks as XML (includes context section)
    const lettaContent = formatMemoryBlocksAsXml(agent, conversationId);
    // Update CLAUDE.md with full memory blocks
    updateClaudeMd(cwd, lettaContent);
    // Update state with block values and last seen message ID
    // (this snapshot is what detectChangedBlocks diffs against next sync)
    if (state) {
      state.lastBlockValues = {};
      for (const block of agent.blocks || []) {
        state.lastBlockValues[block.label] = block.value;
      }
      // Track the last message we've seen
      if (lastMessageId) {
        state.lastSeenMessageId = lastMessageId;
      }
    }
    // Output to stdout - this gets injected before the user's prompt
    // (UserPromptSubmit hooks add stdout to context)
    const outputs: string[] = [];
    // Add changed blocks if any
    const changedBlocksOutput = formatChangedBlocksForStdout(changedBlocks, lastBlockValues);
    if (changedBlocksOutput) {
      outputs.push(changedBlocksOutput);
    }
    // Add all new messages from Sub
    const messageOutput = formatMessagesForStdout(agent, newMessages);
    outputs.push(messageOutput);
    // Add instruction to acknowledge messages if there are any
    if (newMessages.length > 0) {
      const agentName = agent.name || 'Subconscious';
      outputs.push(`<instruction>Your Subconscious (${agentName}) sent you a message above. Briefly acknowledge what ${agentName} said - just a short note like "Sub notes: [key point]" so the user knows.</instruction>`);
    }
    console.log(outputs.join('\n\n'));
    // Send user prompt to Letta early (gives Letta a head start while Claude processes)
    if (sessionId && hookInput?.prompt && state) {
      try {
        // Ensure we have a conversation
        const convId = await getOrCreateConversation(apiKey, agentId, sessionId, cwd, state);
        // Get current transcript length for index tracking
        const transcriptLength = hookInput.transcript_path
          ? countTranscriptLines(hookInput.transcript_path)
          : 0;
        // Format the prompt message (template interior is intentionally
        // unindented - it is the literal message body)
        const promptMessage = `<claude_code_user_prompt>
<session_id>${sessionId}</session_id>
<prompt>${escapeXmlContent(hookInput.prompt)}</prompt>
<note>Early notification - Claude Code is processing this now. Full transcript with response will follow.</note>
</claude_code_user_prompt>`;
        // Write payload for background worker
        if (!fs.existsSync(TEMP_STATE_DIR)) {
          fs.mkdirSync(TEMP_STATE_DIR, { recursive: true });
        }
        const payloadFile = path.join(TEMP_STATE_DIR, `prompt-${sessionId}-${Date.now()}.json`);
        const payload = {
          apiKey,
          conversationId: convId,
          sessionId,
          message: promptMessage,
          stateFile: getSyncStateFile(cwd, sessionId),
          // Index of the last transcript line already covered by this prompt
          newLastProcessedIndex: transcriptLength > 0 ? transcriptLength - 1 : 0,
        };
        fs.writeFileSync(payloadFile, JSON.stringify(payload), 'utf-8');
        // Spawn background worker, detached so this hook can exit immediately
        const workerScript = path.join(__dirname, 'send_worker.ts');
        const isWindows = process.platform === 'win32';
        const child = spawn(NPX_CMD, ['tsx', workerScript, payloadFile], {
          detached: true,
          stdio: 'ignore',
          cwd,
          env: process.env,
          // Windows requires shell: true for detached processes to work properly
          ...(isWindows && { shell: true, windowsHide: true }),
        });
        child.unref();
      } catch (promptError) {
        // Don't fail the sync if prompt sending fails - just log warning
        console.error(`Warning: Failed to send prompt to Letta: ${promptError}`);
      }
    }
    // Save state
    if (state && sessionId) {
      saveSyncState(cwd, state);
    }
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    console.error(`Error syncing Letta memory: ${errorMessage}`);
    // Exit with code 1 for non-blocking error
    // Change to exit(2) if you want to block prompt processing on sync failures
    process.exit(1);
  }
}
// Run main function
main();