Merge branch 'pr-1368' into integration/validation-batch

# Conflicts:
#	plugin/scripts/context-generator.cjs
#	plugin/scripts/mcp-server.cjs
#	plugin/scripts/worker-service.cjs
#	plugin/ui/viewer-bundle.js
This commit is contained in:
Alex Newman
2026-04-06 14:19:23 -07:00
8 changed files with 522 additions and 432 deletions

View File

@@ -358,6 +358,90 @@ describe('GeminiAgent', () => {
}
});
describe('conversation history truncation', () => {
  // Minimal session fixture; each test overrides only the fields it exercises.
  const buildSession = (overrides: Record<string, unknown>): any => ({
    sessionDbId: 1,
    contentSessionId: 'test-session',
    memorySessionId: 'mem-session-123',
    project: 'test-project',
    userPrompt: 'test prompt',
    conversationHistory: [],
    lastPromptNumber: 1,
    cumulativeInputTokens: 0,
    cumulativeOutputTokens: 0,
    pendingMessages: [],
    abortController: new AbortController(),
    generatorPromise: null,
    earliestPendingTimestamp: null,
    currentProvider: null,
    startTime: Date.now(),
    processingMessageIds: [],
    ...overrides,
  });

  // Replaces global.fetch with a mock returning a canned Gemini success payload.
  const stubFetch = () => {
    global.fetch = mock(() => Promise.resolve(new Response(JSON.stringify({
      candidates: [{ content: { parts: [{ text: 'response' }] } }]
    }))));
  };

  it('should truncate history when message count exceeds limit', async () => {
    // 25 alternating user/assistant messages — above the 20-message cap.
    const priorMessages: any[] = Array.from({ length: 25 }, (_, i) => ({
      role: i % 2 === 0 ? 'user' : 'assistant',
      content: `message ${i}`,
    }));
    const session = buildSession({
      conversationHistory: priorMessages,
      lastPromptNumber: 2,
    });
    stubFetch();

    await agent.startSession(session);

    // Session init appends one more message (26 total), so the request body
    // must have been trimmed back to the 20-message ceiling.
    const requestBody = JSON.parse((global.fetch as any).mock.calls[0][1].body);
    expect(requestBody.contents.length).toBeLessThanOrEqual(20);
  });

  it('should always keep at least the newest message even if it exceeds token limit', async () => {
    // Shrink the token budget via the settings spy before the session starts.
    loadFromFileSpy.mockImplementation(() => ({
      ...SettingsDefaultsManager.getAllDefaults(),
      CLAUDE_MEM_GEMINI_API_KEY: 'test-api-key',
      CLAUDE_MEM_GEMINI_MODEL: 'gemini-2.5-flash-lite',
      CLAUDE_MEM_GEMINI_RATE_LIMITING_ENABLED: 'false',
      CLAUDE_MEM_GEMINI_MAX_CONTEXT_MESSAGES: '20',
      CLAUDE_MEM_GEMINI_MAX_TOKENS: '1000', // Very low: ~250 chars
      CLAUDE_MEM_DATA_DIR: '/tmp/claude-mem-test',
    }));

    // A single prompt of ~2000 tokens — alone it blows the 1000-token budget.
    const oversizedPrompt = 'x'.repeat(8000);
    const session = buildSession({ userPrompt: oversizedPrompt });
    stubFetch();

    await agent.startSession(session);

    // Truncation must never empty the request: the newest message survives.
    const requestBody = JSON.parse((global.fetch as any).mock.calls[0][1].body);
    expect(requestBody.contents.length).toBeGreaterThanOrEqual(1);
  });
});
describe('gemini-3-flash-preview model support', () => {
it('should accept gemini-3-flash-preview as a valid model', async () => {
// The GeminiModel type includes gemini-3-flash-preview - compile-time check