Session-Level Conversation History Management (#1113)

tanzhenxin
2025-12-03 18:04:48 +08:00
committed by GitHub
parent a7abd8d09f
commit 0a75d85ac9
114 changed files with 9257 additions and 4039 deletions

View File

@@ -5,27 +5,20 @@
*/
import { randomUUID } from 'node:crypto';
import fs from 'node:fs';
import path from 'node:path';
import {
afterEach,
beforeEach,
describe,
expect,
it,
type MockInstance,
vi,
} from 'vitest';
import { execSync } from 'node:child_process';
import fs from 'node:fs';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type { Config } from '../config/config.js';
import { getProjectHash } from '../utils/paths.js';
import {
ChatRecordingService,
type ConversationRecord,
type ToolCallRecord,
type ChatRecord,
} from './chatRecordingService.js';
import * as jsonl from '../utils/jsonl-utils.js';
import type { Part } from '@google/genai';
vi.mock('node:fs');
vi.mock('node:path');
vi.mock('node:child_process');
vi.mock('node:crypto', () => ({
randomUUID: vi.fn(),
createHash: vi.fn(() => ({
@@ -34,23 +27,28 @@ vi.mock('node:crypto', () => ({
})),
})),
}));
vi.mock('../utils/paths.js');
vi.mock('../utils/jsonl-utils.js');
describe('ChatRecordingService', () => {
let chatRecordingService: ChatRecordingService;
let mockConfig: Config;
let mkdirSyncSpy: MockInstance<typeof fs.mkdirSync>;
let writeFileSyncSpy: MockInstance<typeof fs.writeFileSync>;
let uuidCounter = 0;
beforeEach(() => {
uuidCounter = 0;
mockConfig = {
getSessionId: vi.fn().mockReturnValue('test-session-id'),
getProjectRoot: vi.fn().mockReturnValue('/test/project/root'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
storage: {
getProjectTempDir: vi
.fn()
.mockReturnValue('/test/project/root/.gemini/tmp'),
.mockReturnValue('/test/project/root/.gemini/tmp/hash'),
getProjectDir: vi
.fn()
.mockReturnValue('/test/project/root/.gemini/projects/test-project'),
},
getModel: vi.fn().mockReturnValue('gemini-pro'),
getDebugMode: vi.fn().mockReturnValue(false),
@@ -61,351 +59,270 @@ describe('ChatRecordingService', () => {
isOutputMarkdown: false,
}),
}),
getResumedSessionData: vi.fn().mockReturnValue(undefined),
} as unknown as Config;
vi.mocked(getProjectHash).mockReturnValue('test-project-hash');
vi.mocked(randomUUID).mockReturnValue('this-is-a-test-uuid');
vi.mocked(randomUUID).mockImplementation(
() =>
`00000000-0000-0000-0000-00000000000${++uuidCounter}` as `${string}-${string}-${string}-${string}-${string}`,
);
vi.mocked(path.join).mockImplementation((...args) => args.join('/'));
vi.mocked(path.dirname).mockImplementation((p) => {
const parts = p.split('/');
parts.pop();
return parts.join('/');
});
vi.mocked(execSync).mockReturnValue('main\n');
vi.spyOn(fs, 'mkdirSync').mockImplementation(() => undefined);
vi.spyOn(fs, 'writeFileSync').mockImplementation(() => undefined);
vi.spyOn(fs, 'existsSync').mockReturnValue(false);
chatRecordingService = new ChatRecordingService(mockConfig);
mkdirSyncSpy = vi
.spyOn(fs, 'mkdirSync')
.mockImplementation(() => undefined);
writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
// Mock jsonl-utils
vi.mocked(jsonl.writeLineSync).mockImplementation(() => undefined);
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('initialize', () => {
it('should create a new session if none is provided', () => {
chatRecordingService.initialize();
describe('recordUserMessage', () => {
it('should record a user message immediately', () => {
const userParts: Part[] = [{ text: 'Hello, world!' }];
chatRecordingService.recordUserMessage(userParts);
expect(mkdirSyncSpy).toHaveBeenCalledWith(
'/test/project/root/.gemini/tmp/chats',
{ recursive: true },
);
expect(writeFileSyncSpy).not.toHaveBeenCalled();
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(1);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
expect(record.uuid).toBe('00000000-0000-0000-0000-000000000001');
expect(record.parentUuid).toBeNull();
expect(record.type).toBe('user');
// The service wraps parts in a Content object using createUserContent
expect(record.message).toEqual({ role: 'user', parts: userParts });
expect(record.sessionId).toBe('test-session-id');
expect(record.cwd).toBe('/test/project/root');
expect(record.version).toBe('1.0.0');
expect(record.gitBranch).toBe('main');
});
it('should resume from an existing session if provided', () => {
const readFileSyncSpy = vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify({
sessionId: 'old-session-id',
projectHash: 'test-project-hash',
messages: [],
}),
);
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
chatRecordingService.initialize({
filePath: '/test/project/root/.gemini/tmp/chats/session.json',
conversation: {
sessionId: 'old-session-id',
} as ConversationRecord,
});
expect(mkdirSyncSpy).not.toHaveBeenCalled();
expect(readFileSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).not.toHaveBeenCalled();
});
});
describe('recordMessage', () => {
beforeEach(() => {
chatRecordingService.initialize();
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify({
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [],
}),
);
});
it('should record a new message', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
chatRecordingService.recordMessage({
type: 'user',
content: 'Hello',
it('should chain messages correctly with parentUuid', () => {
chatRecordingService.recordUserMessage([{ text: 'First message' }]);
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: [{ text: 'Response' }],
});
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages).toHaveLength(1);
expect(conversation.messages[0].content).toBe('Hello');
expect(conversation.messages[0].type).toBe('user');
chatRecordingService.recordUserMessage([{ text: 'Second message' }]);
const calls = vi.mocked(jsonl.writeLineSync).mock.calls;
const user1 = calls[0][1] as ChatRecord;
const assistant = calls[1][1] as ChatRecord;
const user2 = calls[2][1] as ChatRecord;
expect(user1.uuid).toBe('00000000-0000-0000-0000-000000000001');
expect(user1.parentUuid).toBeNull();
expect(assistant.uuid).toBe('00000000-0000-0000-0000-000000000002');
expect(assistant.parentUuid).toBe('00000000-0000-0000-0000-000000000001');
expect(user2.uuid).toBe('00000000-0000-0000-0000-000000000003');
expect(user2.parentUuid).toBe('00000000-0000-0000-0000-000000000002');
});
});
it('should create separate messages when recording multiple messages', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: '1',
type: 'user',
content: 'Hello',
timestamp: new Date().toISOString(),
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
chatRecordingService.recordMessage({
type: 'user',
content: 'World',
describe('recordAssistantTurn', () => {
it('should record assistant turn with content only', () => {
const parts: Part[] = [{ text: 'Hello!' }];
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: parts,
});
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages).toHaveLength(2);
expect(conversation.messages[0].content).toBe('Hello');
expect(conversation.messages[1].content).toBe('World');
});
});
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(1);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
describe('recordThought', () => {
it('should queue a thought', () => {
chatRecordingService.initialize();
chatRecordingService.recordThought({
subject: 'Thinking',
description: 'Thinking...',
});
// @ts-expect-error private property
expect(chatRecordingService.queuedThoughts).toHaveLength(1);
// @ts-expect-error private property
expect(chatRecordingService.queuedThoughts[0].subject).toBe('Thinking');
// @ts-expect-error private property
expect(chatRecordingService.queuedThoughts[0].description).toBe(
'Thinking...',
);
});
});
describe('recordMessageTokens', () => {
beforeEach(() => {
chatRecordingService.initialize();
expect(record.type).toBe('assistant');
// The service wraps parts in a Content object using createModelContent
expect(record.message).toEqual({ role: 'model', parts });
expect(record.model).toBe('gemini-pro');
expect(record.usageMetadata).toBeUndefined();
expect(record.toolCallResult).toBeUndefined();
});
it('should update the last message with token info', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: '1',
type: 'qwen',
content: 'Response',
timestamp: new Date().toISOString(),
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
chatRecordingService.recordMessageTokens({
promptTokenCount: 1,
candidatesTokenCount: 2,
totalTokenCount: 3,
cachedContentTokenCount: 0,
});
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages[0]).toEqual({
...initialConversation.messages[0],
it('should record assistant turn with all data', () => {
const parts: Part[] = [
{ thought: true, text: 'Thinking...' },
{ text: 'Here is the result.' },
{ functionCall: { name: 'read_file', args: { path: '/test.txt' } } },
];
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: parts,
tokens: {
input: 1,
output: 2,
total: 3,
cached: 0,
thoughts: 0,
tool: 0,
promptTokenCount: 100,
candidatesTokenCount: 50,
cachedContentTokenCount: 10,
totalTokenCount: 160,
},
});
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
// The service wraps parts in a Content object using createModelContent
expect(record.message).toEqual({ role: 'model', parts });
expect(record.model).toBe('gemini-pro');
expect(record.usageMetadata?.totalTokenCount).toBe(160);
});
it('should queue token info if the last message already has tokens', () => {
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: '1',
type: 'qwen',
content: 'Response',
timestamp: new Date().toISOString(),
tokens: { input: 1, output: 1, total: 2, cached: 0 },
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
chatRecordingService.recordMessageTokens({
promptTokenCount: 2,
candidatesTokenCount: 2,
totalTokenCount: 4,
cachedContentTokenCount: 0,
});
// @ts-expect-error private property
expect(chatRecordingService.queuedTokens).toEqual({
input: 2,
output: 2,
total: 4,
cached: 0,
thoughts: 0,
tool: 0,
});
});
});
describe('recordToolCalls', () => {
beforeEach(() => {
chatRecordingService.initialize();
});
it('should add new tool calls to the last message', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: '1',
type: 'qwen',
content: '',
timestamp: new Date().toISOString(),
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
const toolCall: ToolCallRecord = {
id: 'tool-1',
name: 'testTool',
args: {},
status: 'awaiting_approval',
timestamp: new Date().toISOString(),
};
chatRecordingService.recordToolCalls('gemini-pro', [toolCall]);
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages[0]).toEqual({
...initialConversation.messages[0],
toolCalls: [
{
...toolCall,
displayName: 'Test Tool',
description: 'A test tool',
renderOutputAsMarkdown: false,
},
],
});
});
it('should create a new message if the last message is not from gemini', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: 'a-uuid',
type: 'user',
content: 'call a tool',
timestamp: new Date().toISOString(),
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
const toolCall: ToolCallRecord = {
id: 'tool-1',
name: 'testTool',
args: {},
status: 'awaiting_approval',
timestamp: new Date().toISOString(),
};
chatRecordingService.recordToolCalls('gemini-pro', [toolCall]);
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages).toHaveLength(2);
expect(conversation.messages[1]).toEqual({
...conversation.messages[1],
id: 'this-is-a-test-uuid',
it('should record assistant turn with only tokens', () => {
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
type: 'qwen',
thoughts: [],
content: '',
toolCalls: [
{
...toolCall,
displayName: 'Test Tool',
description: 'A test tool',
renderOutputAsMarkdown: false,
},
],
tokens: {
promptTokenCount: 10,
candidatesTokenCount: 20,
cachedContentTokenCount: 0,
totalTokenCount: 30,
},
});
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
expect(record.message).toBeUndefined();
expect(record.usageMetadata?.totalTokenCount).toBe(30);
});
});
describe('deleteSession', () => {
it('should delete the session file', () => {
const unlinkSyncSpy = vi
.spyOn(fs, 'unlinkSync')
.mockImplementation(() => undefined);
chatRecordingService.deleteSession('test-session-id');
expect(unlinkSyncSpy).toHaveBeenCalledWith(
'/test/project/root/.gemini/tmp/chats/test-session-id.json',
);
describe('recordToolResult', () => {
it('should record tool result with Parts', () => {
// First record a user and assistant message to set up the chain
chatRecordingService.recordUserMessage([{ text: 'Hello' }]);
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: [{ functionCall: { name: 'shell', args: { command: 'ls' } } }],
});
// Now record the tool result (Parts with functionResponse)
const toolResultParts: Part[] = [
{
functionResponse: {
id: 'call-1',
name: 'shell',
response: { output: 'file1.txt\nfile2.txt' },
},
},
];
chatRecordingService.recordToolResult(toolResultParts);
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(3);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[2][1] as ChatRecord;
expect(record.type).toBe('tool_result');
// The service wraps parts in a Content object using createUserContent
expect(record.message).toEqual({ role: 'user', parts: toolResultParts });
});
it('should record tool result with toolCallResult metadata', () => {
const toolResultParts: Part[] = [
{
functionResponse: {
id: 'call-1',
name: 'shell',
response: { output: 'result' },
},
},
];
const metadata = {
callId: 'call-1',
status: 'success',
responseParts: toolResultParts,
resultDisplay: undefined,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any;
chatRecordingService.recordToolResult(toolResultParts, metadata);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
expect(record.type).toBe('tool_result');
// The service wraps parts in a Content object using createUserContent
expect(record.message).toEqual({ role: 'user', parts: toolResultParts });
expect(record.toolCallResult).toBeDefined();
expect(record.toolCallResult?.callId).toBe('call-1');
});
it('should chain tool result correctly with parentUuid', () => {
chatRecordingService.recordUserMessage([{ text: 'Hello' }]);
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: [{ text: 'Using tool' }],
});
const toolResultParts: Part[] = [
{
functionResponse: {
id: 'call-1',
name: 'shell',
response: { output: 'done' },
},
},
];
chatRecordingService.recordToolResult(toolResultParts);
const userRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
const assistantRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[1][1] as ChatRecord;
const toolResultRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[2][1] as ChatRecord;
expect(userRecord.parentUuid).toBeNull();
expect(assistantRecord.parentUuid).toBe(userRecord.uuid);
expect(toolResultRecord.parentUuid).toBe(assistantRecord.uuid);
});
});
describe('recordSlashCommand', () => {
it('should record slash command with payload and subtype', () => {
chatRecordingService.recordSlashCommand({
phase: 'invocation',
rawCommand: '/about',
});
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(1);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
expect(record.type).toBe('system');
expect(record.subtype).toBe('slash_command');
expect(record.systemPayload).toMatchObject({
phase: 'invocation',
rawCommand: '/about',
});
});
it('should chain slash command after prior records', () => {
chatRecordingService.recordUserMessage([{ text: 'Hello' }]);
chatRecordingService.recordSlashCommand({
phase: 'result',
rawCommand: '/about',
});
const userRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
const slashRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[1][1] as ChatRecord;
expect(userRecord.parentUuid).toBeNull();
expect(slashRecord.parentUuid).toBe(userRecord.uuid);
});
});
// Note: Session management tests (listSessions, loadSession, deleteSession, etc.)
// have been moved to sessionService.test.ts
// Session resume integration tests should go through a SessionService mock.
});

View File

@@ -5,96 +5,127 @@
*/
import { type Config } from '../config/config.js';
import { type Status } from '../core/coreToolScheduler.js';
import { type ThoughtSummary } from '../utils/thoughtUtils.js';
import { getProjectHash } from '../utils/paths.js';
import path from 'node:path';
import fs from 'node:fs';
import { randomUUID } from 'node:crypto';
import type {
PartListUnion,
GenerateContentResponseUsageMetadata,
import {
type PartListUnion,
type Content,
type GenerateContentResponseUsageMetadata,
createUserContent,
createModelContent,
} from '@google/genai';
import * as jsonl from '../utils/jsonl-utils.js';
import { getGitBranch } from '../utils/gitUtils.js';
import type {
ChatCompressionInfo,
ToolCallResponseInfo,
} from '../core/turn.js';
import type { Status } from '../core/coreToolScheduler.js';
import type { TaskResultDisplay } from '../tools/tools.js';
import type { UiEvent } from '../telemetry/uiTelemetry.js';
/**
* Token usage summary for a message or conversation.
* A single record stored in the JSONL file.
* Forms a tree structure via uuid/parentUuid for future checkpointing support.
*
* Each record is self-contained with full metadata, enabling:
* - Append-only writes (crash-safe)
* - Tree reconstruction by following parentUuid chain
* - Future checkpointing by branching from any historical record
*/
export interface TokensSummary {
input: number; // promptTokenCount
output: number; // candidatesTokenCount
cached: number; // cachedContentTokenCount
thoughts?: number; // thoughtsTokenCount
tool?: number; // toolUsePromptTokenCount
total: number; // totalTokenCount
}
/**
* Base fields common to all messages.
*/
export interface BaseMessageRecord {
id: string;
timestamp: string;
content: PartListUnion;
}
/**
* Record of a tool call execution within a conversation.
*/
export interface ToolCallRecord {
id: string;
name: string;
args: Record<string, unknown>;
result?: PartListUnion | null;
status: Status;
timestamp: string;
// UI-specific fields for display purposes
displayName?: string;
description?: string;
resultDisplay?: string;
renderOutputAsMarkdown?: boolean;
}
/**
* Message type and message type-specific fields.
*/
export type ConversationRecordExtra =
| {
type: 'user';
}
| {
type: 'qwen';
toolCalls?: ToolCallRecord[];
thoughts?: Array<ThoughtSummary & { timestamp: string }>;
tokens?: TokensSummary | null;
model?: string;
};
/**
* A single message record in a conversation.
*/
export type MessageRecord = BaseMessageRecord & ConversationRecordExtra;
/**
* Complete conversation record stored in session files.
*/
export interface ConversationRecord {
export interface ChatRecord {
/** Unique identifier for this logical message */
uuid: string;
/** UUID of the parent message; null for root (first message in session) */
parentUuid: string | null;
/** Session identifier - groups records into a logical conversation */
sessionId: string;
projectHash: string;
startTime: string;
lastUpdated: string;
messages: MessageRecord[];
/** ISO 8601 timestamp of when the record was created */
timestamp: string;
/**
* Message type: user input, assistant response, tool result, or system event.
* System records are append-only events that can alter how history is reconstructed
* (e.g., chat compression checkpoints) while keeping the original UI history intact.
*/
type: 'user' | 'assistant' | 'tool_result' | 'system';
/** Optional system subtype for distinguishing system behaviors */
subtype?: 'chat_compression' | 'slash_command' | 'ui_telemetry';
/** Working directory at time of message */
cwd: string;
/** CLI version for compatibility tracking */
version: string;
/** Current git branch, if available */
gitBranch?: string;
// Content field - raw API format for history reconstruction
/**
* The actual Content object (role + parts) sent to/from LLM.
* This is stored in the exact format needed for API calls, enabling
* direct aggregation into Content[] for session resumption.
* Contains: text, functionCall, functionResponse, thought parts, etc.
*/
message?: Content;
// Metadata fields (not part of API Content)
/** Token usage statistics */
usageMetadata?: GenerateContentResponseUsageMetadata;
/** Model used for this response */
model?: string;
/**
* Tool call metadata for UI recovery.
* Contains enriched info (displayName, status, result, etc.) not in API format.
*/
toolCallResult?: Partial<ToolCallResponseInfo>;
/**
* Payload for system records. For chat compression, this stores all data needed
* to reconstruct the compressed history without mutating the original UI list.
*/
systemPayload?:
| ChatCompressionRecordPayload
| SlashCommandRecordPayload
| UiTelemetryRecordPayload;
}
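// Illustrative sketch (hypothetical helper; the real reconstruction lives in
// SessionService): rebuilding a linear history from appended ChatRecord lines by
// walking the parentUuid chain backwards from the most recent record. Aggregation
// of multiple records that share a uuid is ignored here for brevity.
function reconstructChain(records: ChatRecord[]): ChatRecord[] {
  // Index records by uuid; later lines win if a uuid appears more than once.
  const byUuid = new Map<string, ChatRecord>();
  for (const record of records) byUuid.set(record.uuid, record);
  // Walk from the last appended record back to the root (parentUuid === null).
  const chain: ChatRecord[] = [];
  let current: ChatRecord | undefined = records[records.length - 1];
  while (current) {
    chain.unshift(current);
    current = current.parentUuid ? byUuid.get(current.parentUuid) : undefined;
  }
  return chain;
}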
/**
* Data structure for resuming an existing session.
* Stored payload for chat compression checkpoints. This allows us to rebuild the
* effective chat history on resume while keeping the original UI-visible history.
*/
export interface ResumedSessionData {
conversation: ConversationRecord;
filePath: string;
export interface ChatCompressionRecordPayload {
/** Compression metrics/status returned by the compression service */
info: ChatCompressionInfo;
/**
* Snapshot of the new history contents that the model should see after
* compression (summary turns + retained tail). Stored as Content[] for
* resume reconstruction.
*/
compressedHistory: Content[];
}
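// Illustrative sketch (hypothetical helper, mirroring what the tests for
// buildApiHistoryFromConversation expect): the latest compression checkpoint's
// snapshot replaces everything recorded before it, and records appended after the
// checkpoint are layered on top of the snapshot.
function applyCompressionCheckpoint(records: ChatRecord[]): Content[] {
  // Find the most recent chat_compression system record, if any.
  let checkpointIndex = -1;
  for (let i = records.length - 1; i >= 0; i--) {
    if (records[i].type === 'system' && records[i].subtype === 'chat_compression') {
      checkpointIndex = i;
      break;
    }
  }
  const messagesOf = (rs: ChatRecord[]): Content[] =>
    rs.flatMap((r) => (r.message ? [r.message] : []));
  if (checkpointIndex === -1) {
    // No checkpoint: the API history is just the recorded Content objects in order.
    return messagesOf(records);
  }
  const payload = records[checkpointIndex]
    .systemPayload as ChatCompressionRecordPayload;
  return [
    ...payload.compressedHistory,
    ...messagesOf(records.slice(checkpointIndex + 1)),
  ];
}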
export interface SlashCommandRecordPayload {
/** Whether this record represents the invocation or the resulting output. */
phase: 'invocation' | 'result';
/** Raw user-entered slash command (e.g., "/about"). */
rawCommand: string;
/**
* History items the UI displayed for this command, in the same shape used by
* the CLI (without IDs). Stored as plain objects for replay on resume.
*/
outputHistoryItems?: Array<Record<string, unknown>>;
}
/**
* Service for automatically recording chat conversations to disk.
* Stored payload for UI telemetry replay.
*/
export interface UiTelemetryRecordPayload {
uiEvent: UiEvent;
}
/**
* Service for recording the current chat session to disk.
*
* This service provides comprehensive conversation recording that captures:
* - All user and assistant messages
@@ -102,346 +133,276 @@ export interface ResumedSessionData {
* - Token usage statistics
* - Assistant thoughts and reasoning
*
* Sessions are stored as JSON files in ~/.qwen/tmp/<project_hash>/chats/
* **API Design:**
* - `recordUserMessage()` - Records a user message (immediate write)
* - `recordAssistantTurn()` - Records an assistant turn with all data (immediate write)
* - `recordToolResult()` - Records tool results (immediate write)
*
* **Storage Format:** JSONL files with tree-structured records.
* Each record has uuid/parentUuid fields enabling:
* - Append-only writes (never rewrite the file)
* - Linear history reconstruction
* - Future checkpointing (branch from any historical point)
*
* File location: ~/.qwen/tmp/<project_id>/chats/
*
* For session management (list, load, remove), use SessionService.
*/
export class ChatRecordingService {
private conversationFile: string | null = null;
private cachedLastConvData: string | null = null;
private sessionId: string;
private projectHash: string;
private queuedThoughts: Array<ThoughtSummary & { timestamp: string }> = [];
private queuedTokens: TokensSummary | null = null;
private config: Config;
/** UUID of the last written record in the chain */
private lastRecordUuid: string | null = null;
private readonly config: Config;
constructor(config: Config) {
this.config = config;
this.sessionId = config.getSessionId();
this.projectHash = getProjectHash(config.getProjectRoot());
this.lastRecordUuid =
config.getResumedSessionData()?.lastCompletedUuid ?? null;
}
/**
* Initializes the chat recording service: creates a new conversation file and associates it with
* this service instance, or resumes from an existing session if resumedSessionData is provided.
* Returns the session ID.
* @returns The session ID.
*/
initialize(resumedSessionData?: ResumedSessionData): void {
private getSessionId(): string {
return this.config.getSessionId();
}
/**
* Ensures the chats directory exists, creating it if necessary.
* Creation errors are ignored; any failure surfaces later when the session file is written.
* @returns The path to the chats directory.
*/
private ensureChatsDir(): string {
const projectDir = this.config.storage.getProjectDir();
const chatsDir = path.join(projectDir, 'chats');
try {
if (resumedSessionData) {
// Resume from existing session
this.conversationFile = resumedSessionData.filePath;
this.sessionId = resumedSessionData.conversation.sessionId;
// Update the session ID in the existing file
this.updateConversation((conversation) => {
conversation.sessionId = this.sessionId;
});
// Clear any cached data to force fresh reads
this.cachedLastConvData = null;
} else {
// Create new session
const chatsDir = path.join(
this.config.storage.getProjectTempDir(),
'chats',
);
fs.mkdirSync(chatsDir, { recursive: true });
const timestamp = new Date()
.toISOString()
.slice(0, 16)
.replace(/:/g, '-');
const filename = `session-${timestamp}-${this.sessionId.slice(
0,
8,
)}.json`;
this.conversationFile = path.join(chatsDir, filename);
this.writeConversation({
sessionId: this.sessionId,
projectHash: this.projectHash,
startTime: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
messages: [],
});
}
// Clear any queued data since this is a fresh start
this.queuedThoughts = [];
this.queuedTokens = null;
} catch (error) {
console.error('Error initializing chat recording service:', error);
throw error;
fs.mkdirSync(chatsDir, { recursive: true });
} catch {
// Ignore errors here - the directory usually already exists; any other failure
// will surface when the conversation file is written.
}
return chatsDir;
}
private getLastMessage(
conversation: ConversationRecord,
): MessageRecord | undefined {
return conversation.messages.at(-1);
/**
* Ensures the conversation file exists, creating it if necessary.
* Uses atomic file creation to avoid race conditions.
* @returns The path to the conversation file.
* @throws Error if the file cannot be created or accessed.
*/
private ensureConversationFile(): string {
const chatsDir = this.ensureChatsDir();
const sessionId = this.getSessionId();
const safeFilename = `${sessionId}.jsonl`;
const conversationFile = path.join(chatsDir, safeFilename);
if (fs.existsSync(conversationFile)) {
return conversationFile;
}
try {
// Use 'wx' flag for exclusive creation - atomic operation that fails if file exists
// This avoids the TOCTOU race condition of existsSync + writeFileSync
fs.writeFileSync(conversationFile, '', { flag: 'wx', encoding: 'utf8' });
} catch (error) {
const nodeError = error as NodeJS.ErrnoException;
// EEXIST means file already exists, which is expected and fine
if (nodeError.code !== 'EEXIST') {
const message = error instanceof Error ? error.message : String(error);
throw new Error(
`Failed to create conversation file at ${conversationFile}: ${message}`,
);
}
}
return conversationFile;
}
private newMessage(
type: ConversationRecordExtra['type'],
content: PartListUnion,
): MessageRecord {
/**
* Creates base fields for a ChatRecord.
*/
private createBaseRecord(
type: ChatRecord['type'],
): Omit<ChatRecord, 'message' | 'tokens' | 'model' | 'toolCallsMetadata'> {
return {
id: randomUUID(),
uuid: randomUUID(),
parentUuid: this.lastRecordUuid,
sessionId: this.getSessionId(),
timestamp: new Date().toISOString(),
type,
content,
cwd: this.config.getProjectRoot(),
version: this.config.getCliVersion() || 'unknown',
gitBranch: getGitBranch(this.config.getProjectRoot()),
};
}
/**
* Records a message in the conversation.
* Appends a record to the session file and updates lastRecordUuid.
*/
recordMessage(message: {
private appendRecord(record: ChatRecord): void {
try {
const conversationFile = this.ensureConversationFile();
jsonl.writeLineSync(conversationFile, record);
this.lastRecordUuid = record.uuid;
} catch (error) {
console.error('Error appending record:', error);
throw error;
}
}
/**
* Records a user message.
* Writes immediately to disk.
*
* @param message The raw PartListUnion object as used with the API
*/
recordUserMessage(message: PartListUnion): void {
try {
const record: ChatRecord = {
...this.createBaseRecord('user'),
message: createUserContent(message),
};
this.appendRecord(record);
} catch (error) {
console.error('Error saving user message:', error);
}
}
/**
* Records an assistant turn with all available data.
* Writes immediately to disk.
*
* @param data.message The raw PartListUnion object from the model response
* @param data.model The model name
* @param data.tokens Token usage statistics
* @param data.toolCallsMetadata Enriched tool call info for UI recovery
*/
recordAssistantTurn(data: {
model: string;
type: ConversationRecordExtra['type'];
content: PartListUnion;
message?: PartListUnion;
tokens?: GenerateContentResponseUsageMetadata;
}): void {
if (!this.conversationFile) return;
try {
this.updateConversation((conversation) => {
const msg = this.newMessage(message.type, message.content);
if (msg.type === 'qwen') {
// If it's a new Gemini message then incorporate any queued thoughts.
conversation.messages.push({
...msg,
thoughts: this.queuedThoughts,
tokens: this.queuedTokens,
model: message.model,
});
this.queuedThoughts = [];
this.queuedTokens = null;
} else {
// Or else just add it.
conversation.messages.push(msg);
}
});
const record: ChatRecord = {
...this.createBaseRecord('assistant'),
model: data.model,
};
if (data.message !== undefined) {
record.message = createModelContent(data.message);
}
if (data.tokens) {
record.usageMetadata = data.tokens;
}
this.appendRecord(record);
} catch (error) {
console.error('Error saving message:', error);
throw error;
console.error('Error saving assistant turn:', error);
}
}
/**
* Records a thought from the assistant's reasoning process.
* Records tool results (function responses) sent back to the model.
* Writes immediately to disk.
*
* @param message The raw PartListUnion object with functionResponse parts
* @param toolCallResult Optional tool call result info for UI recovery
*/
recordThought(thought: ThoughtSummary): void {
if (!this.conversationFile) return;
try {
this.queuedThoughts.push({
...thought,
timestamp: new Date().toISOString(),
});
} catch (error) {
console.error('Error saving thought:', error);
throw error;
}
}
/**
* Updates the tokens for the last message in the conversation (which should be by Gemini).
*/
recordMessageTokens(
respUsageMetadata: GenerateContentResponseUsageMetadata,
recordToolResult(
message: PartListUnion,
toolCallResult?: Partial<ToolCallResponseInfo> & { status: Status },
): void {
if (!this.conversationFile) return;
try {
const tokens = {
input: respUsageMetadata.promptTokenCount ?? 0,
output: respUsageMetadata.candidatesTokenCount ?? 0,
cached: respUsageMetadata.cachedContentTokenCount ?? 0,
thoughts: respUsageMetadata.thoughtsTokenCount ?? 0,
tool: respUsageMetadata.toolUsePromptTokenCount ?? 0,
total: respUsageMetadata.totalTokenCount ?? 0,
const record: ChatRecord = {
...this.createBaseRecord('tool_result'),
message: createUserContent(message),
};
this.updateConversation((conversation) => {
const lastMsg = this.getLastMessage(conversation);
// If the last message already has token info, it's because this new token info is for a
// new message that hasn't been recorded yet.
if (lastMsg && lastMsg.type === 'qwen' && !lastMsg.tokens) {
lastMsg.tokens = tokens;
this.queuedTokens = null;
} else {
this.queuedTokens = tokens;
}
});
} catch (error) {
console.error('Error updating message tokens:', error);
throw error;
}
}
/**
* Adds tool calls to the last message in the conversation (which should be by Gemini).
* This method enriches tool calls with metadata from the ToolRegistry.
*/
recordToolCalls(model: string, toolCalls: ToolCallRecord[]): void {
if (!this.conversationFile) return;
// Enrich tool calls with metadata from the ToolRegistry
const toolRegistry = this.config.getToolRegistry();
const enrichedToolCalls = toolCalls.map((toolCall) => {
const toolInstance = toolRegistry.getTool(toolCall.name);
return {
...toolCall,
displayName: toolInstance?.displayName || toolCall.name,
description: toolInstance?.description || '',
renderOutputAsMarkdown: toolInstance?.isOutputMarkdown || false,
};
});
try {
this.updateConversation((conversation) => {
const lastMsg = this.getLastMessage(conversation);
// If a tool call was made, but the last message isn't from Gemini, it's because Gemini is
// calling tools without starting the message with text. So the user submits a prompt, and
// Gemini immediately calls a tool (maybe with some thinking first). In that case, create
// a new empty Gemini message.
// Also, if there are any queued thoughts, these tool calls belong to a new Gemini
// message, because the model has thought some more since we last (if ever) created
// a new Gemini message from tool calls and dequeued the thoughts.
if (toolCallResult) {
// Special case for task executions: strip the nested tool calls from the recorded result display.
if (
!lastMsg ||
lastMsg.type !== 'qwen' ||
this.queuedThoughts.length > 0
typeof toolCallResult.resultDisplay === 'object' &&
toolCallResult.resultDisplay !== null &&
'type' in toolCallResult.resultDisplay &&
toolCallResult.resultDisplay.type === 'task_execution'
) {
const newMsg: MessageRecord = {
...this.newMessage('qwen' as const, ''),
// This isn't strictly necessary, but TypeScript apparently can't
// tell that the first parameter to newMessage() becomes the
// resulting message's type, and so it thinks that toolCalls may
// not be present. Confirming the type here satisfies it.
type: 'qwen' as const,
toolCalls: enrichedToolCalls,
thoughts: this.queuedThoughts,
model,
const taskResult = toolCallResult.resultDisplay as TaskResultDisplay;
record.toolCallResult = {
...toolCallResult,
resultDisplay: {
...taskResult,
toolCalls: [],
},
};
// If there are any queued thoughts, join them to this message.
if (this.queuedThoughts.length > 0) {
newMsg.thoughts = this.queuedThoughts;
this.queuedThoughts = [];
}
// If there's any queued token info, join it to this message.
if (this.queuedTokens) {
newMsg.tokens = this.queuedTokens;
this.queuedTokens = null;
}
conversation.messages.push(newMsg);
} else {
// The last message is an existing Gemini message that we need to update.
// Update any existing tool call entries.
if (!lastMsg.toolCalls) {
lastMsg.toolCalls = [];
}
lastMsg.toolCalls = lastMsg.toolCalls.map((toolCall) => {
// If there are multiple tool calls with the same ID, this will take the first one.
const incomingToolCall = toolCalls.find(
(tc) => tc.id === toolCall.id,
);
if (incomingToolCall) {
// Merge in the new data to preserve thoughts, etc., that were assigned to older
// versions of the tool call.
return { ...toolCall, ...incomingToolCall };
} else {
return toolCall;
}
});
// Add any new tool calls that aren't in the message yet.
for (const toolCall of enrichedToolCalls) {
const existingToolCall = lastMsg.toolCalls.find(
(tc) => tc.id === toolCall.id,
);
if (!existingToolCall) {
lastMsg.toolCalls.push(toolCall);
}
}
record.toolCallResult = toolCallResult;
}
});
}
this.appendRecord(record);
} catch (error) {
console.error('Error adding tool call to message:', error);
throw error;
console.error('Error saving tool result:', error);
}
}
/**
* Loads up the conversation record from disk.
* Records a slash command invocation as a system record. This keeps the model
* history clean while allowing resume to replay UI output for commands like
* /about.
*/
private readConversation(): ConversationRecord {
recordSlashCommand(payload: SlashCommandRecordPayload): void {
try {
this.cachedLastConvData = fs.readFileSync(this.conversationFile!, 'utf8');
return JSON.parse(this.cachedLastConvData);
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
console.error('Error reading conversation file:', error);
throw error;
}
// Placeholder empty conversation if file doesn't exist.
return {
sessionId: this.sessionId,
projectHash: this.projectHash,
startTime: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
messages: [],
const record: ChatRecord = {
...this.createBaseRecord('system'),
type: 'system',
subtype: 'slash_command',
systemPayload: payload,
};
this.appendRecord(record);
} catch (error) {
console.error('Error saving slash command record:', error);
}
}
/**
* Saves the conversation record; overwrites the file.
* Records a chat compression checkpoint as a system record. This keeps the UI
* history immutable while allowing resume/continue flows to reconstruct the
* compressed model-facing history from the stored snapshot.
*/
private writeConversation(conversation: ConversationRecord): void {
recordChatCompression(payload: ChatCompressionRecordPayload): void {
try {
if (!this.conversationFile) return;
// Don't write the file until there's at least one message.
if (conversation.messages.length === 0) return;
const record: ChatRecord = {
...this.createBaseRecord('system'),
type: 'system',
subtype: 'chat_compression',
systemPayload: payload,
};
// Only write the file if the content has actually changed.
if (this.cachedLastConvData !== JSON.stringify(conversation, null, 2)) {
conversation.lastUpdated = new Date().toISOString();
const newContent = JSON.stringify(conversation, null, 2);
this.cachedLastConvData = newContent;
fs.writeFileSync(this.conversationFile, newContent);
}
this.appendRecord(record);
} catch (error) {
console.error('Error writing conversation file:', error);
throw error;
console.error('Error saving chat compression record:', error);
}
}
/**
* Convenience helper for updating the conversation without the file read/write and
* timestamp-update boilerplate.
* Records a UI telemetry event for replaying metrics on resume.
*/
private updateConversation(
updateFn: (conversation: ConversationRecord) => void,
) {
const conversation = this.readConversation();
updateFn(conversation);
this.writeConversation(conversation);
}
/**
* Deletes a session file by session ID.
*/
deleteSession(sessionId: string): void {
recordUiTelemetryEvent(uiEvent: UiEvent): void {
try {
const chatsDir = path.join(
this.config.storage.getProjectTempDir(),
'chats',
);
const sessionPath = path.join(chatsDir, `${sessionId}.json`);
fs.unlinkSync(sessionPath);
const record: ChatRecord = {
...this.createBaseRecord('system'),
type: 'system',
subtype: 'ui_telemetry',
systemPayload: { uiEvent },
};
this.appendRecord(record);
} catch (error) {
console.error('Error deleting session:', error);
throw error;
console.error('Error saving ui telemetry record:', error);
}
}
}
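// Illustrative usage sketch (the wrapper name `recordExampleTurn` is hypothetical;
// the call sequence mirrors chatRecordingService.test.ts rather than prescribing a
// required order).
export function recordExampleTurn(config: Config): void {
  const recorder = new ChatRecordingService(config);
  // User prompt: written immediately as a 'user' record (root of the chain when
  // no session was resumed).
  recorder.recordUserMessage([{ text: 'List the files here' }]);
  // Assistant turn: model output plus usage metadata, chained to the user record.
  recorder.recordAssistantTurn({
    model: 'gemini-pro',
    message: [{ functionCall: { name: 'shell', args: { command: 'ls' } } }],
    tokens: {
      promptTokenCount: 10,
      candidatesTokenCount: 5,
      totalTokenCount: 15,
    },
  });
  // Tool result: functionResponse parts, recorded as a 'tool_result' record.
  recorder.recordToolResult([
    {
      functionResponse: {
        id: 'call-1',
        name: 'shell',
        response: { output: 'a.txt' },
      },
    },
  ]);
  // Each call appends one JSONL line to <project chats dir>/<sessionId>.jsonl.
}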

View File

@@ -0,0 +1,721 @@
/**
* @license
* Copyright 2025 Qwen Code
* SPDX-License-Identifier: Apache-2.0
*/
import fs from 'node:fs';
import path from 'node:path';
import {
afterEach,
beforeEach,
describe,
expect,
it,
type MockInstance,
vi,
} from 'vitest';
import { getProjectHash } from '../utils/paths.js';
import {
SessionService,
buildApiHistoryFromConversation,
type ConversationRecord,
} from './sessionService.js';
import { CompressionStatus } from '../core/turn.js';
import type { ChatRecord } from './chatRecordingService.js';
import * as jsonl from '../utils/jsonl-utils.js';
vi.mock('node:path');
vi.mock('../utils/paths.js');
vi.mock('../utils/jsonl-utils.js');
describe('SessionService', () => {
let sessionService: SessionService;
let readdirSyncSpy: MockInstance<typeof fs.readdirSync>;
let statSyncSpy: MockInstance<typeof fs.statSync>;
let unlinkSyncSpy: MockInstance<typeof fs.unlinkSync>;
beforeEach(() => {
vi.mocked(getProjectHash).mockReturnValue('test-project-hash');
vi.mocked(path.join).mockImplementation((...args) => args.join('/'));
vi.mocked(path.dirname).mockImplementation((p) => {
const parts = p.split('/');
parts.pop();
return parts.join('/');
});
sessionService = new SessionService('/test/project/root');
readdirSyncSpy = vi.spyOn(fs, 'readdirSync').mockReturnValue([]);
statSyncSpy = vi.spyOn(fs, 'statSync').mockImplementation(
() =>
({
mtimeMs: Date.now(),
isFile: () => true,
}) as fs.Stats,
);
unlinkSyncSpy = vi
.spyOn(fs, 'unlinkSync')
.mockImplementation(() => undefined);
// Mock jsonl-utils
vi.mocked(jsonl.read).mockResolvedValue([]);
vi.mocked(jsonl.readLines).mockResolvedValue([]);
});
afterEach(() => {
vi.restoreAllMocks();
});
// Test session IDs (UUID-like format)
const sessionIdA = '550e8400-e29b-41d4-a716-446655440000';
const sessionIdB = '6ba7b810-9dad-11d1-80b4-00c04fd430c8';
const sessionIdC = '6ba7b811-9dad-11d1-80b4-00c04fd430c8';
// Test records
const recordA1: ChatRecord = {
uuid: 'a1',
parentUuid: null,
sessionId: sessionIdA,
timestamp: '2024-01-01T00:00:00Z',
type: 'user',
message: { role: 'user', parts: [{ text: 'hello session a' }] },
cwd: '/test/project/root',
version: '1.0.0',
gitBranch: 'main',
};
const recordB1: ChatRecord = {
uuid: 'b1',
parentUuid: null,
sessionId: sessionIdB,
timestamp: '2024-01-02T00:00:00Z',
type: 'user',
message: { role: 'user', parts: [{ text: 'hi session b' }] },
cwd: '/test/project/root',
version: '1.0.0',
gitBranch: 'feature',
};
const recordB2: ChatRecord = {
uuid: 'b2',
parentUuid: 'b1',
sessionId: sessionIdB,
timestamp: '2024-01-02T02:00:00Z',
type: 'assistant',
message: { role: 'model', parts: [{ text: 'hey back' }] },
cwd: '/test/project/root',
version: '1.0.0',
};
describe('listSessions', () => {
it('should return empty list when no sessions exist', async () => {
readdirSyncSpy.mockReturnValue([]);
const result = await sessionService.listSessions();
expect(result.items).toHaveLength(0);
expect(result.hasMore).toBe(false);
expect(result.nextCursor).toBeUndefined();
});
it('should return empty list when chats directory does not exist', async () => {
const error = new Error('ENOENT') as NodeJS.ErrnoException;
error.code = 'ENOENT';
readdirSyncSpy.mockImplementation(() => {
throw error;
});
const result = await sessionService.listSessions();
expect(result.items).toHaveLength(0);
expect(result.hasMore).toBe(false);
});
it('should list sessions sorted by mtime descending', async () => {
const now = Date.now();
readdirSyncSpy.mockReturnValue([
`${sessionIdA}.jsonl`,
`${sessionIdB}.jsonl`,
] as unknown as Array<fs.Dirent<Buffer>>);
statSyncSpy.mockImplementation((filePath: fs.PathLike) => {
const path = filePath.toString();
return {
mtimeMs: path.includes(sessionIdB) ? now : now - 10000,
isFile: () => true,
} as fs.Stats;
});
vi.mocked(jsonl.readLines).mockImplementation(
async (filePath: string) => {
if (filePath.includes(sessionIdA)) {
return [recordA1];
}
return [recordB1];
},
);
const result = await sessionService.listSessions();
expect(result.items).toHaveLength(2);
// sessionIdB should be first (more recent mtime)
expect(result.items[0].sessionId).toBe(sessionIdB);
expect(result.items[1].sessionId).toBe(sessionIdA);
});
it('should extract prompt text from first record', async () => {
const now = Date.now();
readdirSyncSpy.mockReturnValue([
`${sessionIdA}.jsonl`,
] as unknown as Array<fs.Dirent<Buffer>>);
statSyncSpy.mockReturnValue({
mtimeMs: now,
isFile: () => true,
} as fs.Stats);
vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
const result = await sessionService.listSessions();
expect(result.items[0].prompt).toBe('hello session a');
expect(result.items[0].gitBranch).toBe('main');
});
it('should truncate long prompts', async () => {
const longPrompt = 'A'.repeat(300);
const recordWithLongPrompt: ChatRecord = {
...recordA1,
message: { role: 'user', parts: [{ text: longPrompt }] },
};
readdirSyncSpy.mockReturnValue([
`${sessionIdA}.jsonl`,
] as unknown as Array<fs.Dirent<Buffer>>);
statSyncSpy.mockReturnValue({
mtimeMs: Date.now(),
isFile: () => true,
} as fs.Stats);
vi.mocked(jsonl.readLines).mockResolvedValue([recordWithLongPrompt]);
const result = await sessionService.listSessions();
expect(result.items[0].prompt.length).toBe(203); // 200 + '...'
expect(result.items[0].prompt.endsWith('...')).toBe(true);
});
it('should paginate with size parameter', async () => {
const now = Date.now();
readdirSyncSpy.mockReturnValue([
`${sessionIdA}.jsonl`,
`${sessionIdB}.jsonl`,
`${sessionIdC}.jsonl`,
] as unknown as Array<fs.Dirent<Buffer>>);
statSyncSpy.mockImplementation((filePath: fs.PathLike) => {
const path = filePath.toString();
let mtime = now;
if (path.includes(sessionIdB)) mtime = now - 1000;
if (path.includes(sessionIdA)) mtime = now - 2000;
return {
mtimeMs: mtime,
isFile: () => true,
} as fs.Stats;
});
vi.mocked(jsonl.readLines).mockImplementation(
async (filePath: string) => {
if (filePath.includes(sessionIdC)) {
return [{ ...recordA1, sessionId: sessionIdC }];
}
if (filePath.includes(sessionIdB)) {
return [recordB1];
}
return [recordA1];
},
);
const result = await sessionService.listSessions({ size: 2 });
expect(result.items).toHaveLength(2);
expect(result.items[0].sessionId).toBe(sessionIdC); // newest
expect(result.items[1].sessionId).toBe(sessionIdB);
expect(result.hasMore).toBe(true);
expect(result.nextCursor).toBeDefined();
});
it('should paginate with cursor parameter', async () => {
const now = Date.now();
const oldMtime = now - 2000;
const cursorMtime = now - 1000;
readdirSyncSpy.mockReturnValue([
`${sessionIdA}.jsonl`,
`${sessionIdB}.jsonl`,
`${sessionIdC}.jsonl`,
] as unknown as Array<fs.Dirent<Buffer>>);
statSyncSpy.mockImplementation((filePath: fs.PathLike) => {
const path = filePath.toString();
let mtime = now;
if (path.includes(sessionIdB)) mtime = cursorMtime;
if (path.includes(sessionIdA)) mtime = oldMtime;
return {
mtimeMs: mtime,
isFile: () => true,
} as fs.Stats;
});
vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
// Get items older than cursor (cursorMtime)
const result = await sessionService.listSessions({ cursor: cursorMtime });
expect(result.items).toHaveLength(1);
expect(result.items[0].sessionId).toBe(sessionIdA);
expect(result.hasMore).toBe(false);
});
it('should skip files from different projects', async () => {
readdirSyncSpy.mockReturnValue([
`${sessionIdA}.jsonl`,
] as unknown as Array<fs.Dirent<Buffer>>);
statSyncSpy.mockReturnValue({
mtimeMs: Date.now(),
isFile: () => true,
} as fs.Stats);
// This record is from a different cwd (different project)
const differentProjectRecord: ChatRecord = {
...recordA1,
cwd: '/different/project',
};
vi.mocked(jsonl.readLines).mockResolvedValue([differentProjectRecord]);
vi.mocked(getProjectHash).mockImplementation((cwd: string) =>
cwd === '/test/project/root'
? 'test-project-hash'
: 'other-project-hash',
);
const result = await sessionService.listSessions();
expect(result.items).toHaveLength(0);
});
it('should skip files that do not match session file pattern', async () => {
readdirSyncSpy.mockReturnValue([
`${sessionIdA}.jsonl`, // valid
'not-a-uuid.jsonl', // invalid pattern
'readme.txt', // not jsonl
'.hidden.jsonl', // hidden file
] as unknown as Array<fs.Dirent<Buffer>>);
statSyncSpy.mockReturnValue({
mtimeMs: Date.now(),
isFile: () => true,
} as fs.Stats);
vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
const result = await sessionService.listSessions();
// Only the valid UUID pattern file should be processed
expect(result.items).toHaveLength(1);
expect(result.items[0].sessionId).toBe(sessionIdA);
});
});
describe('loadSession', () => {
it('should load a session by id and reconstruct history', async () => {
const now = Date.now();
statSyncSpy.mockReturnValue({
mtimeMs: now,
isFile: () => true,
} as fs.Stats);
vi.mocked(jsonl.read).mockResolvedValue([recordB1, recordB2]);
const loaded = await sessionService.loadSession(sessionIdB);
expect(loaded?.conversation.sessionId).toBe(sessionIdB);
expect(loaded?.conversation.messages).toHaveLength(2);
expect(loaded?.conversation.messages[0].uuid).toBe('b1');
expect(loaded?.conversation.messages[1].uuid).toBe('b2');
expect(loaded?.lastCompletedUuid).toBe('b2');
});
it('should return undefined when session file is empty', async () => {
vi.mocked(jsonl.read).mockResolvedValue([]);
const loaded = await sessionService.loadSession('nonexistent');
expect(loaded).toBeUndefined();
});
it('should return undefined when session belongs to different project', async () => {
const now = Date.now();
statSyncSpy.mockReturnValue({
mtimeMs: now,
isFile: () => true,
} as fs.Stats);
const differentProjectRecord: ChatRecord = {
...recordA1,
cwd: '/different/project',
};
vi.mocked(jsonl.read).mockResolvedValue([differentProjectRecord]);
vi.mocked(getProjectHash).mockImplementation((cwd: string) =>
cwd === '/test/project/root'
? 'test-project-hash'
: 'other-project-hash',
);
const loaded = await sessionService.loadSession(sessionIdA);
expect(loaded).toBeUndefined();
});
it('should reconstruct tree-structured history correctly', async () => {
const records: ChatRecord[] = [
{
uuid: 'r1',
parentUuid: null,
sessionId: 'test',
timestamp: '2024-01-01T00:00:00Z',
type: 'user',
message: { role: 'user', parts: [{ text: 'First' }] },
cwd: '/test/project/root',
version: '1.0.0',
},
{
uuid: 'r2',
parentUuid: 'r1',
sessionId: 'test',
timestamp: '2024-01-01T00:01:00Z',
type: 'assistant',
message: { role: 'model', parts: [{ text: 'Second' }] },
cwd: '/test/project/root',
version: '1.0.0',
},
{
uuid: 'r3',
parentUuid: 'r2',
sessionId: 'test',
timestamp: '2024-01-01T00:02:00Z',
type: 'user',
message: { role: 'user', parts: [{ text: 'Third' }] },
cwd: '/test/project/root',
version: '1.0.0',
},
];
statSyncSpy.mockReturnValue({
mtimeMs: Date.now(),
isFile: () => true,
} as fs.Stats);
vi.mocked(jsonl.read).mockResolvedValue(records);
const loaded = await sessionService.loadSession('test');
expect(loaded?.conversation.messages).toHaveLength(3);
expect(loaded?.conversation.messages.map((m) => m.uuid)).toEqual([
'r1',
'r2',
'r3',
]);
});
it('should aggregate multiple records with same uuid', async () => {
const records: ChatRecord[] = [
{
uuid: 'u1',
parentUuid: null,
sessionId: 'test',
timestamp: '2024-01-01T00:00:00Z',
type: 'user',
message: { role: 'user', parts: [{ text: 'Hello' }] },
cwd: '/test/project/root',
version: '1.0.0',
},
// Multiple records for same assistant message
{
uuid: 'a1',
parentUuid: 'u1',
sessionId: 'test',
timestamp: '2024-01-01T00:01:00Z',
type: 'assistant',
message: {
role: 'model',
parts: [{ thought: true, text: 'Thinking...' }],
},
cwd: '/test/project/root',
version: '1.0.0',
},
{
uuid: 'a1',
parentUuid: 'u1',
sessionId: 'test',
timestamp: '2024-01-01T00:01:01Z',
type: 'assistant',
usageMetadata: {
promptTokenCount: 10,
candidatesTokenCount: 20,
cachedContentTokenCount: 0,
totalTokenCount: 30,
},
cwd: '/test/project/root',
version: '1.0.0',
},
{
uuid: 'a1',
parentUuid: 'u1',
sessionId: 'test',
timestamp: '2024-01-01T00:01:02Z',
type: 'assistant',
message: { role: 'model', parts: [{ text: 'Response' }] },
model: 'gemini-pro',
cwd: '/test/project/root',
version: '1.0.0',
},
];
statSyncSpy.mockReturnValue({
mtimeMs: Date.now(),
isFile: () => true,
} as fs.Stats);
vi.mocked(jsonl.read).mockResolvedValue(records);
const loaded = await sessionService.loadSession('test');
expect(loaded?.conversation.messages).toHaveLength(2);
const assistantMsg = loaded?.conversation.messages[1];
expect(assistantMsg?.uuid).toBe('a1');
expect(assistantMsg?.message?.parts).toHaveLength(2);
expect(assistantMsg?.usageMetadata?.totalTokenCount).toBe(30);
expect(assistantMsg?.model).toBe('gemini-pro');
});
});
describe('removeSession', () => {
it('should remove session file', async () => {
vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
const result = await sessionService.removeSession(sessionIdA);
expect(result).toBe(true);
expect(unlinkSyncSpy).toHaveBeenCalled();
});
it('should return false when session does not exist', async () => {
vi.mocked(jsonl.readLines).mockResolvedValue([]);
const result = await sessionService.removeSession(
'00000000-0000-0000-0000-000000000000',
);
expect(result).toBe(false);
expect(unlinkSyncSpy).not.toHaveBeenCalled();
});
it('should return false for session from different project', async () => {
const differentProjectRecord: ChatRecord = {
...recordA1,
cwd: '/different/project',
};
vi.mocked(jsonl.readLines).mockResolvedValue([differentProjectRecord]);
vi.mocked(getProjectHash).mockImplementation((cwd: string) =>
cwd === '/test/project/root'
? 'test-project-hash'
: 'other-project-hash',
);
const result = await sessionService.removeSession(sessionIdA);
expect(result).toBe(false);
expect(unlinkSyncSpy).not.toHaveBeenCalled();
});
it('should handle file not found error', async () => {
const error = new Error('ENOENT') as NodeJS.ErrnoException;
error.code = 'ENOENT';
vi.mocked(jsonl.readLines).mockRejectedValue(error);
const result = await sessionService.removeSession(
'00000000-0000-0000-0000-000000000000',
);
expect(result).toBe(false);
});
});
describe('loadLastSession', () => {
it('should return the most recent session (same as getLatestSession)', async () => {
const now = Date.now();
readdirSyncSpy.mockReturnValue([
`${sessionIdA}.jsonl`,
`${sessionIdB}.jsonl`,
] as unknown as Array<fs.Dirent<Buffer>>);
statSyncSpy.mockImplementation((filePath: fs.PathLike) => {
const path = filePath.toString();
return {
mtimeMs: path.includes(sessionIdB) ? now : now - 10000,
isFile: () => true,
} as fs.Stats;
});
vi.mocked(jsonl.readLines).mockImplementation(
async (filePath: string) => {
if (filePath.includes(sessionIdB)) {
return [recordB1];
}
return [recordA1];
},
);
vi.mocked(jsonl.read).mockResolvedValue([recordB1, recordB2]);
const latest = await sessionService.loadLastSession();
expect(latest?.conversation.sessionId).toBe(sessionIdB);
});
it('should return undefined when no sessions exist', async () => {
readdirSyncSpy.mockReturnValue([]);
const latest = await sessionService.loadLastSession();
expect(latest).toBeUndefined();
});
});
describe('sessionExists', () => {
it('should return true for existing session', async () => {
vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
const exists = await sessionService.sessionExists(sessionIdA);
expect(exists).toBe(true);
});
it('should return false for non-existing session', async () => {
vi.mocked(jsonl.readLines).mockResolvedValue([]);
const exists = await sessionService.sessionExists(
'00000000-0000-0000-0000-000000000000',
);
expect(exists).toBe(false);
});
it('should return false for session from different project', async () => {
const differentProjectRecord: ChatRecord = {
...recordA1,
cwd: '/different/project',
};
vi.mocked(jsonl.readLines).mockResolvedValue([differentProjectRecord]);
vi.mocked(getProjectHash).mockImplementation((cwd: string) =>
cwd === '/test/project/root'
? 'test-project-hash'
: 'other-project-hash',
);
const exists = await sessionService.sessionExists(sessionIdA);
expect(exists).toBe(false);
});
});
describe('buildApiHistoryFromConversation', () => {
it('should return linear messages when no compression checkpoint exists', () => {
const assistantA1: ChatRecord = {
...recordB2,
sessionId: sessionIdA,
parentUuid: recordA1.uuid,
};
const conversation: ConversationRecord = {
sessionId: sessionIdA,
projectHash: 'test-project-hash',
startTime: '2024-01-01T00:00:00Z',
lastUpdated: '2024-01-01T00:00:00Z',
messages: [recordA1, assistantA1],
};
const history = buildApiHistoryFromConversation(conversation);
expect(history).toEqual([recordA1.message, assistantA1.message]);
});
it('should use compressedHistory snapshot and append subsequent records after compression', () => {
const compressionRecord: ChatRecord = {
uuid: 'c1',
parentUuid: 'b2',
sessionId: sessionIdA,
timestamp: '2024-01-02T03:00:00Z',
type: 'system',
subtype: 'chat_compression',
cwd: '/test/project/root',
version: '1.0.0',
gitBranch: 'main',
systemPayload: {
info: {
originalTokenCount: 100,
newTokenCount: 50,
compressionStatus: CompressionStatus.COMPRESSED,
},
compressedHistory: [
{ role: 'user', parts: [{ text: 'summary' }] },
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the additional context!' }],
},
recordB2.message!,
],
},
};
const postCompressionRecord: ChatRecord = {
uuid: 'c2',
parentUuid: 'c1',
sessionId: sessionIdA,
timestamp: '2024-01-02T04:00:00Z',
type: 'user',
message: { role: 'user', parts: [{ text: 'new question' }] },
cwd: '/test/project/root',
version: '1.0.0',
gitBranch: 'main',
};
const conversation: ConversationRecord = {
sessionId: sessionIdA,
projectHash: 'test-project-hash',
startTime: '2024-01-01T00:00:00Z',
lastUpdated: '2024-01-02T04:00:00Z',
messages: [
recordA1,
recordB2,
compressionRecord,
postCompressionRecord,
],
};
const history = buildApiHistoryFromConversation(conversation);
expect(history).toEqual([
{ role: 'user', parts: [{ text: 'summary' }] },
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the additional context!' }],
},
recordB2.message,
postCompressionRecord.message,
]);
});
});
});

View File

@@ -0,0 +1,656 @@
/**
* @license
* Copyright 2025 Qwen Code
* SPDX-License-Identifier: Apache-2.0
*/
import { Storage } from '../config/storage.js';
import { getProjectHash } from '../utils/paths.js';
import path from 'node:path';
import fs from 'node:fs';
import readline from 'node:readline';
import type { Content, Part } from '@google/genai';
import * as jsonl from '../utils/jsonl-utils.js';
import type {
ChatCompressionRecordPayload,
ChatRecord,
UiTelemetryRecordPayload,
} from './chatRecordingService.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
/**
* Session item for list display.
* Contains essential info extracted from the first record of a session file.
*/
export interface SessionListItem {
/** Unique session identifier */
sessionId: string;
/** Working directory at session start */
cwd: string;
/** ISO 8601 timestamp when session started */
startTime: string;
/** File modification time (used for ordering and pagination) */
mtime: number;
/** First user prompt text (truncated for display) */
prompt: string;
/** Git branch at session start, if available */
gitBranch?: string;
/** Full path to the session file */
filePath: string;
/** Number of messages in the session (unique message UUIDs) */
messageCount: number;
}
/**
* Pagination options for listing sessions.
*/
export interface ListSessionsOptions {
/**
* Cursor for pagination (mtime of the last item from previous page).
* Items with mtime < cursor will be returned.
* If undefined, starts from the most recent.
*/
cursor?: number;
/**
* Maximum number of items to return.
* @default 20
*/
size?: number;
}
/**
* Result of listing sessions with pagination info.
*/
export interface ListSessionsResult {
/** Session items for this page */
items: SessionListItem[];
/**
* Cursor for next page (mtime of last item).
* Undefined if no more items.
*/
nextCursor?: number;
/** Whether there are more items after this page */
hasMore: boolean;
}
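/*
 * Illustrative usage sketch (not part of this module's API): paging through
 * every session for the current project with the mtime cursor. SessionService
 * is defined below; the variable names are assumptions for the example, and
 * the loop is expected to run inside an async context.
 *
 *   const service = new SessionService(process.cwd());
 *   const all: SessionListItem[] = [];
 *   let cursor: number | undefined;
 *   for (;;) {
 *     const page: ListSessionsResult = await service.listSessions({ cursor, size: 20 });
 *     all.push(...page.items);
 *     if (!page.hasMore || page.nextCursor === undefined) break;
 *     cursor = page.nextCursor;
 *   }
 */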
/**
* Complete conversation reconstructed from ChatRecords.
* Used for resuming sessions and API compatibility.
*/
export interface ConversationRecord {
sessionId: string;
projectHash: string;
startTime: string;
lastUpdated: string;
/** Messages in chronological order (reconstructed from tree) */
messages: ChatRecord[];
}
/**
* Data structure for resuming an existing session.
*/
export interface ResumedSessionData {
conversation: ConversationRecord;
filePath: string;
/** UUID of the last completed message - new messages should use this as parentUuid */
lastCompletedUuid: string | null;
}
/**
* Maximum number of files to process when listing sessions.
* This is a safety limit to prevent performance issues with very large chat directories.
*/
const MAX_FILES_TO_PROCESS = 10000;
/**
* Pattern for validating session file names.
* Session files are named as `${sessionId}.jsonl` where sessionId is a UUID-like identifier
* (32-36 characters of hex digits and hyphens, e.g. a canonical 36-character UUID).
*/
const SESSION_FILE_PATTERN = /^[0-9a-fA-F-]{32,36}\.jsonl$/;
/** Maximum number of lines to scan when looking for the first prompt text. */
const MAX_PROMPT_SCAN_LINES = 10;
/**
* Service for managing chat sessions.
*
* This service handles:
* - Listing sessions with pagination (ordered by mtime)
* - Loading full session data for resumption
* - Removing sessions
*
* Sessions are stored as JSONL files, one per session.
* File location: <project dir>/chats/ (the project dir is resolved via Storage.getProjectDir()).
*/
export class SessionService {
private readonly storage: Storage;
private readonly projectHash: string;
constructor(cwd: string) {
this.storage = new Storage(cwd);
this.projectHash = getProjectHash(cwd);
}
private getChatsDir(): string {
return path.join(this.storage.getProjectDir(), 'chats');
}
/**
* Extracts the first user prompt text from a Content object.
*/
private extractPromptText(message: Content | undefined): string {
if (!message?.parts) return '';
for (const part of message.parts as Part[]) {
if (typeof part.text === 'string') {
const text = part.text;
// Truncate long prompts for display
return text.length > 200 ? `${text.slice(0, 200)}...` : text;
}
}
return '';
}
/**
* Finds the first available prompt text by scanning the given records
* (only user messages are considered). Returns an empty string if none is found.
*/
private extractFirstPromptFromRecords(records: ChatRecord[]): string {
for (const record of records) {
if (record.type !== 'user') continue;
const prompt = this.extractPromptText(record.message);
if (prompt) return prompt;
}
return '';
}
/**
* Counts unique message UUIDs in a session file.
* This gives the number of logical messages in the session.
*/
private async countSessionMessages(filePath: string): Promise<number> {
const uniqueUuids = new Set<string>();
try {
const fileStream = fs.createReadStream(filePath);
const rl = readline.createInterface({
input: fileStream,
crlfDelay: Infinity,
});
for await (const line of rl) {
const trimmed = line.trim();
if (!trimmed) continue;
try {
const record = JSON.parse(trimmed) as ChatRecord;
if (record.type === 'user' || record.type === 'assistant') {
uniqueUuids.add(record.uuid);
}
} catch {
// Ignore malformed lines
continue;
}
}
return uniqueUuids.size;
} catch {
return 0;
}
}
/**
* Lists sessions for the current project with pagination.
*
* Sessions are ordered by file modification time (most recent first).
* Uses cursor-based pagination with mtime as the cursor.
*
* Reads only the first few lines of each JSONL file for metadata, plus a
* streaming pass to count messages.
* Files are filtered by UUID pattern first, then by project hash (derived from cwd).
*
* @param options Pagination options
* @returns Paginated list of sessions
*/
async listSessions(
options: ListSessionsOptions = {},
): Promise<ListSessionsResult> {
const { cursor, size = 20 } = options;
const chatsDir = this.getChatsDir();
// Get all valid session files (matching UUID pattern) with their stats
let files: Array<{ name: string; mtime: number }> = [];
try {
const fileNames = fs.readdirSync(chatsDir);
for (const name of fileNames) {
// Only process files matching session file pattern
if (!SESSION_FILE_PATTERN.test(name)) continue;
const filePath = path.join(chatsDir, name);
try {
const stats = fs.statSync(filePath);
files.push({ name, mtime: stats.mtimeMs });
} catch {
// Skip files we can't stat
continue;
}
}
} catch (error) {
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
return { items: [], hasMore: false };
}
throw error;
}
// Sort by mtime descending (most recent first)
files.sort((a, b) => b.mtime - a.mtime);
// Apply cursor filter (items with mtime < cursor)
if (cursor !== undefined) {
files = files.filter((f) => f.mtime < cursor);
}
// Iterate through files until we have enough matching ones.
// Different projects may share the same chats directory due to path sanitization,
// so we need to filter by project hash and continue until we have enough items.
const items: SessionListItem[] = [];
let filesProcessed = 0;
let lastProcessedMtime: number | undefined;
let hasMoreFiles = false;
for (const file of files) {
// Safety limit to prevent performance issues
if (filesProcessed >= MAX_FILES_TO_PROCESS) {
hasMoreFiles = true;
break;
}
// Stop if we have enough items
if (items.length >= size) {
hasMoreFiles = true;
break;
}
filesProcessed++;
lastProcessedMtime = file.mtime;
const filePath = path.join(chatsDir, file.name);
const records = await jsonl.readLines<ChatRecord>(
filePath,
MAX_PROMPT_SCAN_LINES,
);
if (records.length === 0) continue;
const firstRecord = records[0];
// Skip if not matching the current project.
// The project hash is derived from the record's cwd since records do not store projectHash.
const recordProjectHash = getProjectHash(firstRecord.cwd);
if (recordProjectHash !== this.projectHash) continue;
// Count messages for this session
const messageCount = await this.countSessionMessages(filePath);
const prompt = this.extractFirstPromptFromRecords(records);
items.push({
sessionId: firstRecord.sessionId,
cwd: firstRecord.cwd,
startTime: firstRecord.timestamp,
mtime: file.mtime,
prompt,
gitBranch: firstRecord.gitBranch,
filePath,
messageCount,
});
}
// Determine next cursor (mtime of last processed file)
// Only set if there are more files to process
const nextCursor =
hasMoreFiles && lastProcessedMtime !== undefined
? lastProcessedMtime
: undefined;
return {
items,
nextCursor,
hasMore: hasMoreFiles,
};
}
/**
* Reads all records from a session file.
*/
private async readAllRecords(filePath: string): Promise<ChatRecord[]> {
try {
return await jsonl.read<ChatRecord>(filePath);
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
console.error('Error reading session file:', error);
}
return [];
}
}
/**
* Aggregates multiple records with the same uuid into a single ChatRecord.
* Merges content fields (message, usageMetadata, model, toolCallResult).
*/
private aggregateRecords(records: ChatRecord[]): ChatRecord {
if (records.length === 0) {
throw new Error('Cannot aggregate empty records array');
}
const base = { ...records[0] };
for (let i = 1; i < records.length; i++) {
const record = records[i];
// Merge message (Content objects)
if (record.message !== undefined) {
if (base.message === undefined) {
base.message = record.message;
} else {
base.message = {
role: base.message.role,
parts: [
...(base.message.parts || []),
...(record.message.parts || []),
],
};
}
}
// Merge usage metadata (take the latest)
if (record.usageMetadata) {
base.usageMetadata = record.usageMetadata;
}
// Merge toolCallResult
if (record.toolCallResult && !base.toolCallResult) {
base.toolCallResult = record.toolCallResult;
}
// Merge model (take the first non-empty one)
if (record.model && !base.model) {
base.model = record.model;
}
// Update timestamp to the latest
if (record.timestamp > base.timestamp) {
base.timestamp = record.timestamp;
}
}
return base;
}
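/*
 * Illustrative sketch of the merge (all values below are hypothetical): two
 * streamed records sharing the same uuid, e.g.
 *   { uuid: 'a1', type: 'assistant', message: { role: 'model', parts: [{ text: 'Hel' }] } }
 *   { uuid: 'a1', type: 'assistant', message: { role: 'model', parts: [{ text: 'lo' }] },
 *     usageMetadata: { totalTokenCount: 12 } }
 * aggregate into one logical record whose message parts are
 * [{ text: 'Hel' }, { text: 'lo' }], whose usageMetadata is the latest one
 * seen, and whose timestamp is the newest of the two.
 */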
/**
* Reconstructs a linear conversation from tree-structured records.
*/
private reconstructHistory(
records: ChatRecord[],
leafUuid?: string,
): ChatRecord[] {
if (records.length === 0) return [];
const recordsByUuid = new Map<string, ChatRecord[]>();
for (const record of records) {
const existing = recordsByUuid.get(record.uuid) || [];
existing.push(record);
recordsByUuid.set(record.uuid, existing);
}
let currentUuid: string | null =
leafUuid ?? records[records.length - 1].uuid;
const uuidChain: string[] = [];
const visited = new Set<string>();
while (currentUuid && !visited.has(currentUuid)) {
visited.add(currentUuid);
uuidChain.push(currentUuid);
const recordsForUuid = recordsByUuid.get(currentUuid);
if (!recordsForUuid || recordsForUuid.length === 0) break;
currentUuid = recordsForUuid[0].parentUuid;
}
uuidChain.reverse();
const messages: ChatRecord[] = [];
for (const uuid of uuidChain) {
const recordsForUuid = recordsByUuid.get(uuid);
if (recordsForUuid && recordsForUuid.length > 0) {
messages.push(this.aggregateRecords(recordsForUuid));
}
}
return messages;
}
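/*
 * Illustrative sketch (uuids are hypothetical): given records
 *   u1 (parentUuid: null), u2 (parentUuid: 'u1'), u2b (parentUuid: 'u1'),
 *   u3 (parentUuid: 'u2')
 * with u3 last in the file, reconstructHistory starts at u3 (or at leafUuid
 * when one is supplied), follows parentUuid links back to the root, reverses
 * the chain, and returns the aggregated records for [u1, u2, u3]. The
 * abandoned branch u2b is not on the chain and is dropped.
 */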
/**
* Loads a session by its session ID.
* Reconstructs the full conversation from tree-structured records.
*
* @param sessionId The session ID to load
* @returns Session data for resumption, or undefined if the session is not found or belongs to a different project
*/
async loadSession(
sessionId: string,
): Promise<ResumedSessionData | undefined> {
const chatsDir = this.getChatsDir();
const filePath = path.join(chatsDir, `${sessionId}.jsonl`);
const records = await this.readAllRecords(filePath);
if (records.length === 0) {
return;
}
// Verify this session belongs to the current project
const firstRecord = records[0];
const recordProjectHash = getProjectHash(firstRecord.cwd);
if (recordProjectHash !== this.projectHash) {
return;
}
// Reconstruct linear history
const messages = this.reconstructHistory(records);
if (messages.length === 0) {
return;
}
const lastMessage = messages[messages.length - 1];
const stats = fs.statSync(filePath);
const conversation: ConversationRecord = {
sessionId: firstRecord.sessionId,
projectHash: this.projectHash,
startTime: firstRecord.timestamp,
lastUpdated: new Date(stats.mtimeMs).toISOString(),
messages,
};
return {
conversation,
filePath,
lastCompletedUuid: lastMessage.uuid,
};
}
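/*
 * Illustrative resume sketch (the session id is a placeholder): the returned
 * lastCompletedUuid is intended as the parentUuid for the next appended
 * record, and the conversation can be turned into model-facing history via
 * buildApiHistoryFromConversation, defined later in this file.
 *
 *   const service = new SessionService(process.cwd());
 *   const resumed = await service.loadSession(
 *     '123e4567-e89b-12d3-a456-426614174000',
 *   );
 *   if (resumed) {
 *     const history = buildApiHistoryFromConversation(resumed.conversation);
 *     // history is a Content[]; resumed.lastCompletedUuid links new records
 *     // onto the reconstructed chain.
 *   }
 */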
/**
* Removes a session by its session ID.
*
* @param sessionId The session ID to remove
* @returns true if removed, false if not found or not part of the current project
*/
async removeSession(sessionId: string): Promise<boolean> {
const chatsDir = this.getChatsDir();
const filePath = path.join(chatsDir, `${sessionId}.jsonl`);
try {
// Verify the file exists and belongs to this project
const records = await jsonl.readLines<ChatRecord>(filePath, 1);
if (records.length === 0) {
return false;
}
const recordProjectHash = getProjectHash(records[0].cwd);
if (recordProjectHash !== this.projectHash) {
return false;
}
fs.unlinkSync(filePath);
return true;
} catch (error) {
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
return false;
}
throw error;
}
}
/**
* Loads the most recent session for the current project.
* Combines listSessions and loadSession for convenience.
*
* @returns Session data for resumption, or undefined if no sessions exist
*/
async loadLastSession(): Promise<ResumedSessionData | undefined> {
const result = await this.listSessions({ size: 1 });
if (result.items.length === 0) {
return;
}
return this.loadSession(result.items[0].sessionId);
}
/**
* Checks if a session exists by its session ID.
*
* @param sessionId The session ID to check
* @returns true if session exists and belongs to current project
*/
async sessionExists(sessionId: string): Promise<boolean> {
const chatsDir = this.getChatsDir();
const filePath = path.join(chatsDir, `${sessionId}.jsonl`);
try {
const records = await jsonl.readLines<ChatRecord>(filePath, 1);
if (records.length === 0) {
return false;
}
const recordProjectHash = getProjectHash(records[0].cwd);
return recordProjectHash === this.projectHash;
} catch {
return false;
}
}
}
/**
* Builds the model-facing chat history (Content[]) from a reconstructed
* conversation. This keeps UI history intact while applying chat compression
* checkpoints for the API history used on resume.
*
* Strategy:
* - Find the latest system/chat_compression record (if any).
* - Use its compressedHistory snapshot as the base history.
* - Append all messages after that checkpoint (skipping system records).
* - If no checkpoint exists, return the linear message list (message field only).
*/
export function buildApiHistoryFromConversation(
conversation: ConversationRecord,
): Content[] {
const { messages } = conversation;
let lastCompressionIndex = -1;
let compressedHistory: Content[] | undefined;
messages.forEach((record, index) => {
if (record.type === 'system' && record.subtype === 'chat_compression') {
const payload = record.systemPayload as
| ChatCompressionRecordPayload
| undefined;
if (payload?.compressedHistory) {
lastCompressionIndex = index;
compressedHistory = payload.compressedHistory;
}
}
});
if (compressedHistory && lastCompressionIndex >= 0) {
const baseHistory: Content[] = structuredClone(compressedHistory);
// Append everything after the compression record (newer turns)
for (let i = lastCompressionIndex + 1; i < messages.length; i++) {
const record = messages[i];
if (record.type === 'system') continue;
if (record.message) {
baseHistory.push(structuredClone(record.message as Content));
}
}
return baseHistory;
}
// Fallback: return linear messages as Content[]
return messages
.map((record) => record.message)
.filter((message): message is Content => message !== undefined)
.map((message) => structuredClone(message));
}
/**
* Replays stored UI telemetry events to rebuild metrics when resuming a session.
* Also restores the last prompt token count from the best available source.
*/
export function replayUiTelemetryFromConversation(
conversation: ConversationRecord,
): void {
uiTelemetryService.reset();
for (const record of conversation.messages) {
if (record.type !== 'system' || record.subtype !== 'ui_telemetry') {
continue;
}
const payload = record.systemPayload as
| UiTelemetryRecordPayload
| undefined;
const uiEvent = payload?.uiEvent;
if (uiEvent) {
uiTelemetryService.addEvent(uiEvent);
}
}
const resumePromptTokens = getResumePromptTokenCount(conversation);
if (resumePromptTokens !== undefined) {
uiTelemetryService.setLastPromptTokenCount(resumePromptTokens);
}
}
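/*
 * Illustrative sketch (conversation is assumed to come from
 * SessionService.loadSession above): replaying telemetry for a resumed
 * conversation and its relationship to the token-count helper below.
 *
 *   replayUiTelemetryFromConversation(conversation);
 *   // uiTelemetryService now reflects every recorded ui_telemetry event, and
 *   // its last prompt token count equals
 *   // getResumePromptTokenCount(conversation) whenever that value is defined.
 */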
/**
* Returns the best available prompt token count for resuming telemetry:
* - If a chat compression checkpoint exists, use its new token count.
* - Otherwise, use the last assistant record's usageMetadata totalTokenCount (falling back to promptTokenCount).
*/
export function getResumePromptTokenCount(
conversation: ConversationRecord,
): number | undefined {
let fallback: number | undefined;
for (let i = conversation.messages.length - 1; i >= 0; i--) {
const record = conversation.messages[i];
if (record.type === 'system' && record.subtype === 'chat_compression') {
const payload = record.systemPayload as
| ChatCompressionRecordPayload
| undefined;
if (payload?.info) {
return payload.info.newTokenCount;
}
}
if (fallback === undefined && record.type === 'assistant') {
const usage = record.usageMetadata;
if (usage) {
fallback = usage.totalTokenCount ?? usage.promptTokenCount;
}
}
}
return fallback;
}
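/*
 * Worked example (hypothetical values): for a conversation whose records end
 * with
 *   ...an assistant record with usageMetadata { totalTokenCount: 180, promptTokenCount: 150 }
 *   ...a later chat_compression record with info { originalTokenCount: 180, newTokenCount: 60 }
 * getResumePromptTokenCount returns 60, because the compression checkpoint is
 * found first when scanning backwards. Without the checkpoint it would return
 * 180 (totalTokenCount), and 150 only if totalTokenCount were missing.
 */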