Session-Level Conversation History Management (#1113)

This commit is contained in:
tanzhenxin
2025-12-03 18:04:48 +08:00
committed by GitHub
parent a7abd8d09f
commit 0a75d85ac9
114 changed files with 9257 additions and 4039 deletions

View File

@@ -26,6 +26,7 @@
"@google/genai": "1.16.0",
"@modelcontextprotocol/sdk": "^1.11.0",
"@opentelemetry/api": "^1.9.0",
"async-mutex": "^0.5.0",
"@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",
"@opentelemetry/exporter-logs-otlp-http": "^0.203.0",
"@opentelemetry/exporter-metrics-otlp-grpc": "^0.203.0",

View File

@@ -23,19 +23,6 @@ import {
} from '../core/contentGenerator.js';
import { GeminiClient } from '../core/client.js';
import { GitService } from '../services/gitService.js';
vi.mock('fs', async (importOriginal) => {
const actual = await importOriginal<typeof import('fs')>();
return {
...actual,
existsSync: vi.fn().mockReturnValue(true),
statSync: vi.fn().mockReturnValue({
isDirectory: vi.fn().mockReturnValue(true),
}),
realpathSync: vi.fn((path) => path),
};
});
import { ShellTool } from '../tools/shell.js';
import { ReadFileTool } from '../tools/read-file.js';
import { GrepTool } from '../tools/grep.js';
@@ -54,9 +41,9 @@ function createToolMock(toolName: string) {
return ToolMock;
}
vi.mock('fs', async (importOriginal) => {
const actual = await importOriginal<typeof import('fs')>();
return {
vi.mock('node:fs', async (importOriginal) => {
const actual = await importOriginal<typeof import('node:fs')>();
const mocked = {
...actual,
existsSync: vi.fn().mockReturnValue(true),
statSync: vi.fn().mockReturnValue({
@@ -64,6 +51,10 @@ vi.mock('fs', async (importOriginal) => {
}),
realpathSync: vi.fn((path) => path),
};
return {
...mocked,
default: mocked, // Required for ESM default imports (import fs from 'node:fs')
};
});
// Mock dependencies that might be called during Config construction or createServerConfig
@@ -197,7 +188,6 @@ describe('Server Config (config.ts)', () => {
const USER_MEMORY = 'Test User Memory';
const TELEMETRY_SETTINGS = { enabled: false };
const EMBEDDING_MODEL = 'gemini-embedding';
const SESSION_ID = 'test-session-id';
const baseParams: ConfigParameters = {
cwd: '/tmp',
embeddingModel: EMBEDDING_MODEL,
@@ -208,7 +198,6 @@ describe('Server Config (config.ts)', () => {
fullContext: FULL_CONTEXT,
userMemory: USER_MEMORY,
telemetry: TELEMETRY_SETTINGS,
sessionId: SESSION_ID,
model: MODEL,
usageStatisticsEnabled: false,
};
@@ -217,7 +206,7 @@ describe('Server Config (config.ts)', () => {
// Reset mocks if necessary
vi.clearAllMocks();
vi.spyOn(QwenLogger.prototype, 'logStartSessionEvent').mockImplementation(
() => undefined,
async () => undefined,
);
});
@@ -476,7 +465,7 @@ describe('Server Config (config.ts)', () => {
...baseParams,
usageStatisticsEnabled: true,
});
await config.refreshAuth(AuthType.USE_GEMINI);
await config.initialize();
expect(QwenLogger.prototype.logStartSessionEvent).toHaveBeenCalledOnce();
});
@@ -956,7 +945,6 @@ describe('Server Config (config.ts)', () => {
describe('setApprovalMode with folder trust', () => {
const baseParams: ConfigParameters = {
sessionId: 'test',
targetDir: '.',
debugMode: false,
model: 'test-model',
@@ -987,7 +975,6 @@ describe('setApprovalMode with folder trust', () => {
it('should NOT throw an error when setting PLAN mode in an untrusted folder', () => {
const config = new Config({
sessionId: 'test',
targetDir: '.',
debugMode: false,
model: 'test-model',
@@ -1168,7 +1155,6 @@ describe('BaseLlmClient Lifecycle', () => {
const USER_MEMORY = 'Test User Memory';
const TELEMETRY_SETTINGS = { enabled: false };
const EMBEDDING_MODEL = 'gemini-embedding';
const SESSION_ID = 'test-session-id';
const baseParams: ConfigParameters = {
cwd: '/tmp',
embeddingModel: EMBEDDING_MODEL,
@@ -1179,7 +1165,6 @@ describe('BaseLlmClient Lifecycle', () => {
fullContext: FULL_CONTEXT,
userMemory: USER_MEMORY,
telemetry: TELEMETRY_SETTINGS,
sessionId: SESSION_ID,
model: MODEL,
usageStatisticsEnabled: false,
};

View File

@@ -69,7 +69,7 @@ import {
DEFAULT_OTLP_ENDPOINT,
DEFAULT_TELEMETRY_TARGET,
initializeTelemetry,
logCliConfiguration,
logStartSession,
logRipgrepFallback,
RipgrepFallbackEvent,
StartSessionEvent,
@@ -93,6 +93,12 @@ import {
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
import { Storage } from './storage.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import { ChatRecordingService } from '../services/chatRecordingService.js';
import {
SessionService,
type ResumedSessionData,
} from '../services/sessionService.js';
import { randomUUID } from 'node:crypto';
// Re-export types
export type { AnyToolInvocation, FileFilteringOptions, MCPOAuthConfig };
@@ -110,6 +116,42 @@ export enum ApprovalMode {
export const APPROVAL_MODES = Object.values(ApprovalMode);
/**
* Information about an approval mode including display name and description.
*/
export interface ApprovalModeInfo {
  // Machine-readable identifier; mirrors the ApprovalMode enum member.
  id: ApprovalMode;
  // Human-readable display name shown in UI surfaces.
  name: string;
  // One-line explanation of what the mode permits or blocks.
  description: string;
}
/**
* Detailed information about each approval mode.
* Used for UI display and protocol responses.
*/
// NOTE: keyed by ApprovalMode so every enum member is statically required to
// have an entry; each record's `id` repeats its key for self-contained use.
export const APPROVAL_MODE_INFO: Record<ApprovalMode, ApprovalModeInfo> = {
  // Read-only analysis mode: no mutations at all.
  [ApprovalMode.PLAN]: {
    id: ApprovalMode.PLAN,
    name: 'Plan',
    description: 'Analyze only, do not modify files or execute commands',
  },
  // Safe default: every mutating action needs explicit user approval.
  [ApprovalMode.DEFAULT]: {
    id: ApprovalMode.DEFAULT,
    name: 'Default',
    description: 'Require approval for file edits or shell commands',
  },
  // Edits auto-approved; other tools (e.g. shell) still prompt.
  [ApprovalMode.AUTO_EDIT]: {
    id: ApprovalMode.AUTO_EDIT,
    name: 'Auto Edit',
    description: 'Automatically approve file edits',
  },
  // Everything auto-approved.
  [ApprovalMode.YOLO]: {
    id: ApprovalMode.YOLO,
    name: 'YOLO',
    description: 'Automatically approve all tools',
  },
};
export interface AccessibilitySettings {
disableLoadingPhrases?: boolean;
screenReader?: boolean;
@@ -211,7 +253,8 @@ export interface SandboxConfig {
}
export interface ConfigParameters {
sessionId: string;
sessionId?: string;
sessionData?: ResumedSessionData;
embeddingModel?: string;
sandbox?: SandboxConfig;
targetDir: string;
@@ -315,10 +358,11 @@ function normalizeConfigOutputFormat(
}
export class Config {
private sessionId: string;
private sessionData?: ResumedSessionData;
private toolRegistry!: ToolRegistry;
private promptRegistry!: PromptRegistry;
private subagentManager!: SubagentManager;
private readonly sessionId: string;
private fileSystemService: FileSystemService;
private contentGeneratorConfig!: ContentGeneratorConfig;
private contentGenerator!: ContentGenerator;
@@ -358,6 +402,8 @@ export class Config {
};
private fileDiscoveryService: FileDiscoveryService | null = null;
private gitService: GitService | undefined = undefined;
private sessionService: SessionService | undefined = undefined;
private chatRecordingService: ChatRecordingService | undefined = undefined;
private readonly checkpointing: boolean;
private readonly proxy: string | undefined;
private readonly cwd: string;
@@ -415,7 +461,8 @@ export class Config {
private readonly useSmartEdit: boolean;
constructor(params: ConfigParameters) {
this.sessionId = params.sessionId;
this.sessionId = params.sessionId ?? randomUUID();
this.sessionData = params.sessionData;
this.embeddingModel = params.embeddingModel ?? DEFAULT_QWEN_EMBEDDING_MODEL;
this.fileSystemService = new StandardFileSystemService();
this.sandbox = params.sandbox;
@@ -540,6 +587,7 @@ export class Config {
setGlobalDispatcher(new ProxyAgent(this.getProxy() as string));
}
this.geminiClient = new GeminiClient(this);
this.chatRecordingService = new ChatRecordingService(this);
}
/**
@@ -561,6 +609,8 @@ export class Config {
this.toolRegistry = await this.createToolRegistry();
await this.geminiClient.initialize();
logStartSession(this, new StartSessionEvent(this));
}
getContentGenerator(): ContentGenerator {
@@ -606,7 +656,6 @@ export class Config {
this.contentGenerator = await createContentGenerator(
newContentGeneratorConfig,
this,
this.getSessionId(),
isInitialAuth,
);
// Only assign to instance properties after successful initialization
@@ -617,9 +666,6 @@ export class Config {
// Reset the session flag since we're explicitly changing auth and using default model
this.inFallbackMode = false;
// Logging the cli configuration here as the auth related configuration params would have been loaded by this point
logCliConfiguration(this, new StartSessionEvent(this, this.toolRegistry));
}
/**
@@ -646,6 +692,26 @@ export class Config {
return this.sessionId;
}
/**
 * Starts a new session and resets session-scoped services.
 *
 * @param sessionId - Optional explicit id; a random UUID is generated when omitted.
 * @returns The id of the session that was just started.
 */
startNewSession(sessionId?: string): string {
  const newId = sessionId ?? randomUUID();
  this.sessionId = newId;
  // Any resumed-session payload belonged to the previous session.
  this.sessionData = undefined;
  // Recording is scoped per session, so swap in a fresh recorder.
  this.chatRecordingService = new ChatRecordingService(this);
  // Only emit the start-session event once initialization has completed.
  if (this.initialized) {
    logStartSession(this, new StartSessionEvent(this));
  }
  return newId;
}
/**
 * Returns the resumed session data if this session was resumed from a previous one.
 *
 * @returns The conversation payload captured from the prior session, or
 *   `undefined` when this is a fresh (non-resumed) session.
 */
getResumedSessionData(): ResumedSessionData | undefined {
  return this.sessionData;
}
shouldLoadMemoryFromIncludeDirectories(): boolean {
return this.loadMemoryFromIncludeDirectories;
}
@@ -1128,6 +1194,26 @@ export class Config {
return this.gitService;
}
/**
 * Returns the chat recording service, creating it on first access.
 */
getChatRecordingService(): ChatRecordingService {
  // Lazily instantiate so callers always receive a usable recorder.
  this.chatRecordingService ??= new ChatRecordingService(this);
  return this.chatRecordingService;
}
/**
 * Gets or creates a SessionService for managing chat sessions.
 */
getSessionService(): SessionService {
  // One service per Config instance, rooted at the target directory.
  this.sessionService ??= new SessionService(this.targetDir);
  return this.sessionService;
}
getFileExclusions(): FileExclusions {
return this.fileExclusions;
}

View File

@@ -20,7 +20,6 @@ describe('Flash Model Fallback Configuration', () => {
isDirectory: () => true,
} as fs.Stats);
config = new Config({
sessionId: 'test-session',
targetDir: '/test',
debugMode: false,
cwd: '/test',
@@ -44,7 +43,6 @@ describe('Flash Model Fallback Configuration', () => {
it('should only mark as switched if contentGeneratorConfig exists', async () => {
// Create config without initializing contentGeneratorConfig
const newConfig = new Config({
sessionId: 'test-session-2',
targetDir: '/test',
debugMode: false,
cwd: '/test',
@@ -67,7 +65,6 @@ describe('Flash Model Fallback Configuration', () => {
it('should fall back to initial model if contentGeneratorConfig is not available', () => {
// Test with fresh config where contentGeneratorConfig might not be set
const newConfig = new Config({
sessionId: 'test-session-2',
targetDir: '/test',
debugMode: false,
cwd: '/test',

View File

@@ -4,18 +4,9 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi } from 'vitest';
import { describe, it, expect } from 'vitest';
import * as os from 'node:os';
import * as path from 'node:path';
vi.mock('fs', async (importOriginal) => {
const actual = await importOriginal<typeof import('fs')>();
return {
...actual,
mkdirSync: vi.fn(),
};
});
import { Storage } from './storage.js';
describe('Storage getGlobalSettingsPath', () => {

View File

@@ -14,6 +14,7 @@ export const GOOGLE_ACCOUNTS_FILENAME = 'google_accounts.json';
export const OAUTH_FILE = 'oauth_creds.json';
const TMP_DIR_NAME = 'tmp';
const BIN_DIR_NAME = 'bin';
const PROJECT_DIR_NAME = 'projects';
export class Storage {
private readonly targetDir: string;
@@ -66,6 +67,12 @@ export class Storage {
return path.join(this.targetDir, QWEN_DIR);
}
/**
 * Returns the per-project storage directory:
 * <global qwen dir>/projects/<sanitized project root>.
 */
getProjectDir(): string {
  return path.join(
    Storage.getGlobalQwenDir(),
    PROJECT_DIR_NAME,
    this.sanitizeCwd(this.getProjectRoot()),
  );
}
getProjectTempDir(): string {
const hash = this.getFilePathHash(this.getProjectRoot());
const tempDir = Storage.getGlobalTempDir();
@@ -117,4 +124,8 @@ export class Storage {
getHistoryFilePath(): string {
return path.join(this.getProjectTempDir(), 'shell_history');
}
/**
 * Maps a directory path to a filesystem-safe project id by replacing every
 * character outside [A-Za-z0-9] with '-'.
 */
private sanitizeCwd(cwd: string): string {
  const unsafeChars = /[^a-zA-Z0-9]/g;
  return cwd.replace(unsafeChars, '-');
}
}

View File

@@ -61,6 +61,7 @@ vi.mock('node:fs', () => {
});
}),
existsSync: vi.fn((path: string) => mockFileSystem.has(path)),
appendFileSync: vi.fn(),
};
return {
@@ -364,6 +365,9 @@ describe('Gemini Client (client.ts)', () => {
getProjectRoot: vi.fn().mockReturnValue('/test/project/root'),
storage: {
getProjectTempDir: vi.fn().mockReturnValue('/test/temp'),
getProjectDir: vi
.fn()
.mockReturnValue('/test/project/root/.gemini/projects/test-project'),
},
getContentGenerator: vi.fn().mockReturnValue(mockContentGenerator),
getBaseLlmClient: vi.fn().mockReturnValue({
@@ -374,6 +378,8 @@ describe('Gemini Client (client.ts)', () => {
}),
getSubagentManager: vi.fn().mockReturnValue(mockSubagentManager),
getSkipLoopDetection: vi.fn().mockReturnValue(false),
getChatRecordingService: vi.fn().mockReturnValue(undefined),
getResumedSessionData: vi.fn().mockReturnValue(undefined),
} as unknown as Config;
client = new GeminiClient(mockConfig);
@@ -1513,6 +1519,7 @@ ${JSON.stringify(
[{ text: 'Start conversation' }],
signal,
'prompt-id-3',
{ isContinuation: false },
Number.MAX_SAFE_INTEGER, // Bypass the MAX_TURNS protection
);

View File

@@ -39,7 +39,6 @@ import {
} from './turn.js';
// Services
import { type ChatRecordingService } from '../services/chatRecordingService.js';
import {
ChatCompressionService,
COMPRESSION_PRESERVE_THRESHOLD,
@@ -55,12 +54,17 @@ import {
NextSpeakerCheckEvent,
logNextSpeakerCheck,
} from '../telemetry/index.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
// Utilities
import {
getDirectoryContextString,
getInitialChatHistory,
} from '../utils/environmentContext.js';
import {
buildApiHistoryFromConversation,
replayUiTelemetryFromConversation,
} from '../services/sessionService.js';
import { reportError } from '../utils/errorReporting.js';
import { getErrorMessage } from '../utils/errors.js';
import { checkNextSpeaker } from '../utils/nextSpeakerChecker.js';
@@ -96,7 +100,7 @@ export class GeminiClient {
private sessionTurnCount = 0;
private readonly loopDetector: LoopDetectionService;
private lastPromptId: string;
private lastPromptId: string | undefined = undefined;
private lastSentIdeContext: IdeContext | undefined;
private forceFullIdeContext = true;
@@ -108,11 +112,24 @@ export class GeminiClient {
constructor(private readonly config: Config) {
this.loopDetector = new LoopDetectionService(config);
this.lastPromptId = this.config.getSessionId();
}
async initialize() {
this.chat = await this.startChat();
this.lastPromptId = this.config.getSessionId();
// Check if we're resuming from a previous session
const resumedSessionData = this.config.getResumedSessionData();
if (resumedSessionData) {
replayUiTelemetryFromConversation(resumedSessionData.conversation);
// Convert resumed session to API history format
// Each ChatRecord's message field is already a Content object
const resumedHistory = buildApiHistoryFromConversation(
resumedSessionData.conversation,
);
this.chat = await this.startChat(resumedHistory);
} else {
this.chat = await this.startChat();
}
}
private getContentGeneratorOrFail(): ContentGenerator {
@@ -161,10 +178,6 @@ export class GeminiClient {
this.chat = await this.startChat();
}
getChatRecordingService(): ChatRecordingService | undefined {
return this.chat?.getChatRecordingService();
}
getLoopDetectionService(): LoopDetectionService {
return this.loopDetector;
}
@@ -212,6 +225,7 @@ export class GeminiClient {
tools,
},
history,
this.config.getChatRecordingService(),
);
} catch (error) {
await reportError(
@@ -396,12 +410,15 @@ export class GeminiClient {
request: PartListUnion,
signal: AbortSignal,
prompt_id: string,
options?: { isContinuation: boolean },
turns: number = MAX_TURNS,
): AsyncGenerator<ServerGeminiStreamEvent, Turn> {
const isNewPrompt = this.lastPromptId !== prompt_id;
if (isNewPrompt) {
if (!options?.isContinuation) {
this.loopDetector.reset(prompt_id);
this.lastPromptId = prompt_id;
// record user message for session management
this.config.getChatRecordingService()?.recordUserMessage(request);
}
this.sessionTurnCount++;
if (
@@ -510,7 +527,7 @@ export class GeminiClient {
// append system reminders to the request
let requestToSent = await flatMapTextParts(request, async (text) => [text]);
if (isNewPrompt) {
if (!options?.isContinuation) {
const systemReminders = [];
// add subagent system reminder if there are subagents
@@ -580,6 +597,7 @@ export class GeminiClient {
nextRequest,
signal,
prompt_id,
options,
boundedTurns - 1,
);
}
@@ -624,7 +642,7 @@ export class GeminiClient {
config: requestConfig,
contents,
},
this.lastPromptId,
this.lastPromptId!,
);
};
const onPersistent429Callback = async (
@@ -678,7 +696,14 @@ export class GeminiClient {
if (info.compressionStatus === CompressionStatus.COMPRESSED) {
// Success: update chat with new compressed history
if (newHistory) {
const chatRecordingService = this.config.getChatRecordingService();
chatRecordingService?.recordChatCompression({
info,
compressedHistory: newHistory,
});
this.chat = await this.startChat(newHistory);
uiTelemetryService.setLastPromptTokenCount(info.newTokenCount);
this.forceFullIdeContext = true;
}
} else if (

View File

@@ -119,7 +119,6 @@ export function createContentGeneratorConfig(
export async function createContentGenerator(
config: ContentGeneratorConfig,
gcConfig: Config,
sessionId?: string,
isInitialAuth?: boolean,
): Promise<ContentGenerator> {
const version = process.env['CLI_VERSION'] || process.version;
@@ -138,7 +137,6 @@ export async function createContentGenerator(
httpOptions,
config.authType,
gcConfig,
sessionId,
),
gcConfig,
);

View File

@@ -256,6 +256,7 @@ describe('CoreToolScheduler', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null, // No client needed for these tests
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -333,6 +334,7 @@ describe('CoreToolScheduler', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null,
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -511,6 +513,7 @@ describe('CoreToolScheduler', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null,
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -597,6 +600,7 @@ describe('CoreToolScheduler', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null,
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -689,6 +693,7 @@ describe('CoreToolScheduler with payload', () => {
isInteractive: () => true, // Required to prevent auto-denial of tool calls
getIdeMode: () => false,
getExperimentalZedIntegration: () => false,
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -1012,6 +1017,7 @@ describe('CoreToolScheduler edit cancellation', () => {
isInteractive: () => true, // Required to prevent auto-denial of tool calls
getIdeMode: () => false,
getExperimentalZedIntegration: () => false,
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -1118,6 +1124,7 @@ describe('CoreToolScheduler YOLO mode', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null, // No client needed for these tests
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -1258,6 +1265,7 @@ describe('CoreToolScheduler cancellation during executing with live output', ()
terminalWidth: 90,
terminalHeight: 30,
}),
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -1358,6 +1366,7 @@ describe('CoreToolScheduler request queueing', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null, // No client needed for these tests
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -1490,6 +1499,7 @@ describe('CoreToolScheduler request queueing', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null, // No client needed for these tests
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -1592,6 +1602,7 @@ describe('CoreToolScheduler request queueing', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null, // No client needed for these tests
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -1667,6 +1678,7 @@ describe('CoreToolScheduler request queueing', () => {
isInteractive: () => true, // Required to prevent auto-denial of tool calls
getIdeMode: () => false,
getExperimentalZedIntegration: () => false,
getChatRecordingService: () => undefined,
} as unknown as Config;
const testTool = new TestApprovalTool(mockConfig);
@@ -1858,6 +1870,7 @@ describe('CoreToolScheduler Sequential Execution', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null,
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
@@ -1978,6 +1991,7 @@ describe('CoreToolScheduler Sequential Execution', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null,
getChatRecordingService: () => undefined,
} as unknown as Config;
const scheduler = new CoreToolScheduler({

View File

@@ -16,6 +16,7 @@ import type {
ToolConfirmationPayload,
AnyDeclarativeTool,
AnyToolInvocation,
ChatRecordingService,
} from '../index.js';
import {
ToolConfirmationOutcome,
@@ -321,6 +322,10 @@ interface CoreToolSchedulerOptions {
onToolCallsUpdate?: ToolCallsUpdateHandler;
getPreferredEditor: () => EditorType | undefined;
onEditorClose: () => void;
/**
* Optional recording service. If provided, tool results will be recorded.
*/
chatRecordingService?: ChatRecordingService;
}
export class CoreToolScheduler {
@@ -332,6 +337,7 @@ export class CoreToolScheduler {
private getPreferredEditor: () => EditorType | undefined;
private config: Config;
private onEditorClose: () => void;
private chatRecordingService?: ChatRecordingService;
private isFinalizingToolCalls = false;
private isScheduling = false;
private requestQueue: Array<{
@@ -349,6 +355,7 @@ export class CoreToolScheduler {
this.onToolCallsUpdate = options.onToolCallsUpdate;
this.getPreferredEditor = options.getPreferredEditor;
this.onEditorClose = options.onEditorClose;
this.chatRecordingService = options.chatRecordingService;
}
private setStatusInternal(
@@ -1208,6 +1215,9 @@ export class CoreToolScheduler {
logToolCall(this.config, new ToolCallEvent(call));
}
// Record tool results before notifying completion
this.recordToolResults(completedCalls);
if (this.onAllToolCallsComplete) {
this.isFinalizingToolCalls = true;
await this.onAllToolCallsComplete(completedCalls);
@@ -1224,6 +1234,33 @@ export class CoreToolScheduler {
}
}
/**
* Records tool results to the chat recording service.
* This captures both the raw Content (for API reconstruction) and
* enriched metadata (for UI recovery).
*/
private recordToolResults(completedCalls: CompletedToolCall[]): void {
if (!this.chatRecordingService) return;
// Collect all response parts from completed calls
const responseParts: Part[] = completedCalls.flatMap(
(call) => call.response.responseParts,
);
if (responseParts.length === 0) return;
// Record each tool result individually
for (const call of completedCalls) {
this.chatRecordingService.recordToolResult(call.response.responseParts, {
callId: call.request.callId,
status: call.status,
resultDisplay: call.response.resultDisplay,
error: call.response.error,
errorType: call.response.errorType,
});
}
}
private notifyToolCallsUpdate(): void {
if (this.onToolCallsUpdate) {
this.onToolCallsUpdate([...this.toolCalls]);

View File

@@ -43,6 +43,7 @@ vi.mock('node:fs', () => {
});
}),
existsSync: vi.fn((path: string) => mockFileSystem.has(path)),
appendFileSync: vi.fn(),
};
return {
@@ -120,6 +121,7 @@ describe('GeminiChat', () => {
setQuotaErrorOccurred: vi.fn(),
flashFallbackHandler: undefined,
getProjectRoot: vi.fn().mockReturnValue('/test/project/root'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
storage: {
getProjectTempDir: vi.fn().mockReturnValue('/test/temp'),
},

View File

@@ -14,10 +14,9 @@ import type {
SendMessageParameters,
Part,
Tool,
GenerateContentResponseUsageMetadata,
} from '@google/genai';
import { ApiError } from '@google/genai';
import { toParts } from '../code_assist/converter.js';
import { createUserContent } from '@google/genai';
import { ApiError, createUserContent } from '@google/genai';
import { retryWithBackoff } from '../utils/retry.js';
import type { Config } from '../config/config.js';
import {
@@ -30,14 +29,12 @@ import {
logContentRetry,
logContentRetryFailure,
} from '../telemetry/loggers.js';
import { ChatRecordingService } from '../services/chatRecordingService.js';
import { type ChatRecordingService } from '../services/chatRecordingService.js';
import {
ContentRetryEvent,
ContentRetryFailureEvent,
} from '../telemetry/types.js';
import { handleFallback } from '../fallback/handler.js';
import { isFunctionResponse } from '../utils/messageInspectors.js';
import { partListUnionToString } from './geminiRequest.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
export enum StreamEventType {
@@ -200,16 +197,23 @@ export class GeminiChat {
// A promise to represent the current state of the message being sent to the
// model.
private sendPromise: Promise<void> = Promise.resolve();
private readonly chatRecordingService: ChatRecordingService;
/**
* Creates a new GeminiChat instance.
*
* @param config - The configuration object.
* @param generationConfig - Optional generation configuration.
* @param history - Optional initial conversation history.
* @param chatRecordingService - Optional recording service. If provided, chat
* messages will be recorded.
*/
constructor(
private readonly config: Config,
private readonly generationConfig: GenerateContentConfig = {},
private history: Content[] = [],
private readonly chatRecordingService?: ChatRecordingService,
) {
validateHistory(history);
this.chatRecordingService = new ChatRecordingService(config);
this.chatRecordingService.initialize();
}
setSystemInstruction(sysInstr: string) {
@@ -253,20 +257,6 @@ export class GeminiChat {
const userContent = createUserContent(params.message);
// Record user input - capture complete message with all parts (text, files, images, etc.)
// but skip recording function responses (tool call results) as they should be stored in tool call records
if (!isFunctionResponse(userContent)) {
const userMessage = Array.isArray(params.message)
? params.message
: [params.message];
const userMessageContent = partListUnionToString(toParts(userMessage));
this.chatRecordingService.recordMessage({
model,
type: 'user',
content: userMessageContent,
});
}
// Add user content to history ONCE before any attempts.
this.history.push(userContent);
const requestContents = this.getHistory(true);
@@ -505,7 +495,11 @@ export class GeminiChat {
model: string,
streamResponse: AsyncGenerator<GenerateContentResponse>,
): AsyncGenerator<GenerateContentResponse> {
const modelResponseParts: Part[] = [];
// Collect ALL parts from the model response (including thoughts for recording)
const allModelParts: Part[] = [];
// Non-thought parts for history (what we send back to the API)
const historyParts: Part[] = [];
let usageMetadata: GenerateContentResponseUsageMetadata | undefined;
let hasToolCall = false;
let hasFinishReason = false;
@@ -516,23 +510,20 @@ export class GeminiChat {
if (isValidResponse(chunk)) {
const content = chunk.candidates?.[0]?.content;
if (content?.parts) {
if (content.parts.some((part) => part.thought)) {
// Record thoughts
this.recordThoughtFromContent(content);
}
if (content.parts.some((part) => part.functionCall)) {
hasToolCall = true;
}
modelResponseParts.push(
...content.parts.filter((part) => !part.thought),
);
// Collect all parts for recording
allModelParts.push(...content.parts);
// Collect non-thought parts for history
historyParts.push(...content.parts.filter((part) => !part.thought));
}
}
// Record token usage if this chunk has usageMetadata
// Collect token usage for consolidated recording
if (chunk.usageMetadata) {
this.chatRecordingService.recordMessageTokens(chunk.usageMetadata);
usageMetadata = chunk.usageMetadata;
if (chunk.usageMetadata.promptTokenCount !== undefined) {
uiTelemetryService.setLastPromptTokenCount(
chunk.usageMetadata.promptTokenCount,
@@ -543,10 +534,11 @@ export class GeminiChat {
yield chunk; // Yield every chunk to the UI immediately.
}
// String thoughts and consolidate text parts.
const consolidatedParts: Part[] = [];
for (const part of modelResponseParts) {
const lastPart = consolidatedParts[consolidatedParts.length - 1];
// Consolidate text parts for history (merges adjacent text parts).
const consolidatedHistoryParts: Part[] = [];
for (const part of historyParts) {
const lastPart =
consolidatedHistoryParts[consolidatedHistoryParts.length - 1];
if (
lastPart?.text &&
isValidNonThoughtTextPart(lastPart) &&
@@ -554,22 +546,29 @@ export class GeminiChat {
) {
lastPart.text += part.text;
} else {
consolidatedParts.push(part);
consolidatedHistoryParts.push(part);
}
}
const responseText = consolidatedParts
const responseText = consolidatedHistoryParts
.filter((part) => part.text)
.map((part) => part.text)
.join('')
.trim();
// Record model response text from the collected parts
if (responseText) {
this.chatRecordingService.recordMessage({
// Record assistant turn with raw Content and metadata
if (responseText || hasToolCall || usageMetadata) {
this.chatRecordingService?.recordAssistantTurn({
model,
type: 'qwen',
content: responseText,
message: [
...(responseText ? [{ text: responseText }] : []),
...(hasToolCall
? historyParts
.filter((part) => part.functionCall)
.map((part) => ({ functionCall: part.functionCall }))
: []),
],
tokens: usageMetadata,
});
}
@@ -594,39 +593,8 @@ export class GeminiChat {
}
}
this.history.push({ role: 'model', parts: consolidatedParts });
}
/**
* Gets the chat recording service instance.
*/
getChatRecordingService(): ChatRecordingService {
return this.chatRecordingService;
}
/**
* Extracts and records thought from thought content.
*/
private recordThoughtFromContent(content: Content): void {
if (!content.parts || content.parts.length === 0) {
return;
}
const thoughtPart = content.parts[0];
if (thoughtPart.text) {
// Extract subject and description using the same logic as turn.ts
const rawText = thoughtPart.text;
const subjectStringMatches = rawText.match(/\*\*(.*?)\*\*/s);
const subject = subjectStringMatches
? subjectStringMatches[1].trim()
: '';
const description = rawText.replace(/\*\*(.*?)\*\*/s, '').trim();
this.chatRecordingService.recordThought({
subject,
description,
});
}
// Add to history (without thoughts, for API calls)
this.history.push({ role: 'model', parts: consolidatedHistoryParts });
}
}

View File

@@ -62,6 +62,7 @@ describe('executeToolCall', () => {
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null, // No client needed for these tests
getChatRecordingService: () => undefined,
} as unknown as Config;
abortController = new AbortController();

View File

@@ -34,6 +34,7 @@ export async function executeToolCall(
return new Promise<ToolCallResponseInfo>((resolve, reject) => {
new CoreToolScheduler({
config,
chatRecordingService: config.getChatRecordingService(),
outputUpdateHandler: options.outputUpdateHandler,
onAllToolCallsComplete: async (completedToolCalls) => {
if (options.onAllToolCallsComplete) {

View File

@@ -63,6 +63,7 @@ export * from './utils/thoughtUtils.js';
export * from './services/fileDiscoveryService.js';
export * from './services/gitService.js';
export * from './services/chatRecordingService.js';
export * from './services/sessionService.js';
export * from './services/fileSystemService.js';
// Export IDE specific logic
@@ -104,6 +105,7 @@ export * from './tools/mcp-client.js';
export * from './tools/mcp-tool.js';
export * from './tools/task.js';
export * from './tools/todoWrite.js';
export * from './tools/exitPlanMode.js';
// MCP OAuth
export { MCPOAuthProvider } from './mcp/oauth-provider.js';
@@ -121,7 +123,6 @@ export { OAuthUtils } from './mcp/oauth-utils.js';
// Export telemetry functions
export * from './telemetry/index.js';
export { sessionId } from './utils/session.js';
export * from './utils/browser.js';
// OpenAI Logging Utilities
export { OpenAILogger, openaiLogger } from './utils/openaiLogger.js';

View File

@@ -5,27 +5,20 @@
*/
import { randomUUID } from 'node:crypto';
import fs from 'node:fs';
import path from 'node:path';
import {
afterEach,
beforeEach,
describe,
expect,
it,
type MockInstance,
vi,
} from 'vitest';
import { execSync } from 'node:child_process';
import fs from 'node:fs';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type { Config } from '../config/config.js';
import { getProjectHash } from '../utils/paths.js';
import {
ChatRecordingService,
type ConversationRecord,
type ToolCallRecord,
type ChatRecord,
} from './chatRecordingService.js';
import * as jsonl from '../utils/jsonl-utils.js';
import type { Part } from '@google/genai';
vi.mock('node:fs');
vi.mock('node:path');
vi.mock('node:child_process');
vi.mock('node:crypto', () => ({
randomUUID: vi.fn(),
createHash: vi.fn(() => ({
@@ -34,23 +27,28 @@ vi.mock('node:crypto', () => ({
})),
})),
}));
vi.mock('../utils/paths.js');
vi.mock('../utils/jsonl-utils.js');
describe('ChatRecordingService', () => {
let chatRecordingService: ChatRecordingService;
let mockConfig: Config;
let mkdirSyncSpy: MockInstance<typeof fs.mkdirSync>;
let writeFileSyncSpy: MockInstance<typeof fs.writeFileSync>;
let uuidCounter = 0;
beforeEach(() => {
uuidCounter = 0;
mockConfig = {
getSessionId: vi.fn().mockReturnValue('test-session-id'),
getProjectRoot: vi.fn().mockReturnValue('/test/project/root'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
storage: {
getProjectTempDir: vi
.fn()
.mockReturnValue('/test/project/root/.gemini/tmp'),
.mockReturnValue('/test/project/root/.gemini/tmp/hash'),
getProjectDir: vi
.fn()
.mockReturnValue('/test/project/root/.gemini/projects/test-project'),
},
getModel: vi.fn().mockReturnValue('gemini-pro'),
getDebugMode: vi.fn().mockReturnValue(false),
@@ -61,351 +59,270 @@ describe('ChatRecordingService', () => {
isOutputMarkdown: false,
}),
}),
getResumedSessionData: vi.fn().mockReturnValue(undefined),
} as unknown as Config;
vi.mocked(getProjectHash).mockReturnValue('test-project-hash');
vi.mocked(randomUUID).mockReturnValue('this-is-a-test-uuid');
vi.mocked(randomUUID).mockImplementation(
() =>
`00000000-0000-0000-0000-00000000000${++uuidCounter}` as `${string}-${string}-${string}-${string}-${string}`,
);
vi.mocked(path.join).mockImplementation((...args) => args.join('/'));
vi.mocked(path.dirname).mockImplementation((p) => {
const parts = p.split('/');
parts.pop();
return parts.join('/');
});
vi.mocked(execSync).mockReturnValue('main\n');
vi.spyOn(fs, 'mkdirSync').mockImplementation(() => undefined);
vi.spyOn(fs, 'writeFileSync').mockImplementation(() => undefined);
vi.spyOn(fs, 'existsSync').mockReturnValue(false);
chatRecordingService = new ChatRecordingService(mockConfig);
mkdirSyncSpy = vi
.spyOn(fs, 'mkdirSync')
.mockImplementation(() => undefined);
writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
// Mock jsonl-utils
vi.mocked(jsonl.writeLineSync).mockImplementation(() => undefined);
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('initialize', () => {
it('should create a new session if none is provided', () => {
chatRecordingService.initialize();
describe('recordUserMessage', () => {
it('should record a user message immediately', () => {
const userParts: Part[] = [{ text: 'Hello, world!' }];
chatRecordingService.recordUserMessage(userParts);
expect(mkdirSyncSpy).toHaveBeenCalledWith(
'/test/project/root/.gemini/tmp/chats',
{ recursive: true },
);
expect(writeFileSyncSpy).not.toHaveBeenCalled();
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(1);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
expect(record.uuid).toBe('00000000-0000-0000-0000-000000000001');
expect(record.parentUuid).toBeNull();
expect(record.type).toBe('user');
// The service wraps parts in a Content object using createUserContent
expect(record.message).toEqual({ role: 'user', parts: userParts });
expect(record.sessionId).toBe('test-session-id');
expect(record.cwd).toBe('/test/project/root');
expect(record.version).toBe('1.0.0');
expect(record.gitBranch).toBe('main');
});
it('should resume from an existing session if provided', () => {
const readFileSyncSpy = vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify({
sessionId: 'old-session-id',
projectHash: 'test-project-hash',
messages: [],
}),
);
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
chatRecordingService.initialize({
filePath: '/test/project/root/.gemini/tmp/chats/session.json',
conversation: {
sessionId: 'old-session-id',
} as ConversationRecord,
});
expect(mkdirSyncSpy).not.toHaveBeenCalled();
expect(readFileSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).not.toHaveBeenCalled();
});
});
describe('recordMessage', () => {
beforeEach(() => {
chatRecordingService.initialize();
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify({
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [],
}),
);
});
it('should record a new message', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
chatRecordingService.recordMessage({
type: 'user',
content: 'Hello',
it('should chain messages correctly with parentUuid', () => {
chatRecordingService.recordUserMessage([{ text: 'First message' }]);
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: [{ text: 'Response' }],
});
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages).toHaveLength(1);
expect(conversation.messages[0].content).toBe('Hello');
expect(conversation.messages[0].type).toBe('user');
chatRecordingService.recordUserMessage([{ text: 'Second message' }]);
const calls = vi.mocked(jsonl.writeLineSync).mock.calls;
const user1 = calls[0][1] as ChatRecord;
const assistant = calls[1][1] as ChatRecord;
const user2 = calls[2][1] as ChatRecord;
expect(user1.uuid).toBe('00000000-0000-0000-0000-000000000001');
expect(user1.parentUuid).toBeNull();
expect(assistant.uuid).toBe('00000000-0000-0000-0000-000000000002');
expect(assistant.parentUuid).toBe('00000000-0000-0000-0000-000000000001');
expect(user2.uuid).toBe('00000000-0000-0000-0000-000000000003');
expect(user2.parentUuid).toBe('00000000-0000-0000-0000-000000000002');
});
});
it('should create separate messages when recording multiple messages', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: '1',
type: 'user',
content: 'Hello',
timestamp: new Date().toISOString(),
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
chatRecordingService.recordMessage({
type: 'user',
content: 'World',
describe('recordAssistantTurn', () => {
it('should record assistant turn with content only', () => {
const parts: Part[] = [{ text: 'Hello!' }];
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: parts,
});
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages).toHaveLength(2);
expect(conversation.messages[0].content).toBe('Hello');
expect(conversation.messages[1].content).toBe('World');
});
});
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(1);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
describe('recordThought', () => {
it('should queue a thought', () => {
chatRecordingService.initialize();
chatRecordingService.recordThought({
subject: 'Thinking',
description: 'Thinking...',
});
// @ts-expect-error private property
expect(chatRecordingService.queuedThoughts).toHaveLength(1);
// @ts-expect-error private property
expect(chatRecordingService.queuedThoughts[0].subject).toBe('Thinking');
// @ts-expect-error private property
expect(chatRecordingService.queuedThoughts[0].description).toBe(
'Thinking...',
);
});
});
describe('recordMessageTokens', () => {
beforeEach(() => {
chatRecordingService.initialize();
expect(record.type).toBe('assistant');
// The service wraps parts in a Content object using createModelContent
expect(record.message).toEqual({ role: 'model', parts });
expect(record.model).toBe('gemini-pro');
expect(record.usageMetadata).toBeUndefined();
expect(record.toolCallResult).toBeUndefined();
});
it('should update the last message with token info', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: '1',
type: 'qwen',
content: 'Response',
timestamp: new Date().toISOString(),
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
chatRecordingService.recordMessageTokens({
promptTokenCount: 1,
candidatesTokenCount: 2,
totalTokenCount: 3,
cachedContentTokenCount: 0,
});
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages[0]).toEqual({
...initialConversation.messages[0],
it('should record assistant turn with all data', () => {
const parts: Part[] = [
{ thought: true, text: 'Thinking...' },
{ text: 'Here is the result.' },
{ functionCall: { name: 'read_file', args: { path: '/test.txt' } } },
];
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: parts,
tokens: {
input: 1,
output: 2,
total: 3,
cached: 0,
thoughts: 0,
tool: 0,
promptTokenCount: 100,
candidatesTokenCount: 50,
cachedContentTokenCount: 10,
totalTokenCount: 160,
},
});
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
// The service wraps parts in a Content object using createModelContent
expect(record.message).toEqual({ role: 'model', parts });
expect(record.model).toBe('gemini-pro');
expect(record.usageMetadata?.totalTokenCount).toBe(160);
});
it('should queue token info if the last message already has tokens', () => {
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: '1',
type: 'qwen',
content: 'Response',
timestamp: new Date().toISOString(),
tokens: { input: 1, output: 1, total: 2, cached: 0 },
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
chatRecordingService.recordMessageTokens({
promptTokenCount: 2,
candidatesTokenCount: 2,
totalTokenCount: 4,
cachedContentTokenCount: 0,
});
// @ts-expect-error private property
expect(chatRecordingService.queuedTokens).toEqual({
input: 2,
output: 2,
total: 4,
cached: 0,
thoughts: 0,
tool: 0,
});
});
});
describe('recordToolCalls', () => {
beforeEach(() => {
chatRecordingService.initialize();
});
it('should add new tool calls to the last message', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: '1',
type: 'qwen',
content: '',
timestamp: new Date().toISOString(),
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
const toolCall: ToolCallRecord = {
id: 'tool-1',
name: 'testTool',
args: {},
status: 'awaiting_approval',
timestamp: new Date().toISOString(),
};
chatRecordingService.recordToolCalls('gemini-pro', [toolCall]);
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages[0]).toEqual({
...initialConversation.messages[0],
toolCalls: [
{
...toolCall,
displayName: 'Test Tool',
description: 'A test tool',
renderOutputAsMarkdown: false,
},
],
});
});
it('should create a new message if the last message is not from gemini', () => {
const writeFileSyncSpy = vi
.spyOn(fs, 'writeFileSync')
.mockImplementation(() => undefined);
const initialConversation = {
sessionId: 'test-session-id',
projectHash: 'test-project-hash',
messages: [
{
id: 'a-uuid',
type: 'user',
content: 'call a tool',
timestamp: new Date().toISOString(),
},
],
};
vi.spyOn(fs, 'readFileSync').mockReturnValue(
JSON.stringify(initialConversation),
);
const toolCall: ToolCallRecord = {
id: 'tool-1',
name: 'testTool',
args: {},
status: 'awaiting_approval',
timestamp: new Date().toISOString(),
};
chatRecordingService.recordToolCalls('gemini-pro', [toolCall]);
expect(mkdirSyncSpy).toHaveBeenCalled();
expect(writeFileSyncSpy).toHaveBeenCalled();
const conversation = JSON.parse(
writeFileSyncSpy.mock.calls[0][1] as string,
) as ConversationRecord;
expect(conversation.messages).toHaveLength(2);
expect(conversation.messages[1]).toEqual({
...conversation.messages[1],
id: 'this-is-a-test-uuid',
it('should record assistant turn with only tokens', () => {
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
type: 'qwen',
thoughts: [],
content: '',
toolCalls: [
{
...toolCall,
displayName: 'Test Tool',
description: 'A test tool',
renderOutputAsMarkdown: false,
},
],
tokens: {
promptTokenCount: 10,
candidatesTokenCount: 20,
cachedContentTokenCount: 0,
totalTokenCount: 30,
},
});
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
expect(record.message).toBeUndefined();
expect(record.usageMetadata?.totalTokenCount).toBe(30);
});
});
describe('deleteSession', () => {
it('should delete the session file', () => {
const unlinkSyncSpy = vi
.spyOn(fs, 'unlinkSync')
.mockImplementation(() => undefined);
chatRecordingService.deleteSession('test-session-id');
expect(unlinkSyncSpy).toHaveBeenCalledWith(
'/test/project/root/.gemini/tmp/chats/test-session-id.json',
);
describe('recordToolResult', () => {
it('should record tool result with Parts', () => {
// First record a user and assistant message to set up the chain
chatRecordingService.recordUserMessage([{ text: 'Hello' }]);
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: [{ functionCall: { name: 'shell', args: { command: 'ls' } } }],
});
// Now record the tool result (Parts with functionResponse)
const toolResultParts: Part[] = [
{
functionResponse: {
id: 'call-1',
name: 'shell',
response: { output: 'file1.txt\nfile2.txt' },
},
},
];
chatRecordingService.recordToolResult(toolResultParts);
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(3);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[2][1] as ChatRecord;
expect(record.type).toBe('tool_result');
// The service wraps parts in a Content object using createUserContent
expect(record.message).toEqual({ role: 'user', parts: toolResultParts });
});
it('should record tool result with toolCallResult metadata', () => {
const toolResultParts: Part[] = [
{
functionResponse: {
id: 'call-1',
name: 'shell',
response: { output: 'result' },
},
},
];
const metadata = {
callId: 'call-1',
status: 'success',
responseParts: toolResultParts,
resultDisplay: undefined,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any;
chatRecordingService.recordToolResult(toolResultParts, metadata);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
expect(record.type).toBe('tool_result');
// The service wraps parts in a Content object using createUserContent
expect(record.message).toEqual({ role: 'user', parts: toolResultParts });
expect(record.toolCallResult).toBeDefined();
expect(record.toolCallResult?.callId).toBe('call-1');
});
it('should chain tool result correctly with parentUuid', () => {
chatRecordingService.recordUserMessage([{ text: 'Hello' }]);
chatRecordingService.recordAssistantTurn({
model: 'gemini-pro',
message: [{ text: 'Using tool' }],
});
const toolResultParts: Part[] = [
{
functionResponse: {
id: 'call-1',
name: 'shell',
response: { output: 'done' },
},
},
];
chatRecordingService.recordToolResult(toolResultParts);
const userRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
const assistantRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[1][1] as ChatRecord;
const toolResultRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[2][1] as ChatRecord;
expect(userRecord.parentUuid).toBeNull();
expect(assistantRecord.parentUuid).toBe(userRecord.uuid);
expect(toolResultRecord.parentUuid).toBe(assistantRecord.uuid);
});
});
describe('recordSlashCommand', () => {
it('should record slash command with payload and subtype', () => {
chatRecordingService.recordSlashCommand({
phase: 'invocation',
rawCommand: '/about',
});
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(1);
const record = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
expect(record.type).toBe('system');
expect(record.subtype).toBe('slash_command');
expect(record.systemPayload).toMatchObject({
phase: 'invocation',
rawCommand: '/about',
});
});
it('should chain slash command after prior records', () => {
chatRecordingService.recordUserMessage([{ text: 'Hello' }]);
chatRecordingService.recordSlashCommand({
phase: 'result',
rawCommand: '/about',
});
const userRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[0][1] as ChatRecord;
const slashRecord = vi.mocked(jsonl.writeLineSync).mock
.calls[1][1] as ChatRecord;
expect(userRecord.parentUuid).toBeNull();
expect(slashRecord.parentUuid).toBe(userRecord.uuid);
});
});
// Note: Session management tests (listSessions, loadSession, deleteSession, etc.)
// have been moved to sessionService.test.ts
// Session resume integration tests should test via SessionService mock
});

View File

@@ -5,96 +5,127 @@
*/
import { type Config } from '../config/config.js';
import { type Status } from '../core/coreToolScheduler.js';
import { type ThoughtSummary } from '../utils/thoughtUtils.js';
import { getProjectHash } from '../utils/paths.js';
import path from 'node:path';
import fs from 'node:fs';
import { randomUUID } from 'node:crypto';
import type {
PartListUnion,
GenerateContentResponseUsageMetadata,
import {
type PartListUnion,
type Content,
type GenerateContentResponseUsageMetadata,
createUserContent,
createModelContent,
} from '@google/genai';
import * as jsonl from '../utils/jsonl-utils.js';
import { getGitBranch } from '../utils/gitUtils.js';
import type {
ChatCompressionInfo,
ToolCallResponseInfo,
} from '../core/turn.js';
import type { Status } from '../core/coreToolScheduler.js';
import type { TaskResultDisplay } from '../tools/tools.js';
import type { UiEvent } from '../telemetry/uiTelemetry.js';
/**
* Token usage summary for a message or conversation.
* A single record stored in the JSONL file.
* Forms a tree structure via uuid/parentUuid for future checkpointing support.
*
* Each record is self-contained with full metadata, enabling:
* - Append-only writes (crash-safe)
* - Tree reconstruction by following parentUuid chain
* - Future checkpointing by branching from any historical record
*/
export interface TokensSummary {
input: number; // promptTokenCount
output: number; // candidatesTokenCount
cached: number; // cachedContentTokenCount
thoughts?: number; // thoughtsTokenCount
tool?: number; // toolUsePromptTokenCount
total: number; // totalTokenCount
}
/**
* Base fields common to all messages.
*/
export interface BaseMessageRecord {
id: string;
timestamp: string;
content: PartListUnion;
}
/**
* Record of a tool call execution within a conversation.
*/
export interface ToolCallRecord {
id: string;
name: string;
args: Record<string, unknown>;
result?: PartListUnion | null;
status: Status;
timestamp: string;
// UI-specific fields for display purposes
displayName?: string;
description?: string;
resultDisplay?: string;
renderOutputAsMarkdown?: boolean;
}
/**
* Message type and message type-specific fields.
*/
export type ConversationRecordExtra =
| {
type: 'user';
}
| {
type: 'qwen';
toolCalls?: ToolCallRecord[];
thoughts?: Array<ThoughtSummary & { timestamp: string }>;
tokens?: TokensSummary | null;
model?: string;
};
/**
* A single message record in a conversation.
*/
export type MessageRecord = BaseMessageRecord & ConversationRecordExtra;
/**
* Complete conversation record stored in session files.
*/
export interface ConversationRecord {
export interface ChatRecord {
/** Unique identifier for this logical message */
uuid: string;
/** UUID of the parent message; null for root (first message in session) */
parentUuid: string | null;
/** Session identifier - groups records into a logical conversation */
sessionId: string;
projectHash: string;
startTime: string;
lastUpdated: string;
messages: MessageRecord[];
/** ISO 8601 timestamp of when the record was created */
timestamp: string;
/**
* Message type: user input, assistant response, tool result, or system event.
* System records are append-only events that can alter how history is reconstructed
* (e.g., chat compression checkpoints) while keeping the original UI history intact.
*/
type: 'user' | 'assistant' | 'tool_result' | 'system';
/** Optional system subtype for distinguishing system behaviors */
subtype?: 'chat_compression' | 'slash_command' | 'ui_telemetry';
/** Working directory at time of message */
cwd: string;
/** CLI version for compatibility tracking */
version: string;
/** Current git branch, if available */
gitBranch?: string;
// Content field - raw API format for history reconstruction
/**
* The actual Content object (role + parts) sent to/from LLM.
* This is stored in the exact format needed for API calls, enabling
* direct aggregation into Content[] for session resumption.
* Contains: text, functionCall, functionResponse, thought parts, etc.
*/
message?: Content;
// Metadata fields (not part of API Content)
/** Token usage statistics */
usageMetadata?: GenerateContentResponseUsageMetadata;
/** Model used for this response */
model?: string;
/**
* Tool call metadata for UI recovery.
* Contains enriched info (displayName, status, result, etc.) not in API format.
*/
toolCallResult?: Partial<ToolCallResponseInfo>;
/**
* Payload for system records. For chat compression, this stores all data needed
* to reconstruct the compressed history without mutating the original UI list.
*/
systemPayload?:
| ChatCompressionRecordPayload
| SlashCommandRecordPayload
| UiTelemetryRecordPayload;
}
/**
* Data structure for resuming an existing session.
* Stored payload for chat compression checkpoints. This allows us to rebuild the
* effective chat history on resume while keeping the original UI-visible history.
*/
export interface ResumedSessionData {
conversation: ConversationRecord;
filePath: string;
export interface ChatCompressionRecordPayload {
/** Compression metrics/status returned by the compression service */
info: ChatCompressionInfo;
/**
* Snapshot of the new history contents that the model should see after
* compression (summary turns + retained tail). Stored as Content[] for
* resume reconstruction.
*/
compressedHistory: Content[];
}
export interface SlashCommandRecordPayload {
/** Whether this record represents the invocation or the resulting output. */
phase: 'invocation' | 'result';
/** Raw user-entered slash command (e.g., "/about"). */
rawCommand: string;
/**
* History items the UI displayed for this command, in the same shape used by
* the CLI (without IDs). Stored as plain objects for replay on resume.
*/
outputHistoryItems?: Array<Record<string, unknown>>;
}
/**
* Service for automatically recording chat conversations to disk.
* Stored payload for UI telemetry replay.
*/
export interface UiTelemetryRecordPayload {
uiEvent: UiEvent;
}
/**
* Service for recording the current chat session to disk.
*
* This service provides comprehensive conversation recording that captures:
* - All user and assistant messages
@@ -102,346 +133,276 @@ export interface ResumedSessionData {
* - Token usage statistics
* - Assistant thoughts and reasoning
*
* Sessions are stored as JSON files in ~/.qwen/tmp/<project_hash>/chats/
* **API Design:**
* - `recordUserMessage()` - Records a user message (immediate write)
* - `recordAssistantTurn()` - Records an assistant turn with all data (immediate write)
* - `recordToolResult()` - Records tool results (immediate write)
*
* **Storage Format:** JSONL files with tree-structured records.
* Each record has uuid/parentUuid fields enabling:
* - Append-only writes (never rewrite the file)
* - Linear history reconstruction
* - Future checkpointing (branch from any historical point)
*
* File location: ~/.qwen/tmp/<project_id>/chats/
*
* For session management (list, load, remove), use SessionService.
*/
export class ChatRecordingService {
private conversationFile: string | null = null;
private cachedLastConvData: string | null = null;
private sessionId: string;
private projectHash: string;
private queuedThoughts: Array<ThoughtSummary & { timestamp: string }> = [];
private queuedTokens: TokensSummary | null = null;
private config: Config;
/** UUID of the last written record in the chain */
private lastRecordUuid: string | null = null;
private readonly config: Config;
constructor(config: Config) {
this.config = config;
this.sessionId = config.getSessionId();
this.projectHash = getProjectHash(config.getProjectRoot());
this.lastRecordUuid =
config.getResumedSessionData()?.lastCompletedUuid ?? null;
}
/**
* Initializes the chat recording service: creates a new conversation file and associates it with
* this service instance, or resumes from an existing session if resumedSessionData is provided.
* Returns the session ID.
* @returns The session ID.
*/
initialize(resumedSessionData?: ResumedSessionData): void {
private getSessionId(): string {
return this.config.getSessionId();
}
/**
* Ensures the chats directory exists, creating it if it doesn't exist.
* @returns The path to the chats directory.
* @throws Error if the directory cannot be created.
*/
private ensureChatsDir(): string {
const projectDir = this.config.storage.getProjectDir();
const chatsDir = path.join(projectDir, 'chats');
try {
if (resumedSessionData) {
// Resume from existing session
this.conversationFile = resumedSessionData.filePath;
this.sessionId = resumedSessionData.conversation.sessionId;
// Update the session ID in the existing file
this.updateConversation((conversation) => {
conversation.sessionId = this.sessionId;
});
// Clear any cached data to force fresh reads
this.cachedLastConvData = null;
} else {
// Create new session
const chatsDir = path.join(
this.config.storage.getProjectTempDir(),
'chats',
);
fs.mkdirSync(chatsDir, { recursive: true });
const timestamp = new Date()
.toISOString()
.slice(0, 16)
.replace(/:/g, '-');
const filename = `session-${timestamp}-${this.sessionId.slice(
0,
8,
)}.json`;
this.conversationFile = path.join(chatsDir, filename);
this.writeConversation({
sessionId: this.sessionId,
projectHash: this.projectHash,
startTime: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
messages: [],
});
}
// Clear any queued data since this is a fresh start
this.queuedThoughts = [];
this.queuedTokens = null;
} catch (error) {
console.error('Error initializing chat recording service:', error);
throw error;
fs.mkdirSync(chatsDir, { recursive: true });
} catch {
// Ignore errors - directory will be created if it doesn't exist
}
return chatsDir;
}
private getLastMessage(
conversation: ConversationRecord,
): MessageRecord | undefined {
return conversation.messages.at(-1);
/**
* Ensures the conversation file exists, creating it if it doesn't exist.
* Uses atomic file creation to avoid race conditions.
* @returns The path to the conversation file.
* @throws Error if the file cannot be created or accessed.
*/
private ensureConversationFile(): string {
const chatsDir = this.ensureChatsDir();
const sessionId = this.getSessionId();
const safeFilename = `${sessionId}.jsonl`;
const conversationFile = path.join(chatsDir, safeFilename);
if (fs.existsSync(conversationFile)) {
return conversationFile;
}
try {
// Use 'wx' flag for exclusive creation - atomic operation that fails if file exists
// This avoids the TOCTOU race condition of existsSync + writeFileSync
fs.writeFileSync(conversationFile, '', { flag: 'wx', encoding: 'utf8' });
} catch (error) {
const nodeError = error as NodeJS.ErrnoException;
// EEXIST means file already exists, which is expected and fine
if (nodeError.code !== 'EEXIST') {
const message = error instanceof Error ? error.message : String(error);
throw new Error(
`Failed to create conversation file at ${conversationFile}: ${message}`,
);
}
}
return conversationFile;
}
private newMessage(
type: ConversationRecordExtra['type'],
content: PartListUnion,
): MessageRecord {
/**
* Creates base fields for a ChatRecord.
*/
private createBaseRecord(
type: ChatRecord['type'],
): Omit<ChatRecord, 'message' | 'tokens' | 'model' | 'toolCallsMetadata'> {
return {
id: randomUUID(),
uuid: randomUUID(),
parentUuid: this.lastRecordUuid,
sessionId: this.getSessionId(),
timestamp: new Date().toISOString(),
type,
content,
cwd: this.config.getProjectRoot(),
version: this.config.getCliVersion() || 'unknown',
gitBranch: getGitBranch(this.config.getProjectRoot()),
};
}
/**
* Records a message in the conversation.
* Appends a record to the session file and updates lastRecordUuid.
*/
recordMessage(message: {
private appendRecord(record: ChatRecord): void {
try {
const conversationFile = this.ensureConversationFile();
jsonl.writeLineSync(conversationFile, record);
this.lastRecordUuid = record.uuid;
} catch (error) {
console.error('Error appending record:', error);
throw error;
}
}
/**
* Records a user message.
* Writes immediately to disk.
*
* @param message The raw PartListUnion object as used with the API
*/
recordUserMessage(message: PartListUnion): void {
try {
const record: ChatRecord = {
...this.createBaseRecord('user'),
message: createUserContent(message),
};
this.appendRecord(record);
} catch (error) {
console.error('Error saving user message:', error);
}
}
/**
* Records an assistant turn with all available data.
* Writes immediately to disk.
*
* @param data.message The raw PartListUnion object from the model response
* @param data.model The model name
* @param data.tokens Token usage statistics
* @param data.toolCallsMetadata Enriched tool call info for UI recovery
*/
recordAssistantTurn(data: {
model: string;
type: ConversationRecordExtra['type'];
content: PartListUnion;
message?: PartListUnion;
tokens?: GenerateContentResponseUsageMetadata;
}): void {
if (!this.conversationFile) return;
try {
this.updateConversation((conversation) => {
const msg = this.newMessage(message.type, message.content);
if (msg.type === 'qwen') {
// If it's a new Gemini message then incorporate any queued thoughts.
conversation.messages.push({
...msg,
thoughts: this.queuedThoughts,
tokens: this.queuedTokens,
model: message.model,
});
this.queuedThoughts = [];
this.queuedTokens = null;
} else {
// Or else just add it.
conversation.messages.push(msg);
}
});
const record: ChatRecord = {
...this.createBaseRecord('assistant'),
model: data.model,
};
if (data.message !== undefined) {
record.message = createModelContent(data.message);
}
if (data.tokens) {
record.usageMetadata = data.tokens;
}
this.appendRecord(record);
} catch (error) {
console.error('Error saving message:', error);
throw error;
console.error('Error saving assistant turn:', error);
}
}
/**
 * Queues a thought from the assistant's reasoning process.
 *
 * Thoughts are timestamped and buffered in memory (queuedThoughts) rather
 * than written to disk immediately; presumably they are attached to the
 * next assistant message when it is recorded — confirm against the
 * assistant-recording flow (outside this view).
 *
 * @throws Re-throws any error after logging it.
 */
recordThought(thought: ThoughtSummary): void {
  // No conversation file yet: nothing to record against.
  if (!this.conversationFile) return;
  try {
    this.queuedThoughts.push({
      ...thought,
      timestamp: new Date().toISOString(),
    });
  } catch (error) {
    console.error('Error saving thought:', error);
    throw error;
  }
}
/**
* Updates the tokens for the last message in the conversation (which should be by Gemini).
*/
recordMessageTokens(
respUsageMetadata: GenerateContentResponseUsageMetadata,
recordToolResult(
message: PartListUnion,
toolCallResult?: Partial<ToolCallResponseInfo> & { status: Status },
): void {
if (!this.conversationFile) return;
try {
const tokens = {
input: respUsageMetadata.promptTokenCount ?? 0,
output: respUsageMetadata.candidatesTokenCount ?? 0,
cached: respUsageMetadata.cachedContentTokenCount ?? 0,
thoughts: respUsageMetadata.thoughtsTokenCount ?? 0,
tool: respUsageMetadata.toolUsePromptTokenCount ?? 0,
total: respUsageMetadata.totalTokenCount ?? 0,
const record: ChatRecord = {
...this.createBaseRecord('tool_result'),
message: createUserContent(message),
};
this.updateConversation((conversation) => {
const lastMsg = this.getLastMessage(conversation);
// If the last message already has token info, it's because this new token info is for a
// new message that hasn't been recorded yet.
if (lastMsg && lastMsg.type === 'qwen' && !lastMsg.tokens) {
lastMsg.tokens = tokens;
this.queuedTokens = null;
} else {
this.queuedTokens = tokens;
}
});
} catch (error) {
console.error('Error updating message tokens:', error);
throw error;
}
}
/**
* Adds tool calls to the last message in the conversation (which should be by Gemini).
* This method enriches tool calls with metadata from the ToolRegistry.
*/
recordToolCalls(model: string, toolCalls: ToolCallRecord[]): void {
if (!this.conversationFile) return;
// Enrich tool calls with metadata from the ToolRegistry
const toolRegistry = this.config.getToolRegistry();
const enrichedToolCalls = toolCalls.map((toolCall) => {
const toolInstance = toolRegistry.getTool(toolCall.name);
return {
...toolCall,
displayName: toolInstance?.displayName || toolCall.name,
description: toolInstance?.description || '',
renderOutputAsMarkdown: toolInstance?.isOutputMarkdown || false,
};
});
try {
this.updateConversation((conversation) => {
const lastMsg = this.getLastMessage(conversation);
// If a tool call was made, but the last message isn't from Gemini, it's because Gemini is
// calling tools without starting the message with text. So the user submits a prompt, and
// Gemini immediately calls a tool (maybe with some thinking first). In that case, create
// a new empty Gemini message.
// Also if there are any queued thoughts, it means this tool call(s) is from a new Gemini
// message--because it's thought some more since we last, if ever, created a new Gemini
// message from tool calls, when we dequeued the thoughts.
if (toolCallResult) {
// special case for task executions - we don't want to record the tool calls
if (
!lastMsg ||
lastMsg.type !== 'qwen' ||
this.queuedThoughts.length > 0
typeof toolCallResult.resultDisplay === 'object' &&
toolCallResult.resultDisplay !== null &&
'type' in toolCallResult.resultDisplay &&
toolCallResult.resultDisplay.type === 'task_execution'
) {
const newMsg: MessageRecord = {
...this.newMessage('qwen' as const, ''),
// This isn't strictly necessary, but TypeScript apparently can't
// tell that the first parameter to newMessage() becomes the
// resulting message's type, and so it thinks that toolCalls may
// not be present. Confirming the type here satisfies it.
type: 'qwen' as const,
toolCalls: enrichedToolCalls,
thoughts: this.queuedThoughts,
model,
const taskResult = toolCallResult.resultDisplay as TaskResultDisplay;
record.toolCallResult = {
...toolCallResult,
resultDisplay: {
...taskResult,
toolCalls: [],
},
};
// If there are any queued thoughts join them to this message.
if (this.queuedThoughts.length > 0) {
newMsg.thoughts = this.queuedThoughts;
this.queuedThoughts = [];
}
// If there's any queued tokens info join it to this message.
if (this.queuedTokens) {
newMsg.tokens = this.queuedTokens;
this.queuedTokens = null;
}
conversation.messages.push(newMsg);
} else {
// The last message is an existing Gemini message that we need to update.
// Update any existing tool call entries.
if (!lastMsg.toolCalls) {
lastMsg.toolCalls = [];
}
lastMsg.toolCalls = lastMsg.toolCalls.map((toolCall) => {
// If there are multiple tool calls with the same ID, this will take the first one.
const incomingToolCall = toolCalls.find(
(tc) => tc.id === toolCall.id,
);
if (incomingToolCall) {
// Merge in the new data to keep preserve thoughts, etc., that were assigned to older
// versions of the tool call.
return { ...toolCall, ...incomingToolCall };
} else {
return toolCall;
}
});
// Add any new tools calls that aren't in the message yet.
for (const toolCall of enrichedToolCalls) {
const existingToolCall = lastMsg.toolCalls.find(
(tc) => tc.id === toolCall.id,
);
if (!existingToolCall) {
lastMsg.toolCalls.push(toolCall);
}
}
record.toolCallResult = toolCallResult;
}
});
}
this.appendRecord(record);
} catch (error) {
console.error('Error adding tool call to message:', error);
throw error;
console.error('Error saving tool result:', error);
}
}
/**
* Loads up the conversation record from disk.
* Records a slash command invocation as a system record. This keeps the model
* history clean while allowing resume to replay UI output for commands like
* /about.
*/
private readConversation(): ConversationRecord {
recordSlashCommand(payload: SlashCommandRecordPayload): void {
try {
this.cachedLastConvData = fs.readFileSync(this.conversationFile!, 'utf8');
return JSON.parse(this.cachedLastConvData);
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
console.error('Error reading conversation file:', error);
throw error;
}
// Placeholder empty conversation if file doesn't exist.
return {
sessionId: this.sessionId,
projectHash: this.projectHash,
startTime: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
messages: [],
const record: ChatRecord = {
...this.createBaseRecord('system'),
type: 'system',
subtype: 'slash_command',
systemPayload: payload,
};
this.appendRecord(record);
} catch (error) {
console.error('Error saving slash command record:', error);
}
}
/**
* Saves the conversation record; overwrites the file.
* Records a chat compression checkpoint as a system record. This keeps the UI
* history immutable while allowing resume/continue flows to reconstruct the
* compressed model-facing history from the stored snapshot.
*/
private writeConversation(conversation: ConversationRecord): void {
recordChatCompression(payload: ChatCompressionRecordPayload): void {
try {
if (!this.conversationFile) return;
// Don't write the file yet until there's at least one message.
if (conversation.messages.length === 0) return;
const record: ChatRecord = {
...this.createBaseRecord('system'),
type: 'system',
subtype: 'chat_compression',
systemPayload: payload,
};
// Only write the file if this change would change the file.
if (this.cachedLastConvData !== JSON.stringify(conversation, null, 2)) {
conversation.lastUpdated = new Date().toISOString();
const newContent = JSON.stringify(conversation, null, 2);
this.cachedLastConvData = newContent;
fs.writeFileSync(this.conversationFile, newContent);
}
this.appendRecord(record);
} catch (error) {
console.error('Error writing conversation file:', error);
throw error;
console.error('Error saving chat compression record:', error);
}
}
/**
* Convenient helper for updating the conversation without file reading and writing and time
* updating boilerplate.
* Records a UI telemetry event for replaying metrics on resume.
*/
private updateConversation(
updateFn: (conversation: ConversationRecord) => void,
) {
const conversation = this.readConversation();
updateFn(conversation);
this.writeConversation(conversation);
}
/**
* Deletes a session file by session ID.
*/
deleteSession(sessionId: string): void {
recordUiTelemetryEvent(uiEvent: UiEvent): void {
try {
const chatsDir = path.join(
this.config.storage.getProjectTempDir(),
'chats',
);
const sessionPath = path.join(chatsDir, `${sessionId}.json`);
fs.unlinkSync(sessionPath);
const record: ChatRecord = {
...this.createBaseRecord('system'),
type: 'system',
subtype: 'ui_telemetry',
systemPayload: { uiEvent },
};
this.appendRecord(record);
} catch (error) {
console.error('Error deleting session:', error);
throw error;
console.error('Error saving ui telemetry record:', error);
}
}
}

View File

@@ -0,0 +1,721 @@
/**
* @license
* Copyright 2025 Qwen Code
* SPDX-License-Identifier: Apache-2.0
*/
import fs from 'node:fs';
import path from 'node:path';
import {
afterEach,
beforeEach,
describe,
expect,
it,
type MockInstance,
vi,
} from 'vitest';
import { getProjectHash } from '../utils/paths.js';
import {
SessionService,
buildApiHistoryFromConversation,
type ConversationRecord,
} from './sessionService.js';
import { CompressionStatus } from '../core/turn.js';
import type { ChatRecord } from './chatRecordingService.js';
import * as jsonl from '../utils/jsonl-utils.js';
vi.mock('node:path');
vi.mock('../utils/paths.js');
vi.mock('../utils/jsonl-utils.js');
describe('SessionService', () => {
  let sessionService: SessionService;
  let readdirSyncSpy: MockInstance<typeof fs.readdirSync>;
  let statSyncSpy: MockInstance<typeof fs.statSync>;
  let unlinkSyncSpy: MockInstance<typeof fs.unlinkSync>;
  beforeEach(() => {
    // Every cwd hashes to a fixed value by default so the fixtures below
    // are recognized as belonging to the current project.
    vi.mocked(getProjectHash).mockReturnValue('test-project-hash');
    // 'node:path' is fully mocked (vi.mock above), so re-implement
    // join/dirname with simple '/'-based logic for platform-independent paths.
    vi.mocked(path.join).mockImplementation((...args) => args.join('/'));
    vi.mocked(path.dirname).mockImplementation((p) => {
      const parts = p.split('/');
      parts.pop();
      return parts.join('/');
    });
    sessionService = new SessionService('/test/project/root');
    // Default fs behavior: empty chats dir; any stat'ed path is a fresh
    // regular file; unlink succeeds silently.
    readdirSyncSpy = vi.spyOn(fs, 'readdirSync').mockReturnValue([]);
    statSyncSpy = vi.spyOn(fs, 'statSync').mockImplementation(
      () =>
        ({
          mtimeMs: Date.now(),
          isFile: () => true,
        }) as fs.Stats,
    );
    unlinkSyncSpy = vi
      .spyOn(fs, 'unlinkSync')
      .mockImplementation(() => undefined);
    // Mock jsonl-utils: sessions read as empty unless a test overrides this.
    vi.mocked(jsonl.read).mockResolvedValue([]);
    vi.mocked(jsonl.readLines).mockResolvedValue([]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  // Test session IDs (UUID-like format)
  const sessionIdA = '550e8400-e29b-41d4-a716-446655440000';
  const sessionIdB = '6ba7b810-9dad-11d1-80b4-00c04fd430c8';
  const sessionIdC = '6ba7b811-9dad-11d1-80b4-00c04fd430c8';
  // Test records
  // recordA1: lone user message of session A (on branch 'main').
  const recordA1: ChatRecord = {
    uuid: 'a1',
    parentUuid: null,
    sessionId: sessionIdA,
    timestamp: '2024-01-01T00:00:00Z',
    type: 'user',
    message: { role: 'user', parts: [{ text: 'hello session a' }] },
    cwd: '/test/project/root',
    version: '1.0.0',
    gitBranch: 'main',
  };
  // recordB1 -> recordB2: a user turn and its assistant reply in session B,
  // chained via parentUuid.
  const recordB1: ChatRecord = {
    uuid: 'b1',
    parentUuid: null,
    sessionId: sessionIdB,
    timestamp: '2024-01-02T00:00:00Z',
    type: 'user',
    message: { role: 'user', parts: [{ text: 'hi session b' }] },
    cwd: '/test/project/root',
    version: '1.0.0',
    gitBranch: 'feature',
  };
  const recordB2: ChatRecord = {
    uuid: 'b2',
    parentUuid: 'b1',
    sessionId: sessionIdB,
    timestamp: '2024-01-02T02:00:00Z',
    type: 'assistant',
    message: { role: 'model', parts: [{ text: 'hey back' }] },
    cwd: '/test/project/root',
    version: '1.0.0',
  };
  describe('listSessions', () => {
    it('should return empty list when no sessions exist', async () => {
      readdirSyncSpy.mockReturnValue([]);
      const result = await sessionService.listSessions();
      expect(result.items).toHaveLength(0);
      expect(result.hasMore).toBe(false);
      expect(result.nextCursor).toBeUndefined();
    });
    it('should return empty list when chats directory does not exist', async () => {
      // A missing chats dir surfaces as ENOENT from readdirSync; the service
      // should treat that as "no sessions" rather than throwing.
      const error = new Error('ENOENT') as NodeJS.ErrnoException;
      error.code = 'ENOENT';
      readdirSyncSpy.mockImplementation(() => {
        throw error;
      });
      const result = await sessionService.listSessions();
      expect(result.items).toHaveLength(0);
      expect(result.hasMore).toBe(false);
    });
    it('should list sessions sorted by mtime descending', async () => {
      const now = Date.now();
      readdirSyncSpy.mockReturnValue([
        `${sessionIdA}.jsonl`,
        `${sessionIdB}.jsonl`,
      ] as unknown as Array<fs.Dirent<Buffer>>);
      statSyncSpy.mockImplementation((filePath: fs.PathLike) => {
        // Local string deliberately shadows the mocked 'path' module.
        const path = filePath.toString();
        return {
          mtimeMs: path.includes(sessionIdB) ? now : now - 10000,
          isFile: () => true,
        } as fs.Stats;
      });
      vi.mocked(jsonl.readLines).mockImplementation(
        async (filePath: string) => {
          if (filePath.includes(sessionIdA)) {
            return [recordA1];
          }
          return [recordB1];
        },
      );
      const result = await sessionService.listSessions();
      expect(result.items).toHaveLength(2);
      // sessionIdB should be first (more recent mtime)
      expect(result.items[0].sessionId).toBe(sessionIdB);
      expect(result.items[1].sessionId).toBe(sessionIdA);
    });
    it('should extract prompt text from first record', async () => {
      const now = Date.now();
      readdirSyncSpy.mockReturnValue([
        `${sessionIdA}.jsonl`,
      ] as unknown as Array<fs.Dirent<Buffer>>);
      statSyncSpy.mockReturnValue({
        mtimeMs: now,
        isFile: () => true,
      } as fs.Stats);
      vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
      const result = await sessionService.listSessions();
      expect(result.items[0].prompt).toBe('hello session a');
      expect(result.items[0].gitBranch).toBe('main');
    });
    it('should truncate long prompts', async () => {
      const longPrompt = 'A'.repeat(300);
      const recordWithLongPrompt: ChatRecord = {
        ...recordA1,
        message: { role: 'user', parts: [{ text: longPrompt }] },
      };
      readdirSyncSpy.mockReturnValue([
        `${sessionIdA}.jsonl`,
      ] as unknown as Array<fs.Dirent<Buffer>>);
      statSyncSpy.mockReturnValue({
        mtimeMs: Date.now(),
        isFile: () => true,
      } as fs.Stats);
      vi.mocked(jsonl.readLines).mockResolvedValue([recordWithLongPrompt]);
      const result = await sessionService.listSessions();
      expect(result.items[0].prompt.length).toBe(203); // 200 + '...'
      expect(result.items[0].prompt.endsWith('...')).toBe(true);
    });
    it('should paginate with size parameter', async () => {
      const now = Date.now();
      readdirSyncSpy.mockReturnValue([
        `${sessionIdA}.jsonl`,
        `${sessionIdB}.jsonl`,
        `${sessionIdC}.jsonl`,
      ] as unknown as Array<fs.Dirent<Buffer>>);
      statSyncSpy.mockImplementation((filePath: fs.PathLike) => {
        const path = filePath.toString();
        // Recency order: C (now) > B (now - 1s) > A (now - 2s).
        let mtime = now;
        if (path.includes(sessionIdB)) mtime = now - 1000;
        if (path.includes(sessionIdA)) mtime = now - 2000;
        return {
          mtimeMs: mtime,
          isFile: () => true,
        } as fs.Stats;
      });
      vi.mocked(jsonl.readLines).mockImplementation(
        async (filePath: string) => {
          if (filePath.includes(sessionIdC)) {
            return [{ ...recordA1, sessionId: sessionIdC }];
          }
          if (filePath.includes(sessionIdB)) {
            return [recordB1];
          }
          return [recordA1];
        },
      );
      const result = await sessionService.listSessions({ size: 2 });
      expect(result.items).toHaveLength(2);
      expect(result.items[0].sessionId).toBe(sessionIdC); // newest
      expect(result.items[1].sessionId).toBe(sessionIdB);
      expect(result.hasMore).toBe(true);
      expect(result.nextCursor).toBeDefined();
    });
    it('should paginate with cursor parameter', async () => {
      const now = Date.now();
      const oldMtime = now - 2000;
      const cursorMtime = now - 1000;
      readdirSyncSpy.mockReturnValue([
        `${sessionIdA}.jsonl`,
        `${sessionIdB}.jsonl`,
        `${sessionIdC}.jsonl`,
      ] as unknown as Array<fs.Dirent<Buffer>>);
      statSyncSpy.mockImplementation((filePath: fs.PathLike) => {
        const path = filePath.toString();
        let mtime = now;
        if (path.includes(sessionIdB)) mtime = cursorMtime;
        if (path.includes(sessionIdA)) mtime = oldMtime;
        return {
          mtimeMs: mtime,
          isFile: () => true,
        } as fs.Stats;
      });
      vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
      // Get items older than cursor (cursorMtime); comparison is strict,
      // so the session AT cursorMtime (B) is excluded.
      const result = await sessionService.listSessions({ cursor: cursorMtime });
      expect(result.items).toHaveLength(1);
      expect(result.items[0].sessionId).toBe(sessionIdA);
      expect(result.hasMore).toBe(false);
    });
    it('should skip files from different projects', async () => {
      readdirSyncSpy.mockReturnValue([
        `${sessionIdA}.jsonl`,
      ] as unknown as Array<fs.Dirent<Buffer>>);
      statSyncSpy.mockReturnValue({
        mtimeMs: Date.now(),
        isFile: () => true,
      } as fs.Stats);
      // This record is from a different cwd (different project)
      const differentProjectRecord: ChatRecord = {
        ...recordA1,
        cwd: '/different/project',
      };
      vi.mocked(jsonl.readLines).mockResolvedValue([differentProjectRecord]);
      vi.mocked(getProjectHash).mockImplementation((cwd: string) =>
        cwd === '/test/project/root'
          ? 'test-project-hash'
          : 'other-project-hash',
      );
      const result = await sessionService.listSessions();
      expect(result.items).toHaveLength(0);
    });
    it('should skip files that do not match session file pattern', async () => {
      readdirSyncSpy.mockReturnValue([
        `${sessionIdA}.jsonl`, // valid
        'not-a-uuid.jsonl', // invalid pattern
        'readme.txt', // not jsonl
        '.hidden.jsonl', // hidden file
      ] as unknown as Array<fs.Dirent<Buffer>>);
      statSyncSpy.mockReturnValue({
        mtimeMs: Date.now(),
        isFile: () => true,
      } as fs.Stats);
      vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
      const result = await sessionService.listSessions();
      // Only the valid UUID pattern file should be processed
      expect(result.items).toHaveLength(1);
      expect(result.items[0].sessionId).toBe(sessionIdA);
    });
  });
  describe('loadSession', () => {
    it('should load a session by id and reconstruct history', async () => {
      const now = Date.now();
      statSyncSpy.mockReturnValue({
        mtimeMs: now,
        isFile: () => true,
      } as fs.Stats);
      vi.mocked(jsonl.read).mockResolvedValue([recordB1, recordB2]);
      const loaded = await sessionService.loadSession(sessionIdB);
      expect(loaded?.conversation.sessionId).toBe(sessionIdB);
      expect(loaded?.conversation.messages).toHaveLength(2);
      expect(loaded?.conversation.messages[0].uuid).toBe('b1');
      expect(loaded?.conversation.messages[1].uuid).toBe('b2');
      // New messages should chain from the last completed record.
      expect(loaded?.lastCompletedUuid).toBe('b2');
    });
    it('should return undefined when session file is empty', async () => {
      vi.mocked(jsonl.read).mockResolvedValue([]);
      const loaded = await sessionService.loadSession('nonexistent');
      expect(loaded).toBeUndefined();
    });
    it('should return undefined when session belongs to different project', async () => {
      const now = Date.now();
      statSyncSpy.mockReturnValue({
        mtimeMs: now,
        isFile: () => true,
      } as fs.Stats);
      const differentProjectRecord: ChatRecord = {
        ...recordA1,
        cwd: '/different/project',
      };
      vi.mocked(jsonl.read).mockResolvedValue([differentProjectRecord]);
      vi.mocked(getProjectHash).mockImplementation((cwd: string) =>
        cwd === '/test/project/root'
          ? 'test-project-hash'
          : 'other-project-hash',
      );
      const loaded = await sessionService.loadSession(sessionIdA);
      expect(loaded).toBeUndefined();
    });
    it('should reconstruct tree-structured history correctly', async () => {
      // Linear parentUuid chain r1 -> r2 -> r3; expect chronological output.
      const records: ChatRecord[] = [
        {
          uuid: 'r1',
          parentUuid: null,
          sessionId: 'test',
          timestamp: '2024-01-01T00:00:00Z',
          type: 'user',
          message: { role: 'user', parts: [{ text: 'First' }] },
          cwd: '/test/project/root',
          version: '1.0.0',
        },
        {
          uuid: 'r2',
          parentUuid: 'r1',
          sessionId: 'test',
          timestamp: '2024-01-01T00:01:00Z',
          type: 'assistant',
          message: { role: 'model', parts: [{ text: 'Second' }] },
          cwd: '/test/project/root',
          version: '1.0.0',
        },
        {
          uuid: 'r3',
          parentUuid: 'r2',
          sessionId: 'test',
          timestamp: '2024-01-01T00:02:00Z',
          type: 'user',
          message: { role: 'user', parts: [{ text: 'Third' }] },
          cwd: '/test/project/root',
          version: '1.0.0',
        },
      ];
      statSyncSpy.mockReturnValue({
        mtimeMs: Date.now(),
        isFile: () => true,
      } as fs.Stats);
      vi.mocked(jsonl.read).mockResolvedValue(records);
      const loaded = await sessionService.loadSession('test');
      expect(loaded?.conversation.messages).toHaveLength(3);
      expect(loaded?.conversation.messages.map((m) => m.uuid)).toEqual([
        'r1',
        'r2',
        'r3',
      ]);
    });
    it('should aggregate multiple records with same uuid', async () => {
      // Three physical records share uuid 'a1' (thought part, usage metadata,
      // then the final text part); loading should merge them into one message.
      const records: ChatRecord[] = [
        {
          uuid: 'u1',
          parentUuid: null,
          sessionId: 'test',
          timestamp: '2024-01-01T00:00:00Z',
          type: 'user',
          message: { role: 'user', parts: [{ text: 'Hello' }] },
          cwd: '/test/project/root',
          version: '1.0.0',
        },
        // Multiple records for same assistant message
        {
          uuid: 'a1',
          parentUuid: 'u1',
          sessionId: 'test',
          timestamp: '2024-01-01T00:01:00Z',
          type: 'assistant',
          message: {
            role: 'model',
            parts: [{ thought: true, text: 'Thinking...' }],
          },
          cwd: '/test/project/root',
          version: '1.0.0',
        },
        {
          uuid: 'a1',
          parentUuid: 'u1',
          sessionId: 'test',
          timestamp: '2024-01-01T00:01:01Z',
          type: 'assistant',
          usageMetadata: {
            promptTokenCount: 10,
            candidatesTokenCount: 20,
            cachedContentTokenCount: 0,
            totalTokenCount: 30,
          },
          cwd: '/test/project/root',
          version: '1.0.0',
        },
        {
          uuid: 'a1',
          parentUuid: 'u1',
          sessionId: 'test',
          timestamp: '2024-01-01T00:01:02Z',
          type: 'assistant',
          message: { role: 'model', parts: [{ text: 'Response' }] },
          model: 'gemini-pro',
          cwd: '/test/project/root',
          version: '1.0.0',
        },
      ];
      statSyncSpy.mockReturnValue({
        mtimeMs: Date.now(),
        isFile: () => true,
      } as fs.Stats);
      vi.mocked(jsonl.read).mockResolvedValue(records);
      const loaded = await sessionService.loadSession('test');
      expect(loaded?.conversation.messages).toHaveLength(2);
      const assistantMsg = loaded?.conversation.messages[1];
      expect(assistantMsg?.uuid).toBe('a1');
      expect(assistantMsg?.message?.parts).toHaveLength(2);
      expect(assistantMsg?.usageMetadata?.totalTokenCount).toBe(30);
      expect(assistantMsg?.model).toBe('gemini-pro');
    });
  });
  describe('removeSession', () => {
    it('should remove session file', async () => {
      vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
      const result = await sessionService.removeSession(sessionIdA);
      expect(result).toBe(true);
      expect(unlinkSyncSpy).toHaveBeenCalled();
    });
    it('should return false when session does not exist', async () => {
      // Empty readLines means the file has no records / is absent.
      vi.mocked(jsonl.readLines).mockResolvedValue([]);
      const result = await sessionService.removeSession(
        '00000000-0000-0000-0000-000000000000',
      );
      expect(result).toBe(false);
      expect(unlinkSyncSpy).not.toHaveBeenCalled();
    });
    it('should return false for session from different project', async () => {
      const differentProjectRecord: ChatRecord = {
        ...recordA1,
        cwd: '/different/project',
      };
      vi.mocked(jsonl.readLines).mockResolvedValue([differentProjectRecord]);
      vi.mocked(getProjectHash).mockImplementation((cwd: string) =>
        cwd === '/test/project/root'
          ? 'test-project-hash'
          : 'other-project-hash',
      );
      const result = await sessionService.removeSession(sessionIdA);
      expect(result).toBe(false);
      expect(unlinkSyncSpy).not.toHaveBeenCalled();
    });
    it('should handle file not found error', async () => {
      // ENOENT while reading should be swallowed and reported as "not removed".
      const error = new Error('ENOENT') as NodeJS.ErrnoException;
      error.code = 'ENOENT';
      vi.mocked(jsonl.readLines).mockRejectedValue(error);
      const result = await sessionService.removeSession(
        '00000000-0000-0000-0000-000000000000',
      );
      expect(result).toBe(false);
    });
  });
  describe('loadLastSession', () => {
    it('should return the most recent session (same as getLatestSession)', async () => {
      const now = Date.now();
      readdirSyncSpy.mockReturnValue([
        `${sessionIdA}.jsonl`,
        `${sessionIdB}.jsonl`,
      ] as unknown as Array<fs.Dirent<Buffer>>);
      // Session B has the more recent mtime, so it should be picked.
      statSyncSpy.mockImplementation((filePath: fs.PathLike) => {
        const path = filePath.toString();
        return {
          mtimeMs: path.includes(sessionIdB) ? now : now - 10000,
          isFile: () => true,
        } as fs.Stats;
      });
      vi.mocked(jsonl.readLines).mockImplementation(
        async (filePath: string) => {
          if (filePath.includes(sessionIdB)) {
            return [recordB1];
          }
          return [recordA1];
        },
      );
      vi.mocked(jsonl.read).mockResolvedValue([recordB1, recordB2]);
      const latest = await sessionService.loadLastSession();
      expect(latest?.conversation.sessionId).toBe(sessionIdB);
    });
    it('should return undefined when no sessions exist', async () => {
      readdirSyncSpy.mockReturnValue([]);
      const latest = await sessionService.loadLastSession();
      expect(latest).toBeUndefined();
    });
  });
  describe('sessionExists', () => {
    it('should return true for existing session', async () => {
      vi.mocked(jsonl.readLines).mockResolvedValue([recordA1]);
      const exists = await sessionService.sessionExists(sessionIdA);
      expect(exists).toBe(true);
    });
    it('should return false for non-existing session', async () => {
      vi.mocked(jsonl.readLines).mockResolvedValue([]);
      const exists = await sessionService.sessionExists(
        '00000000-0000-0000-0000-000000000000',
      );
      expect(exists).toBe(false);
    });
    it('should return false for session from different project', async () => {
      // Existence is scoped to the current project hash, not just the file.
      const differentProjectRecord: ChatRecord = {
        ...recordA1,
        cwd: '/different/project',
      };
      vi.mocked(jsonl.readLines).mockResolvedValue([differentProjectRecord]);
      vi.mocked(getProjectHash).mockImplementation((cwd: string) =>
        cwd === '/test/project/root'
          ? 'test-project-hash'
          : 'other-project-hash',
      );
      const exists = await sessionService.sessionExists(sessionIdA);
      expect(exists).toBe(false);
    });
  });
  describe('buildApiHistoryFromConversation', () => {
    it('should return linear messages when no compression checkpoint exists', () => {
      const assistantA1: ChatRecord = {
        ...recordB2,
        sessionId: sessionIdA,
        parentUuid: recordA1.uuid,
      };
      const conversation: ConversationRecord = {
        sessionId: sessionIdA,
        projectHash: 'test-project-hash',
        startTime: '2024-01-01T00:00:00Z',
        lastUpdated: '2024-01-01T00:00:00Z',
        messages: [recordA1, assistantA1],
      };
      const history = buildApiHistoryFromConversation(conversation);
      expect(history).toEqual([recordA1.message, assistantA1.message]);
    });
    it('should use compressedHistory snapshot and append subsequent records after compression', () => {
      // A 'chat_compression' system record carries a snapshot of the
      // compressed model-facing history; everything before it should be
      // replaced by the snapshot, everything after appended verbatim.
      const compressionRecord: ChatRecord = {
        uuid: 'c1',
        parentUuid: 'b2',
        sessionId: sessionIdA,
        timestamp: '2024-01-02T03:00:00Z',
        type: 'system',
        subtype: 'chat_compression',
        cwd: '/test/project/root',
        version: '1.0.0',
        gitBranch: 'main',
        systemPayload: {
          info: {
            originalTokenCount: 100,
            newTokenCount: 50,
            compressionStatus: CompressionStatus.COMPRESSED,
          },
          compressedHistory: [
            { role: 'user', parts: [{ text: 'summary' }] },
            {
              role: 'model',
              parts: [{ text: 'Got it. Thanks for the additional context!' }],
            },
            recordB2.message!,
          ],
        },
      };
      const postCompressionRecord: ChatRecord = {
        uuid: 'c2',
        parentUuid: 'c1',
        sessionId: sessionIdA,
        timestamp: '2024-01-02T04:00:00Z',
        type: 'user',
        message: { role: 'user', parts: [{ text: 'new question' }] },
        cwd: '/test/project/root',
        version: '1.0.0',
        gitBranch: 'main',
      };
      const conversation: ConversationRecord = {
        sessionId: sessionIdA,
        projectHash: 'test-project-hash',
        startTime: '2024-01-01T00:00:00Z',
        lastUpdated: '2024-01-02T04:00:00Z',
        messages: [
          recordA1,
          recordB2,
          compressionRecord,
          postCompressionRecord,
        ],
      };
      const history = buildApiHistoryFromConversation(conversation);
      expect(history).toEqual([
        { role: 'user', parts: [{ text: 'summary' }] },
        {
          role: 'model',
          parts: [{ text: 'Got it. Thanks for the additional context!' }],
        },
        recordB2.message,
        postCompressionRecord.message,
      ]);
    });
  });
});

View File

@@ -0,0 +1,656 @@
/**
* @license
* Copyright 2025 Qwen Code
* SPDX-License-Identifier: Apache-2.0
*/
import { Storage } from '../config/storage.js';
import { getProjectHash } from '../utils/paths.js';
import path from 'node:path';
import fs from 'node:fs';
import readline from 'node:readline';
import type { Content, Part } from '@google/genai';
import * as jsonl from '../utils/jsonl-utils.js';
import type {
ChatCompressionRecordPayload,
ChatRecord,
UiTelemetryRecordPayload,
} from './chatRecordingService.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
/**
 * Session item for list display.
 * Contains essential info extracted from the first record of a session file.
 */
export interface SessionListItem {
  /** Unique session identifier */
  sessionId: string;
  /** Working directory at session start */
  cwd: string;
  /** ISO 8601 timestamp when session started */
  startTime: string;
  /** File modification time (used for ordering and pagination) */
  mtime: number;
  /** First user prompt text, truncated to 200 chars plus '...' for display */
  prompt: string;
  /** Git branch at session start, if available */
  gitBranch?: string;
  /** Full path to the session file */
  filePath: string;
  /** Number of messages in the session (unique user/assistant message UUIDs) */
  messageCount: number;
}
/**
 * Pagination options for listing sessions.
 */
export interface ListSessionsOptions {
  /**
   * Cursor for pagination (mtime of the last item from previous page).
   * Items with mtime < cursor will be returned (strict comparison, so the
   * item AT the cursor is excluded); pass the previous page's nextCursor.
   * If undefined, starts from the most recent.
   */
  cursor?: number;
  /**
   * Maximum number of items to return.
   * @default 20
   */
  size?: number;
}
/**
 * Result of listing sessions with pagination info.
 */
export interface ListSessionsResult {
  /** Session items for this page */
  items: SessionListItem[];
  /**
   * Cursor for next page (mtime of the last item on this page).
   * Undefined if no more items.
   */
  nextCursor?: number;
  /** Whether there are more items after this page */
  hasMore: boolean;
}
/**
 * Complete conversation reconstructed from ChatRecords.
 * Used for resuming sessions and API compatibility.
 */
export interface ConversationRecord {
  sessionId: string;
  projectHash: string;
  startTime: string;
  lastUpdated: string;
  /** Messages in chronological order (reconstructed from the parentUuid tree) */
  messages: ChatRecord[];
}
/**
 * Data structure for resuming an existing session.
 */
export interface ResumedSessionData {
  /** Reconstructed conversation for the resumed session */
  conversation: ConversationRecord;
  /** Path to the backing JSONL session file */
  filePath: string;
  /** UUID of the last completed message - new messages should use this as parentUuid */
  lastCompletedUuid: string | null;
}
/**
 * Maximum number of files to process when listing sessions.
 * This is a safety limit to prevent performance issues with very large chat directories.
 */
const MAX_FILES_TO_PROCESS = 10000;
/**
 * Pattern for validating session file names.
 * Session files are named as `${sessionId}.jsonl` where sessionId is a UUID-like identifier
 * (32-36 hex characters, optionally with hyphens).
 * NOTE(review): the character class allows any mix of hex digits and hyphens,
 * so e.g. an all-hyphen name would also match — confirm whether stricter UUID
 * validation is needed.
 */
const SESSION_FILE_PATTERN = /^[0-9a-fA-F-]{32,36}\.jsonl$/;
/** Maximum number of lines to scan when looking for the first prompt text. */
const MAX_PROMPT_SCAN_LINES = 10;
/**
 * Service for managing chat sessions.
 *
 * This service handles:
 * - Listing sessions with pagination (ordered by mtime)
 * - Loading full session data for resumption
 * - Removing sessions
 *
 * Sessions are stored as JSONL files, one per session.
 * File location: ~/.qwen/tmp/<project_id>/chats/
 */
export class SessionService {
  // Storage rooted at the given cwd; provides the project dir for chat files.
  private readonly storage: Storage;
  // Hash of the project cwd; used to tell this project's sessions apart from
  // other projects that may share the same chats directory.
  private readonly projectHash: string;
  constructor(cwd: string) {
    this.storage = new Storage(cwd);
    this.projectHash = getProjectHash(cwd);
  }
private getChatsDir(): string {
return path.join(this.storage.getProjectDir(), 'chats');
}
/**
* Extracts the first user prompt text from a Content object.
*/
private extractPromptText(message: Content | undefined): string {
if (!message?.parts) return '';
for (const part of message.parts as Part[]) {
if ('text' in part) {
const textPart = part as { text: string };
const text = textPart.text;
// Truncate long prompts for display
return text.length > 200 ? `${text.slice(0, 200)}...` : text;
}
}
return '';
}
/**
* Finds the first available prompt text by scanning the first N records,
* preferring user messages. Returns an empty string if none found.
*/
private extractFirstPromptFromRecords(records: ChatRecord[]): string {
for (const record of records) {
if (record.type !== 'user') continue;
const prompt = this.extractPromptText(record.message);
if (prompt) return prompt;
}
return '';
}
/**
* Counts unique message UUIDs in a session file.
* This gives the number of logical messages in the session.
*/
private async countSessionMessages(filePath: string): Promise<number> {
const uniqueUuids = new Set<string>();
try {
const fileStream = fs.createReadStream(filePath);
const rl = readline.createInterface({
input: fileStream,
crlfDelay: Infinity,
});
for await (const line of rl) {
const trimmed = line.trim();
if (!trimmed) continue;
try {
const record = JSON.parse(trimmed) as ChatRecord;
if (record.type === 'user' || record.type === 'assistant') {
uniqueUuids.add(record.uuid);
}
} catch {
// Ignore malformed lines
continue;
}
}
return uniqueUuids.size;
} catch {
return 0;
}
}
/**
* Lists sessions for the current project with pagination.
*
* Sessions are ordered by file modification time (most recent first).
* Uses cursor-based pagination with mtime as the cursor.
*
* Only reads the first line of each JSONL file for efficiency.
* Files are filtered by UUID pattern first, then by project hash.
*
* @param options Pagination options
* @returns Paginated list of sessions
*/
async listSessions(
options: ListSessionsOptions = {},
): Promise<ListSessionsResult> {
const { cursor, size = 20 } = options;
const chatsDir = this.getChatsDir();
// Get all valid session files (matching UUID pattern) with their stats
let files: Array<{ name: string; mtime: number }> = [];
try {
const fileNames = fs.readdirSync(chatsDir);
for (const name of fileNames) {
// Only process files matching session file pattern
if (!SESSION_FILE_PATTERN.test(name)) continue;
const filePath = path.join(chatsDir, name);
try {
const stats = fs.statSync(filePath);
files.push({ name, mtime: stats.mtimeMs });
} catch {
// Skip files we can't stat
continue;
}
}
} catch (error) {
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
return { items: [], hasMore: false };
}
throw error;
}
// Sort by mtime descending (most recent first)
files.sort((a, b) => b.mtime - a.mtime);
// Apply cursor filter (items with mtime < cursor)
if (cursor !== undefined) {
files = files.filter((f) => f.mtime < cursor);
}
// Iterate through files until we have enough matching ones.
// Different projects may share the same chats directory due to path sanitization,
// so we need to filter by project hash and continue until we have enough items.
const items: SessionListItem[] = [];
let filesProcessed = 0;
let lastProcessedMtime: number | undefined;
let hasMoreFiles = false;
for (const file of files) {
// Safety limit to prevent performance issues
if (filesProcessed >= MAX_FILES_TO_PROCESS) {
hasMoreFiles = true;
break;
}
// Stop if we have enough items
if (items.length >= size) {
hasMoreFiles = true;
break;
}
filesProcessed++;
lastProcessedMtime = file.mtime;
const filePath = path.join(chatsDir, file.name);
const records = await jsonl.readLines<ChatRecord>(
filePath,
MAX_PROMPT_SCAN_LINES,
);
if (records.length === 0) continue;
const firstRecord = records[0];
// Skip if not matching current project
// We use cwd comparison since first record doesn't have projectHash
const recordProjectHash = getProjectHash(firstRecord.cwd);
if (recordProjectHash !== this.projectHash) continue;
// Count messages for this session
const messageCount = await this.countSessionMessages(filePath);
const prompt = this.extractFirstPromptFromRecords(records);
items.push({
sessionId: firstRecord.sessionId,
cwd: firstRecord.cwd,
startTime: firstRecord.timestamp,
mtime: file.mtime,
prompt,
gitBranch: firstRecord.gitBranch,
filePath,
messageCount,
});
}
// Determine next cursor (mtime of last processed file)
// Only set if there are more files to process
const nextCursor =
hasMoreFiles && lastProcessedMtime !== undefined
? lastProcessedMtime
: undefined;
return {
items,
nextCursor,
hasMore: hasMoreFiles,
};
}
/**
* Reads all records from a session file.
*/
private async readAllRecords(filePath: string): Promise<ChatRecord[]> {
try {
return await jsonl.read<ChatRecord>(filePath);
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
console.error('Error reading session file:', error);
}
return [];
}
}
/**
* Aggregates multiple records with the same uuid into a single ChatRecord.
* Merges content fields (message, tokens, model, toolCallResult).
*/
private aggregateRecords(records: ChatRecord[]): ChatRecord {
if (records.length === 0) {
throw new Error('Cannot aggregate empty records array');
}
const base = { ...records[0] };
for (let i = 1; i < records.length; i++) {
const record = records[i];
// Merge message (Content objects)
if (record.message !== undefined) {
if (base.message === undefined) {
base.message = record.message;
} else {
base.message = {
role: base.message.role,
parts: [
...(base.message.parts || []),
...(record.message.parts || []),
],
};
}
}
// Merge tokens (take the latest)
if (record.usageMetadata) {
base.usageMetadata = record.usageMetadata;
}
// Merge toolCallResult
if (record.toolCallResult && !base.toolCallResult) {
base.toolCallResult = record.toolCallResult;
}
// Merge model (take the first non-empty one)
if (record.model && !base.model) {
base.model = record.model;
}
// Update timestamp to the latest
if (record.timestamp > base.timestamp) {
base.timestamp = record.timestamp;
}
}
return base;
}
/**
* Reconstructs a linear conversation from tree-structured records.
*/
private reconstructHistory(
records: ChatRecord[],
leafUuid?: string,
): ChatRecord[] {
if (records.length === 0) return [];
const recordsByUuid = new Map<string, ChatRecord[]>();
for (const record of records) {
const existing = recordsByUuid.get(record.uuid) || [];
existing.push(record);
recordsByUuid.set(record.uuid, existing);
}
let currentUuid: string | null =
leafUuid ?? records[records.length - 1].uuid;
const uuidChain: string[] = [];
const visited = new Set<string>();
while (currentUuid && !visited.has(currentUuid)) {
visited.add(currentUuid);
uuidChain.push(currentUuid);
const recordsForUuid = recordsByUuid.get(currentUuid);
if (!recordsForUuid || recordsForUuid.length === 0) break;
currentUuid = recordsForUuid[0].parentUuid;
}
uuidChain.reverse();
const messages: ChatRecord[] = [];
for (const uuid of uuidChain) {
const recordsForUuid = recordsByUuid.get(uuid);
if (recordsForUuid && recordsForUuid.length > 0) {
messages.push(this.aggregateRecords(recordsForUuid));
}
}
return messages;
}
/**
* Loads a session by its session ID.
* Reconstructs the full conversation from tree-structured records.
*
* @param sessionId The session ID to load
* @returns Session data for resumption, or null if not found
*/
async loadSession(
sessionId: string,
): Promise<ResumedSessionData | undefined> {
const chatsDir = this.getChatsDir();
const filePath = path.join(chatsDir, `${sessionId}.jsonl`);
const records = await this.readAllRecords(filePath);
if (records.length === 0) {
return;
}
// Verify this session belongs to the current project
const firstRecord = records[0];
const recordProjectHash = getProjectHash(firstRecord.cwd);
if (recordProjectHash !== this.projectHash) {
return;
}
// Reconstruct linear history
const messages = this.reconstructHistory(records);
if (messages.length === 0) {
return;
}
const lastMessage = messages[messages.length - 1];
const stats = fs.statSync(filePath);
const conversation: ConversationRecord = {
sessionId: firstRecord.sessionId,
projectHash: this.projectHash,
startTime: firstRecord.timestamp,
lastUpdated: new Date(stats.mtimeMs).toISOString(),
messages,
};
return {
conversation,
filePath,
lastCompletedUuid: lastMessage.uuid,
};
}
/**
* Removes a session by its session ID.
*
* @param sessionId The session ID to remove
* @returns true if removed, false if not found
*/
async removeSession(sessionId: string): Promise<boolean> {
const chatsDir = this.getChatsDir();
const filePath = path.join(chatsDir, `${sessionId}.jsonl`);
try {
// Verify the file exists and belongs to this project
const records = await jsonl.readLines<ChatRecord>(filePath, 1);
if (records.length === 0) {
return false;
}
const recordProjectHash = getProjectHash(records[0].cwd);
if (recordProjectHash !== this.projectHash) {
return false;
}
fs.unlinkSync(filePath);
return true;
} catch (error) {
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
return false;
}
throw error;
}
}
/**
* Loads the most recent session for the current project.
* Combines listSessions and loadSession for convenience.
*
* @returns Session data for resumption, or undefined if no sessions exist
*/
async loadLastSession(): Promise<ResumedSessionData | undefined> {
const result = await this.listSessions({ size: 1 });
if (result.items.length === 0) {
return;
}
return this.loadSession(result.items[0].sessionId);
}
/**
* Checks if a session exists by its session ID.
*
* @param sessionId The session ID to check
* @returns true if session exists and belongs to current project
*/
async sessionExists(sessionId: string): Promise<boolean> {
const chatsDir = this.getChatsDir();
const filePath = path.join(chatsDir, `${sessionId}.jsonl`);
try {
const records = await jsonl.readLines<ChatRecord>(filePath, 1);
if (records.length === 0) {
return false;
}
const recordProjectHash = getProjectHash(records[0].cwd);
return recordProjectHash === this.projectHash;
} catch {
return false;
}
}
}
/**
 * Builds the model-facing chat history (Content[]) from a reconstructed
 * conversation. UI history stays intact; only the API history used on resume
 * applies chat-compression checkpoints.
 *
 * Strategy:
 * - Find the latest system/chat_compression record carrying a snapshot.
 * - Use its compressedHistory snapshot as the base history.
 * - Append all messages recorded after that checkpoint (skipping system records).
 * - With no checkpoint, return the linear message list (message field only).
 */
export function buildApiHistoryFromConversation(
  conversation: ConversationRecord,
): Content[] {
  const records = conversation.messages;

  // Scan backwards for the most recent compression checkpoint with a snapshot.
  let checkpointIndex = -1;
  let checkpointHistory: Content[] | undefined;
  for (let i = records.length - 1; i >= 0; i--) {
    const record = records[i];
    if (record.type !== 'system' || record.subtype !== 'chat_compression') {
      continue;
    }
    const payload = record.systemPayload as
      | ChatCompressionRecordPayload
      | undefined;
    if (payload?.compressedHistory) {
      checkpointIndex = i;
      checkpointHistory = payload.compressedHistory;
      break;
    }
  }

  if (checkpointHistory === undefined || checkpointIndex < 0) {
    // Fallback: no checkpoint — the API history is every recorded message.
    const history: Content[] = [];
    for (const record of records) {
      if (record.message !== undefined) {
        history.push(structuredClone(record.message as Content));
      }
    }
    return history;
  }

  // Start from the compressed snapshot, then replay every later non-system turn.
  const history: Content[] = structuredClone(checkpointHistory);
  for (let i = checkpointIndex + 1; i < records.length; i++) {
    const record = records[i];
    if (record.type === 'system') continue;
    if (record.message) {
      history.push(structuredClone(record.message as Content));
    }
  }
  return history;
}
/**
* Replays stored UI telemetry events to rebuild metrics when resuming a session.
* Also restores the last prompt token count from the best available source.
*/
export function replayUiTelemetryFromConversation(
conversation: ConversationRecord,
): void {
uiTelemetryService.reset();
for (const record of conversation.messages) {
if (record.type !== 'system' || record.subtype !== 'ui_telemetry') {
continue;
}
const payload = record.systemPayload as
| UiTelemetryRecordPayload
| undefined;
const uiEvent = payload?.uiEvent;
if (uiEvent) {
uiTelemetryService.addEvent(uiEvent);
}
}
const resumePromptTokens = getResumePromptTokenCount(conversation);
if (resumePromptTokens !== undefined) {
uiTelemetryService.setLastPromptTokenCount(resumePromptTokens);
}
}
/**
 * Returns the best available prompt token count for resuming telemetry:
 * - If a chat compression checkpoint exists, use its new token count.
 * - Otherwise, fall back to the last assistant record's usage metadata.
 *
 * NOTE(review): the fallback prefers totalTokenCount over promptTokenCount —
 * presumably because on resume the entire prior turn becomes the next prompt —
 * confirm this ordering is intentional.
 */
export function getResumePromptTokenCount(
  conversation: ConversationRecord,
): number | undefined {
  let assistantFallback: number | undefined;
  // Walk newest-to-oldest so the first compression checkpoint found wins.
  for (const record of [...conversation.messages].reverse()) {
    if (record.type === 'system' && record.subtype === 'chat_compression') {
      const payload = record.systemPayload as
        | ChatCompressionRecordPayload
        | undefined;
      if (payload?.info) {
        return payload.info.newTokenCount;
      }
    }
    if (assistantFallback === undefined && record.type === 'assistant') {
      const usage = record.usageMetadata;
      if (usage) {
        assistantFallback = usage.totalTokenCount ?? usage.promptTokenCount;
      }
    }
  }
  return assistantFallback;
}

View File

@@ -55,9 +55,7 @@ describe('SubagentManager', () => {
} as unknown as ToolRegistry;
// Create mock Config object using test utility
mockConfig = makeFakeConfig({
sessionId: 'test-session-id',
});
mockConfig = makeFakeConfig({});
// Mock the tool registry and project root methods
vi.spyOn(mockConfig, 'getToolRegistry').mockReturnValue(mockToolRegistry);

View File

@@ -65,7 +65,6 @@ async function createMockConfig(
toolRegistryMocks = {},
): Promise<{ config: Config; toolRegistry: ToolRegistry }> {
const configParams: ConfigParameters = {
sessionId: 'test-session',
model: DEFAULT_GEMINI_MODEL,
targetDir: '.',
debugMode: false,

View File

@@ -572,6 +572,7 @@ export class SubAgentScope {
const responded = new Set<string>();
let resolveBatch: (() => void) | null = null;
const scheduler = new CoreToolScheduler({
config: this.runtimeContext,
outputUpdateHandler: undefined,
onAllToolCallsComplete: async (completedCalls) => {
for (const call of completedCalls) {
@@ -710,7 +711,6 @@ export class SubAgentScope {
}
},
getPreferredEditor: () => undefined,
config: this.runtimeContext,
onEditorClose: () => {},
});

View File

@@ -142,6 +142,7 @@ describe('ClearcutLogger', () => {
const loggerConfig = makeFakeConfig({
...config,
sessionId: 'test-session-id',
});
ClearcutLogger.clearInstance();
@@ -248,7 +249,7 @@ describe('ClearcutLogger', () => {
it('logs default metadata', () => {
// Define expected values
const session_id = 'my-session-id';
const session_id = 'test-session-id';
const auth_type = AuthType.USE_GEMINI;
const google_accounts = 123;
const surface = 'ide-1234';
@@ -260,7 +261,7 @@ describe('ClearcutLogger', () => {
// Setup logger with expected values
const { logger, loggerConfig } = setup({
lifetimeGoogleAccounts: google_accounts,
config: { sessionId: session_id },
config: {},
});
vi.spyOn(loggerConfig, 'getContentGeneratorConfig').mockReturnValue({
authType: auth_type,

View File

@@ -25,7 +25,7 @@ export {
parseTelemetryTargetValue,
} from './config.js';
export {
logCliConfiguration,
logStartSession,
logUserPrompt,
logToolCall,
logApiRequest,

View File

@@ -41,7 +41,7 @@ import {
import {
logApiRequest,
logApiResponse,
logCliConfiguration,
logStartSession,
logUserPrompt,
logToolCall,
logFlashFallback,
@@ -116,7 +116,7 @@ describe('loggers', () => {
});
it('logs the chat compression event to QwenLogger', () => {
const mockConfig = makeFakeConfig();
const mockConfig = makeFakeConfig({ sessionId: 'test-session-id' });
const event = makeChatCompressionEvent({
tokens_before: 9001,
@@ -131,7 +131,7 @@ describe('loggers', () => {
});
it('records the chat compression event to OTEL', () => {
const mockConfig = makeFakeConfig();
const mockConfig = makeFakeConfig({ sessionId: 'test-session-id' });
logChatCompression(
mockConfig,
@@ -177,10 +177,12 @@ describe('loggers', () => {
getTargetDir: () => 'target-dir',
getProxy: () => 'http://test.proxy.com:8080',
getOutputFormat: () => OutputFormat.JSON,
getToolRegistry: () => undefined,
getChatRecordingService: () => undefined,
} as unknown as Config;
const startSessionEvent = new StartSessionEvent(mockConfig);
logCliConfiguration(mockConfig, startSessionEvent);
logStartSession(mockConfig, startSessionEvent);
expect(mockLogger.emit).toHaveBeenCalledWith({
body: 'CLI configuration loaded.',
@@ -281,7 +283,8 @@ describe('loggers', () => {
getUsageStatisticsEnabled: () => true,
getTelemetryEnabled: () => true,
getTelemetryLogPromptsEnabled: () => true,
} as Config;
getChatRecordingService: () => undefined,
} as unknown as Config;
const mockMetrics = {
recordApiResponseMetrics: vi.fn(),
@@ -368,7 +371,7 @@ describe('loggers', () => {
getUsageStatisticsEnabled: () => true,
getTelemetryEnabled: () => true,
getTelemetryLogPromptsEnabled: () => true,
} as Config;
} as unknown as Config;
it('should log an API request with request_text', () => {
const event = new ApiRequestEvent(
@@ -498,6 +501,7 @@ describe('loggers', () => {
const cfg2 = {
getSessionId: () => 'test-session-id',
getTargetDir: () => 'target-dir',
getProjectRoot: () => '/test/project/root',
getProxy: () => 'http://test.proxy.com:8080',
getContentGeneratorConfig: () =>
({ model: 'test-model' }) as ContentGeneratorConfig,
@@ -530,7 +534,8 @@ describe('loggers', () => {
getUsageStatisticsEnabled: () => true,
getTelemetryEnabled: () => true,
getTelemetryLogPromptsEnabled: () => true,
} as Config;
getChatRecordingService: () => undefined,
} as unknown as Config;
const mockMetrics = {
recordToolCallMetrics: vi.fn(),
@@ -1029,7 +1034,7 @@ describe('loggers', () => {
});
it('logs the event to Clearcut and OTEL', () => {
const mockConfig = makeFakeConfig();
const mockConfig = makeFakeConfig({ sessionId: 'test-session-id' });
const event = new MalformedJsonResponseEvent('test-model');
logMalformedJsonResponse(mockConfig, event);

View File

@@ -101,7 +101,7 @@ function getCommonAttributes(config: Config): LogAttributes {
};
}
export function logCliConfiguration(
export function logStartSession(
config: Config,
event: StartSessionEvent,
): void {
@@ -172,6 +172,7 @@ export function logToolCall(config: Config, event: ToolCallEvent): void {
'event.timestamp': new Date().toISOString(),
} as UiEvent;
uiTelemetryService.addEvent(uiEvent);
config.getChatRecordingService()?.recordUiTelemetryEvent(uiEvent);
QwenLogger.getInstance(config)?.logToolCallEvent(event);
if (!isTelemetrySdkInitialized()) return;
@@ -339,6 +340,7 @@ export function logApiError(config: Config, event: ApiErrorEvent): void {
'event.timestamp': new Date().toISOString(),
} as UiEvent;
uiTelemetryService.addEvent(uiEvent);
config.getChatRecordingService()?.recordUiTelemetryEvent(uiEvent);
QwenLogger.getInstance(config)?.logApiErrorEvent(event);
if (!isTelemetrySdkInitialized()) return;
@@ -405,6 +407,7 @@ export function logApiResponse(config: Config, event: ApiResponseEvent): void {
'event.timestamp': new Date().toISOString(),
} as UiEvent;
uiTelemetryService.addEvent(uiEvent);
config.getChatRecordingService()?.recordUiTelemetryEvent(uiEvent);
QwenLogger.getInstance(config)?.logApiResponseEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {

View File

@@ -134,7 +134,9 @@ describe('Telemetry Metrics', () => {
});
it('records token compression with the correct attributes', () => {
const config = makeFakeConfig({});
const config = makeFakeConfig({
sessionId: 'test-session-id',
});
initializeMetricsModule(config);
recordChatCompressionMetricsModule(config, {

View File

@@ -59,6 +59,7 @@ const makeFakeConfig = (overrides: Partial<Config> = {}): Config => {
getTelemetryLogPromptsEnabled: () => false,
getFileFilteringRespectGitIgnore: () => true,
getOutputFormat: () => 'text',
getToolRegistry: () => undefined,
...overrides,
};
return defaults as Config;

View File

@@ -39,8 +39,8 @@ import type {
ExtensionDisableEvent,
AuthEvent,
RipgrepFallbackEvent,
EndSessionEvent,
} from '../types.js';
import { EndSessionEvent } from '../types.js';
import type {
RumEvent,
RumViewEvent,
@@ -102,6 +102,7 @@ export class QwenLogger {
private lastFlushTime: number = Date.now();
private userId: string;
private sessionId: string;
/**
@@ -115,17 +116,12 @@ export class QwenLogger {
*/
private pendingFlush: boolean = false;
private isShutdown: boolean = false;
private constructor(config?: Config) {
private constructor(config: Config) {
this.config = config;
this.events = new FixedDeque<RumEvent>(Array, MAX_EVENTS);
this.installationManager = new InstallationManager();
this.userId = this.generateUserId();
this.sessionId =
typeof this.config?.getSessionId === 'function'
? this.config.getSessionId()
: '';
this.sessionId = config.getSessionId();
}
private generateUserId(): string {
@@ -139,10 +135,6 @@ export class QwenLogger {
return undefined;
if (!QwenLogger.instance) {
QwenLogger.instance = new QwenLogger(config);
process.on(
'exit',
QwenLogger.instance.shutdown.bind(QwenLogger.instance),
);
}
return QwenLogger.instance;
@@ -241,10 +233,10 @@ export class QwenLogger {
id: this.userId,
},
session: {
id: this.sessionId,
id: this.sessionId || this.config?.getSessionId(),
},
view: {
id: this.sessionId,
id: this.sessionId || this.config?.getSessionId(),
name: 'qwen-code-cli',
},
os: osMetadata,
@@ -364,7 +356,24 @@ export class QwenLogger {
}
// session events
logStartSessionEvent(event: StartSessionEvent): void {
async logStartSessionEvent(event: StartSessionEvent): Promise<void> {
// Flush all pending events with the old session ID first.
// If flush fails, discard the pending events to avoid mixing sessions.
await this.flushToRum().catch((error: unknown) => {
if (this.config?.getDebugMode()) {
console.debug(
'Error flushing pending events before session start:',
error,
);
}
});
// Clear any remaining events (discard if flush failed)
this.events.clear();
// Now set the new session ID
this.sessionId = event.session_id;
const applicationEvent = this.createViewEvent('session', 'session_start', {
properties: {
model: event.model,
@@ -852,14 +861,6 @@ export class QwenLogger {
}
}
shutdown() {
if (this.isShutdown) return;
this.isShutdown = true;
const event = new EndSessionEvent(this.config);
this.logEndSessionEvent(event);
}
private requeueFailedEvents(eventsToSend: RumEvent[]): void {
// Add the events back to the front of the queue to be retried, but limit retry queue size
const eventsToRetry = eventsToSend.slice(-MAX_RETRY_EVENTS); // Keep only the most recent events

View File

@@ -24,7 +24,6 @@ describe('telemetry', () => {
vi.resetAllMocks();
mockConfig = new Config({
sessionId: 'test-session-id',
model: 'test-model',
targetDir: '/test/dir',
debugMode: false,

View File

@@ -17,7 +17,6 @@ import {
} from './tool-call-decision.js';
import type { FileOperation } from './metrics.js';
export { ToolCallDecision };
import type { ToolRegistry } from '../tools/tool-registry.js';
import type { OutputFormat } from '../output/types.js';
export interface BaseTelemetryEvent {
@@ -31,6 +30,7 @@ type CommonFields = keyof BaseTelemetryEvent;
export class StartSessionEvent implements BaseTelemetryEvent {
'event.name': 'cli_config';
'event.timestamp': string;
session_id: string;
model: string;
embedding_model: string;
sandbox_enabled: boolean;
@@ -48,9 +48,10 @@ export class StartSessionEvent implements BaseTelemetryEvent {
mcp_tools?: string;
output_format: OutputFormat;
constructor(config: Config, toolRegistry?: ToolRegistry) {
constructor(config: Config) {
const generatorConfig = config.getContentGeneratorConfig();
const mcpServers = config.getMcpServers();
const toolRegistry = config.getToolRegistry();
let useGemini = false;
let useVertex = false;
@@ -60,6 +61,7 @@ export class StartSessionEvent implements BaseTelemetryEvent {
}
this['event.name'] = 'cli_config';
this.session_id = config.getSessionId();
this.model = config.getModel();
this.embedding_model = config.getEmbeddingModel();
this.sandbox_enabled =

View File

@@ -152,6 +152,18 @@ export class UiTelemetryService extends EventEmitter {
});
}
/**
* Resets metrics to the initial state (used when resuming a session).
*/
reset(): void {
this.#metrics = createInitialMetrics();
this.#lastPromptTokenCount = 0;
this.emit('update', {
metrics: this.#metrics,
lastPromptTokenCount: this.#lastPromptTokenCount,
});
}
private getOrCreateModelMetrics(modelName: string): ModelMetrics {
if (!this.#metrics.models[modelName]) {
this.#metrics.models[modelName] = createInitialModelMetrics();

View File

@@ -13,7 +13,6 @@ import { Config } from '../config/config.js';
export const DEFAULT_CONFIG_PARAMETERS: ConfigParameters = {
usageStatisticsEnabled: true,
debugMode: false,
sessionId: 'test-session-id',
proxy: undefined,
model: 'qwen-9001-super-duper',
targetDir: '/',

View File

@@ -205,9 +205,7 @@ describe('ExitPlanModeTool', () => {
};
const invocation = tool.build(params);
expect(invocation.getDescription()).toBe(
'Present implementation plan for user approval',
);
expect(invocation.getDescription()).toBe('Plan:');
});
it('should return empty tool locations', () => {

View File

@@ -60,7 +60,7 @@ class ExitPlanModeToolInvocation extends BaseToolInvocation<
}
getDescription(): string {
return 'Present implementation plan for user approval';
return 'Plan:';
}
override async shouldConfirmExecute(

View File

@@ -29,10 +29,6 @@ vi.mock(import('node:fs/promises'), async (importOriginal) => {
};
});
vi.mock('fs', () => ({
mkdirSync: vi.fn(),
}));
vi.mock('os');
const MEMORY_SECTION_HEADER = '## Qwen Added Memories';

View File

@@ -144,30 +144,6 @@ describe('ReadFileTool', () => {
).toBe(path.join('sub', 'dir', 'file.txt'));
});
it('should return shortened path when file path is deep', () => {
const deepPath = path.join(
tempRootDir,
'very',
'deep',
'directory',
'structure',
'that',
'exceeds',
'the',
'normal',
'limit',
'file.txt',
);
const params: ReadFileToolParams = { absolute_path: deepPath };
const invocation = tool.build(params);
expect(typeof invocation).not.toBe('string');
const desc = (
invocation as ToolInvocation<ReadFileToolParams, ToolResult>
).getDescription();
expect(desc).toContain('...');
expect(desc).toContain('file.txt');
});
it('should handle non-normalized file paths correctly', () => {
const subDir = path.join(tempRootDir, 'sub', 'dir');
const params: ReadFileToolParams = {

View File

@@ -57,7 +57,18 @@ class ReadFileToolInvocation extends BaseToolInvocation<
this.params.absolute_path,
this.config.getTargetDir(),
);
return shortenPath(relativePath);
const shortPath = shortenPath(relativePath);
const { offset, limit } = this.params;
if (offset !== undefined && limit !== undefined) {
return `${shortPath} (lines ${offset + 1}-${offset + limit})`;
} else if (offset !== undefined) {
return `${shortPath} (from line ${offset + 1})`;
} else if (limit !== undefined) {
return `${shortPath} (first ${limit} lines)`;
}
return shortPath;
}
override toolLocations(): ToolLocation[] {

View File

@@ -104,7 +104,6 @@ const baseConfigParams: ConfigParameters = {
userMemory: '',
geminiMdFileCount: 0,
approvalMode: ApprovalMode.DEFAULT,
sessionId: 'test-session-id',
};
describe('ToolRegistry', () => {

View File

@@ -32,7 +32,6 @@ describe('Retry Utility Fallback Integration', () => {
isDirectory: () => true,
} as fs.Stats);
config = new Config({
sessionId: 'test-session',
targetDir: '/test',
debugMode: false,
cwd: '/test',

View File

@@ -6,6 +6,7 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { execSync } from 'node:child_process';
/**
* Checks if a directory is within a git repository
@@ -71,3 +72,19 @@ export function findGitRoot(directory: string): string | null {
return null;
}
}
/**
 * Gets the current git branch for a working directory, or undefined when the
 * directory is not inside a git repository (or git is unavailable).
 */
export const getGitBranch = (cwd: string): string | undefined => {
  try {
    const output = execSync('git rev-parse --abbrev-ref HEAD', {
      cwd,
      encoding: 'utf8',
      stdio: ['pipe', 'pipe', 'pipe'],
    });
    const name = output.trim();
    return name.length > 0 ? name : undefined;
  } catch {
    return undefined;
  }
};

View File

@@ -0,0 +1,193 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
/**
* Efficient JSONL (JSON Lines) file utilities.
*
* Reading operations:
* - readLines() - Reads the first N lines efficiently using buffered I/O
* - read() - Reads entire file into memory as array
*
* Writing operations:
* - writeLine() - Async append with mutex-based concurrency control
* - writeLineSync() - Sync append (use in non-async contexts)
* - write() - Overwrites entire file with array of objects
*
* Utility operations:
* - countLines() - Counts non-empty lines
* - exists() - Checks if file exists and is non-empty
*/
import fs from 'node:fs';
import path from 'node:path';
import readline from 'node:readline';
import { Mutex } from 'async-mutex';
/**
 * A map of file paths to mutexes for preventing concurrent writes.
 *
 * NOTE(review): entries are never evicted, so this grows with the number of
 * distinct file paths written over the process lifetime — fine for a CLI,
 * but worth confirming for long-lived processes.
 */
const fileLocks = new Map<string, Mutex>();
/**
 * Gets or creates a mutex for a specific file path.
 */
function getFileLock(filePath: string): Mutex {
  // Single-lookup get-or-create; avoids the has/get/set triple and the
  // non-null assertion the previous version needed.
  let lock = fileLocks.get(filePath);
  if (lock === undefined) {
    lock = new Mutex();
    fileLocks.set(filePath, lock);
  }
  return lock;
}
/**
 * Reads the first N lines from a JSONL file efficiently.
 * Returns an array of parsed objects (empty lines are skipped).
 *
 * Returns [] for a missing file; any other error is logged and also yields [].
 * The read stream is explicitly destroyed once enough lines have been read:
 * breaking out of the readline async iterator closes the interface but does
 * not close the underlying file, so without cleanup the descriptor would stay
 * open until garbage collection.
 */
export async function readLines<T = unknown>(
  filePath: string,
  count: number,
): Promise<T[]> {
  let fileStream: fs.ReadStream | undefined;
  let rl: readline.Interface | undefined;
  try {
    fileStream = fs.createReadStream(filePath);
    rl = readline.createInterface({
      input: fileStream,
      crlfDelay: Infinity,
    });
    const results: T[] = [];
    for await (const line of rl) {
      if (results.length >= count) break;
      const trimmed = line.trim();
      if (trimmed.length > 0) {
        results.push(JSON.parse(trimmed) as T);
      }
    }
    return results;
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
      console.error(
        `Error reading first ${count} lines from ${filePath}:`,
        error,
      );
    }
    return [];
  } finally {
    // Release the file descriptor even when we stopped before EOF.
    rl?.close();
    fileStream?.destroy();
  }
}
/**
 * Reads all lines from a JSONL file and returns the parsed objects.
 * Empty lines are skipped.
 *
 * Returns [] for a missing file; any other failure (including a malformed
 * line) is logged and also yields [].
 */
export async function read<T = unknown>(filePath: string): Promise<T[]> {
  const parsed: T[] = [];
  try {
    const stream = fs.createReadStream(filePath);
    const lineReader = readline.createInterface({
      input: stream,
      crlfDelay: Infinity,
    });
    for await (const rawLine of lineReader) {
      const content = rawLine.trim();
      if (!content) continue;
      parsed.push(JSON.parse(content) as T);
    }
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
      console.error(`Error reading ${filePath}:`, error);
    }
    return [];
  }
  return parsed;
}
/**
 * Appends a line to a JSONL file with concurrency control.
 * A per-file mutex ensures only one write happens at a time per file path.
 */
export async function writeLine(
  filePath: string,
  data: unknown,
): Promise<void> {
  const lock = getFileLock(filePath);
  await lock.runExclusive(() => {
    const line = `${JSON.stringify(data)}\n`;
    // mkdirSync with recursive: true is a no-op when the directory already
    // exists, so no existsSync pre-check is needed (avoids a TOCTOU race).
    fs.mkdirSync(path.dirname(filePath), { recursive: true });
    fs.appendFileSync(filePath, line, 'utf8');
  });
}
/**
 * Synchronous append of a single JSONL line, for use in non-async contexts.
 *
 * Unlike writeLine, this performs NO locking (the previous doc claimed a
 * "flag-based locking mechanism" that did not exist); callers must avoid
 * interleaving concurrent async writes to the same file with this function.
 */
export function writeLineSync(filePath: string, data: unknown): void {
  // mkdirSync with recursive: true is idempotent, so no existsSync pre-check
  // is needed (avoids a TOCTOU race).
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  fs.appendFileSync(filePath, `${JSON.stringify(data)}\n`, 'utf8');
}
/**
 * Overwrites a JSONL file with an array of objects, one JSON document per line.
 *
 * An empty array now produces an empty file. Previously a lone "\n" was
 * written, which read back as zero records yet left a non-empty file,
 * confusing size-based emptiness checks such as exists().
 */
export function write(filePath: string, data: unknown[]): void {
  // mkdirSync with recursive: true is idempotent; no existsSync pre-check.
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  const content =
    data.length === 0
      ? ''
      : `${data.map((item) => JSON.stringify(item)).join('\n')}\n`;
  fs.writeFileSync(filePath, content, 'utf8');
}
/**
 * Counts the number of non-empty lines in a JSONL file.
 *
 * Returns 0 for a missing file; any other read error is logged and also
 * counted as 0.
 */
export async function countLines(filePath: string): Promise<number> {
  let total = 0;
  try {
    const stream = fs.createReadStream(filePath);
    const lineReader = readline.createInterface({
      input: stream,
      crlfDelay: Infinity,
    });
    for await (const rawLine of lineReader) {
      if (rawLine.trim().length > 0) {
        total += 1;
      }
    }
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
      console.error(`Error counting lines in ${filePath}:`, error);
    }
    return 0;
  }
  return total;
}
/**
 * Checks if a JSONL file exists and is not empty.
 * Returns false for directories, missing paths, and zero-byte files.
 */
export function exists(filePath: string): boolean {
  let stats: fs.Stats;
  try {
    stats = fs.statSync(filePath);
  } catch {
    return false;
  }
  return stats.isFile() && stats.size > 0;
}

View File

@@ -32,6 +32,7 @@ vi.mock('node:fs', () => {
});
}),
existsSync: vi.fn((path: string) => mockFileSystem.has(path)),
appendFileSync: vi.fn(),
};
return {

View File

@@ -38,7 +38,7 @@ export function tildeifyPath(path: string): string {
* Shortens a path string if it exceeds maxLen, prioritizing the start and end segments.
* Example: /path/to/a/very/long/file.txt -> /path/.../long/file.txt
*/
export function shortenPath(filePath: string, maxLen: number = 35): string {
export function shortenPath(filePath: string, maxLen: number = 80): string {
if (filePath.length <= maxLen) {
return filePath;
}

View File

@@ -1,9 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { randomUUID } from 'node:crypto';
export const sessionId = randomUUID();