🎯 Feature: Customizable Model Training and Tool Output Management (#981)

This commit is contained in:
tanzhenxin
2025-11-07 17:28:16 +08:00
committed by GitHub
parent 21fba6eb89
commit c3d427730e
32 changed files with 795 additions and 607 deletions

View File

@@ -791,6 +791,7 @@ export async function loadCliConfig(
skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck,
enablePromptCompletion: settings.general?.enablePromptCompletion ?? false,
skipLoopDetection: settings.model?.skipLoopDetection ?? false,
skipStartupContext: settings.model?.skipStartupContext ?? false,
vlmSwitchMode,
truncateToolOutputThreshold: settings.tools?.truncateToolOutputThreshold,
truncateToolOutputLines: settings.tools?.truncateToolOutputLines,

View File

@@ -131,6 +131,7 @@ const MIGRATION_MAP: Record<string, string> = {
sessionTokenLimit: 'model.sessionTokenLimit',
contentGenerator: 'model.generationConfig',
skipLoopDetection: 'model.skipLoopDetection',
skipStartupContext: 'model.skipStartupContext',
enableOpenAILogging: 'model.enableOpenAILogging',
tavilyApiKey: 'advanced.tavilyApiKey',
vlmSwitchMode: 'experimental.vlmSwitchMode',

View File

@@ -549,6 +549,16 @@ const SETTINGS_SCHEMA = {
description: 'Disable all loop detection checks (streaming and LLM).',
showInDialog: true,
},
skipStartupContext: {
type: 'boolean',
label: 'Skip Startup Context',
category: 'Model',
requiresRestart: true,
default: false,
description:
'Avoid sending the workspace startup context at the beginning of each session.',
showInDialog: true,
},
enableOpenAILogging: {
type: 'boolean',
label: 'Enable OpenAI Logging',
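For orientation, the new keys nest under `model` and `tools` in the user settings (per the migration map and schema hunks above); a hedged TypeScript sketch of the resulting shape, using the documented defaults:

// Sketch of the settings shape; keys come from the schema and migration map above,
// values are the documented defaults. Not copied verbatim from this diff.
const settings = {
  model: {
    skipStartupContext: false, // set true to skip the workspace startup context (requires restart)
    skipLoopDetection: false,
  },
  tools: {
    truncateToolOutputThreshold: 25_000, // DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD (characters)
    truncateToolOutputLines: 1_000,      // DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES
  },
};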

View File

@@ -80,6 +80,8 @@ describe('handleAtCommand', () => {
getReadManyFilesExcludes: () => [],
}),
getUsageStatisticsEnabled: () => false,
getTruncateToolOutputThreshold: () => 2500,
getTruncateToolOutputLines: () => 500,
} as unknown as Config;
const registry = new ToolRegistry(mockConfig);

View File

@@ -738,13 +738,13 @@ describe('Server Config (config.ts)', () => {
it('should return the calculated threshold when it is smaller than the default', () => {
const config = new Config(baseParams);
vi.mocked(tokenLimit).mockReturnValue(32000);
vi.mocked(tokenLimit).mockReturnValue(8000);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
1000,
2000,
);
// 4 * (32000 - 1000) = 4 * 31000 = 124000
// default is 4_000_000
expect(config.getTruncateToolOutputThreshold()).toBe(124000);
// 4 * (8000 - 2000) = 4 * 6000 = 24000
// default is 25_000
expect(config.getTruncateToolOutputThreshold()).toBe(24000);
});
it('should return the default threshold when the calculated value is larger', () => {
@@ -754,8 +754,8 @@ describe('Server Config (config.ts)', () => {
500_000,
);
// 4 * (2_000_000 - 500_000) = 4 * 1_500_000 = 6_000_000
// default is 4_000_000
expect(config.getTruncateToolOutputThreshold()).toBe(4_000_000);
// default is 25_000
expect(config.getTruncateToolOutputThreshold()).toBe(25_000);
});
it('should use a custom truncateToolOutputThreshold if provided', () => {

View File

@@ -161,7 +161,7 @@ export interface ExtensionInstallMetadata {
autoUpdate?: boolean;
}
export const DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD = 4_000_000;
export const DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD = 25_000;
export const DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES = 1000;
export class MCPServerConfig {
@@ -288,6 +288,7 @@ export interface ConfigParameters {
eventEmitter?: EventEmitter;
useSmartEdit?: boolean;
output?: OutputSettings;
skipStartupContext?: boolean;
}
export class Config {
@@ -377,6 +378,7 @@ export class Config {
private readonly extensionManagement: boolean = true;
private readonly enablePromptCompletion: boolean = false;
private readonly skipLoopDetection: boolean;
private readonly skipStartupContext: boolean;
private readonly vlmSwitchMode: string | undefined;
private initialized: boolean = false;
readonly storage: Storage;
@@ -469,6 +471,7 @@ export class Config {
this.interactive = params.interactive ?? false;
this.trustedFolder = params.trustedFolder;
this.skipLoopDetection = params.skipLoopDetection ?? false;
this.skipStartupContext = params.skipStartupContext ?? false;
// Web search
this.webSearch = params.webSearch;
@@ -1041,6 +1044,10 @@ export class Config {
return this.skipLoopDetection;
}
getSkipStartupContext(): boolean {
return this.skipStartupContext;
}
getVlmSwitchMode(): string | undefined {
return this.vlmSwitchMode;
}
@@ -1050,6 +1057,13 @@ export class Config {
}
getTruncateToolOutputThreshold(): number {
if (
!this.enableToolOutputTruncation ||
this.truncateToolOutputThreshold <= 0
) {
return Number.POSITIVE_INFINITY;
}
return Math.min(
// Estimate remaining context window in characters (1 token ~= 4 chars).
4 *
@@ -1060,6 +1074,10 @@ export class Config {
}
getTruncateToolOutputLines(): number {
if (!this.enableToolOutputTruncation || this.truncateToolOutputLines <= 0) {
return Number.POSITIVE_INFINITY;
}
return this.truncateToolOutputLines;
}
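Pieced together from the test expectations and the partially shown hunk above, the effective threshold behaves roughly as in this sketch (the exact Math.min body is inferred, not copied from the diff):

// Inferred behaviour of getTruncateToolOutputThreshold():
// min(configured threshold, ~4 characters per token of remaining context window).
function effectiveTruncateThreshold(
  configuredThreshold: number,  // DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD = 25_000 unless overridden
  modelTokenLimit: number,      // tokenLimit(model)
  lastPromptTokenCount: number, // uiTelemetryService.getLastPromptTokenCount()
  truncationEnabled: boolean,
): number {
  if (!truncationEnabled || configuredThreshold <= 0) {
    return Number.POSITIVE_INFINITY; // truncation disabled
  }
  return Math.min(configuredThreshold, 4 * (modelTokenLimit - lastPromptTokenCount));
}
// tokenLimit 8_000, last prompt 2_000      -> min(25_000, 24_000)    = 24_000
// tokenLimit 2_000_000, last prompt 500_000 -> min(25_000, 6_000_000) = 25_000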

View File

@@ -1540,6 +1540,268 @@ describe('CoreToolScheduler request queueing', () => {
});
});
describe('CoreToolScheduler Sequential Execution', () => {
it('should execute tool calls in a batch sequentially', async () => {
// Arrange
let firstCallFinished = false;
const executeFn = vi
.fn()
.mockImplementation(async (args: { call: number }) => {
if (args.call === 1) {
// First call, wait for a bit to simulate work
await new Promise((resolve) => setTimeout(resolve, 50));
firstCallFinished = true;
return { llmContent: 'First call done' };
}
if (args.call === 2) {
// Second call, should only happen after the first is finished
if (!firstCallFinished) {
throw new Error(
'Second tool call started before the first one finished!',
);
}
return { llmContent: 'Second call done' };
}
return { llmContent: 'default' };
});
const mockTool = new MockTool({ name: 'mockTool', execute: executeFn });
const declarativeTool = mockTool;
const mockToolRegistry = {
getTool: () => declarativeTool,
getToolByName: () => declarativeTool,
getFunctionDeclarations: () => [],
tools: new Map(),
discovery: {},
registerTool: () => {},
getToolByDisplayName: () => declarativeTool,
getTools: () => [],
discoverTools: async () => {},
getAllTools: () => [],
getToolsByServer: () => [],
} as unknown as ToolRegistry;
const onAllToolCallsComplete = vi.fn();
const onToolCallsUpdate = vi.fn();
const mockConfig = {
getSessionId: () => 'test-session-id',
getUsageStatisticsEnabled: () => true,
getDebugMode: () => false,
getApprovalMode: () => ApprovalMode.YOLO, // Use YOLO to avoid confirmation prompts
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
terminalHeight: 30,
}),
storage: {
getProjectTempDir: () => '/tmp',
},
getToolRegistry: () => mockToolRegistry,
getTruncateToolOutputThreshold: () =>
DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD,
getTruncateToolOutputLines: () => DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES,
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
config: mockConfig,
onAllToolCallsComplete,
onToolCallsUpdate,
getPreferredEditor: () => 'vscode',
onEditorClose: vi.fn(),
});
const abortController = new AbortController();
const requests = [
{
callId: '1',
name: 'mockTool',
args: { call: 1 },
isClientInitiated: false,
prompt_id: 'prompt-1',
},
{
callId: '2',
name: 'mockTool',
args: { call: 2 },
isClientInitiated: false,
prompt_id: 'prompt-1',
},
];
// Act
await scheduler.schedule(requests, abortController.signal);
// Assert
await vi.waitFor(() => {
expect(onAllToolCallsComplete).toHaveBeenCalled();
});
// Check that execute was called twice
expect(executeFn).toHaveBeenCalledTimes(2);
// Check the order of calls
const calls = executeFn.mock.calls;
expect(calls[0][0]).toEqual({ call: 1 });
expect(calls[1][0]).toEqual({ call: 2 });
// The onAllToolCallsComplete should be called once with both results
const completedCalls = onAllToolCallsComplete.mock
.calls[0][0] as ToolCall[];
expect(completedCalls).toHaveLength(2);
expect(completedCalls[0].status).toBe('success');
expect(completedCalls[1].status).toBe('success');
});
it('should cancel subsequent tools when the signal is aborted.', async () => {
// Arrange
const abortController = new AbortController();
let secondCallStarted = false;
const executeFn = vi
.fn()
.mockImplementation(async (args: { call: number }) => {
if (args.call === 1) {
return { llmContent: 'First call done' };
}
if (args.call === 2) {
secondCallStarted = true;
// This call will be cancelled while it's "running".
await new Promise((resolve) => setTimeout(resolve, 100));
// It should not return a value because it will be cancelled.
return { llmContent: 'Second call should not complete' };
}
if (args.call === 3) {
return { llmContent: 'Third call done' };
}
return { llmContent: 'default' };
});
const mockTool = new MockTool({ name: 'mockTool', execute: executeFn });
const declarativeTool = mockTool;
const mockToolRegistry = {
getTool: () => declarativeTool,
getToolByName: () => declarativeTool,
getFunctionDeclarations: () => [],
tools: new Map(),
discovery: {},
registerTool: () => {},
getToolByDisplayName: () => declarativeTool,
getTools: () => [],
discoverTools: async () => {},
getAllTools: () => [],
getToolsByServer: () => [],
} as unknown as ToolRegistry;
const onAllToolCallsComplete = vi.fn();
const onToolCallsUpdate = vi.fn();
const mockConfig = {
getSessionId: () => 'test-session-id',
getUsageStatisticsEnabled: () => true,
getDebugMode: () => false,
getApprovalMode: () => ApprovalMode.YOLO,
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
terminalHeight: 30,
}),
storage: {
getProjectTempDir: () => '/tmp',
},
getToolRegistry: () => mockToolRegistry,
getTruncateToolOutputThreshold: () =>
DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD,
getTruncateToolOutputLines: () => DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES,
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null,
} as unknown as Config;
const scheduler = new CoreToolScheduler({
config: mockConfig,
onAllToolCallsComplete,
onToolCallsUpdate,
getPreferredEditor: () => 'vscode',
onEditorClose: vi.fn(),
});
const requests = [
{
callId: '1',
name: 'mockTool',
args: { call: 1 },
isClientInitiated: false,
prompt_id: 'prompt-1',
},
{
callId: '2',
name: 'mockTool',
args: { call: 2 },
isClientInitiated: false,
prompt_id: 'prompt-1',
},
{
callId: '3',
name: 'mockTool',
args: { call: 3 },
isClientInitiated: false,
prompt_id: 'prompt-1',
},
];
// Act
const schedulePromise = scheduler.schedule(
requests,
abortController.signal,
);
// Wait for the second call to start, then abort.
await vi.waitFor(() => {
expect(secondCallStarted).toBe(true);
});
abortController.abort();
await schedulePromise;
// Assert
await vi.waitFor(() => {
expect(onAllToolCallsComplete).toHaveBeenCalled();
});
// Check that execute was called for all three tools initially
expect(executeFn).toHaveBeenCalledTimes(3);
expect(executeFn).toHaveBeenCalledWith({ call: 1 });
expect(executeFn).toHaveBeenCalledWith({ call: 2 });
expect(executeFn).toHaveBeenCalledWith({ call: 3 });
const completedCalls = onAllToolCallsComplete.mock
.calls[0][0] as ToolCall[];
expect(completedCalls).toHaveLength(3);
const call1 = completedCalls.find((c) => c.request.callId === '1');
const call2 = completedCalls.find((c) => c.request.callId === '2');
const call3 = completedCalls.find((c) => c.request.callId === '3');
expect(call1?.status).toBe('success');
expect(call2?.status).toBe('cancelled');
expect(call3?.status).toBe('cancelled');
});
});
describe('truncateAndSaveToFile', () => {
const mockWriteFile = vi.mocked(fs.writeFile);
const THRESHOLD = 40_000;
@@ -1719,14 +1981,14 @@ describe('truncateAndSaveToFile', () => {
);
expect(result.content).toContain(
'read_file tool with the absolute file path above',
'Tool output was too large and has been truncated',
);
expect(result.content).toContain('read_file tool with offset=0, limit=100');
expect(result.content).toContain('The full output has been saved to:');
expect(result.content).toContain(
'read_file tool with offset=N to skip N lines',
'To read the complete output, use the read_file tool with the absolute file path above',
);
expect(result.content).toContain(
'read_file tool with limit=M to read only M lines',
'The truncated output below shows the beginning and end of the content',
);
});

View File

@@ -299,10 +299,7 @@ export async function truncateAndSaveToFile(
return {
content: `Tool output was too large and has been truncated.
The full output has been saved to: ${outputFile}
To read the complete output, use the ${ReadFileTool.Name} tool with the absolute file path above. For large files, you can use the offset and limit parameters to read specific sections:
- ${ReadFileTool.Name} tool with offset=0, limit=100 to see the first 100 lines
- ${ReadFileTool.Name} tool with offset=N to skip N lines from the beginning
- ${ReadFileTool.Name} tool with limit=M to read only M lines at a time
To read the complete output, use the ${ReadFileTool.Name} tool with the absolute file path above.
The truncated output below shows the beginning and end of the content. The marker '... [CONTENT TRUNCATED] ...' indicates where content was removed.
This allows you to efficiently examine different parts of the output without loading the entire file.
Truncated part of the output:
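For intuition only, a sketch of how a head/tail preview carrying the '... [CONTENT TRUNCATED] ...' marker could be assembled; the actual split performed by truncateAndSaveToFile is not shown in this hunk, so the half-and-half choice here is an assumption:

// Hypothetical head/tail preview; not the real truncateAndSaveToFile implementation.
function headTailPreview(content: string, maxLines: number): string {
  const lines = content.split('\n');
  if (lines.length <= maxLines) return content;
  const headCount = Math.ceil(maxLines / 2);
  const tailCount = Math.floor(maxLines / 2);
  const head = lines.slice(0, headCount);
  const tail = tailCount > 0 ? lines.slice(-tailCount) : [];
  return [...head, '... [CONTENT TRUNCATED] ...', ...tail].join('\n');
}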
@@ -846,7 +843,7 @@ export class CoreToolScheduler {
);
}
}
this.attemptExecutionOfScheduledCalls(signal);
await this.attemptExecutionOfScheduledCalls(signal);
void this.checkAndNotifyCompletion();
} finally {
this.isScheduling = false;
@@ -921,7 +918,7 @@ export class CoreToolScheduler {
}
this.setStatusInternal(callId, 'scheduled');
}
this.attemptExecutionOfScheduledCalls(signal);
await this.attemptExecutionOfScheduledCalls(signal);
}
/**
@@ -967,7 +964,9 @@ export class CoreToolScheduler {
});
}
private attemptExecutionOfScheduledCalls(signal: AbortSignal): void {
private async attemptExecutionOfScheduledCalls(
signal: AbortSignal,
): Promise<void> {
const allCallsFinalOrScheduled = this.toolCalls.every(
(call) =>
call.status === 'scheduled' ||
@@ -981,8 +980,8 @@ export class CoreToolScheduler {
(call) => call.status === 'scheduled',
);
callsToExecute.forEach((toolCall) => {
if (toolCall.status !== 'scheduled') return;
for (const toolCall of callsToExecute) {
if (toolCall.status !== 'scheduled') continue;
const scheduledCall = toolCall;
const { callId, name: toolName } = scheduledCall.request;
@@ -1033,107 +1032,106 @@ export class CoreToolScheduler {
);
}
promise
.then(async (toolResult: ToolResult) => {
if (signal.aborted) {
this.setStatusInternal(
callId,
'cancelled',
'User cancelled tool execution.',
);
return;
}
try {
const toolResult: ToolResult = await promise;
if (signal.aborted) {
this.setStatusInternal(
callId,
'cancelled',
'User cancelled tool execution.',
);
continue;
}
if (toolResult.error === undefined) {
let content = toolResult.llmContent;
let outputFile: string | undefined = undefined;
const contentLength =
typeof content === 'string' ? content.length : undefined;
if (
typeof content === 'string' &&
toolName === ShellTool.Name &&
this.config.getEnableToolOutputTruncation() &&
this.config.getTruncateToolOutputThreshold() > 0 &&
this.config.getTruncateToolOutputLines() > 0
) {
const originalContentLength = content.length;
const threshold = this.config.getTruncateToolOutputThreshold();
const lines = this.config.getTruncateToolOutputLines();
const truncatedResult = await truncateAndSaveToFile(
content,
callId,
this.config.storage.getProjectTempDir(),
threshold,
lines,
);
content = truncatedResult.content;
outputFile = truncatedResult.outputFile;
if (outputFile) {
logToolOutputTruncated(
this.config,
new ToolOutputTruncatedEvent(
scheduledCall.request.prompt_id,
{
toolName,
originalContentLength,
truncatedContentLength: content.length,
threshold,
lines,
},
),
);
}
}
const response = convertToFunctionResponse(
toolName,
callId,
if (toolResult.error === undefined) {
let content = toolResult.llmContent;
let outputFile: string | undefined = undefined;
const contentLength =
typeof content === 'string' ? content.length : undefined;
if (
typeof content === 'string' &&
toolName === ShellTool.Name &&
this.config.getEnableToolOutputTruncation() &&
this.config.getTruncateToolOutputThreshold() > 0 &&
this.config.getTruncateToolOutputLines() > 0
) {
const originalContentLength = content.length;
const threshold = this.config.getTruncateToolOutputThreshold();
const lines = this.config.getTruncateToolOutputLines();
const truncatedResult = await truncateAndSaveToFile(
content,
);
const successResponse: ToolCallResponseInfo = {
callId,
responseParts: response,
resultDisplay: toolResult.returnDisplay,
error: undefined,
errorType: undefined,
outputFile,
contentLength,
};
this.setStatusInternal(callId, 'success', successResponse);
} else {
// It is a failure
const error = new Error(toolResult.error.message);
const errorResponse = createErrorResponse(
this.config.storage.getProjectTempDir(),
threshold,
lines,
);
content = truncatedResult.content;
outputFile = truncatedResult.outputFile;
if (outputFile) {
logToolOutputTruncated(
this.config,
new ToolOutputTruncatedEvent(
scheduledCall.request.prompt_id,
{
toolName,
originalContentLength,
truncatedContentLength: content.length,
threshold,
lines,
},
),
);
}
}
const response = convertToFunctionResponse(
toolName,
callId,
content,
);
const successResponse: ToolCallResponseInfo = {
callId,
responseParts: response,
resultDisplay: toolResult.returnDisplay,
error: undefined,
errorType: undefined,
outputFile,
contentLength,
};
this.setStatusInternal(callId, 'success', successResponse);
} else {
// It is a failure
const error = new Error(toolResult.error.message);
const errorResponse = createErrorResponse(
scheduledCall.request,
error,
toolResult.error.type,
);
this.setStatusInternal(callId, 'error', errorResponse);
}
} catch (executionError: unknown) {
if (signal.aborted) {
this.setStatusInternal(
callId,
'cancelled',
'User cancelled tool execution.',
);
} else {
this.setStatusInternal(
callId,
'error',
createErrorResponse(
scheduledCall.request,
error,
toolResult.error.type,
);
this.setStatusInternal(callId, 'error', errorResponse);
}
})
.catch((executionError: Error) => {
if (signal.aborted) {
this.setStatusInternal(
callId,
'cancelled',
'User cancelled tool execution.',
);
} else {
this.setStatusInternal(
callId,
'error',
createErrorResponse(
scheduledCall.request,
executionError instanceof Error
? executionError
: new Error(String(executionError)),
ToolErrorType.UNHANDLED_EXCEPTION,
),
);
}
});
});
executionError instanceof Error
? executionError
: new Error(String(executionError)),
ToolErrorType.UNHANDLED_EXCEPTION,
),
);
}
}
}
}
}
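The substance of this refactor is the move from fire-and-forget promise chains (forEach + .then) to an awaited for...of loop: each tool call finishes, and the abort signal is re-checked, before the next one starts. A stripped-down sketch of the pattern (helper names are placeholders, not the real scheduler API):

// Minimal sketch of the new control flow; types and helpers below are illustrative.
type ScheduledCall = { callId: string; args: unknown };
declare function invoke(call: ScheduledCall): Promise<unknown>;
declare function markSuccess(call: ScheduledCall, result: unknown): void;
declare function markCancelled(call: ScheduledCall): void;
declare function markError(call: ScheduledCall, err: unknown): void;

async function runSequentially(calls: ScheduledCall[], signal: AbortSignal): Promise<void> {
  for (const call of calls) {
    try {
      const result = await invoke(call); // the next call does not start until this one settles
      if (signal.aborted) {
        markCancelled(call);
        continue;
      }
      markSuccess(call, result);
    } catch (err) {
      if (signal.aborted) markCancelled(call);
      else markError(call, err);
    }
  }
}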

View File

@@ -23,8 +23,6 @@ import { setSimulate429 } from '../utils/testUtils.js';
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
import { AuthType } from './contentGenerator.js';
import { type RetryOptions } from '../utils/retry.js';
import type { ToolRegistry } from '../tools/tool-registry.js';
import { Kind } from '../tools/tools.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
// Mock fs module to prevent actual file system operations during tests
@@ -1305,259 +1303,6 @@ describe('GeminiChat', () => {
expect(turn4.parts[0].text).toBe('second response');
});
describe('stopBeforeSecondMutator', () => {
beforeEach(() => {
// Common setup for these tests: mock the tool registry.
const mockToolRegistry = {
getTool: vi.fn((toolName: string) => {
if (toolName === 'edit') {
return { kind: Kind.Edit };
}
return { kind: Kind.Other };
}),
} as unknown as ToolRegistry;
vi.mocked(mockConfig.getToolRegistry).mockReturnValue(mockToolRegistry);
});
it('should stop streaming before a second mutator tool call', async () => {
const responses = [
{
candidates: [
{ content: { role: 'model', parts: [{ text: 'First part. ' }] } },
],
},
{
candidates: [
{
content: {
role: 'model',
parts: [{ functionCall: { name: 'edit', args: {} } }],
},
},
],
},
{
candidates: [
{
content: {
role: 'model',
parts: [{ functionCall: { name: 'fetch', args: {} } }],
},
},
],
},
// This chunk contains the second mutator and should be clipped.
{
candidates: [
{
content: {
role: 'model',
parts: [
{ functionCall: { name: 'edit', args: {} } },
{ text: 'some trailing text' },
],
},
},
],
},
// This chunk should never be reached.
{
candidates: [
{
content: {
role: 'model',
parts: [{ text: 'This should not appear.' }],
},
},
],
},
] as unknown as GenerateContentResponse[];
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
(async function* () {
for (const response of responses) {
yield response;
}
})(),
);
const stream = await chat.sendMessageStream(
'test-model',
{ message: 'test message' },
'prompt-id-mutator-test',
);
for await (const _ of stream) {
// Consume the stream to trigger history recording.
}
const history = chat.getHistory();
expect(history.length).toBe(2);
const modelTurn = history[1]!;
expect(modelTurn.role).toBe('model');
expect(modelTurn?.parts?.length).toBe(3);
expect(modelTurn?.parts![0]!.text).toBe('First part. ');
expect(modelTurn.parts![1]!.functionCall?.name).toBe('edit');
expect(modelTurn.parts![2]!.functionCall?.name).toBe('fetch');
});
it('should not stop streaming if only one mutator is present', async () => {
const responses = [
{
candidates: [
{ content: { role: 'model', parts: [{ text: 'Part 1. ' }] } },
],
},
{
candidates: [
{
content: {
role: 'model',
parts: [{ functionCall: { name: 'edit', args: {} } }],
},
},
],
},
{
candidates: [
{
content: {
role: 'model',
parts: [{ text: 'Part 2.' }],
},
finishReason: 'STOP',
},
],
},
] as unknown as GenerateContentResponse[];
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
(async function* () {
for (const response of responses) {
yield response;
}
})(),
);
const stream = await chat.sendMessageStream(
'test-model',
{ message: 'test message' },
'prompt-id-one-mutator',
);
for await (const _ of stream) {
/* consume */
}
const history = chat.getHistory();
const modelTurn = history[1]!;
expect(modelTurn?.parts?.length).toBe(3);
expect(modelTurn.parts![1]!.functionCall?.name).toBe('edit');
expect(modelTurn.parts![2]!.text).toBe('Part 2.');
});
it('should clip the chunk containing the second mutator, preserving prior parts', async () => {
const responses = [
{
candidates: [
{
content: {
role: 'model',
parts: [{ functionCall: { name: 'edit', args: {} } }],
},
},
],
},
// This chunk has a valid part before the second mutator.
// The valid part should be kept, the rest of the chunk discarded.
{
candidates: [
{
content: {
role: 'model',
parts: [
{ text: 'Keep this text. ' },
{ functionCall: { name: 'edit', args: {} } },
{ text: 'Discard this text.' },
],
},
finishReason: 'STOP',
},
],
},
] as unknown as GenerateContentResponse[];
const stream = (async function* () {
for (const response of responses) {
yield response;
}
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream,
);
const resultStream = await chat.sendMessageStream(
'test-model',
{ message: 'test' },
'prompt-id-clip-chunk',
);
for await (const _ of resultStream) {
/* consume */
}
const history = chat.getHistory();
const modelTurn = history[1]!;
expect(modelTurn?.parts?.length).toBe(2);
expect(modelTurn.parts![0]!.functionCall?.name).toBe('edit');
expect(modelTurn.parts![1]!.text).toBe('Keep this text. ');
});
it('should handle two mutators in the same chunk (parallel call scenario)', async () => {
const responses = [
{
candidates: [
{
content: {
role: 'model',
parts: [
{ text: 'Some text. ' },
{ functionCall: { name: 'edit', args: {} } },
{ functionCall: { name: 'edit', args: {} } },
],
},
finishReason: 'STOP',
},
],
},
] as unknown as GenerateContentResponse[];
const stream = (async function* () {
for (const response of responses) {
yield response;
}
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream,
);
const resultStream = await chat.sendMessageStream(
'test-model',
{ message: 'test' },
'prompt-id-parallel-mutators',
);
for await (const _ of resultStream) {
/* consume */
}
const history = chat.getHistory();
const modelTurn = history[1]!;
expect(modelTurn?.parts?.length).toBe(2);
expect(modelTurn.parts![0]!.text).toBe('Some text. ');
expect(modelTurn.parts![1]!.functionCall?.name).toBe('edit');
});
});
describe('Model Resolution', () => {
const mockResponse = {
candidates: [

View File

@@ -7,16 +7,15 @@
// DISCLAIMER: This is a copied version of https://github.com/googleapis/js-genai/blob/main/src/chats.ts with the intention of working around a key bug
// where function responses are not treated as "valid" responses: https://b.corp.google.com/issues/420354090
import {
import type {
GenerateContentResponse,
type Content,
type GenerateContentConfig,
type SendMessageParameters,
type Part,
type Tool,
FinishReason,
ApiError,
Content,
GenerateContentConfig,
SendMessageParameters,
Part,
Tool,
} from '@google/genai';
import { ApiError } from '@google/genai';
import { toParts } from '../code_assist/converter.js';
import { createUserContent } from '@google/genai';
import { retryWithBackoff } from '../utils/retry.js';
@@ -25,7 +24,7 @@ import {
DEFAULT_GEMINI_FLASH_MODEL,
getEffectiveModel,
} from '../config/models.js';
import { hasCycleInSchema, MUTATOR_KINDS } from '../tools/tools.js';
import { hasCycleInSchema } from '../tools/tools.js';
import type { StructuredError } from './turn.js';
import {
logContentRetry,
@@ -511,7 +510,7 @@ export class GeminiChat {
let hasToolCall = false;
let hasFinishReason = false;
for await (const chunk of this.stopBeforeSecondMutator(streamResponse)) {
for await (const chunk of streamResponse) {
hasFinishReason =
chunk?.candidates?.some((candidate) => candidate.finishReason) ?? false;
if (isValidResponse(chunk)) {
@@ -629,64 +628,6 @@ export class GeminiChat {
});
}
}
/**
* Truncates the chunkStream right before the second function call to a
* function that mutates state. This may involve trimming parts from a chunk
* as well as omitting some chunks altogether.

*
* We do this because it improves tool call quality if the model gets
* feedback from one mutating function call before it makes the next one.
*/
private async *stopBeforeSecondMutator(
chunkStream: AsyncGenerator<GenerateContentResponse>,
): AsyncGenerator<GenerateContentResponse> {
let foundMutatorFunctionCall = false;
for await (const chunk of chunkStream) {
const candidate = chunk.candidates?.[0];
const content = candidate?.content;
if (!candidate || !content?.parts) {
yield chunk;
continue;
}
const truncatedParts: Part[] = [];
for (const part of content.parts) {
if (this.isMutatorFunctionCall(part)) {
if (foundMutatorFunctionCall) {
// This is the second mutator call.
// Truncate and return immediately.
const newChunk = new GenerateContentResponse();
newChunk.candidates = [
{
...candidate,
content: {
...content,
parts: truncatedParts,
},
finishReason: FinishReason.STOP,
},
];
yield newChunk;
return;
}
foundMutatorFunctionCall = true;
}
truncatedParts.push(part);
}
yield chunk;
}
}
private isMutatorFunctionCall(part: Part): boolean {
if (!part?.functionCall?.name) {
return false;
}
const tool = this.config.getToolRegistry().getTool(part.functionCall.name);
return !!tool && MUTATOR_KINDS.includes(tool.kind);
}
}
/** Visible for Testing */

View File

@@ -181,6 +181,56 @@ describe('ChatCompressionService', () => {
expect(result.newHistory).toBeNull();
});
it('should return NOOP when contextPercentageThreshold is 0', async () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'msg1' }] },
{ role: 'model', parts: [{ text: 'msg2' }] },
];
vi.mocked(mockChat.getHistory).mockReturnValue(history);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(800);
vi.mocked(mockConfig.getChatCompression).mockReturnValue({
contextPercentageThreshold: 0,
});
const mockGenerateContent = vi.fn();
vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
generateContent: mockGenerateContent,
} as unknown as ContentGenerator);
const result = await service.compress(
mockChat,
mockPromptId,
false,
mockModel,
mockConfig,
false,
);
expect(result.info).toMatchObject({
compressionStatus: CompressionStatus.NOOP,
originalTokenCount: 0,
newTokenCount: 0,
});
expect(mockGenerateContent).not.toHaveBeenCalled();
expect(tokenLimit).not.toHaveBeenCalled();
const forcedResult = await service.compress(
mockChat,
mockPromptId,
true,
mockModel,
mockConfig,
false,
);
expect(forcedResult.info).toMatchObject({
compressionStatus: CompressionStatus.NOOP,
originalTokenCount: 0,
newTokenCount: 0,
});
expect(mockGenerateContent).not.toHaveBeenCalled();
expect(tokenLimit).not.toHaveBeenCalled();
});
it('should compress if over token threshold', async () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'msg1' }] },

View File

@@ -86,10 +86,14 @@ export class ChatCompressionService {
hasFailedCompressionAttempt: boolean,
): Promise<{ newHistory: Content[] | null; info: ChatCompressionInfo }> {
const curatedHistory = chat.getHistory(true);
const threshold =
config.getChatCompression()?.contextPercentageThreshold ??
COMPRESSION_TOKEN_THRESHOLD;
// Regardless of `force`, don't do anything if the history is empty.
if (
curatedHistory.length === 0 ||
threshold <= 0 ||
(hasFailedCompressionAttempt && !force)
) {
return {
@@ -104,13 +108,8 @@ export class ChatCompressionService {
const originalTokenCount = uiTelemetryService.getLastPromptTokenCount();
const contextPercentageThreshold =
config.getChatCompression()?.contextPercentageThreshold;
// Don't compress if not forced and we are under the limit.
if (!force) {
const threshold =
contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD;
if (originalTokenCount < threshold * tokenLimit(model)) {
return {
newHistory: null,

View File

@@ -37,6 +37,7 @@ describe('GlobTool', () => {
getFileExclusions: () => ({
getGlobExcludes: () => [],
}),
getTruncateToolOutputLines: () => 1000,
} as unknown as Config;
beforeEach(async () => {

View File

@@ -161,11 +161,15 @@ class GlobToolInvocation extends BaseToolInvocation<
);
const totalFileCount = sortedEntries.length;
const truncated = totalFileCount > MAX_FILE_COUNT;
const fileLimit = Math.min(
MAX_FILE_COUNT,
this.config.getTruncateToolOutputLines(),
);
const truncated = totalFileCount > fileLimit;
// Limit to MAX_FILE_COUNT if needed
// Limit to fileLimit if needed
const entriesToShow = truncated
? sortedEntries.slice(0, MAX_FILE_COUNT)
? sortedEntries.slice(0, fileLimit)
: sortedEntries;
const sortedAbsolutePaths = entriesToShow.map((entry) =>
@@ -178,7 +182,7 @@ class GlobToolInvocation extends BaseToolInvocation<
// Add truncation notice if needed
if (truncated) {
const omittedFiles = totalFileCount - MAX_FILE_COUNT;
const omittedFiles = totalFileCount - fileLimit;
const fileTerm = omittedFiles === 1 ? 'file' : 'files';
resultMessage += `\n---\n[${omittedFiles} ${fileTerm} truncated] ...`;
}

View File

@@ -43,6 +43,8 @@ describe('GrepTool', () => {
getFileExclusions: () => ({
getGlobExcludes: () => [],
}),
getTruncateToolOutputThreshold: () => 25000,
getTruncateToolOutputLines: () => 1000,
} as unknown as Config;
beforeEach(async () => {
@@ -282,6 +284,8 @@ describe('GrepTool', () => {
getFileExclusions: () => ({
getGlobExcludes: () => [],
}),
getTruncateToolOutputThreshold: () => 25000,
getTruncateToolOutputLines: () => 1000,
} as unknown as Config;
const multiDirGrepTool = new GrepTool(multiDirConfig);

View File

@@ -19,8 +19,6 @@ import type { Config } from '../config/config.js';
import type { FileExclusions } from '../utils/ignorePatterns.js';
import { ToolErrorType } from './tool-error.js';
const MAX_LLM_CONTENT_LENGTH = 20_000;
// --- Interfaces ---
/**
@@ -103,14 +101,17 @@ class GrepToolInvocation extends BaseToolInvocation<
return { llmContent: noMatchMsg, returnDisplay: `No matches found` };
}
const charLimit = this.config.getTruncateToolOutputThreshold();
const lineLimit = Math.min(
this.config.getTruncateToolOutputLines(),
this.params.limit ?? Number.POSITIVE_INFINITY,
);
// Apply line limit if specified
let truncatedByLineLimit = false;
let matchesToInclude = rawMatches;
if (
this.params.limit !== undefined &&
rawMatches.length > this.params.limit
) {
matchesToInclude = rawMatches.slice(0, this.params.limit);
if (rawMatches.length > lineLimit) {
matchesToInclude = rawMatches.slice(0, lineLimit);
truncatedByLineLimit = true;
}
@@ -147,8 +148,8 @@ class GrepToolInvocation extends BaseToolInvocation<
// Apply character limit as safety net
let truncatedByCharLimit = false;
if (grepOutput.length > MAX_LLM_CONTENT_LENGTH) {
grepOutput = grepOutput.slice(0, MAX_LLM_CONTENT_LENGTH) + '...';
if (Number.isFinite(charLimit) && grepOutput.length > charLimit) {
grepOutput = grepOutput.slice(0, charLimit) + '...';
truncatedByCharLimit = true;
}
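The two limits compose in a fixed order, line limit first and the character cap as a safety net; a small standalone sketch of that order (function name illustrative):

// Same order of operations as the hunk above.
function capGrepOutput(
  matches: string[],
  lineLimit: number, // min(getTruncateToolOutputLines(), params.limit ?? Infinity)
  charLimit: number, // getTruncateToolOutputThreshold(); may be Infinity when truncation is off
): string {
  const kept = matches.length > lineLimit ? matches.slice(0, lineLimit) : matches;
  let out = kept.join('\n');
  if (Number.isFinite(charLimit) && out.length > charLimit) {
    out = out.slice(0, charLimit) + '...';
  }
  return out;
}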

View File

@@ -41,6 +41,8 @@ describe('ReadFileTool', () => {
storage: {
getProjectTempDir: () => path.join(tempRootDir, '.temp'),
},
getTruncateToolOutputThreshold: () => 2500,
getTruncateToolOutputLines: () => 500,
} as unknown as Config;
tool = new ReadFileTool(mockConfigInstance);
});
@@ -281,11 +283,9 @@ describe('ReadFileTool', () => {
>;
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toContain(
'IMPORTANT: The file content has been truncated',
expect(result.returnDisplay).toContain(
'Read lines 1-2 of 3 from longlines.txt (truncated)',
);
expect(result.llmContent).toContain('--- FILE CONTENT (truncated) ---');
expect(result.returnDisplay).toContain('some lines were shortened');
});
it('should handle image file and return appropriate content', async () => {
@@ -417,10 +417,7 @@ describe('ReadFileTool', () => {
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toContain(
'IMPORTANT: The file content has been truncated',
);
expect(result.llmContent).toContain(
'Status: Showing lines 6-8 of 20 total lines',
'Showing lines 6-8 of 20 total lines',
);
expect(result.llmContent).toContain('Line 6');
expect(result.llmContent).toContain('Line 7');

View File

@@ -67,8 +67,7 @@ class ReadFileToolInvocation extends BaseToolInvocation<
async execute(): Promise<ToolResult> {
const result = await processSingleFileContent(
this.params.absolute_path,
this.config.getTargetDir(),
this.config.getFileSystemService(),
this.config,
this.params.offset,
this.params.limit,
);
@@ -88,16 +87,7 @@ class ReadFileToolInvocation extends BaseToolInvocation<
if (result.isTruncated) {
const [start, end] = result.linesShown!;
const total = result.originalLineCount!;
const nextOffset = this.params.offset
? this.params.offset + end - start + 1
: end;
llmContent = `
IMPORTANT: The file content has been truncated.
Status: Showing lines ${start}-${end} of ${total} total lines.
Action: To read more of the file, you can use the 'offset' and 'limit' parameters in a subsequent 'read_file' call. For example, to read the next section of the file, use offset: ${nextOffset}.
--- FILE CONTENT (truncated) ---
${result.llmContent}`;
llmContent = `Showing lines ${start}-${end} of ${total} total lines.\n\n---\n\n${result.llmContent}`;
} else {
llmContent = result.llmContent || '';
}

View File

@@ -88,6 +88,8 @@ describe('ReadManyFilesTool', () => {
buildExcludePatterns: () => DEFAULT_FILE_EXCLUDES,
getReadManyFilesExcludes: () => DEFAULT_FILE_EXCLUDES,
}),
getTruncateToolOutputThreshold: () => 2500,
getTruncateToolOutputLines: () => 500,
} as Partial<Config> as Config;
tool = new ReadManyFilesTool(mockConfig);
@@ -500,6 +502,8 @@ describe('ReadManyFilesTool', () => {
buildExcludePatterns: () => [],
getReadManyFilesExcludes: () => [],
}),
getTruncateToolOutputThreshold: () => 2500,
getTruncateToolOutputLines: () => 500,
} as Partial<Config> as Config;
tool = new ReadManyFilesTool(mockConfig);
@@ -552,15 +556,10 @@ describe('ReadManyFilesTool', () => {
c.includes('large-file.txt'),
);
expect(normalFileContent).not.toContain(
'[WARNING: This file was truncated.',
);
expect(normalFileContent).not.toContain('Showing lines');
expect(truncatedFileContent).toContain(
"[WARNING: This file was truncated. To view the full content, use the 'read_file' tool on this specific file.]",
'Showing lines 1-250 of 2500 total lines.',
);
// Check that the actual content is still there but truncated
expect(truncatedFileContent).toContain('L200');
expect(truncatedFileContent).not.toContain('L2400');
});
it('should read files with special characters like [] and () in the path', async () => {

View File

@@ -17,7 +17,6 @@ import {
processSingleFileContent,
DEFAULT_ENCODING,
getSpecificMimeType,
DEFAULT_MAX_LINES_TEXT_FILE,
} from '../utils/fileUtils.js';
import type { PartListUnion } from '@google/genai';
import {
@@ -278,8 +277,10 @@ ${finalExclusionPatternsForDescription
}
const sortedFiles = Array.from(filesToConsider).sort();
const file_line_limit =
DEFAULT_MAX_LINES_TEXT_FILE / Math.max(1, sortedFiles.length);
const truncateToolOutputLines = this.config.getTruncateToolOutputLines();
const file_line_limit = Number.isFinite(truncateToolOutputLines)
? Math.floor(truncateToolOutputLines / Math.max(1, sortedFiles.length))
: undefined;
const fileProcessingPromises = sortedFiles.map(
async (filePath): Promise<FileProcessingResult> => {
@@ -316,8 +317,7 @@ ${finalExclusionPatternsForDescription
// Use processSingleFileContent for all file types now
const fileReadResult = await processSingleFileContent(
filePath,
this.config.getTargetDir(),
this.config.getFileSystemService(),
this.config,
0,
file_line_limit,
);
@@ -376,9 +376,12 @@ ${finalExclusionPatternsForDescription
);
let fileContentForLlm = '';
if (fileReadResult.isTruncated) {
fileContentForLlm += `[WARNING: This file was truncated. To view the full content, use the 'read_file' tool on this specific file.]\n\n`;
const [start, end] = fileReadResult.linesShown!;
const total = fileReadResult.originalLineCount!;
fileContentForLlm = `Showing lines ${start}-${end} of ${total} total lines.\n---\n${fileReadResult.llmContent}`;
} else {
fileContentForLlm = fileReadResult.llmContent;
}
fileContentForLlm += fileReadResult.llmContent;
contentParts.push(`${separator}\n\n${fileContentForLlm}\n\n`);
} else {
// This is a Part for image/pdf, which we don't add the separator to.

View File

@@ -103,6 +103,8 @@ describe('RipGrepTool', () => {
getWorkingDir: () => tempRootDir,
getDebugMode: () => false,
getUseBuiltinRipgrep: () => true,
getTruncateToolOutputThreshold: () => 25000,
getTruncateToolOutputLines: () => 1000,
} as unknown as Config;
beforeEach(async () => {
@@ -417,7 +419,7 @@ describe('RipGrepTool', () => {
});
it('should truncate llm content when exceeding maximum length', async () => {
const longMatch = 'fileA.txt:1:' + 'a'.repeat(25_000);
const longMatch = 'fileA.txt:1:' + 'a'.repeat(30_000);
mockSpawn.mockImplementationOnce(
createMockSpawn({
@@ -430,7 +432,7 @@ describe('RipGrepTool', () => {
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
expect(String(result.llmContent).length).toBeLessThanOrEqual(21_000);
expect(String(result.llmContent).length).toBeLessThanOrEqual(26_000);
expect(result.llmContent).toMatch(/\[\d+ lines? truncated\] \.\.\./);
expect(result.returnDisplay).toContain('truncated');
});

View File

@@ -19,8 +19,6 @@ import { SchemaValidator } from '../utils/schemaValidator.js';
import type { FileFilteringOptions } from '../config/constants.js';
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';
const MAX_LLM_CONTENT_LENGTH = 20_000;
/**
* Parameters for the GrepTool (Simplified)
*/
@@ -97,43 +95,49 @@ class GrepToolInvocation extends BaseToolInvocation<
// Build header early to calculate available space
const header = `Found ${totalMatches} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}:\n---\n`;
const charLimit = this.config.getTruncateToolOutputThreshold();
const lineLimit = Math.min(
this.config.getTruncateToolOutputLines(),
this.params.limit ?? Number.POSITIVE_INFINITY,
);
// Apply line limit first (if specified)
let truncatedByLineLimit = false;
let linesToInclude = allLines;
if (
this.params.limit !== undefined &&
allLines.length > this.params.limit
) {
linesToInclude = allLines.slice(0, this.params.limit);
if (allLines.length > lineLimit) {
linesToInclude = allLines.slice(0, lineLimit);
truncatedByLineLimit = true;
}
// Build output and track how many lines we include, respecting character limit
const parts: string[] = [];
let includedLines = 0;
let grepOutput = '';
let truncatedByCharLimit = false;
let currentLength = 0;
let includedLines = 0;
if (Number.isFinite(charLimit)) {
const parts: string[] = [];
let currentLength = 0;
for (const line of linesToInclude) {
const sep = includedLines > 0 ? 1 : 0;
for (const line of linesToInclude) {
const sep = includedLines > 0 ? 1 : 0;
includedLines++;
includedLines++;
if (currentLength + line.length <= MAX_LLM_CONTENT_LENGTH) {
parts.push(line);
currentLength = currentLength + line.length + sep;
} else {
const remaining = Math.max(
MAX_LLM_CONTENT_LENGTH - currentLength - sep,
10,
);
parts.push(line.slice(0, remaining) + '...');
truncatedByCharLimit = true;
break;
const projectedLength = currentLength + line.length + sep;
if (projectedLength <= charLimit) {
parts.push(line);
currentLength = projectedLength;
} else {
const remaining = Math.max(charLimit - currentLength - sep, 10);
parts.push(line.slice(0, remaining) + '...');
truncatedByCharLimit = true;
break;
}
}
}
const grepOutput = parts.join('\n');
grepOutput = parts.join('\n');
} else {
grepOutput = linesToInclude.join('\n');
includedLines = linesToInclude.length;
}
// Build result
let llmContent = header + grepOutput;

View File

@@ -21,4 +21,6 @@ export const ToolNames = {
MEMORY: 'save_memory',
TASK: 'task',
EXIT_PLAN_MODE: 'exit_plan_mode',
WEB_FETCH: 'web_fetch',
WEB_SEARCH: 'web_search',
} as const;

View File

@@ -23,6 +23,7 @@ import {
ToolConfirmationOutcome,
} from './tools.js';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import { ToolNames } from './tool-names.js';
const URL_FETCH_TIMEOUT_MS = 10000;
const MAX_CONTENT_LENGTH = 100000;
@@ -190,7 +191,7 @@ export class WebFetchTool extends BaseDeclarativeTool<
WebFetchToolParams,
ToolResult
> {
static readonly Name: string = 'web_fetch';
static readonly Name: string = ToolNames.WEB_FETCH;
constructor(private readonly config: Config) {
super(

View File

@@ -30,6 +30,7 @@ import type {
WebSearchProviderConfig,
DashScopeProviderConfig,
} from './types.js';
import { ToolNames } from '../tool-names.js';
class WebSearchToolInvocation extends BaseToolInvocation<
WebSearchToolParams,
@@ -274,7 +275,7 @@ export class WebSearchTool extends BaseDeclarativeTool<
WebSearchToolParams,
WebSearchToolResult
> {
static readonly Name: string = 'web_search';
static readonly Name: string = ToolNames.WEB_SEARCH;
constructor(private readonly config: Config) {
super(

View File

@@ -13,9 +13,11 @@ import {
afterEach,
type Mock,
} from 'vitest';
import type { Content } from '@google/genai';
import {
getEnvironmentContext,
getDirectoryContextString,
getInitialChatHistory,
} from './environmentContext.js';
import type { Config } from '../config/config.js';
import { getFolderStructure } from './getFolderStructure.js';
@@ -213,3 +215,102 @@ describe('getEnvironmentContext', () => {
expect(parts[1].text).toBe('\n--- Error reading full file context ---');
});
});
describe('getInitialChatHistory', () => {
let mockConfig: Partial<Config>;
beforeEach(() => {
vi.mocked(getFolderStructure).mockResolvedValue('Mock Folder Structure');
mockConfig = {
getSkipStartupContext: vi.fn().mockReturnValue(false),
getWorkspaceContext: vi.fn().mockReturnValue({
getDirectories: vi.fn().mockReturnValue(['/test/dir']),
}),
getFileService: vi.fn(),
getFullContext: vi.fn().mockReturnValue(false),
getToolRegistry: vi.fn().mockReturnValue({ getTool: vi.fn() }),
};
});
afterEach(() => {
vi.clearAllMocks();
vi.restoreAllMocks();
});
it('includes startup context when skipStartupContext is false', async () => {
const history = await getInitialChatHistory(mockConfig as Config);
expect(mockConfig.getSkipStartupContext).toHaveBeenCalled();
expect(history).toHaveLength(2);
expect(history).toEqual([
expect.objectContaining({
role: 'user',
parts: [
expect.objectContaining({
text: expect.stringContaining(
"I'm currently working in the directory",
),
}),
],
}),
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the context!' }],
},
]);
});
it('returns only extra history when skipStartupContext is true', async () => {
mockConfig.getSkipStartupContext = vi.fn().mockReturnValue(true);
mockConfig.getWorkspaceContext = vi.fn(() => {
throw new Error(
'getWorkspaceContext should not be called when skipping startup context',
);
});
mockConfig.getFullContext = vi.fn(() => {
throw new Error(
'getFullContext should not be called when skipping startup context',
);
});
mockConfig.getToolRegistry = vi.fn(() => {
throw new Error(
'getToolRegistry should not be called when skipping startup context',
);
});
const extraHistory: Content[] = [
{ role: 'user', parts: [{ text: 'custom context' }] },
];
const history = await getInitialChatHistory(
mockConfig as Config,
extraHistory,
);
expect(mockConfig.getSkipStartupContext).toHaveBeenCalled();
expect(history).toEqual(extraHistory);
expect(history).not.toBe(extraHistory);
});
it('returns empty history when skipping startup context without extras', async () => {
mockConfig.getSkipStartupContext = vi.fn().mockReturnValue(true);
mockConfig.getWorkspaceContext = vi.fn(() => {
throw new Error(
'getWorkspaceContext should not be called when skipping startup context',
);
});
mockConfig.getFullContext = vi.fn(() => {
throw new Error(
'getFullContext should not be called when skipping startup context',
);
});
mockConfig.getToolRegistry = vi.fn(() => {
throw new Error(
'getToolRegistry should not be called when skipping startup context',
);
});
const history = await getInitialChatHistory(mockConfig as Config);
expect(history).toEqual([]);
});
});

View File

@@ -112,6 +112,10 @@ export async function getInitialChatHistory(
config: Config,
extraHistory?: Content[],
): Promise<Content[]> {
if (config.getSkipStartupContext()) {
return extraHistory ? [...extraHistory] : [];
}
const envParts = await getEnvironmentContext(config);
const envContextString = envParts.map((part) => part.text || '').join('\n\n');
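Summarising the tests above, a short usage sketch (import paths assume a caller alongside environmentContext.ts; the extra history content is illustrative):

import type { Content } from '@google/genai';
import type { Config } from '../config/config.js';
import { getInitialChatHistory } from './environmentContext.js';

async function seedHistory(config: Config): Promise<Content[]> {
  const extra: Content[] = [{ role: 'user', parts: [{ text: 'custom context' }] }];
  // getSkipStartupContext() === true  -> a fresh copy of `extra` (or [] with no extra history);
  //                                      no workspace/environment context is gathered at all.
  // getSkipStartupContext() === false -> the environment 'user' turn plus the
  //                                      "Got it. Thanks for the context!" model turn
  //                                      (handling of `extra` in that case is not shown in this hunk).
  return getInitialChatHistory(config, extra);
}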

View File

@@ -30,7 +30,7 @@ import {
readFileWithEncoding,
fileExists,
} from './fileUtils.js';
import { StandardFileSystemService } from '../services/fileSystemService.js';
import type { Config } from '../config/config.js';
vi.mock('mime/lite', () => ({
default: { getType: vi.fn() },
@@ -50,6 +50,12 @@ describe('fileUtils', () => {
let nonexistentFilePath: string;
let directoryPath: string;
const mockConfig = {
getTruncateToolOutputThreshold: () => 2500,
getTruncateToolOutputLines: () => 500,
getTargetDir: () => tempRootDir,
} as unknown as Config;
beforeEach(() => {
vi.resetAllMocks(); // Reset all mocks, including mime.getType
@@ -664,8 +670,7 @@ describe('fileUtils', () => {
actualNodeFs.writeFileSync(testTextFilePath, content);
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(result.llmContent).toBe(content);
expect(result.returnDisplay).toBe('');
@@ -675,8 +680,7 @@ describe('fileUtils', () => {
it('should handle file not found', async () => {
const result = await processSingleFileContent(
nonexistentFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(result.error).toContain('File not found');
expect(result.returnDisplay).toContain('File not found');
@@ -689,8 +693,7 @@ describe('fileUtils', () => {
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(result.error).toContain('Simulated read error');
expect(result.returnDisplay).toContain('Simulated read error');
@@ -704,8 +707,7 @@ describe('fileUtils', () => {
const result = await processSingleFileContent(
testImageFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(result.error).toContain('Simulated image read error');
expect(result.returnDisplay).toContain('Simulated image read error');
@@ -717,8 +719,7 @@ describe('fileUtils', () => {
mockMimeGetType.mockReturnValue('image/png');
const result = await processSingleFileContent(
testImageFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(
(result.llmContent as { inlineData: unknown }).inlineData,
@@ -739,8 +740,7 @@ describe('fileUtils', () => {
mockMimeGetType.mockReturnValue('application/pdf');
const result = await processSingleFileContent(
testPdfFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(
(result.llmContent as { inlineData: unknown }).inlineData,
@@ -768,8 +768,7 @@ describe('fileUtils', () => {
const result = await processSingleFileContent(
testSvgFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(result.llmContent).toBe(svgContent);
@@ -786,8 +785,7 @@ describe('fileUtils', () => {
const result = await processSingleFileContent(
testBinaryFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(result.llmContent).toContain(
'Cannot display content of binary file',
@@ -796,11 +794,7 @@ describe('fileUtils', () => {
});
it('should handle path being a directory', async () => {
const result = await processSingleFileContent(
directoryPath,
tempRootDir,
new StandardFileSystemService(),
);
const result = await processSingleFileContent(directoryPath, mockConfig);
expect(result.error).toContain('Path is a directory');
expect(result.returnDisplay).toContain('Path is a directory');
});
@@ -811,8 +805,7 @@ describe('fileUtils', () => {
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
5,
5,
); // Read lines 6-10
@@ -832,8 +825,7 @@ describe('fileUtils', () => {
// Read from line 11 to 20. The start is not 0, so it's truncated.
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
10,
10,
);
@@ -852,8 +844,7 @@ describe('fileUtils', () => {
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
0,
10,
);
@@ -875,17 +866,16 @@ describe('fileUtils', () => {
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(result.llmContent).toContain('Short line');
expect(result.llmContent).toContain(
longLine.substring(0, 2000) + '... [truncated]',
);
expect(result.llmContent).toContain('Another short line');
expect(result.llmContent).not.toContain('Another short line');
expect(result.returnDisplay).toBe(
'Read all 3 lines from test.txt (some lines were shortened)',
'Read lines 1-2 of 3 from test.txt (truncated)',
);
expect(result.isTruncated).toBe(true);
});
@@ -897,8 +887,7 @@ describe('fileUtils', () => {
// Read 5 lines, but there are 11 total
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
0,
5,
);
@@ -916,15 +905,14 @@ describe('fileUtils', () => {
// Read all 11 lines, including the long one
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
0,
11,
);
expect(result.isTruncated).toBe(true);
expect(result.returnDisplay).toBe(
'Read all 11 lines from test.txt (some lines were shortened)',
'Read lines 1-11 of 11 from test.txt (truncated)',
);
});
@@ -942,14 +930,13 @@ describe('fileUtils', () => {
// Read 10 lines out of 20, including the long line
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
0,
10,
);
expect(result.isTruncated).toBe(true);
expect(result.returnDisplay).toBe(
'Read lines 1-10 of 20 from test.txt (some lines were shortened)',
'Read lines 1-5 of 20 from test.txt (truncated)',
);
});
@@ -966,8 +953,7 @@ describe('fileUtils', () => {
try {
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
mockConfig,
);
expect(result.error).toContain('File size exceeds the 20MB limit');

View File

@@ -9,13 +9,9 @@ import fsPromises from 'node:fs/promises';
import path from 'node:path';
import type { PartUnion } from '@google/genai';
import mime from 'mime/lite';
import type { FileSystemService } from '../services/fileSystemService.js';
import { ToolErrorType } from '../tools/tool-error.js';
import { BINARY_EXTENSIONS } from './ignorePatterns.js';
// Constants for text file processing
export const DEFAULT_MAX_LINES_TEXT_FILE = 2000;
const MAX_LINE_LENGTH_TEXT_FILE = 2000;
import type { Config } from '../config/config.js';
// Default values for encoding and separator format
export const DEFAULT_ENCODING: BufferEncoding = 'utf-8';
@@ -306,18 +302,18 @@ export interface ProcessedFileReadResult {
/**
* Reads and processes a single file, handling text, images, and PDFs.
* @param filePath Absolute path to the file.
* @param rootDirectory Absolute path to the project root for relative path display.
* @param config Config instance for truncation settings.
* @param offset Optional offset for text files (0-based line number).
* @param limit Optional limit for text files (number of lines to read).
* @returns ProcessedFileReadResult object.
*/
export async function processSingleFileContent(
filePath: string,
rootDirectory: string,
fileSystemService: FileSystemService,
config: Config,
offset?: number,
limit?: number,
): Promise<ProcessedFileReadResult> {
const rootDirectory = config.getTargetDir();
try {
if (!fs.existsSync(filePath)) {
// Sync check is acceptable before async read
@@ -379,45 +375,76 @@ export async function processSingleFileContent(
case 'text': {
// Use BOM-aware reader to avoid leaving a BOM character in content and to support UTF-16/32 transparently
const content = await readFileWithEncoding(filePath);
const lines = content.split('\n');
const lines = content.split('\n').map((line) => line.trimEnd());
const originalLineCount = lines.length;
const startLine = offset || 0;
const effectiveLimit =
limit === undefined ? DEFAULT_MAX_LINES_TEXT_FILE : limit;
const configLineLimit = config.getTruncateToolOutputLines();
const configCharLimit = config.getTruncateToolOutputThreshold();
const effectiveLimit = limit === undefined ? configLineLimit : limit;
// Ensure endLine does not exceed originalLineCount
const endLine = Math.min(startLine + effectiveLimit, originalLineCount);
// Ensure selectedLines doesn't try to slice beyond array bounds if startLine is too high
const actualStartLine = Math.min(startLine, originalLineCount);
const selectedLines = lines.slice(actualStartLine, endLine);
let linesWereTruncatedInLength = false;
const formattedLines = selectedLines.map((line) => {
if (line.length > MAX_LINE_LENGTH_TEXT_FILE) {
linesWereTruncatedInLength = true;
return (
line.substring(0, MAX_LINE_LENGTH_TEXT_FILE) + '... [truncated]'
);
// Apply character limit truncation
let llmContent = '';
let contentLengthTruncated = false;
let linesIncluded = 0;
if (Number.isFinite(configCharLimit)) {
const formattedLines: string[] = [];
let currentLength = 0;
for (const line of selectedLines) {
const sep = linesIncluded > 0 ? 1 : 0; // newline separator
linesIncluded++;
const projectedLength = currentLength + line.length + sep;
if (projectedLength <= configCharLimit) {
formattedLines.push(line);
currentLength = projectedLength;
} else {
// Truncate the current line to fit
const remaining = Math.max(
configCharLimit - currentLength - sep,
10,
);
formattedLines.push(
line.substring(0, remaining) + '... [truncated]',
);
contentLengthTruncated = true;
break;
}
}
return line;
});
llmContent = formattedLines.join('\n');
} else {
// No character limit, use all selected lines
llmContent = selectedLines.join('\n');
linesIncluded = selectedLines.length;
}
// Calculate actual end line shown
const actualEndLine = contentLengthTruncated
? actualStartLine + linesIncluded
: endLine;
const contentRangeTruncated =
startLine > 0 || endLine < originalLineCount;
const isTruncated = contentRangeTruncated || linesWereTruncatedInLength;
const llmContent = formattedLines.join('\n');
startLine > 0 || actualEndLine < originalLineCount;
const isTruncated = contentRangeTruncated || contentLengthTruncated;
// By default, return nothing to streamline the common case of a successful read_file.
let returnDisplay = '';
if (contentRangeTruncated) {
if (isTruncated) {
returnDisplay = `Read lines ${
actualStartLine + 1
}-${endLine} of ${originalLineCount} from ${relativePathForDisplay}`;
if (linesWereTruncatedInLength) {
returnDisplay += ' (some lines were shortened)';
}-${actualEndLine} of ${originalLineCount} from ${relativePathForDisplay}`;
if (contentLengthTruncated) {
returnDisplay += ' (truncated)';
}
} else if (linesWereTruncatedInLength) {
returnDisplay = `Read all ${originalLineCount} lines from ${relativePathForDisplay} (some lines were shortened)`;
}
return {
@@ -425,7 +452,7 @@ export async function processSingleFileContent(
returnDisplay,
isTruncated,
originalLineCount,
linesShown: [actualStartLine + 1, endLine],
linesShown: [actualStartLine + 1, actualEndLine],
};
}
case 'image':
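Call sites now pass the Config instead of a root directory and FileSystemService; a minimal sketch of the updated call shape (path, offset, and limit are illustrative, and the import paths assume a caller in the same utils directory):

import type { Config } from '../config/config.js';
import { processSingleFileContent } from './fileUtils.js';

// Updated signature: (filePath, config, offset?, limit?); offset is a 0-based line number.
async function readSlice(config: Config): Promise<void> {
  const result = await processSingleFileContent('/abs/path/file.txt', config, 0, 100);
  if (result.isTruncated) {
    const [start, end] = result.linesShown!; // 1-based range actually returned
    console.log(`showing ${start}-${end} of ${result.originalLineCount}`);
  }
}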

View File

@@ -29,6 +29,8 @@ const createMockConfig = (
getTargetDir: () => cwd,
getFileSystemService: () => fileSystemService,
getFileService: () => mockFileService,
getTruncateToolOutputThreshold: () => 2500,
getTruncateToolOutputLines: () => 500,
} as unknown as Config;
};

View File

@@ -83,11 +83,7 @@ export async function readPathFromWorkspace(
for (const filePath of finalFiles) {
const relativePathForDisplay = path.relative(absolutePath, filePath);
allParts.push({ text: `--- ${relativePathForDisplay} ---\n` });
const result = await processSingleFileContent(
filePath,
config.getTargetDir(),
config.getFileSystemService(),
);
const result = await processSingleFileContent(filePath, config);
allParts.push(result.llmContent);
allParts.push({ text: '\n' }); // Add a newline for separation
}
@@ -108,11 +104,7 @@ export async function readPathFromWorkspace(
}
// It's a single file, process it directly.
const result = await processSingleFileContent(
absolutePath,
config.getTargetDir(),
config.getFileSystemService(),
);
const result = await processSingleFileContent(absolutePath, config);
return [result.llmContent];
}
}