Mirror of https://github.com/QwenLM/qwen-code.git (synced 2025-12-20 16:57:46 +00:00)
Merge branch 'main' of github.com:QwenLM/qwen-code into feature/stream-json-migration
@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/qwen-code-core",
  "version": "0.2.1",
  "version": "0.2.2",
  "description": "Qwen Code Core",
  "repository": {
    "type": "git",

@@ -45,6 +45,15 @@ import { logRipgrepFallback } from '../telemetry/loggers.js';
|
||||
import { RipgrepFallbackEvent } from '../telemetry/types.js';
|
||||
import { ToolRegistry } from '../tools/tool-registry.js';
|
||||
|
||||
function createToolMock(toolName: string) {
|
||||
const ToolMock = vi.fn();
|
||||
Object.defineProperty(ToolMock, 'Name', {
|
||||
value: toolName,
|
||||
writable: true,
|
||||
});
|
||||
return ToolMock;
|
||||
}
|
||||
|
||||
vi.mock('fs', async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import('fs')>();
|
||||
return {
|
||||
@@ -73,23 +82,41 @@ vi.mock('../utils/memoryDiscovery.js', () => ({
|
||||
}));
|
||||
|
||||
// Mock individual tools if their constructors are complex or have side effects
|
||||
vi.mock('../tools/ls');
|
||||
vi.mock('../tools/read-file');
|
||||
vi.mock('../tools/grep.js');
|
||||
vi.mock('../tools/ls', () => ({
|
||||
LSTool: createToolMock('list_directory'),
|
||||
}));
|
||||
vi.mock('../tools/read-file', () => ({
|
||||
ReadFileTool: createToolMock('read_file'),
|
||||
}));
|
||||
vi.mock('../tools/grep.js', () => ({
|
||||
GrepTool: createToolMock('grep_search'),
|
||||
}));
|
||||
vi.mock('../tools/ripGrep.js', () => ({
|
||||
RipGrepTool: class MockRipGrepTool {},
|
||||
RipGrepTool: createToolMock('grep_search'),
|
||||
}));
|
||||
vi.mock('../utils/ripgrepUtils.js', () => ({
|
||||
canUseRipgrep: vi.fn(),
|
||||
}));
|
||||
vi.mock('../tools/glob');
|
||||
vi.mock('../tools/edit');
|
||||
vi.mock('../tools/shell');
|
||||
vi.mock('../tools/write-file');
|
||||
vi.mock('../tools/web-fetch');
|
||||
vi.mock('../tools/read-many-files');
|
||||
vi.mock('../tools/glob', () => ({
|
||||
GlobTool: createToolMock('glob'),
|
||||
}));
|
||||
vi.mock('../tools/edit', () => ({
|
||||
EditTool: createToolMock('edit'),
|
||||
}));
|
||||
vi.mock('../tools/shell', () => ({
|
||||
ShellTool: createToolMock('run_shell_command'),
|
||||
}));
|
||||
vi.mock('../tools/write-file', () => ({
|
||||
WriteFileTool: createToolMock('write_file'),
|
||||
}));
|
||||
vi.mock('../tools/web-fetch', () => ({
|
||||
WebFetchTool: createToolMock('web_fetch'),
|
||||
}));
|
||||
vi.mock('../tools/read-many-files', () => ({
|
||||
ReadManyFilesTool: createToolMock('read_many_files'),
|
||||
}));
|
||||
vi.mock('../tools/memoryTool', () => ({
|
||||
MemoryTool: vi.fn(),
|
||||
MemoryTool: createToolMock('save_memory'),
|
||||
setGeminiMdFilename: vi.fn(),
|
||||
getCurrentGeminiMdFilename: vi.fn(() => 'QWEN.md'), // Mock the original filename
|
||||
DEFAULT_CONTEXT_FILENAME: 'QWEN.md',
|
||||
@@ -621,7 +648,7 @@ describe('Server Config (config.ts)', () => {
|
||||
it('should register a tool if coreTools contains an argument-specific pattern', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: ['ShellTool(git status)'],
|
||||
coreTools: ['Shell(git status)'], // Use display name instead of class name
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
@@ -646,6 +673,89 @@ describe('Server Config (config.ts)', () => {
|
||||
expect(wasReadFileToolRegistered).toBe(false);
|
||||
});
|
||||
|
||||
it('should register a tool if coreTools contains the displayName', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: ['Shell'],
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
|
||||
const registerToolMock = (
|
||||
(await vi.importMock('../tools/tool-registry')) as {
|
||||
ToolRegistry: { prototype: { registerTool: Mock } };
|
||||
}
|
||||
).ToolRegistry.prototype.registerTool;
|
||||
|
||||
const wasShellToolRegistered = (registerToolMock as Mock).mock.calls.some(
|
||||
(call) => call[0] instanceof vi.mocked(ShellTool),
|
||||
);
|
||||
expect(wasShellToolRegistered).toBe(true);
|
||||
});
|
||||
|
||||
it('should register a tool if coreTools contains the displayName with argument-specific pattern', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: ['Shell(git status)'],
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
|
||||
const registerToolMock = (
|
||||
(await vi.importMock('../tools/tool-registry')) as {
|
||||
ToolRegistry: { prototype: { registerTool: Mock } };
|
||||
}
|
||||
).ToolRegistry.prototype.registerTool;
|
||||
|
||||
const wasShellToolRegistered = (registerToolMock as Mock).mock.calls.some(
|
||||
(call) => call[0] instanceof vi.mocked(ShellTool),
|
||||
);
|
||||
expect(wasShellToolRegistered).toBe(true);
|
||||
});
|
||||
|
||||
it('should register a tool if coreTools contains a legacy tool name alias', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
useRipgrep: false,
|
||||
coreTools: ['search_file_content'],
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
|
||||
const registerToolMock = (
|
||||
(await vi.importMock('../tools/tool-registry')) as {
|
||||
ToolRegistry: { prototype: { registerTool: Mock } };
|
||||
}
|
||||
).ToolRegistry.prototype.registerTool;
|
||||
|
||||
const wasGrepToolRegistered = (registerToolMock as Mock).mock.calls.some(
|
||||
(call) => call[0] instanceof vi.mocked(GrepTool),
|
||||
);
|
||||
expect(wasGrepToolRegistered).toBe(true);
|
||||
});
|
||||
|
||||
it('should not register a tool if excludeTools contains a legacy display name alias', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
useRipgrep: false,
|
||||
coreTools: undefined,
|
||||
excludeTools: ['SearchFiles'],
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
|
||||
const registerToolMock = (
|
||||
(await vi.importMock('../tools/tool-registry')) as {
|
||||
ToolRegistry: { prototype: { registerTool: Mock } };
|
||||
}
|
||||
).ToolRegistry.prototype.registerTool;
|
||||
|
||||
const wasGrepToolRegistered = (registerToolMock as Mock).mock.calls.some(
|
||||
(call) => call[0] instanceof vi.mocked(GrepTool),
|
||||
);
|
||||
expect(wasGrepToolRegistered).toBe(false);
|
||||
});
|
||||
|
||||
describe('with minified tool class names', () => {
|
||||
beforeEach(() => {
|
||||
Object.defineProperty(
|
||||
@@ -671,7 +781,27 @@ describe('Server Config (config.ts)', () => {
|
||||
it('should register a tool if coreTools contains the non-minified class name', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: ['ShellTool'],
|
||||
coreTools: ['Shell'], // Use display name instead of class name
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
|
||||
const registerToolMock = (
|
||||
(await vi.importMock('../tools/tool-registry')) as {
|
||||
ToolRegistry: { prototype: { registerTool: Mock } };
|
||||
}
|
||||
).ToolRegistry.prototype.registerTool;
|
||||
|
||||
const wasShellToolRegistered = (
|
||||
registerToolMock as Mock
|
||||
).mock.calls.some((call) => call[0] instanceof vi.mocked(ShellTool));
|
||||
expect(wasShellToolRegistered).toBe(true);
|
||||
});
|
||||
|
||||
it('should register a tool if coreTools contains the displayName', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: ['Shell'],
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
@@ -692,7 +822,28 @@ describe('Server Config (config.ts)', () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: undefined, // all tools enabled by default
|
||||
excludeTools: ['ShellTool'],
|
||||
excludeTools: ['Shell'], // Use display name instead of class name
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
|
||||
const registerToolMock = (
|
||||
(await vi.importMock('../tools/tool-registry')) as {
|
||||
ToolRegistry: { prototype: { registerTool: Mock } };
|
||||
}
|
||||
).ToolRegistry.prototype.registerTool;
|
||||
|
||||
const wasShellToolRegistered = (
|
||||
registerToolMock as Mock
|
||||
).mock.calls.some((call) => call[0] instanceof vi.mocked(ShellTool));
|
||||
expect(wasShellToolRegistered).toBe(false);
|
||||
});
|
||||
|
||||
it('should not register a tool if excludeTools contains the displayName', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: undefined, // all tools enabled by default
|
||||
excludeTools: ['Shell'],
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
@@ -712,7 +863,27 @@ describe('Server Config (config.ts)', () => {
|
||||
it('should register a tool if coreTools contains an argument-specific pattern with the non-minified class name', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: ['ShellTool(git status)'],
|
||||
coreTools: ['Shell(git status)'], // Use display name instead of class name
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
|
||||
const registerToolMock = (
|
||||
(await vi.importMock('../tools/tool-registry')) as {
|
||||
ToolRegistry: { prototype: { registerTool: Mock } };
|
||||
}
|
||||
).ToolRegistry.prototype.registerTool;
|
||||
|
||||
const wasShellToolRegistered = (
|
||||
registerToolMock as Mock
|
||||
).mock.calls.some((call) => call[0] instanceof vi.mocked(ShellTool));
|
||||
expect(wasShellToolRegistered).toBe(true);
|
||||
});
|
||||
|
||||
it('should register a tool if coreTools contains an argument-specific pattern with the displayName', async () => {
|
||||
const params: ConfigParameters = {
|
||||
...baseParams,
|
||||
coreTools: ['Shell(git status)'],
|
||||
};
|
||||
const config = new Config(params);
|
||||
await config.initialize();
|
||||
|
||||
@@ -81,6 +81,7 @@ import {
|
||||
import { shouldAttemptBrowserLaunch } from '../utils/browser.js';
|
||||
import { FileExclusions } from '../utils/ignorePatterns.js';
|
||||
import { WorkspaceContext } from '../utils/workspaceContext.js';
|
||||
import { isToolEnabled, type ToolName } from '../utils/tool-utils.js';
|
||||
|
||||
// Local config modules
|
||||
import type { FileFilteringOptions } from './constants.js';
|
||||
@@ -1143,37 +1144,35 @@ export class Config {
|
||||
async createToolRegistry(): Promise<ToolRegistry> {
|
||||
const registry = new ToolRegistry(this, this.eventEmitter);
|
||||
|
||||
// helper to create & register core tools that are enabled
|
||||
const coreToolsConfig = this.getCoreTools();
|
||||
const excludeToolsConfig = this.getExcludeTools();
|
||||
|
||||
// Helper to create & register core tools that are enabled
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const registerCoreTool = (ToolClass: any, ...args: unknown[]) => {
|
||||
const className = ToolClass.name;
|
||||
const toolName = ToolClass.Name || className;
|
||||
const coreTools = this.getCoreTools();
|
||||
const excludeTools = this.getExcludeTools() || [];
|
||||
// On some platforms, the className can be minified to _ClassName.
|
||||
const normalizedClassName = className.replace(/^_+/, '');
|
||||
const toolName = ToolClass?.Name as ToolName | undefined;
|
||||
const className = ToolClass?.name ?? 'UnknownTool';
|
||||
|
||||
let isEnabled = true; // Enabled by default if coreTools is not set.
|
||||
if (coreTools) {
|
||||
isEnabled = coreTools.some(
|
||||
(tool) =>
|
||||
tool === toolName ||
|
||||
tool === normalizedClassName ||
|
||||
tool.startsWith(`${toolName}(`) ||
|
||||
tool.startsWith(`${normalizedClassName}(`),
|
||||
if (!toolName) {
|
||||
// Log warning and skip this tool instead of crashing
|
||||
console.warn(
|
||||
`[Config] Skipping tool registration: ${className} is missing static Name property. ` +
|
||||
`Tools must define a static Name property to be registered. ` +
|
||||
`Location: config.ts:registerCoreTool`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const isExcluded = excludeTools.some(
|
||||
(tool) => tool === toolName || tool === normalizedClassName,
|
||||
);
|
||||
|
||||
if (isExcluded) {
|
||||
isEnabled = false;
|
||||
}
|
||||
|
||||
if (isEnabled) {
|
||||
registry.registerTool(new ToolClass(...args));
|
||||
if (isToolEnabled(toolName, coreToolsConfig, excludeToolsConfig)) {
|
||||
try {
|
||||
registry.registerTool(new ToolClass(...args));
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[Config] Failed to register tool ${className} (${toolName}):`,
|
||||
error,
|
||||
);
|
||||
throw error; // Re-throw after logging context
|
||||
}
|
||||
}
|
||||
};
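The `isToolEnabled` helper used above is imported from `../utils/tool-utils.js` and is not part of this diff. As a rough sketch of the behaviour the updated tests rely on (enable-by-default when `coreTools` is unset, exclusions win, and argument-specific `name(args)` patterns), it might look like the following; the real helper presumably also resolves display names and the legacy aliases introduced later in this commit:

```typescript
// Illustrative sketch only; the actual tool-utils.js implementation may differ.
export type ToolName = string;

export function isToolEnabled(
  toolName: ToolName,
  coreTools: string[] | undefined,
  excludeTools: string[] | undefined,
): boolean {
  // Accept the bare tool name or an argument-specific pattern such as
  // "run_shell_command(git status)".
  const matches = (entry: string) =>
    entry === toolName || entry.startsWith(`${toolName}(`);

  if (excludeTools?.some(matches)) {
    return false; // Exclusions always take precedence.
  }
  // With no coreTools allowlist, every tool is enabled by default.
  return coreTools ? coreTools.some(matches) : true;
}
```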
|
||||
|
||||
|
||||
@@ -23,6 +23,14 @@ import type OpenAI from 'openai';
|
||||
import { safeJsonParse } from '../../utils/safeJsonParse.js';
|
||||
import { StreamingToolCallParser } from './streamingToolCallParser.js';
|
||||
|
||||
/**
|
||||
* Extended usage type that supports both OpenAI standard format and alternative formats
|
||||
* Some models return cached_tokens at the top level instead of in prompt_tokens_details
|
||||
*/
|
||||
interface ExtendedCompletionUsage extends OpenAI.CompletionUsage {
|
||||
cached_tokens?: number;
|
||||
}
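For illustration, these are the two usage payload shapes the interface above is meant to cover (token counts are made-up values):

```typescript
// OpenAI-standard shape: cache usage nested under prompt_tokens_details.
const standardUsage = {
  prompt_tokens: 1200,
  completion_tokens: 300,
  total_tokens: 1500,
  prompt_tokens_details: { cached_tokens: 800 },
};

// Alternative shape some models return: cached_tokens at the top level.
const alternativeUsage = {
  prompt_tokens: 1200,
  completion_tokens: 300,
  total_tokens: 1500,
  cached_tokens: 800,
};
```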
|
||||
|
||||
/**
|
||||
* Tool call accumulator for streaming responses
|
||||
*/
|
||||
@@ -582,7 +590,13 @@ export class OpenAIContentConverter {
|
||||
const promptTokens = usage.prompt_tokens || 0;
|
||||
const completionTokens = usage.completion_tokens || 0;
|
||||
const totalTokens = usage.total_tokens || 0;
|
||||
const cachedTokens = usage.prompt_tokens_details?.cached_tokens || 0;
|
||||
// Support both formats: prompt_tokens_details.cached_tokens (OpenAI standard)
|
||||
// and cached_tokens (some models return it at top level)
|
||||
const extendedUsage = usage as ExtendedCompletionUsage;
|
||||
const cachedTokens =
|
||||
usage.prompt_tokens_details?.cached_tokens ??
|
||||
extendedUsage.cached_tokens ??
|
||||
0;
|
||||
|
||||
// If we only have total tokens but no breakdown, estimate the split
|
||||
// Typically input is ~70% and output is ~30% for most conversations
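The estimation the comment above describes is outside this hunk; a minimal sketch of that fallback, assuming a simple 70/30 split, would be:

```typescript
// Hypothetical sketch of the fallback described above; the converter's
// actual code is not shown in this hunk and may differ.
let estimatedPromptTokens = promptTokens;
let estimatedCompletionTokens = completionTokens;
if (totalTokens > 0 && promptTokens === 0 && completionTokens === 0) {
  estimatedPromptTokens = Math.round(totalTokens * 0.7); // ~70% input
  estimatedCompletionTokens = totalTokens - estimatedPromptTokens; // ~30% output
}
```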
|
||||
@@ -707,7 +721,13 @@ export class OpenAIContentConverter {
|
||||
const promptTokens = usage.prompt_tokens || 0;
|
||||
const completionTokens = usage.completion_tokens || 0;
|
||||
const totalTokens = usage.total_tokens || 0;
|
||||
const cachedTokens = usage.prompt_tokens_details?.cached_tokens || 0;
|
||||
// Support both formats: prompt_tokens_details.cached_tokens (OpenAI standard)
|
||||
// and cached_tokens (some models return it at top level)
|
||||
const extendedUsage = usage as ExtendedCompletionUsage;
|
||||
const cachedTokens =
|
||||
usage.prompt_tokens_details?.cached_tokens ??
|
||||
extendedUsage.cached_tokens ??
|
||||
0;
|
||||
|
||||
// If we only have total tokens but no breakdown, estimate the split
|
||||
// Typically input is ~70% and output is ~30% for most conversations
|
||||
|
||||
@@ -165,9 +165,7 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
|
||||
// -------------------
|
||||
// DeepSeek
|
||||
// -------------------
|
||||
[/^deepseek$/, LIMITS['128k']],
|
||||
[/^deepseek-r1(?:-.*)?$/, LIMITS['128k']],
|
||||
[/^deepseek-v3(?:\.\d+)?(?:-.*)?$/, LIMITS['128k']],
|
||||
[/^deepseek(?:-.*)?$/, LIMITS['128k']],
|
||||
|
||||
// -------------------
|
||||
// Moonshot / Kimi
|
||||
@@ -211,6 +209,12 @@ const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
|
||||
|
||||
// Qwen3-VL-Plus: 32K max output tokens
|
||||
[/^qwen3-vl-plus$/, LIMITS['32k']],
|
||||
|
||||
// Deepseek-chat: 8k max tokens
|
||||
[/^deepseek-chat$/, LIMITS['8k']],
|
||||
|
||||
// Deepseek-reasoner: 64k max tokens
|
||||
[/^deepseek-reasoner$/, LIMITS['64k']],
|
||||
];
|
||||
|
||||
/**
|
||||
|
||||
@@ -29,6 +29,7 @@ import { SubagentValidator } from './validation.js';
|
||||
import { SubAgentScope } from './subagent.js';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { BuiltinAgentRegistry } from './builtin-agents.js';
|
||||
import { ToolDisplayNamesMigration } from '../tools/tool-names.js';
|
||||
|
||||
const QWEN_CONFIG_DIR = '.qwen';
|
||||
const AGENT_CONFIG_DIR = 'agents';
|
||||
@@ -632,7 +633,12 @@ export class SubagentManager {
|
||||
|
||||
// If no exact name match, try to find by display name
|
||||
const displayNameMatch = allTools.find(
|
||||
(tool) => tool.displayName === toolIdentifier,
|
||||
(tool) =>
|
||||
tool.displayName === toolIdentifier ||
|
||||
tool.displayName ===
|
||||
(ToolDisplayNamesMigration[
|
||||
toolIdentifier as keyof typeof ToolDisplayNamesMigration
|
||||
] as string | undefined),
|
||||
);
|
||||
if (displayNameMatch) {
|
||||
result.push(displayNameMatch.name);
|
||||
|
||||
@@ -48,7 +48,6 @@ import type {
|
||||
} from './event-types.js';
|
||||
import type { Config } from '../../config/config.js';
|
||||
import { safeJsonStringify } from '../../utils/safeJsonStringify.js';
|
||||
import { type HttpError, retryWithBackoff } from '../../utils/retry.js';
|
||||
import { InstallationManager } from '../../utils/installationManager.js';
|
||||
import { FixedDeque } from 'mnemonist';
|
||||
import { AuthType } from '../../core/contentGenerator.js';
|
||||
@@ -288,8 +287,8 @@ export class QwenLogger {
|
||||
const rumPayload = await this.createRumPayload();
|
||||
// Override events with the ones we're sending
|
||||
rumPayload.events = eventsToSend;
|
||||
const flushFn = () =>
|
||||
new Promise<Buffer>((resolve, reject) => {
|
||||
try {
|
||||
await new Promise<Buffer>((resolve, reject) => {
|
||||
const body = safeJsonStringify(rumPayload);
|
||||
const options = {
|
||||
hostname: USAGE_STATS_HOSTNAME,
|
||||
@@ -311,10 +310,9 @@ export class QwenLogger {
|
||||
res.statusCode &&
|
||||
(res.statusCode < 200 || res.statusCode >= 300)
|
||||
) {
|
||||
const err: HttpError = new Error(
|
||||
const err = new Error(
|
||||
`Request failed with status ${res.statusCode}`,
|
||||
);
|
||||
err.status = res.statusCode;
|
||||
res.resume();
|
||||
return reject(err);
|
||||
}
|
||||
@@ -326,26 +324,11 @@ export class QwenLogger {
|
||||
req.end(body);
|
||||
});
|
||||
|
||||
try {
|
||||
await retryWithBackoff(flushFn, {
|
||||
maxAttempts: 3,
|
||||
initialDelayMs: 200,
|
||||
shouldRetryOnError: (err: unknown) => {
|
||||
if (!(err instanceof Error)) return false;
|
||||
const status = (err as HttpError).status as number | undefined;
|
||||
// If status is not available, it's likely a network error
|
||||
if (status === undefined) return true;
|
||||
|
||||
// Retry on 429 (Too many Requests) and 5xx server errors.
|
||||
return status === 429 || (status >= 500 && status < 600);
|
||||
},
|
||||
});
|
||||
|
||||
this.lastFlushTime = Date.now();
|
||||
return {};
|
||||
} catch (error) {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.error('RUM flush failed after multiple retries.', error);
|
||||
console.error('RUM flush failed.', error);
|
||||
}
|
||||
|
||||
// Re-queue failed events for retry
|
||||
|
||||
@@ -425,7 +425,9 @@ describe('EditTool', () => {
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
|
||||
expect(result.llmContent).toMatch(/Successfully modified file/);
|
||||
expect(result.llmContent).toMatch(
|
||||
/Showing lines \d+-\d+ of \d+ from the edited file:/,
|
||||
);
|
||||
expect(fs.readFileSync(filePath, 'utf8')).toBe(newContent);
|
||||
const display = result.returnDisplay as FileDiff;
|
||||
expect(display.fileDiff).toMatch(initialContent);
|
||||
@@ -450,6 +452,9 @@ describe('EditTool', () => {
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
|
||||
expect(result.llmContent).toMatch(/Created new file/);
|
||||
expect(result.llmContent).toMatch(
|
||||
/Showing lines \d+-\d+ of \d+ from the edited file:/,
|
||||
);
|
||||
expect(fs.existsSync(newFilePath)).toBe(true);
|
||||
expect(fs.readFileSync(newFilePath, 'utf8')).toBe(fileContent);
|
||||
|
||||
@@ -485,7 +490,7 @@ describe('EditTool', () => {
|
||||
);
|
||||
});
|
||||
|
||||
it('should return error if multiple occurrences of old_string are found', async () => {
|
||||
it('should return error if multiple occurrences of old_string are found and replace_all is false', async () => {
|
||||
fs.writeFileSync(filePath, 'multiple old old strings', 'utf8');
|
||||
const params: EditToolParams = {
|
||||
file_path: filePath,
|
||||
@@ -494,27 +499,27 @@ describe('EditTool', () => {
|
||||
};
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.llmContent).toMatch(
|
||||
/Expected 1 occurrence but found 2 for old_string in file/,
|
||||
);
|
||||
expect(result.llmContent).toMatch(/replace_all was not enabled/);
|
||||
expect(result.returnDisplay).toMatch(
|
||||
/Failed to edit, expected 1 occurrence but found 2/,
|
||||
/Failed to edit because the text matches multiple locations/,
|
||||
);
|
||||
});
|
||||
|
||||
it('should successfully replace multiple occurrences when expected_replacements specified', async () => {
|
||||
it('should successfully replace multiple occurrences when replace_all is true', async () => {
|
||||
fs.writeFileSync(filePath, 'old text\nold text\nold text', 'utf8');
|
||||
const params: EditToolParams = {
|
||||
file_path: filePath,
|
||||
old_string: 'old',
|
||||
new_string: 'new',
|
||||
expected_replacements: 3,
|
||||
replace_all: true,
|
||||
};
|
||||
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
|
||||
expect(result.llmContent).toMatch(/Successfully modified file/);
|
||||
expect(result.llmContent).toMatch(
|
||||
/Showing lines \d+-\d+ of \d+ from the edited file/,
|
||||
);
|
||||
expect(fs.readFileSync(filePath, 'utf8')).toBe(
|
||||
'new text\nnew text\nnew text',
|
||||
);
|
||||
@@ -535,24 +540,6 @@ describe('EditTool', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should return error if expected_replacements does not match actual occurrences', async () => {
|
||||
fs.writeFileSync(filePath, 'old text old text', 'utf8');
|
||||
const params: EditToolParams = {
|
||||
file_path: filePath,
|
||||
old_string: 'old',
|
||||
new_string: 'new',
|
||||
expected_replacements: 3, // Expecting 3 but only 2 exist
|
||||
};
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.llmContent).toMatch(
|
||||
/Expected 3 occurrences but found 2 for old_string in file/,
|
||||
);
|
||||
expect(result.returnDisplay).toMatch(
|
||||
/Failed to edit, expected 3 occurrences but found 2/,
|
||||
);
|
||||
});
|
||||
|
||||
it('should return error if trying to create a file that already exists (empty old_string)', async () => {
|
||||
fs.writeFileSync(filePath, 'Existing content', 'utf8');
|
||||
const params: EditToolParams = {
|
||||
@@ -568,38 +555,6 @@ describe('EditTool', () => {
|
||||
);
|
||||
});
|
||||
|
||||
it('should include modification message when proposed content is modified', async () => {
|
||||
const initialContent = 'Line 1\nold line\nLine 3\nLine 4\nLine 5\n';
|
||||
fs.writeFileSync(filePath, initialContent, 'utf8');
|
||||
const params: EditToolParams = {
|
||||
file_path: filePath,
|
||||
old_string: 'old',
|
||||
new_string: 'new',
|
||||
modified_by_user: true,
|
||||
ai_proposed_content: 'Line 1\nAI line\nLine 3\nLine 4\nLine 5\n',
|
||||
};
|
||||
|
||||
(mockConfig.getApprovalMode as Mock).mockReturnValueOnce(
|
||||
ApprovalMode.AUTO_EDIT,
|
||||
);
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
|
||||
expect(result.llmContent).toMatch(
|
||||
/User modified the `new_string` content/,
|
||||
);
|
||||
expect((result.returnDisplay as FileDiff).diffStat).toStrictEqual({
|
||||
model_added_lines: 1,
|
||||
model_removed_lines: 1,
|
||||
model_added_chars: 7,
|
||||
model_removed_chars: 8,
|
||||
user_added_lines: 1,
|
||||
user_removed_lines: 1,
|
||||
user_added_chars: 8,
|
||||
user_removed_chars: 7,
|
||||
});
|
||||
});
|
||||
|
||||
it('should not include modification message when proposed content is not modified', async () => {
|
||||
const initialContent = 'This is some old text.';
|
||||
fs.writeFileSync(filePath, initialContent, 'utf8');
|
||||
@@ -723,13 +678,12 @@ describe('EditTool', () => {
|
||||
expect(result.error?.type).toBe(ToolErrorType.EDIT_NO_OCCURRENCE_FOUND);
|
||||
});
|
||||
|
||||
it('should return EXPECTED_OCCURRENCE_MISMATCH error', async () => {
|
||||
it('should return EXPECTED_OCCURRENCE_MISMATCH error when replace_all is false and text is not unique', async () => {
|
||||
fs.writeFileSync(filePath, 'one one two', 'utf8');
|
||||
const params: EditToolParams = {
|
||||
file_path: filePath,
|
||||
old_string: 'one',
|
||||
new_string: 'new',
|
||||
expected_replacements: 3,
|
||||
};
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
|
||||
@@ -22,7 +22,7 @@ import type { Config } from '../config/config.js';
|
||||
import { ApprovalMode } from '../config/config.js';
|
||||
import { DEFAULT_DIFF_OPTIONS, getDiffStat } from './diffOptions.js';
|
||||
import { ReadFileTool } from './read-file.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
import { logFileOperation } from '../telemetry/loggers.js';
|
||||
import { FileOperationEvent } from '../telemetry/types.js';
|
||||
import { FileOperation } from '../telemetry/metrics.js';
|
||||
@@ -34,6 +34,12 @@ import type {
|
||||
} from './modifiable-tool.js';
|
||||
import { IdeClient } from '../ide/ide-client.js';
|
||||
import { safeLiteralReplace } from '../utils/textUtils.js';
|
||||
import {
|
||||
countOccurrences,
|
||||
extractEditSnippet,
|
||||
maybeAugmentOldStringForDeletion,
|
||||
normalizeEditStrings,
|
||||
} from '../utils/editHelper.js';
|
||||
|
||||
export function applyReplacement(
|
||||
currentContent: string | null,
|
||||
@@ -77,10 +83,9 @@ export interface EditToolParams {
|
||||
new_string: string;
|
||||
|
||||
/**
|
||||
* Number of replacements expected. Defaults to 1 if not specified.
|
||||
* Use when you want to replace multiple occurrences.
|
||||
* Replace every occurrence of old_string instead of requiring a unique match.
|
||||
*/
|
||||
expected_replacements?: number;
|
||||
replace_all?: boolean;
|
||||
|
||||
/**
|
||||
* Whether the edit was modified manually by the user.
|
||||
@@ -118,12 +123,12 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
|
||||
* @throws File system errors if reading the file fails unexpectedly (e.g., permissions)
|
||||
*/
|
||||
private async calculateEdit(params: EditToolParams): Promise<CalculatedEdit> {
|
||||
const expectedReplacements = params.expected_replacements ?? 1;
|
||||
const replaceAll = params.replace_all ?? false;
|
||||
let currentContent: string | null = null;
|
||||
let fileExists = false;
|
||||
let isNewFile = false;
|
||||
const finalNewString = params.new_string;
|
||||
const finalOldString = params.old_string;
|
||||
let finalNewString = params.new_string;
|
||||
let finalOldString = params.old_string;
|
||||
let occurrences = 0;
|
||||
let error:
|
||||
| { display: string; raw: string; type: ToolErrorType }
|
||||
@@ -144,7 +149,15 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
|
||||
fileExists = false;
|
||||
}
|
||||
|
||||
if (params.old_string === '' && !fileExists) {
|
||||
const normalizedStrings = normalizeEditStrings(
|
||||
currentContent,
|
||||
finalOldString,
|
||||
finalNewString,
|
||||
);
|
||||
finalOldString = normalizedStrings.oldString;
|
||||
finalNewString = normalizedStrings.newString;
|
||||
|
||||
if (finalOldString === '' && !fileExists) {
|
||||
// Creating a new file
|
||||
isNewFile = true;
|
||||
} else if (!fileExists) {
|
||||
@@ -155,7 +168,13 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
|
||||
type: ToolErrorType.FILE_NOT_FOUND,
|
||||
};
|
||||
} else if (currentContent !== null) {
|
||||
occurrences = this.countOccurrences(currentContent, params.old_string);
|
||||
finalOldString = maybeAugmentOldStringForDeletion(
|
||||
currentContent,
|
||||
finalOldString,
|
||||
finalNewString,
|
||||
);
|
||||
|
||||
occurrences = countOccurrences(currentContent, finalOldString);
|
||||
if (params.old_string === '') {
|
||||
// Error: Trying to create a file that already exists
|
||||
error = {
|
||||
@@ -169,13 +188,10 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
|
||||
raw: `Failed to edit, 0 occurrences found for old_string in ${params.file_path}. No edits made. The exact text in old_string was not found. Ensure you're not escaping content incorrectly and check whitespace, indentation, and context. Use ${ReadFileTool.Name} tool to verify.`,
|
||||
type: ToolErrorType.EDIT_NO_OCCURRENCE_FOUND,
|
||||
};
|
||||
} else if (occurrences !== expectedReplacements) {
|
||||
const occurrenceTerm =
|
||||
expectedReplacements === 1 ? 'occurrence' : 'occurrences';
|
||||
|
||||
} else if (!replaceAll && occurrences > 1) {
|
||||
error = {
|
||||
display: `Failed to edit, expected ${expectedReplacements} ${occurrenceTerm} but found ${occurrences}.`,
|
||||
raw: `Failed to edit, Expected ${expectedReplacements} ${occurrenceTerm} but found ${occurrences} for old_string in file: ${params.file_path}`,
|
||||
display: `Failed to edit because the text matches multiple locations. Provide more context or set replace_all to true.`,
|
||||
raw: `Failed to edit. Found ${occurrences} occurrences for old_string in ${params.file_path} but replace_all was not enabled.`,
|
||||
type: ToolErrorType.EDIT_EXPECTED_OCCURRENCE_MISMATCH,
|
||||
};
|
||||
} else if (finalOldString === finalNewString) {
|
||||
@@ -221,22 +237,6 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Counts occurrences of a substring in a string
|
||||
*/
|
||||
private countOccurrences(str: string, substr: string): number {
|
||||
if (substr === '') {
|
||||
return 0;
|
||||
}
|
||||
let count = 0;
|
||||
let pos = str.indexOf(substr);
|
||||
while (pos !== -1) {
|
||||
count++;
|
||||
pos = str.indexOf(substr, pos + substr.length); // Start search after the current match
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handles the confirmation prompt for the Edit tool in the CLI.
|
||||
* It needs to calculate the diff to show the user.
|
||||
@@ -422,12 +422,16 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
|
||||
const llmSuccessMessageParts = [
|
||||
editData.isNewFile
|
||||
? `Created new file: ${this.params.file_path} with provided content.`
|
||||
: `Successfully modified file: ${this.params.file_path} (${editData.occurrences} replacements).`,
|
||||
: `The file: ${this.params.file_path} has been updated.`,
|
||||
];
|
||||
if (this.params.modified_by_user) {
|
||||
llmSuccessMessageParts.push(
|
||||
`User modified the \`new_string\` content to be: ${this.params.new_string}.`,
|
||||
);
|
||||
|
||||
const snippetResult = extractEditSnippet(
|
||||
editData.currentContent,
|
||||
editData.newContent,
|
||||
);
|
||||
if (snippetResult) {
|
||||
const snippetText = `Showing lines ${snippetResult.startLine}-${snippetResult.endLine} of ${snippetResult.totalLines} from the edited file:\n\n---\n\n${snippetResult.content}`;
|
||||
llmSuccessMessageParts.push(snippetText);
|
||||
}
|
||||
|
||||
return {
|
||||
@@ -469,8 +473,8 @@ export class EditTool
|
||||
constructor(private readonly config: Config) {
|
||||
super(
|
||||
EditTool.Name,
|
||||
'Edit',
|
||||
`Replaces text within a file. By default, replaces a single occurrence, but can replace multiple occurrences when \`expected_replacements\` is specified. This tool requires providing significant context around the change to ensure precise targeting. Always use the ${ReadFileTool.Name} tool to examine the file's current content before attempting a text replacement.
|
||||
ToolDisplayNames.EDIT,
|
||||
`Replaces text within a file. By default, replaces a single occurrence. Set \`replace_all\` to true when you intend to modify every instance of \`old_string\`. This tool requires providing significant context around the change to ensure precise targeting. Always use the ${ReadFileTool.Name} tool to examine the file's current content before attempting a text replacement.
|
||||
|
||||
The user has the ability to modify the \`new_string\` content. If modified, this will be stated in the response.
|
||||
|
||||
@@ -480,7 +484,7 @@ Expectation for required parameters:
|
||||
3. \`new_string\` MUST be the exact literal text to replace \`old_string\` with (also including all whitespace, indentation, newlines, and surrounding code etc.). Ensure the resulting code is correct and idiomatic.
|
||||
4. NEVER escape \`old_string\` or \`new_string\`, that would break the exact literal text requirement.
|
||||
**Important:** If ANY of the above are not satisfied, the tool will fail. CRITICAL for \`old_string\`: Must uniquely identify the single instance to change. Include at least 3 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string matches multiple locations, or does not match exactly, the tool will fail.
|
||||
**Multiple replacements:** Set \`expected_replacements\` to the number of occurrences you want to replace. The tool will replace ALL occurrences that match \`old_string\` exactly. Ensure the number of replacements matches your expectation.`,
|
||||
**Multiple replacements:** Set \`replace_all\` to true when you want to replace every occurrence that matches \`old_string\`.`,
|
||||
Kind.Edit,
|
||||
{
|
||||
properties: {
|
||||
@@ -491,7 +495,7 @@ Expectation for required parameters:
|
||||
},
|
||||
old_string: {
|
||||
description:
|
||||
'The exact literal text to replace, preferably unescaped. For single replacements (default), include at least 3 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. For multiple replacements, specify expected_replacements parameter. If this string is not the exact literal text (i.e. you escaped it) or does not match exactly, the tool will fail.',
|
||||
'The exact literal text to replace, preferably unescaped. For single replacements (default), include at least 3 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string is not the exact literal text (i.e. you escaped it) or does not match exactly, the tool will fail.',
|
||||
type: 'string',
|
||||
},
|
||||
new_string: {
|
||||
@@ -499,11 +503,10 @@ Expectation for required parameters:
|
||||
'The exact literal text to replace `old_string` with, preferably unescaped. Provide the EXACT text. Ensure the resulting code is correct and idiomatic.',
|
||||
type: 'string',
|
||||
},
|
||||
expected_replacements: {
|
||||
type: 'number',
|
||||
replace_all: {
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Number of replacements expected. Defaults to 1 if not specified. Use when you want to replace multiple occurrences.',
|
||||
minimum: 1,
|
||||
'Replace all occurrences of old_string (default false).',
|
||||
},
|
||||
},
|
||||
required: ['file_path', 'old_string', 'new_string'],
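With the schema change above, a call that previously passed `expected_replacements` now sets `replace_all`; an illustrative parameter object (the file path and strings are made up) looks like:

```typescript
// Illustrative EditToolParams for a bulk rename; values are examples only.
const params: EditToolParams = {
  file_path: '/workspace/src/app.ts',
  old_string: 'oldName',
  new_string: 'newName',
  replace_all: true, // replace every occurrence instead of requiring a unique match
};
```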
|
||||
|
||||
@@ -14,6 +14,7 @@ import {
|
||||
import type { FunctionDeclaration } from '@google/genai';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { ApprovalMode } from '../config/config.js';
|
||||
import { ToolDisplayNames, ToolNames } from './tool-names.js';
|
||||
|
||||
export interface ExitPlanModeParams {
|
||||
plan: string;
|
||||
@@ -152,12 +153,12 @@ export class ExitPlanModeTool extends BaseDeclarativeTool<
|
||||
ExitPlanModeParams,
|
||||
ToolResult
|
||||
> {
|
||||
static readonly Name: string = exitPlanModeToolSchemaData.name!;
|
||||
static readonly Name: string = ToolNames.EXIT_PLAN_MODE;
|
||||
|
||||
constructor(private readonly config: Config) {
|
||||
super(
|
||||
ExitPlanModeTool.Name,
|
||||
'ExitPlanMode',
|
||||
ToolDisplayNames.EXIT_PLAN_MODE,
|
||||
exitPlanModeToolDescription,
|
||||
Kind.Think,
|
||||
exitPlanModeToolSchemaData.parametersJsonSchema as Record<
|
||||
|
||||
@@ -9,7 +9,7 @@ import path from 'node:path';
|
||||
import { glob, escape } from 'glob';
|
||||
import type { ToolInvocation, ToolResult } from './tools.js';
|
||||
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
import { resolveAndValidatePath } from '../utils/paths.js';
|
||||
import { type Config } from '../config/config.js';
|
||||
import {
|
||||
@@ -229,7 +229,7 @@ export class GlobTool extends BaseDeclarativeTool<GlobToolParams, ToolResult> {
|
||||
constructor(private config: Config) {
|
||||
super(
|
||||
GlobTool.Name,
|
||||
'FindFiles',
|
||||
ToolDisplayNames.GLOB,
|
||||
'Fast file pattern matching tool that works with any codebase size\n- Supports glob patterns like "**/*.js" or "src/**/*.ts"\n- Returns matching file paths sorted by modification time\n- Use this tool when you need to find files by name patterns\n- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Agent tool instead\n- You have the capability to call multiple tools in a single response. It is always better to speculatively perform multiple searches as a batch that are potentially useful.',
|
||||
Kind.Search,
|
||||
{
|
||||
|
||||
@@ -11,7 +11,7 @@ import { spawn } from 'node:child_process';
|
||||
import { globStream } from 'glob';
|
||||
import type { ToolInvocation, ToolResult } from './tools.js';
|
||||
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
import { resolveAndValidatePath } from '../utils/paths.js';
|
||||
import { getErrorMessage, isNodeError } from '../utils/errors.js';
|
||||
import { isGitRepository } from '../utils/gitUtils.js';
|
||||
@@ -522,7 +522,7 @@ export class GrepTool extends BaseDeclarativeTool<GrepToolParams, ToolResult> {
|
||||
constructor(private readonly config: Config) {
|
||||
super(
|
||||
GrepTool.Name,
|
||||
'Grep',
|
||||
ToolDisplayNames.GREP,
|
||||
'A powerful search tool for finding patterns in files\n\n Usage:\n - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command. The Grep tool has been optimized for correct permissions and access.\n - Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")\n - Filter files with glob parameter (e.g., "*.js", "**/*.tsx")\n - Case-insensitive by default\n - Use Task tool for open-ended searches requiring multiple rounds\n',
|
||||
Kind.Search,
|
||||
{
|
||||
|
||||
@@ -12,6 +12,7 @@ import { makeRelative, shortenPath } from '../utils/paths.js';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';
|
||||
import { ToolErrorType } from './tool-error.js';
|
||||
import { ToolDisplayNames, ToolNames } from './tool-names.js';
|
||||
|
||||
/**
|
||||
* Parameters for the LS tool
|
||||
@@ -252,12 +253,12 @@ class LSToolInvocation extends BaseToolInvocation<LSToolParams, ToolResult> {
|
||||
* Implementation of the LS tool logic
|
||||
*/
|
||||
export class LSTool extends BaseDeclarativeTool<LSToolParams, ToolResult> {
|
||||
static readonly Name = 'list_directory';
|
||||
static readonly Name = ToolNames.LS;
|
||||
|
||||
constructor(private config: Config) {
|
||||
super(
|
||||
LSTool.Name,
|
||||
'ReadFolder',
|
||||
ToolDisplayNames.LS,
|
||||
'Lists the names of files and subdirectories directly within a specified directory path. Can optionally ignore entries matching provided glob patterns.',
|
||||
Kind.Search,
|
||||
{
|
||||
|
||||
@@ -18,6 +18,7 @@ import { Storage } from '../config/storage.js';
|
||||
import * as Diff from 'diff';
|
||||
import { DEFAULT_DIFF_OPTIONS } from './diffOptions.js';
|
||||
import { tildeifyPath } from '../utils/paths.js';
|
||||
import { ToolDisplayNames, ToolNames } from './tool-names.js';
|
||||
import type {
|
||||
ModifiableDeclarativeTool,
|
||||
ModifyContext,
|
||||
@@ -380,11 +381,11 @@ export class MemoryTool
|
||||
extends BaseDeclarativeTool<SaveMemoryParams, ToolResult>
|
||||
implements ModifiableDeclarativeTool<SaveMemoryParams>
|
||||
{
|
||||
static readonly Name: string = memoryToolSchemaData.name!;
|
||||
static readonly Name: string = ToolNames.MEMORY;
|
||||
constructor() {
|
||||
super(
|
||||
MemoryTool.Name,
|
||||
'SaveMemory',
|
||||
ToolDisplayNames.MEMORY,
|
||||
memoryToolDescription,
|
||||
Kind.Think,
|
||||
memoryToolSchemaData.parametersJsonSchema as Record<string, unknown>,
|
||||
|
||||
@@ -8,7 +8,7 @@ import path from 'node:path';
|
||||
import { makeRelative, shortenPath } from '../utils/paths.js';
|
||||
import type { ToolInvocation, ToolLocation, ToolResult } from './tools.js';
|
||||
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
|
||||
import type { PartUnion } from '@google/genai';
|
||||
import {
|
||||
@@ -131,7 +131,7 @@ export class ReadFileTool extends BaseDeclarativeTool<
|
||||
constructor(private config: Config) {
|
||||
super(
|
||||
ReadFileTool.Name,
|
||||
'ReadFile',
|
||||
ToolDisplayNames.READ_FILE,
|
||||
`Reads and returns the content of a specified file. If the file is large, the content will be truncated. The tool's response will clearly indicate if truncation has occurred and will provide details on how to read more of the file using the 'offset' and 'limit' parameters. Handles text, images (PNG, JPG, GIF, WEBP, SVG, BMP), and PDF files. For text files, it can read specific line ranges.`,
|
||||
Kind.Read,
|
||||
{
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
import type { ToolInvocation, ToolResult } from './tools.js';
|
||||
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
import { getErrorMessage } from '../utils/errors.js';
|
||||
import * as fs from 'node:fs';
|
||||
import * as path from 'node:path';
|
||||
@@ -554,7 +554,7 @@ export class ReadManyFilesTool extends BaseDeclarativeTool<
|
||||
|
||||
super(
|
||||
ReadManyFilesTool.Name,
|
||||
'ReadManyFiles',
|
||||
ToolDisplayNames.READ_MANY_FILES,
|
||||
`Reads content from multiple files specified by paths or glob patterns within a configured target directory. For text files, it concatenates their content into a single string. It is primarily designed for text-based files. However, it can also process image (e.g., .png, .jpg) and PDF (.pdf) files if their file names or extensions are explicitly included in the 'paths' argument. For these explicitly requested non-text files, their data is read and included in a format suitable for model consumption (e.g., base64 encoded).
|
||||
|
||||
This tool is useful when you need to understand or analyze a collection of files, such as:
|
||||
|
||||
@@ -9,7 +9,7 @@ import path from 'node:path';
|
||||
import os, { EOL } from 'node:os';
|
||||
import crypto from 'node:crypto';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
import { ToolErrorType } from './tool-error.js';
|
||||
import type {
|
||||
ToolInvocation,
|
||||
@@ -429,7 +429,7 @@ export class ShellTool extends BaseDeclarativeTool<
|
||||
constructor(private readonly config: Config) {
|
||||
super(
|
||||
ShellTool.Name,
|
||||
'Shell',
|
||||
ToolDisplayNames.SHELL,
|
||||
getShellToolDescription(),
|
||||
Kind.Execute,
|
||||
{
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
*/
|
||||
|
||||
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
import type {
|
||||
ToolResult,
|
||||
ToolResultDisplay,
|
||||
@@ -77,7 +77,7 @@ export class TaskTool extends BaseDeclarativeTool<TaskParams, ToolResult> {
|
||||
|
||||
super(
|
||||
TaskTool.Name,
|
||||
'Task',
|
||||
ToolDisplayNames.TASK,
|
||||
'Delegate tasks to specialized subagents. Loading available subagents...', // Initial description
|
||||
Kind.Other,
|
||||
initialSchema,
|
||||
|
||||
@@ -14,6 +14,7 @@ import * as process from 'process';
|
||||
|
||||
import { QWEN_DIR } from '../utils/paths.js';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { ToolDisplayNames, ToolNames } from './tool-names.js';
|
||||
|
||||
export interface TodoItem {
|
||||
id: string;
|
||||
@@ -422,12 +423,12 @@ export class TodoWriteTool extends BaseDeclarativeTool<
|
||||
TodoWriteParams,
|
||||
ToolResult
|
||||
> {
|
||||
static readonly Name: string = todoWriteToolSchemaData.name!;
|
||||
static readonly Name: string = ToolNames.TODO_WRITE;
|
||||
|
||||
constructor(private readonly config: Config) {
|
||||
super(
|
||||
TodoWriteTool.Name,
|
||||
'TodoWrite',
|
||||
ToolDisplayNames.TODO_WRITE,
|
||||
todoWriteToolDescription,
|
||||
Kind.Think,
|
||||
todoWriteToolSchemaData.parametersJsonSchema as Record<string, unknown>,
|
||||
|
||||
@@ -23,4 +23,43 @@ export const ToolNames = {
|
||||
EXIT_PLAN_MODE: 'exit_plan_mode',
|
||||
WEB_FETCH: 'web_fetch',
|
||||
WEB_SEARCH: 'web_search',
|
||||
LS: 'list_directory',
|
||||
} as const;
|
||||
|
||||
/**
|
||||
* Tool display name constants to avoid circular dependencies.
|
||||
* These constants are used across multiple files and should be kept in sync
|
||||
* with the actual tool display names.
|
||||
*/
|
||||
export const ToolDisplayNames = {
|
||||
EDIT: 'Edit',
|
||||
WRITE_FILE: 'WriteFile',
|
||||
READ_FILE: 'ReadFile',
|
||||
READ_MANY_FILES: 'ReadManyFiles',
|
||||
GREP: 'Grep',
|
||||
GLOB: 'Glob',
|
||||
SHELL: 'Shell',
|
||||
TODO_WRITE: 'TodoWrite',
|
||||
MEMORY: 'SaveMemory',
|
||||
TASK: 'Task',
|
||||
EXIT_PLAN_MODE: 'ExitPlanMode',
|
||||
WEB_FETCH: 'WebFetch',
|
||||
WEB_SEARCH: 'WebSearch',
|
||||
LS: 'ListFiles',
|
||||
} as const;
|
||||
|
||||
// Migration from old tool names to new tool names
|
||||
// These legacy tool names were used in earlier versions and need to be supported
|
||||
// for backward compatibility with existing user configurations
|
||||
export const ToolNamesMigration = {
|
||||
search_file_content: ToolNames.GREP, // Legacy name from grep tool
|
||||
replace: ToolNames.EDIT, // Legacy name from edit tool
|
||||
} as const;
|
||||
|
||||
// Migration from old tool display names to new tool display names
|
||||
// These legacy display names were used before the tool naming standardization
|
||||
export const ToolDisplayNamesMigration = {
|
||||
SearchFiles: ToolDisplayNames.GREP, // Old display name for Grep
|
||||
FindFiles: ToolDisplayNames.GLOB, // Old display name for Glob
|
||||
ReadFolder: ToolDisplayNames.LS, // Old display name for ListFiles
|
||||
} as const;
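As a rough sketch of how these migration tables might be consulted when resolving a user-configured identifier (the helper itself is illustrative, not part of this commit):

```typescript
// Illustrative helper; the real lookups happen in config.ts and the subagent manager.
function resolveConfiguredToolName(identifier: string): string {
  if (identifier in ToolNamesMigration) {
    return ToolNamesMigration[identifier as keyof typeof ToolNamesMigration];
  }
  if (identifier in ToolDisplayNamesMigration) {
    return ToolDisplayNamesMigration[
      identifier as keyof typeof ToolDisplayNamesMigration
    ];
  }
  return identifier; // already a current tool or display name
}
```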
|
||||
|
||||
@@ -23,7 +23,7 @@ import {
|
||||
ToolConfirmationOutcome,
|
||||
} from './tools.js';
|
||||
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
|
||||
const URL_FETCH_TIMEOUT_MS = 10000;
|
||||
const MAX_CONTENT_LENGTH = 100000;
|
||||
@@ -196,7 +196,7 @@ export class WebFetchTool extends BaseDeclarativeTool<
|
||||
constructor(private readonly config: Config) {
|
||||
super(
|
||||
WebFetchTool.Name,
|
||||
'WebFetch',
|
||||
ToolDisplayNames.WEB_FETCH,
|
||||
'Fetches content from a specified URL and processes it using an AI model\n- Takes a URL and a prompt as input\n- Fetches the URL content, converts HTML to markdown\n- Processes the content with the prompt using a small, fast model\n- Returns the model\'s response about the content\n- Use this tool when you need to retrieve and analyze web content\n\nUsage notes:\n - IMPORTANT: If an MCP-provided web fetch tool is available, prefer using that tool instead of this one, as it may have fewer restrictions. All MCP-provided tools start with "mcp__".\n - The URL must be a fully-formed valid URL\n - The prompt should describe what information you want to extract from the page\n - This tool is read-only and does not modify any files\n - Results may be summarized if the content is very large\n - Supports both public and private/localhost URLs using direct fetch',
|
||||
Kind.Fetch,
|
||||
{
|
||||
|
||||
@@ -30,7 +30,7 @@ import type {
|
||||
WebSearchProviderConfig,
|
||||
DashScopeProviderConfig,
|
||||
} from './types.js';
|
||||
import { ToolNames } from '../tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from '../tool-names.js';
|
||||
|
||||
class WebSearchToolInvocation extends BaseToolInvocation<
|
||||
WebSearchToolParams,
|
||||
@@ -280,7 +280,7 @@ export class WebSearchTool extends BaseDeclarativeTool<
|
||||
constructor(private readonly config: Config) {
|
||||
super(
|
||||
WebSearchTool.Name,
|
||||
'WebSearch',
|
||||
ToolDisplayNames.WEB_SEARCH,
|
||||
'Allows searching the web and using results to inform responses. Provides up-to-date information for current events and recent data beyond the training data cutoff. Returns search results formatted with concise answers and source links. Use this tool when accessing information that may be outdated or beyond the knowledge cutoff.',
|
||||
Kind.Search,
|
||||
{
|
||||
|
||||
@@ -27,7 +27,7 @@ import { ToolErrorType } from './tool-error.js';
|
||||
import { makeRelative, shortenPath } from '../utils/paths.js';
|
||||
import { getErrorMessage, isNodeError } from '../utils/errors.js';
|
||||
import { DEFAULT_DIFF_OPTIONS, getDiffStat } from './diffOptions.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
import type {
|
||||
ModifiableDeclarativeTool,
|
||||
ModifyContext,
|
||||
@@ -361,7 +361,7 @@ export class WriteFileTool
|
||||
constructor(private readonly config: Config) {
|
||||
super(
|
||||
WriteFileTool.Name,
|
||||
'WriteFile',
|
||||
ToolDisplayNames.WRITE_FILE,
|
||||
`Writes content to a specified file in the local filesystem.
|
||||
|
||||
The user has the ability to modify \`content\`. If modified, this will be stated in the response.`,
|
||||
|
||||
packages/core/src/utils/editHelper.test.ts (new file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, expect, it } from 'vitest';
|
||||
import {
|
||||
countOccurrences,
|
||||
maybeAugmentOldStringForDeletion,
|
||||
normalizeEditStrings,
|
||||
} from './editHelper.js';
|
||||
|
||||
describe('normalizeEditStrings', () => {
|
||||
const file = `const one = 1;
|
||||
const two = 2;
|
||||
`;
|
||||
|
||||
it('returns literal matches unchanged and trims new_string trailing whitespace', () => {
|
||||
const result = normalizeEditStrings(
|
||||
file,
|
||||
'const two = 2;',
|
||||
' const two = 42; ',
|
||||
);
|
||||
expect(result).toEqual({
|
||||
oldString: 'const two = 2;',
|
||||
newString: ' const two = 42;',
|
||||
});
|
||||
});
|
||||
|
||||
it('normalizes smart quotes to match on-disk text', () => {
|
||||
const result = normalizeEditStrings(
|
||||
"const greeting = 'Don't';\n",
|
||||
'const greeting = ‘Don’t’;',
|
||||
'const greeting = “Hello”; ',
|
||||
);
|
||||
expect(result).toEqual({
|
||||
oldString: "const greeting = 'Don't';",
|
||||
newString: 'const greeting = “Hello”;',
|
||||
});
|
||||
});
|
||||
|
||||
it('falls back to original strings when no match is found', () => {
|
||||
const result = normalizeEditStrings(file, 'missing text', 'replacement');
|
||||
expect(result).toEqual({
|
||||
oldString: 'missing text',
|
||||
newString: 'replacement',
|
||||
});
|
||||
});
|
||||
|
||||
it('still trims new_string when editing a brand-new file', () => {
|
||||
const result = normalizeEditStrings(null, '', 'new file contents ');
|
||||
expect(result).toEqual({
|
||||
oldString: '',
|
||||
newString: 'new file contents',
|
||||
});
|
||||
});
|
||||
|
||||
it('matches unicode dash variants', () => {
|
||||
const result = normalizeEditStrings(
|
||||
'const range = "1-2";\n',
|
||||
'const range = "1\u20132";',
|
||||
'const range = "3\u20135"; ',
|
||||
);
|
||||
expect(result).toEqual({
|
||||
oldString: 'const range = "1-2";',
|
||||
newString: 'const range = "3\u20135";',
|
||||
});
|
||||
});
|
||||
|
||||
it('matches when trailing whitespace differs only at line ends', () => {
|
||||
const result = normalizeEditStrings(
|
||||
'value = 1;\n',
|
||||
'value = 1; \n',
|
||||
'value = 2; \n',
|
||||
);
|
||||
expect(result).toEqual({
|
||||
oldString: 'value = 1;\n',
|
||||
newString: 'value = 2;\n',
|
||||
});
|
||||
});
|
||||
|
||||
it('treats non-breaking spaces as regular spaces', () => {
|
||||
const result = normalizeEditStrings(
|
||||
'const label = "hello world";\n',
|
||||
'const label = "hello\u00a0world";',
|
||||
'const label = "hi\u00a0world";',
|
||||
);
|
||||
expect(result).toEqual({
|
||||
oldString: 'const label = "hello world";',
|
||||
newString: 'const label = "hi\u00a0world";',
|
||||
});
|
||||
});
|
||||
|
||||
it('drops trailing newline from new content when the file lacks it', () => {
|
||||
const result = normalizeEditStrings(
|
||||
'console.log("hi")',
|
||||
'console.log("hi")\n',
|
||||
'console.log("bye")\n',
|
||||
);
|
||||
expect(result).toEqual({
|
||||
oldString: 'console.log("hi")',
|
||||
newString: 'console.log("bye")',
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('countOccurrences', () => {
|
||||
it('returns zero when substring empty or missing', () => {
|
||||
expect(countOccurrences('abc', '')).toBe(0);
|
||||
expect(countOccurrences('abc', 'z')).toBe(0);
|
||||
});
|
||||
|
||||
it('counts non-overlapping occurrences', () => {
|
||||
expect(countOccurrences('aaaa', 'aa')).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('maybeAugmentOldStringForDeletion', () => {
|
||||
const file = 'console.log("hi")\nconsole.log("bye")\n';
|
||||
|
||||
it('appends newline when deleting text followed by newline', () => {
|
||||
expect(
|
||||
maybeAugmentOldStringForDeletion(file, 'console.log("hi")', ''),
|
||||
).toBe('console.log("hi")\n');
|
||||
});
|
||||
|
||||
it('leaves strings untouched when not deleting', () => {
|
||||
expect(
|
||||
maybeAugmentOldStringForDeletion(
|
||||
file,
|
||||
'console.log("hi")',
|
||||
'replacement',
|
||||
),
|
||||
).toBe('console.log("hi")');
|
||||
});
|
||||
|
||||
it('does not append newline when file lacks the variant', () => {
|
||||
expect(
|
||||
maybeAugmentOldStringForDeletion(
|
||||
'console.log("hi")',
|
||||
'console.log("hi")',
|
||||
'',
|
||||
),
|
||||
).toBe('console.log("hi")');
|
||||
});
|
||||
|
||||
it('no-ops when the old string already ends with a newline', () => {
|
||||
expect(
|
||||
maybeAugmentOldStringForDeletion(file, 'console.log("bye")\n', ''),
|
||||
).toBe('console.log("bye")\n');
|
||||
});
|
||||
});
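The implementation of `maybeAugmentOldStringForDeletion` is further down in editHelper.ts and is not included in this excerpt; a minimal sketch consistent with the tests above would be:

```typescript
// Hypothetical sketch matching the test expectations; the real helper may differ.
export function maybeAugmentOldStringForDeletion(
  fileContent: string,
  oldString: string,
  newString: string,
): string {
  // Only relevant for pure deletions that do not already consume a newline.
  if (newString !== '' || oldString.endsWith('\n')) {
    return oldString;
  }
  // Prefer deleting the trailing newline as well, when the file contains it.
  if (fileContent.includes(oldString + '\n')) {
    return oldString + '\n';
  }
  return oldString;
}
```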
|
||||
packages/core/src/utils/editHelper.ts (new file, 499 lines)
@@ -0,0 +1,499 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * Helpers for reconciling LLM-proposed edits with on-disk text.
 *
 * The normalization pipeline intentionally stays deterministic: we first try
 * literal substring matches, then gradually relax comparison rules (smart
 * quotes, em-dashes, trailing whitespace, etc.) until we either locate the
 * exact slice from the file or conclude the edit cannot be applied.
 */

/* -------------------------------------------------------------------------- */
/* Character-level normalization */
/* -------------------------------------------------------------------------- */

const UNICODE_EQUIVALENT_MAP: Record<string, string> = {
  // Hyphen variations → ASCII hyphen-minus.
  '\u2010': '-',
  '\u2011': '-',
  '\u2012': '-',
  '\u2013': '-',
  '\u2014': '-',
  '\u2015': '-',
  '\u2212': '-',
  // Curly single quotes → straight apostrophe.
  '\u2018': "'",
  '\u2019': "'",
  '\u201A': "'",
  '\u201B': "'",
  // Curly double quotes → straight double quote.
  '\u201C': '"',
  '\u201D': '"',
  '\u201E': '"',
  '\u201F': '"',
  // Whitespace variants → normal space.
  '\u00A0': ' ',
  '\u2002': ' ',
  '\u2003': ' ',
  '\u2004': ' ',
  '\u2005': ' ',
  '\u2006': ' ',
  '\u2007': ' ',
  '\u2008': ' ',
  '\u2009': ' ',
  '\u200A': ' ',
  '\u202F': ' ',
  '\u205F': ' ',
  '\u3000': ' ',
};

function normalizeBasicCharacters(text: string): string {
  if (text === '') {
    return text;
  }

  let normalized = '';
  for (const char of text) {
    normalized += UNICODE_EQUIVALENT_MAP[char] ?? char;
  }
  return normalized;
}
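
A small illustration of the map above (hedged: normalizeBasicCharacters is module-private, so this is a hypothetical call rather than code from the commit):

// Curly quotes, the en dash, and the non-breaking space collapse to ASCII:
normalizeBasicCharacters('\u201Chi\u201D \u2013 there\u00A0!');
// => '"hi" - there !'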

/**
 * Removes trailing whitespace from each line while keeping the original newline
 * separators intact.
 */
function stripTrailingWhitespacePreserveNewlines(text: string): string {
  const pieces = text.split(/(\r\n|\n|\r)/);
  let result = '';

  for (let i = 0; i < pieces.length; i++) {
    const segment = pieces[i];
    if (segment === undefined) {
      continue;
    }

    if (i % 2 === 0) {
      result += segment.trimEnd();
    } else {
      result += segment;
    }
  }

  return result;
}
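
For example (again hypothetical, since the helper is not exported), trailing spaces and tabs are dropped per line while the CRLF/LF separators survive:

stripTrailingWhitespacePreserveNewlines('a;  \r\nb;\t\nc');
// => 'a;\r\nb;\nc'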

/* -------------------------------------------------------------------------- */
/* Line-based search helpers */
/* -------------------------------------------------------------------------- */

interface MatchedSliceResult {
  slice: string;
  removedTrailingFinalEmptyLine: boolean;
}

/**
 * Comparison passes become progressively more forgiving, making it possible to
 * match when only trailing whitespace differs. Leading whitespace (indentation)
 * is always preserved to avoid matching at incorrect scope levels.
 */
const LINE_COMPARISON_PASSES: Array<(value: string) => string> = [
  (value) => value,
  (value) => value.trimEnd(),
];

function normalizeLineForComparison(value: string): string {
  return normalizeBasicCharacters(value).trimEnd();
}

/**
 * Finds the first index where {@link pattern} appears within {@link lines} once
 * both sequences are transformed in the same way.
 */
function seekSequenceWithTransform(
  lines: string[],
  pattern: string[],
  transform: (value: string) => string,
): number | null {
  if (pattern.length === 0) {
    return 0;
  }

  if (pattern.length > lines.length) {
    return null;
  }

  outer: for (let i = 0; i <= lines.length - pattern.length; i++) {
    for (let p = 0; p < pattern.length; p++) {
      if (transform(lines[i + p]) !== transform(pattern[p])) {
        continue outer;
      }
    }
    return i;
  }

  return null;
}
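
A sketch of how the transform parameter drives matching (hypothetical call):

// The identity pass would fail on the trailing spaces, but trimming line ends matches at index 0:
seekSequenceWithTransform(['foo();  ', 'bar();'], ['foo();'], (line) => line.trimEnd());
// => 0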

function buildLineIndex(text: string): {
  lines: string[];
  offsets: number[];
} {
  const lines = text.split('\n');
  const offsets = new Array<number>(lines.length + 1);
  let cursor = 0;

  for (let i = 0; i < lines.length; i++) {
    offsets[i] = cursor;
    cursor += lines[i].length;
    if (i < lines.length - 1) {
      cursor += 1; // Account for the newline that split() removed.
    }
  }
  offsets[lines.length] = text.length;

  return { lines, offsets };
}
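
The offsets array records where each line starts in the original string, plus a final sentinel equal to the text length (hypothetical call):

buildLineIndex('ab\ncd');
// => { lines: ['ab', 'cd'], offsets: [0, 3, 5] }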

/**
 * Reconstructs the original characters for the matched lines, optionally
 * preserving the newline that follows the final line.
 */
function sliceFromLines(
  text: string,
  offsets: number[],
  lines: string[],
  startLine: number,
  lineCount: number,
  includeTrailingNewline: boolean,
): string {
  if (lineCount === 0) {
    return includeTrailingNewline ? '\n' : '';
  }

  const startIndex = offsets[startLine] ?? 0;
  const lastLineIndex = startLine + lineCount - 1;
  const lastLineStart = offsets[lastLineIndex] ?? 0;
  let endIndex = lastLineStart + (lines[lastLineIndex]?.length ?? 0);

  if (includeTrailingNewline) {
    const nextLineStart = offsets[startLine + lineCount];
    if (nextLineStart !== undefined) {
      endIndex = nextLineStart;
    } else if (text.endsWith('\n')) {
      endIndex = text.length;
    }
  }

  return text.slice(startIndex, endIndex);
}

function findLineBasedMatch(
  haystack: string,
  needle: string,
): MatchedSliceResult | null {
  const { lines, offsets } = buildLineIndex(haystack);
  const patternLines = needle.split('\n');
  const endsWithNewline = needle.endsWith('\n');

  if (patternLines.length === 0) {
    return null;
  }

  const attemptMatch = (candidate: string[]): number | null => {
    for (const pass of LINE_COMPARISON_PASSES) {
      const idx = seekSequenceWithTransform(lines, candidate, pass);
      if (idx !== null) {
        return idx;
      }
    }
    return seekSequenceWithTransform(
      lines,
      candidate,
      normalizeLineForComparison,
    );
  };

  let matchIndex = attemptMatch(patternLines);
  if (matchIndex !== null) {
    return {
      slice: sliceFromLines(
        haystack,
        offsets,
        lines,
        matchIndex,
        patternLines.length,
        endsWithNewline,
      ),
      removedTrailingFinalEmptyLine: false,
    };
  }

  if (patternLines.at(-1) === '') {
    const trimmedPattern = patternLines.slice(0, -1);
    if (trimmedPattern.length === 0) {
      return null;
    }
    matchIndex = attemptMatch(trimmedPattern);
    if (matchIndex !== null) {
      return {
        slice: sliceFromLines(
          haystack,
          offsets,
          lines,
          matchIndex,
          trimmedPattern.length,
          false,
        ),
        removedTrailingFinalEmptyLine: true,
      };
    }
  }

  return null;
}

/* -------------------------------------------------------------------------- */
/* Slice discovery */
/* -------------------------------------------------------------------------- */

/**
 * Returns the literal slice from {@link haystack} that best corresponds to the
 * provided {@link needle}, or {@code null} when no match is found.
 */
function findMatchedSlice(
  haystack: string,
  needle: string,
): MatchedSliceResult | null {
  if (needle === '') {
    return null;
  }

  const literalIndex = haystack.indexOf(needle);
  if (literalIndex !== -1) {
    return {
      slice: haystack.slice(literalIndex, literalIndex + needle.length),
      removedTrailingFinalEmptyLine: false,
    };
  }

  const normalizedHaystack = normalizeBasicCharacters(haystack);
  const normalizedNeedleChars = normalizeBasicCharacters(needle);
  const normalizedIndex = normalizedHaystack.indexOf(normalizedNeedleChars);
  if (normalizedIndex !== -1) {
    return {
      slice: haystack.slice(normalizedIndex, normalizedIndex + needle.length),
      removedTrailingFinalEmptyLine: false,
    };
  }

  return findLineBasedMatch(haystack, needle);
}
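
The escalation matters because the returned slice is always the literal on-disk text, never the needle. A hedged example with hypothetical values:

// The file uses a non-breaking space where the proposed oldString has a plain space;
// the character-normalized pass matches and the slice keeps the original \u00A0.
findMatchedSlice('const a = 1;\u00A0\nmore();\n', 'const a = 1; \n');
// => { slice: 'const a = 1;\u00A0\n', removedTrailingFinalEmptyLine: false }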

/* -------------------------------------------------------------------------- */
/* Replacement helpers */
/* -------------------------------------------------------------------------- */

function removeTrailingNewline(text: string): string {
  if (text.endsWith('\r\n')) {
    return text.slice(0, -2);
  }
  if (text.endsWith('\n') || text.endsWith('\r')) {
    return text.slice(0, -1);
  }
  return text;
}

function adjustNewStringForTrailingLine(
  newString: string,
  removedTrailingLine: boolean,
): string {
  return removedTrailingLine ? removeTrailingNewline(newString) : newString;
}

export interface NormalizedEditStrings {
  oldString: string;
  newString: string;
}

/**
 * Runs the core normalization pipeline:
 * 1. Strip trailing whitespace copied from numbered output.
 * 2. Attempt to find the literal text inside {@link fileContent}.
 * 3. If found through a relaxed match (smart quotes, line trims, etc.),
 *    return the canonical slice from disk so later replacements operate on
 *    exact bytes.
 */
export function normalizeEditStrings(
  fileContent: string | null,
  oldString: string,
  newString: string,
): NormalizedEditStrings {
  const trimmedNewString = stripTrailingWhitespacePreserveNewlines(newString);

  if (fileContent === null || oldString === '') {
    return {
      oldString,
      newString: trimmedNewString,
    };
  }

  const canonicalOriginal = findMatchedSlice(fileContent, oldString);
  if (canonicalOriginal !== null) {
    return {
      oldString: canonicalOriginal.slice,
      newString: adjustNewStringForTrailingLine(
        trimmedNewString,
        canonicalOriginal.removedTrailingFinalEmptyLine,
      ),
    };
  }

  return {
    oldString,
    newString: trimmedNewString,
  };
}
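
A usage sketch with hypothetical values, showing both halves of the contract: the returned oldString is the exact on-disk bytes, and the newString has copy-paste trailing whitespace stripped:

const fileContent = 'const label = \u201Chello\u201D;\n'; // the file uses curly quotes
const result = normalizeEditStrings(
  fileContent,
  'const label = "hello";\n',     // the model proposed straight quotes
  'const label = "goodbye";  \n', // and left trailing spaces behind
);
// result.oldString === 'const label = \u201Chello\u201D;\n'
// result.newString === 'const label = "goodbye";\n'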

/**
 * When deleting text and the on-disk content contains the same substring with a
 * trailing newline, automatically consume that newline so the removal does not
 * leave a blank line behind.
 */
export function maybeAugmentOldStringForDeletion(
  fileContent: string | null,
  oldString: string,
  newString: string,
): string {
  if (
    fileContent === null ||
    oldString === '' ||
    newString !== '' ||
    oldString.endsWith('\n')
  ) {
    return oldString;
  }

  const candidate = `${oldString}\n`;
  return fileContent.includes(candidate) ? candidate : oldString;
}

/**
 * Counts the number of non-overlapping occurrences of {@link substr} inside
 * {@link source}. Returns 0 when the substring is empty.
 */
export function countOccurrences(source: string, substr: string): number {
  if (substr === '') {
    return 0;
  }

  let count = 0;
  let index = source.indexOf(substr);
  while (index !== -1) {
    count++;
    index = source.indexOf(substr, index + substr.length);
  }
  return count;
}

/**
 * Result from extracting a snippet showing the edited region.
 */
export interface EditSnippetResult {
  /** Starting line number (1-indexed) of the snippet */
  startLine: number;
  /** Ending line number (1-indexed) of the snippet */
  endLine: number;
  /** Total number of lines in the new content */
  totalLines: number;
  /** The snippet content (subset of lines from newContent) */
  content: string;
}

const SNIPPET_CONTEXT_LINES = 4;
const SNIPPET_MAX_LINES = 1000;

/**
 * Extracts a snippet from the edited file showing the changed region with
 * surrounding context. This compares the old and new content line-by-line
 * from both ends to locate the changed region. The amount of surrounding
 * context is fixed by {@link SNIPPET_CONTEXT_LINES}.
 *
 * @param oldContent The original file content before the edit (null for new files)
 * @param newContent The new file content after the edit
 * @returns Snippet information, or null if no meaningful snippet can be extracted
 */
export function extractEditSnippet(
  oldContent: string | null,
  newContent: string,
): EditSnippetResult | null {
  const newLines = newContent.split('\n');
  const totalLines = newLines.length;

  if (oldContent === null) {
    return {
      startLine: 1,
      endLine: totalLines,
      totalLines,
      content: newContent,
    };
  }

  // No changes case
  if (oldContent === newContent || !newContent) {
    return null;
  }

  const oldLines = oldContent.split('\n');

  // Find the first line that differs from the start
  let firstDiffLine = 0;
  const minLength = Math.min(oldLines.length, newLines.length);

  while (firstDiffLine < minLength) {
    if (oldLines[firstDiffLine] !== newLines[firstDiffLine]) {
      break;
    }
    firstDiffLine++;
  }

  // Find the first line that differs from the end
  let oldEndIndex = oldLines.length - 1;
  let newEndIndex = newLines.length - 1;

  while (oldEndIndex >= firstDiffLine && newEndIndex >= firstDiffLine) {
    if (oldLines[oldEndIndex] !== newLines[newEndIndex]) {
      break;
    }
    oldEndIndex--;
    newEndIndex--;
  }

  // The changed region in the new content is from firstDiffLine to newEndIndex (inclusive)
  // Convert to 1-indexed line numbers
  const changeStart = firstDiffLine + 1;
  const changeEnd = newEndIndex + 1;

  // If the change region is too large, don't generate a snippet
  if (changeEnd - changeStart > SNIPPET_MAX_LINES) {
    return null;
  }

  // Calculate snippet bounds with context
  const snippetStart = Math.max(1, changeStart - SNIPPET_CONTEXT_LINES);
  const snippetEnd = Math.min(totalLines, changeEnd + SNIPPET_CONTEXT_LINES);

  const snippetLines = newLines.slice(snippetStart - 1, snippetEnd);

  return {
    startLine: snippetStart,
    endLine: snippetEnd,
    totalLines,
    content: snippetLines.join('\n'),
  };
}
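
A hedged usage sketch (hypothetical contents) showing how the two-ended scan isolates the changed line and pads it with up to SNIPPET_CONTEXT_LINES lines of context:

const before = ['line 1', 'line 2', 'line 3'].join('\n');
const after = ['line 1', 'line 2 (edited)', 'line 3'].join('\n');
extractEditSnippet(before, after);
// => { startLine: 1, endLine: 3, totalLines: 3, content: 'line 1\nline 2 (edited)\nline 3' }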

@@ -72,6 +72,7 @@ describe('editor utils', () => {
    { editor: 'neovim', commands: ['nvim'], win32Commands: ['nvim'] },
    { editor: 'zed', commands: ['zed', 'zeditor'], win32Commands: ['zed'] },
    { editor: 'emacs', commands: ['emacs'], win32Commands: ['emacs.exe'] },
    { editor: 'trae', commands: ['trae'], win32Commands: ['trae'] },
  ];

  for (const { editor, commands, win32Commands } of testCases) {
@@ -171,6 +172,7 @@ describe('editor utils', () => {
    },
    { editor: 'cursor', commands: ['cursor'], win32Commands: ['cursor'] },
    { editor: 'zed', commands: ['zed', 'zeditor'], win32Commands: ['zed'] },
    { editor: 'trae', commands: ['trae'], win32Commands: ['trae'] },
  ];

  for (const { editor, commands, win32Commands } of guiEditors) {
@@ -321,6 +323,7 @@ describe('editor utils', () => {
    'windsurf',
    'cursor',
    'zed',
    'trae',
  ];

  for (const editor of guiEditors) {
@@ -430,6 +433,7 @@ describe('editor utils', () => {
    'windsurf',
    'cursor',
    'zed',
    'trae',
  ];
  for (const editor of guiEditors) {
    it(`should not call onEditorClose for ${editor}`, async () => {
@@ -481,6 +485,7 @@ describe('editor utils', () => {
    'windsurf',
    'cursor',
    'zed',
    'trae',
  ];
  for (const editor of guiEditors) {
    it(`should not allow ${editor} in sandbox mode`, () => {

@@ -14,7 +14,8 @@ export type EditorType =
  | 'vim'
  | 'neovim'
  | 'zed'
  | 'emacs';
  | 'emacs'
  | 'trae';

function isValidEditorType(editor: string): editor is EditorType {
  return [
@@ -26,6 +27,7 @@ function isValidEditorType(editor: string): editor is EditorType {
    'neovim',
    'zed',
    'emacs',
    'trae',
  ].includes(editor);
}

@@ -62,6 +64,7 @@ const editorCommands: Record<
  neovim: { win32: ['nvim'], default: ['nvim'] },
  zed: { win32: ['zed'], default: ['zed', 'zeditor'] },
  emacs: { win32: ['emacs.exe'], default: ['emacs'] },
  trae: { win32: ['trae'], default: ['trae'] },
};

export function checkHasEditorType(editor: EditorType): boolean {
@@ -73,7 +76,9 @@ export function checkHasEditorType(editor: EditorType): boolean {

export function allowEditorTypeInSandbox(editor: EditorType): boolean {
  const notUsingSandbox = !process.env['SANDBOX'];
  if (['vscode', 'vscodium', 'windsurf', 'cursor', 'zed'].includes(editor)) {
  if (
    ['vscode', 'vscodium', 'windsurf', 'cursor', 'zed', 'trae'].includes(editor)
  ) {
    return notUsingSandbox;
  }
  // For terminal-based editors like vim and emacs, allow in sandbox.
@@ -115,6 +120,7 @@ export function getDiffCommand(
    case 'windsurf':
    case 'cursor':
    case 'zed':
    case 'trae':
      return { command, args: ['--wait', '--diff', oldPath, newPath] };
    case 'vim':
    case 'neovim':

@@ -5,9 +5,10 @@
 */

import { expect, describe, it } from 'vitest';
import { doesToolInvocationMatch } from './tool-utils.js';
import { doesToolInvocationMatch, isToolEnabled } from './tool-utils.js';
import type { AnyToolInvocation, Config } from '../index.js';
import { ReadFileTool } from '../tools/read-file.js';
import { ToolNames } from '../tools/tool-names.js';

describe('doesToolInvocationMatch', () => {
  it('should not match a partial command prefix', () => {
@@ -92,3 +93,67 @@ describe('doesToolInvocationMatch', () => {
    });
  });
});

describe('isToolEnabled', () => {
  it('enables tool when coreTools is undefined and tool is not excluded', () => {
    expect(isToolEnabled(ToolNames.SHELL, undefined, undefined)).toBe(true);
  });

  it('disables tool when excluded by canonical tool name', () => {
    expect(
      isToolEnabled(ToolNames.SHELL, undefined, ['run_shell_command']),
    ).toBe(false);
  });

  it('enables tool when explicitly listed by display name', () => {
    expect(isToolEnabled(ToolNames.SHELL, ['Shell'], undefined)).toBe(true);
  });

  it('enables tool when explicitly listed by class name', () => {
    expect(isToolEnabled(ToolNames.SHELL, ['ShellTool'], undefined)).toBe(true);
  });

  it('supports class names with leading underscores', () => {
    expect(isToolEnabled(ToolNames.SHELL, ['__ShellTool'], undefined)).toBe(
      true,
    );
  });

  it('enables tool when coreTools contains a legacy tool name alias', () => {
    expect(
      isToolEnabled(ToolNames.GREP, ['search_file_content'], undefined),
    ).toBe(true);
  });

  it('enables tool when coreTools contains a legacy display name alias', () => {
    expect(isToolEnabled(ToolNames.GLOB, ['FindFiles'], undefined)).toBe(true);
  });

  it('enables tool when coreTools contains an argument-specific pattern', () => {
    expect(
      isToolEnabled(ToolNames.SHELL, ['Shell(git status)'], undefined),
    ).toBe(true);
  });

  it('disables tool when not present in coreTools', () => {
    expect(isToolEnabled(ToolNames.SHELL, ['Edit'], undefined)).toBe(false);
  });

  it('uses legacy display name aliases when excluding tools', () => {
    expect(isToolEnabled(ToolNames.GREP, undefined, ['SearchFiles'])).toBe(
      false,
    );
  });

  it('does not treat argument-specific exclusions as matches', () => {
    expect(
      isToolEnabled(ToolNames.SHELL, undefined, ['Shell(git status)']),
    ).toBe(true);
  });

  it('considers excludeTools even when tool is explicitly enabled', () => {
    expect(isToolEnabled(ToolNames.SHELL, ['Shell'], ['ShellTool'])).toBe(
      false,
    );
  });
});

@@ -6,6 +6,111 @@

import type { AnyDeclarativeTool, AnyToolInvocation } from '../index.js';
import { isTool } from '../index.js';
import {
  ToolNames,
  ToolDisplayNames,
  ToolNamesMigration,
  ToolDisplayNamesMigration,
} from '../tools/tool-names.js';

export type ToolName = (typeof ToolNames)[keyof typeof ToolNames];

const normalizeIdentifier = (identifier: string): string =>
  identifier.trim().replace(/^_+/, '');

const toolNameKeys = Object.keys(ToolNames) as Array<keyof typeof ToolNames>;

const TOOL_ALIAS_MAP: Map<ToolName, Set<string>> = (() => {
  const map = new Map<ToolName, Set<string>>();

  const addAlias = (set: Set<string>, alias?: string) => {
    if (!alias) {
      return;
    }
    set.add(normalizeIdentifier(alias));
  };

  for (const key of toolNameKeys) {
    const canonicalName = ToolNames[key];
    const displayName = ToolDisplayNames[key];
    const aliases = new Set<string>();

    addAlias(aliases, canonicalName);
    addAlias(aliases, displayName);
    addAlias(aliases, `${displayName}Tool`);

    for (const [legacyName, mappedName] of Object.entries(ToolNamesMigration)) {
      if (mappedName === canonicalName) {
        addAlias(aliases, legacyName);
      }
    }

    for (const [legacyDisplay, mappedDisplay] of Object.entries(
      ToolDisplayNamesMigration,
    )) {
      if (mappedDisplay === displayName) {
        addAlias(aliases, legacyDisplay);
      }
    }

    map.set(canonicalName, aliases);
  }

  return map;
})();
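
Roughly, each canonical name ends up with its display name, a "<DisplayName>Tool" class-style alias, and any legacy names that migrate to it. For the shell tool the resulting set looks something like the sketch below (illustrative, inferred from the tests above rather than from the tool-names source):

// TOOL_ALIAS_MAP.get(ToolNames.SHELL)
// ≈ Set { 'run_shell_command', 'Shell', 'ShellTool', /* plus any legacy aliases */ }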

const getAliasSetForTool = (toolName: ToolName): Set<string> => {
  const aliases = TOOL_ALIAS_MAP.get(toolName);
  if (!aliases) {
    return new Set([normalizeIdentifier(toolName)]);
  }
  return aliases;
};

const sanitizeExactIdentifier = (value: string): string =>
  normalizeIdentifier(value);

const sanitizePatternIdentifier = (value: string): string => {
  const openParenIndex = value.indexOf('(');
  if (openParenIndex === -1) {
    return normalizeIdentifier(value);
  }
  return normalizeIdentifier(value.slice(0, openParenIndex));
};

const filterList = (list?: string[]): string[] =>
  (list ?? []).filter((entry): entry is string =>
    Boolean(entry && entry.trim()),
  );

export function isToolEnabled(
  toolName: ToolName,
  coreTools?: string[],
  excludeTools?: string[],
): boolean {
  const aliasSet = getAliasSetForTool(toolName);
  const matchesIdentifier = (value: string): boolean =>
    aliasSet.has(sanitizeExactIdentifier(value));
  const matchesIdentifierWithArgs = (value: string): boolean =>
    aliasSet.has(sanitizePatternIdentifier(value));

  const filteredCore = filterList(coreTools);
  const filteredExclude = filterList(excludeTools);

  if (filteredCore.length === 0) {
    return !filteredExclude.some((entry) => matchesIdentifier(entry));
  }

  const isExplicitlyEnabled = filteredCore.some(
    (entry) => matchesIdentifier(entry) || matchesIdentifierWithArgs(entry),
  );

  if (!isExplicitlyEnabled) {
    return false;
  }

  return !filteredExclude.some((entry) => matchesIdentifier(entry));
}

const SHELL_TOOL_NAMES = ['run_shell_command', 'ShellTool'];