From 82170e96c6be32dcced9545ee1552a648f4bd296 Mon Sep 17 00:00:00 2001 From: pomelo-nwu Date: Thu, 6 Nov 2025 10:42:52 +0800 Subject: [PATCH 1/8] refactor(cli): centralize system information collection --- .../cli/src/ui/commands/aboutCommand.test.ts | 229 +++++++++--- packages/cli/src/ui/commands/aboutCommand.ts | 38 +- .../cli/src/ui/commands/bugCommand.test.ts | 157 ++++----- packages/cli/src/ui/commands/bugCommand.ts | 62 +--- packages/cli/src/ui/components/AboutBox.tsx | 157 ++------- .../ui/components/HistoryItemDisplay.test.tsx | 25 +- .../src/ui/components/HistoryItemDisplay.tsx | 10 +- .../cli/src/ui/hooks/slashCommandProcessor.ts | 8 +- packages/cli/src/ui/types.ts | 46 ++- packages/cli/src/utils/systemInfo.test.ts | 331 ++++++++++++++++++ packages/cli/src/utils/systemInfo.ts | 173 +++++++++ packages/cli/src/utils/systemInfoFields.ts | 117 +++++++ 12 files changed, 975 insertions(+), 378 deletions(-) create mode 100644 packages/cli/src/utils/systemInfo.test.ts create mode 100644 packages/cli/src/utils/systemInfo.ts create mode 100644 packages/cli/src/utils/systemInfoFields.ts diff --git a/packages/cli/src/ui/commands/aboutCommand.test.ts b/packages/cli/src/ui/commands/aboutCommand.test.ts index 414c06ad..8a1daaeb 100644 --- a/packages/cli/src/ui/commands/aboutCommand.test.ts +++ b/packages/cli/src/ui/commands/aboutCommand.test.ts @@ -8,38 +8,22 @@ import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; import { aboutCommand } from './aboutCommand.js'; import { type CommandContext } from './types.js'; import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; -import * as versionUtils from '../../utils/version.js'; import { MessageType } from '../types.js'; -import { IdeClient } from '@qwen-code/qwen-code-core'; +import * as systemInfoUtils from '../../utils/systemInfo.js'; -vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { - const actual = - await importOriginal(); - return { - ...actual, - IdeClient: { - getInstance: vi.fn().mockResolvedValue({ - getDetectedIdeDisplayName: vi.fn().mockReturnValue('test-ide'), - }), - }, - }; -}); - -vi.mock('../../utils/version.js', () => ({ - getCliVersion: vi.fn(), -})); +vi.mock('../../utils/systemInfo.js'); describe('aboutCommand', () => { let mockContext: CommandContext; - const originalPlatform = process.platform; const originalEnv = { ...process.env }; beforeEach(() => { mockContext = createMockCommandContext({ services: { config: { - getModel: vi.fn(), + getModel: vi.fn().mockReturnValue('test-model'), getIdeMode: vi.fn().mockReturnValue(true), + getSessionId: vi.fn().mockReturnValue('test-session-id'), }, settings: { merged: { @@ -56,21 +40,25 @@ describe('aboutCommand', () => { }, } as unknown as CommandContext); - vi.mocked(versionUtils.getCliVersion).mockResolvedValue('test-version'); - vi.spyOn(mockContext.services.config!, 'getModel').mockReturnValue( - 'test-model', - ); - process.env['GOOGLE_CLOUD_PROJECT'] = 'test-gcp-project'; - Object.defineProperty(process, 'platform', { - value: 'test-os', + vi.mocked(systemInfoUtils.getExtendedSystemInfo).mockResolvedValue({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'no sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: 'test-ide', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: undefined, }); }); afterEach(() => { vi.unstubAllEnvs(); - 
Object.defineProperty(process, 'platform', { - value: originalPlatform, - }); process.env = originalEnv; vi.clearAllMocks(); }); @@ -81,30 +69,55 @@ describe('aboutCommand', () => { }); it('should call addItem with all version info', async () => { - process.env['SANDBOX'] = ''; if (!aboutCommand.action) { throw new Error('The about command must have an action.'); } await aboutCommand.action(mockContext, ''); + expect(systemInfoUtils.getExtendedSystemInfo).toHaveBeenCalledWith( + mockContext, + ); expect(mockContext.ui.addItem).toHaveBeenCalledWith( - { + expect.objectContaining({ type: MessageType.ABOUT, - cliVersion: 'test-version', - osVersion: 'test-os', - sandboxEnv: 'no sandbox', - modelVersion: 'test-model', - selectedAuthType: 'test-auth', - gcpProject: 'test-gcp-project', - ideClient: 'test-ide', - }, + systemInfo: expect.objectContaining({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'no sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: 'test-ide', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: undefined, + }), + }), expect.any(Number), ); }); it('should show the correct sandbox environment variable', async () => { - process.env['SANDBOX'] = 'gemini-sandbox'; + vi.mocked(systemInfoUtils.getExtendedSystemInfo).mockResolvedValue({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'gemini-sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: 'test-ide', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: undefined, + }); + if (!aboutCommand.action) { throw new Error('The about command must have an action.'); } @@ -113,15 +126,32 @@ describe('aboutCommand', () => { expect(mockContext.ui.addItem).toHaveBeenCalledWith( expect.objectContaining({ - sandboxEnv: 'gemini-sandbox', + type: MessageType.ABOUT, + systemInfo: expect.objectContaining({ + sandboxEnv: 'gemini-sandbox', + }), }), expect.any(Number), ); }); it('should show sandbox-exec profile when applicable', async () => { - process.env['SANDBOX'] = 'sandbox-exec'; - process.env['SEATBELT_PROFILE'] = 'test-profile'; + vi.mocked(systemInfoUtils.getExtendedSystemInfo).mockResolvedValue({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'sandbox-exec (test-profile)', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: 'test-ide', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: undefined, + }); + if (!aboutCommand.action) { throw new Error('The about command must have an action.'); } @@ -130,18 +160,31 @@ describe('aboutCommand', () => { expect(mockContext.ui.addItem).toHaveBeenCalledWith( expect.objectContaining({ - sandboxEnv: 'sandbox-exec (test-profile)', + systemInfo: expect.objectContaining({ + sandboxEnv: 'sandbox-exec (test-profile)', + }), }), expect.any(Number), ); }); it('should not show ide client when it is not detected', async () => { - vi.mocked(IdeClient.getInstance).mockResolvedValue({ - getDetectedIdeDisplayName: vi.fn().mockReturnValue(undefined), - } as unknown as IdeClient); + vi.mocked(systemInfoUtils.getExtendedSystemInfo).mockResolvedValue({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + 
nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'no sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: '', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: undefined, + }); - process.env['SANDBOX'] = ''; if (!aboutCommand.action) { throw new Error('The about command must have an action.'); } @@ -151,13 +194,87 @@ describe('aboutCommand', () => { expect(mockContext.ui.addItem).toHaveBeenCalledWith( expect.objectContaining({ type: MessageType.ABOUT, - cliVersion: 'test-version', - osVersion: 'test-os', - sandboxEnv: 'no sandbox', - modelVersion: 'test-model', - selectedAuthType: 'test-auth', - gcpProject: 'test-gcp-project', - ideClient: '', + systemInfo: expect.objectContaining({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'no sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: '', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: undefined, + }), + }), + expect.any(Number), + ); + }); + + it('should show unknown npmVersion when npm command fails', async () => { + vi.mocked(systemInfoUtils.getExtendedSystemInfo).mockResolvedValue({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: 'unknown', + sandboxEnv: 'no sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: 'test-ide', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: undefined, + }); + + if (!aboutCommand.action) { + throw new Error('The about command must have an action.'); + } + + await aboutCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + systemInfo: expect.objectContaining({ + npmVersion: 'unknown', + }), + }), + expect.any(Number), + ); + }); + + it('should show unknown sessionId when config is not available', async () => { + vi.mocked(systemInfoUtils.getExtendedSystemInfo).mockResolvedValue({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'no sandbox', + modelVersion: 'Unknown', + selectedAuthType: 'test-auth', + ideClient: '', + sessionId: 'unknown', + memoryUsage: '100 MB', + baseUrl: undefined, + }); + + if (!aboutCommand.action) { + throw new Error('The about command must have an action.'); + } + + await aboutCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + systemInfo: expect.objectContaining({ + sessionId: 'unknown', + }), }), expect.any(Number), ); diff --git a/packages/cli/src/ui/commands/aboutCommand.ts b/packages/cli/src/ui/commands/aboutCommand.ts index 36bfbdff..0f35db92 100644 --- a/packages/cli/src/ui/commands/aboutCommand.ts +++ b/packages/cli/src/ui/commands/aboutCommand.ts @@ -4,53 +4,23 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { getCliVersion } from '../../utils/version.js'; -import type { CommandContext, SlashCommand } from './types.js'; +import type { SlashCommand } from './types.js'; import { CommandKind } from './types.js'; -import process from 'node:process'; import { MessageType, type HistoryItemAbout } from '../types.js'; -import { IdeClient } from '@qwen-code/qwen-code-core'; +import { getExtendedSystemInfo } from '../../utils/systemInfo.js'; export const aboutCommand: SlashCommand 
= { name: 'about', description: 'show version info', kind: CommandKind.BUILT_IN, action: async (context) => { - const osVersion = process.platform; - let sandboxEnv = 'no sandbox'; - if (process.env['SANDBOX'] && process.env['SANDBOX'] !== 'sandbox-exec') { - sandboxEnv = process.env['SANDBOX']; - } else if (process.env['SANDBOX'] === 'sandbox-exec') { - sandboxEnv = `sandbox-exec (${ - process.env['SEATBELT_PROFILE'] || 'unknown' - })`; - } - const modelVersion = context.services.config?.getModel() || 'Unknown'; - const cliVersion = await getCliVersion(); - const selectedAuthType = - context.services.settings.merged.security?.auth?.selectedType || ''; - const gcpProject = process.env['GOOGLE_CLOUD_PROJECT'] || ''; - const ideClient = await getIdeClientName(context); + const systemInfo = await getExtendedSystemInfo(context); const aboutItem: Omit = { type: MessageType.ABOUT, - cliVersion, - osVersion, - sandboxEnv, - modelVersion, - selectedAuthType, - gcpProject, - ideClient, + systemInfo, }; context.ui.addItem(aboutItem, Date.now()); }, }; - -async function getIdeClientName(context: CommandContext) { - if (!context.services.config?.getIdeMode()) { - return ''; - } - const ideClient = await IdeClient.getInstance(); - return ideClient?.getDetectedIdeDisplayName() ?? ''; -} diff --git a/packages/cli/src/ui/commands/bugCommand.test.ts b/packages/cli/src/ui/commands/bugCommand.test.ts index 9d668055..09c28aad 100644 --- a/packages/cli/src/ui/commands/bugCommand.test.ts +++ b/packages/cli/src/ui/commands/bugCommand.test.ts @@ -8,41 +8,34 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import open from 'open'; import { bugCommand } from './bugCommand.js'; import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; -import { getCliVersion } from '../../utils/version.js'; import { GIT_COMMIT_INFO } from '../../generated/git-commit.js'; -import { formatMemoryUsage } from '../utils/formatters.js'; import { AuthType } from '@qwen-code/qwen-code-core'; +import * as systemInfoUtils from '../../utils/systemInfo.js'; // Mock dependencies vi.mock('open'); -vi.mock('../../utils/version.js'); -vi.mock('../utils/formatters.js'); -vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { - const actual = - await importOriginal(); - return { - ...actual, - IdeClient: { - getInstance: () => ({ - getDetectedIdeDisplayName: vi.fn().mockReturnValue('VSCode'), - }), - }, - }; -}); -vi.mock('node:process', () => ({ - default: { - platform: 'test-platform', - version: 'v20.0.0', - // Keep other necessary process properties if needed by other parts of the code - env: process.env, - memoryUsage: () => ({ rss: 0 }), - }, -})); +vi.mock('../../utils/systemInfo.js'); describe('bugCommand', () => { beforeEach(() => { - vi.mocked(getCliVersion).mockResolvedValue('0.1.0'); - vi.mocked(formatMemoryUsage).mockReturnValue('100 MB'); + vi.mocked(systemInfoUtils.getExtendedSystemInfo).mockResolvedValue({ + cliVersion: '0.1.0', + osPlatform: 'test-platform', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'test', + modelVersion: 'qwen3-coder-plus', + selectedAuthType: '', + ideClient: 'VSCode', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + gitCommit: + GIT_COMMIT_INFO && !['N/A'].includes(GIT_COMMIT_INFO) + ? 
GIT_COMMIT_INFO + : undefined, + }); vi.stubEnv('SANDBOX', 'qwen-test'); }); @@ -55,19 +48,7 @@ describe('bugCommand', () => { const mockContext = createMockCommandContext({ services: { config: { - getModel: () => 'qwen3-coder-plus', getBugCommand: () => undefined, - getIdeMode: () => true, - getSessionId: () => 'test-session-id', - }, - settings: { - merged: { - security: { - auth: { - selectedType: undefined, - }, - }, - }, }, }, }); @@ -75,14 +56,21 @@ describe('bugCommand', () => { if (!bugCommand.action) throw new Error('Action is not defined'); await bugCommand.action(mockContext, 'A test bug'); + const gitCommitLine = + GIT_COMMIT_INFO && !['N/A'].includes(GIT_COMMIT_INFO) + ? `* **Git Commit:** ${GIT_COMMIT_INFO}\n` + : ''; const expectedInfo = ` * **CLI Version:** 0.1.0 -* **Git Commit:** ${GIT_COMMIT_INFO} +${gitCommitLine}* **Model:** qwen3-coder-plus +* **Sandbox:** test +* **OS Platform:** test-platform +* **OS Arch:** x64 +* **OS Release:** 22.0.0 +* **Node.js Version:** v20.0.0 +* **NPM Version:** 10.0.0 * **Session ID:** test-session-id -* **Operating System:** test-platform v20.0.0 -* **Sandbox Environment:** test -* **Auth Type:** -* **Model Version:** qwen3-coder-plus +* **Auth Method:** * **Memory Usage:** 100 MB * **IDE Client:** VSCode `; @@ -99,19 +87,7 @@ describe('bugCommand', () => { const mockContext = createMockCommandContext({ services: { config: { - getModel: () => 'qwen3-coder-plus', getBugCommand: () => ({ urlTemplate: customTemplate }), - getIdeMode: () => true, - getSessionId: () => 'test-session-id', - }, - settings: { - merged: { - security: { - auth: { - selectedType: undefined, - }, - }, - }, }, }, }); @@ -119,14 +95,21 @@ describe('bugCommand', () => { if (!bugCommand.action) throw new Error('Action is not defined'); await bugCommand.action(mockContext, 'A custom bug'); + const gitCommitLine = + GIT_COMMIT_INFO && !['N/A'].includes(GIT_COMMIT_INFO) + ? `* **Git Commit:** ${GIT_COMMIT_INFO}\n` + : ''; const expectedInfo = ` * **CLI Version:** 0.1.0 -* **Git Commit:** ${GIT_COMMIT_INFO} +${gitCommitLine}* **Model:** qwen3-coder-plus +* **Sandbox:** test +* **OS Platform:** test-platform +* **OS Arch:** x64 +* **OS Release:** 22.0.0 +* **Node.js Version:** v20.0.0 +* **NPM Version:** 10.0.0 * **Session ID:** test-session-id -* **Operating System:** test-platform v20.0.0 -* **Sandbox Environment:** test -* **Auth Type:** -* **Model Version:** qwen3-coder-plus +* **Auth Method:** * **Memory Usage:** 100 MB * **IDE Client:** VSCode `; @@ -138,25 +121,30 @@ describe('bugCommand', () => { }); it('should include Base URL when auth type is OpenAI', async () => { + vi.mocked(systemInfoUtils.getExtendedSystemInfo).mockResolvedValue({ + cliVersion: '0.1.0', + osPlatform: 'test-platform', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'test', + modelVersion: 'qwen3-coder-plus', + selectedAuthType: AuthType.USE_OPENAI, + ideClient: 'VSCode', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: 'https://api.openai.com/v1', + gitCommit: + GIT_COMMIT_INFO && !['N/A'].includes(GIT_COMMIT_INFO) + ? 
GIT_COMMIT_INFO + : undefined, + }); + const mockContext = createMockCommandContext({ services: { config: { - getModel: () => 'qwen3-coder-plus', getBugCommand: () => undefined, - getIdeMode: () => true, - getSessionId: () => 'test-session-id', - getContentGeneratorConfig: () => ({ - baseUrl: 'https://api.openai.com/v1', - }), - }, - settings: { - merged: { - security: { - auth: { - selectedType: AuthType.USE_OPENAI, - }, - }, - }, }, }, }); @@ -164,15 +152,22 @@ describe('bugCommand', () => { if (!bugCommand.action) throw new Error('Action is not defined'); await bugCommand.action(mockContext, 'OpenAI bug'); + const gitCommitLine = + GIT_COMMIT_INFO && !['N/A'].includes(GIT_COMMIT_INFO) + ? `* **Git Commit:** ${GIT_COMMIT_INFO}\n` + : ''; const expectedInfo = ` * **CLI Version:** 0.1.0 -* **Git Commit:** ${GIT_COMMIT_INFO} +${gitCommitLine}* **Model:** qwen3-coder-plus +* **Sandbox:** test +* **OS Platform:** test-platform +* **OS Arch:** x64 +* **OS Release:** 22.0.0 +* **Node.js Version:** v20.0.0 +* **NPM Version:** 10.0.0 * **Session ID:** test-session-id -* **Operating System:** test-platform v20.0.0 -* **Sandbox Environment:** test -* **Auth Type:** ${AuthType.USE_OPENAI} +* **Auth Method:** ${AuthType.USE_OPENAI} * **Base URL:** https://api.openai.com/v1 -* **Model Version:** qwen3-coder-plus * **Memory Usage:** 100 MB * **IDE Client:** VSCode `; diff --git a/packages/cli/src/ui/commands/bugCommand.ts b/packages/cli/src/ui/commands/bugCommand.ts index 2eb9b823..869024b5 100644 --- a/packages/cli/src/ui/commands/bugCommand.ts +++ b/packages/cli/src/ui/commands/bugCommand.ts @@ -5,17 +5,17 @@ */ import open from 'open'; -import process from 'node:process'; import { type CommandContext, type SlashCommand, CommandKind, } from './types.js'; import { MessageType } from '../types.js'; -import { GIT_COMMIT_INFO } from '../../generated/git-commit.js'; -import { formatMemoryUsage } from '../utils/formatters.js'; -import { getCliVersion } from '../../utils/version.js'; -import { IdeClient, AuthType } from '@qwen-code/qwen-code-core'; +import { getExtendedSystemInfo } from '../../utils/systemInfo.js'; +import { + getSystemInfoFields, + getFieldValue, +} from '../../utils/systemInfoFields.js'; export const bugCommand: SlashCommand = { name: 'bug', @@ -23,50 +23,20 @@ export const bugCommand: SlashCommand = { kind: CommandKind.BUILT_IN, action: async (context: CommandContext, args?: string): Promise => { const bugDescription = (args || '').trim(); - const { config } = context.services; + const systemInfo = await getExtendedSystemInfo(context); - const osVersion = `${process.platform} ${process.version}`; - let sandboxEnv = 'no sandbox'; - if (process.env['SANDBOX'] && process.env['SANDBOX'] !== 'sandbox-exec') { - sandboxEnv = process.env['SANDBOX'].replace(/^qwen-(?:code-)?/, ''); - } else if (process.env['SANDBOX'] === 'sandbox-exec') { - sandboxEnv = `sandbox-exec (${ - process.env['SEATBELT_PROFILE'] || 'unknown' - })`; - } - const modelVersion = config?.getModel() || 'Unknown'; - const cliVersion = await getCliVersion(); - const memoryUsage = formatMemoryUsage(process.memoryUsage().rss); - const ideClient = await getIdeClientName(context); - const selectedAuthType = - context.services.settings.merged.security?.auth?.selectedType || ''; - const baseUrl = - selectedAuthType === AuthType.USE_OPENAI - ? 
config?.getContentGeneratorConfig()?.baseUrl - : undefined; + const fields = getSystemInfoFields(systemInfo); - let info = ` -* **CLI Version:** ${cliVersion} -* **Git Commit:** ${GIT_COMMIT_INFO} -* **Session ID:** ${config?.getSessionId() || 'unknown'} -* **Operating System:** ${osVersion} -* **Sandbox Environment:** ${sandboxEnv} -* **Auth Type:** ${selectedAuthType}`; - if (baseUrl) { - info += `\n* **Base URL:** ${baseUrl}`; - } - info += ` -* **Model Version:** ${modelVersion} -* **Memory Usage:** ${memoryUsage} -`; - if (ideClient) { - info += `* **IDE Client:** ${ideClient}\n`; + // Generate bug report info using the same field configuration + let info = '\n'; + for (const field of fields) { + info += `* **${field.label}:** ${getFieldValue(field, systemInfo)}\n`; } let bugReportUrl = 'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title={title}&info={info}'; - const bugCommandSettings = config?.getBugCommand(); + const bugCommandSettings = context.services.config?.getBugCommand(); if (bugCommandSettings?.urlTemplate) { bugReportUrl = bugCommandSettings.urlTemplate; } @@ -98,11 +68,3 @@ export const bugCommand: SlashCommand = { } }, }; - -async function getIdeClientName(context: CommandContext) { - if (!context.services.config?.getIdeMode()) { - return ''; - } - const ideClient = await IdeClient.getInstance(); - return ideClient.getDetectedIdeDisplayName() ?? ''; -} diff --git a/packages/cli/src/ui/components/AboutBox.tsx b/packages/cli/src/ui/components/AboutBox.tsx index 70cf47cd..fba5fb13 100644 --- a/packages/cli/src/ui/components/AboutBox.tsx +++ b/packages/cli/src/ui/components/AboutBox.tsx @@ -7,127 +7,46 @@ import type React from 'react'; import { Box, Text } from 'ink'; import { theme } from '../semantic-colors.js'; -import { GIT_COMMIT_INFO } from '../../generated/git-commit.js'; +import type { ExtendedSystemInfo } from '../../utils/systemInfo.js'; +import { + getSystemInfoFields, + getFieldValue, + type SystemInfoField, +} from '../../utils/systemInfoFields.js'; -interface AboutBoxProps { - cliVersion: string; - osVersion: string; - sandboxEnv: string; - modelVersion: string; - selectedAuthType: string; - gcpProject: string; - ideClient: string; -} +type AboutBoxProps = ExtendedSystemInfo; -export const AboutBox: React.FC = ({ - cliVersion, - osVersion, - sandboxEnv, - modelVersion, - selectedAuthType, - gcpProject, - ideClient, -}) => ( - - - - About Qwen Code - - - - - - CLI Version +export const AboutBox: React.FC = (props) => { + const fields = getSystemInfoFields(props); + + return ( + + + + About Qwen Code - - {cliVersion} - + {fields.map((field: SystemInfoField) => ( + + + + {field.label} + + + + + {getFieldValue(field, props)} + + + + ))} - {GIT_COMMIT_INFO && !['N/A'].includes(GIT_COMMIT_INFO) && ( - - - - Git Commit - - - - {GIT_COMMIT_INFO} - - - )} - - - - Model - - - - {modelVersion} - - - - - - Sandbox - - - - {sandboxEnv} - - - - - - OS - - - - {osVersion} - - - - - - Auth Method - - - - - {selectedAuthType.startsWith('oauth') ? 
'OAuth' : selectedAuthType} - - - - {gcpProject && ( - - - - GCP Project - - - - {gcpProject} - - - )} - {ideClient && ( - - - - IDE Client - - - - {ideClient} - - - )} - -); + ); +}; diff --git a/packages/cli/src/ui/components/HistoryItemDisplay.test.tsx b/packages/cli/src/ui/components/HistoryItemDisplay.test.tsx index 4eaf8ab3..7cca61ae 100644 --- a/packages/cli/src/ui/components/HistoryItemDisplay.test.tsx +++ b/packages/cli/src/ui/components/HistoryItemDisplay.test.tsx @@ -71,15 +71,24 @@ describe('', () => { it('renders AboutBox for "about" type', () => { const item: HistoryItem = { - ...baseItem, + id: 1, type: MessageType.ABOUT, - cliVersion: '1.0.0', - osVersion: 'test-os', - sandboxEnv: 'test-env', - modelVersion: 'test-model', - selectedAuthType: 'test-auth', - gcpProject: 'test-project', - ideClient: 'test-ide', + systemInfo: { + cliVersion: '1.0.0', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'test-env', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: 'test-ide', + sessionId: 'test-session-id', + memoryUsage: '100 MB', + baseUrl: undefined, + gitCommit: undefined, + }, }; const { lastFrame } = renderWithProviders( , diff --git a/packages/cli/src/ui/components/HistoryItemDisplay.tsx b/packages/cli/src/ui/components/HistoryItemDisplay.tsx index 1e86ffa1..bec9c23d 100644 --- a/packages/cli/src/ui/components/HistoryItemDisplay.tsx +++ b/packages/cli/src/ui/components/HistoryItemDisplay.tsx @@ -95,15 +95,7 @@ const HistoryItemDisplayComponent: React.FC = ({ )} {itemForDisplay.type === 'about' && ( - + )} {itemForDisplay.type === 'help' && commands && ( diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.ts index f2929d56..cba3bf7a 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.ts @@ -138,13 +138,7 @@ export const useSlashCommandProcessor = ( if (message.type === MessageType.ABOUT) { historyItemContent = { type: 'about', - cliVersion: message.cliVersion, - osVersion: message.osVersion, - sandboxEnv: message.sandboxEnv, - modelVersion: message.modelVersion, - selectedAuthType: message.selectedAuthType, - gcpProject: message.gcpProject, - ideClient: message.ideClient, + systemInfo: message.systemInfo, }; } else if (message.type === MessageType.HELP) { historyItemContent = { diff --git a/packages/cli/src/ui/types.ts b/packages/cli/src/ui/types.ts index 1d2fa782..bc9a6317 100644 --- a/packages/cli/src/ui/types.ts +++ b/packages/cli/src/ui/types.ts @@ -120,13 +120,22 @@ export type HistoryItemWarning = HistoryItemBase & { export type HistoryItemAbout = HistoryItemBase & { type: 'about'; - cliVersion: string; - osVersion: string; - sandboxEnv: string; - modelVersion: string; - selectedAuthType: string; - gcpProject: string; - ideClient: string; + systemInfo: { + cliVersion: string; + osPlatform: string; + osArch: string; + osRelease: string; + nodeVersion: string; + npmVersion: string; + sandboxEnv: string; + modelVersion: string; + selectedAuthType: string; + ideClient: string; + sessionId: string; + memoryUsage: string; + baseUrl?: string; + gitCommit?: string; + }; }; export type HistoryItemHelp = HistoryItemBase & { @@ -288,13 +297,22 @@ export type Message = | { type: MessageType.ABOUT; timestamp: Date; - cliVersion: string; - osVersion: string; - sandboxEnv: string; - modelVersion: string; - selectedAuthType: string; - gcpProject: string; - 
ideClient: string; + systemInfo: { + cliVersion: string; + osPlatform: string; + osArch: string; + osRelease: string; + nodeVersion: string; + npmVersion: string; + sandboxEnv: string; + modelVersion: string; + selectedAuthType: string; + ideClient: string; + sessionId: string; + memoryUsage: string; + baseUrl?: string; + gitCommit?: string; + }; content?: string; // Optional content, not really used for ABOUT } | { diff --git a/packages/cli/src/utils/systemInfo.test.ts b/packages/cli/src/utils/systemInfo.test.ts new file mode 100644 index 00000000..4849f1b1 --- /dev/null +++ b/packages/cli/src/utils/systemInfo.test.ts @@ -0,0 +1,331 @@ +/** + * @license + * Copyright 2025 Qwen + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; +import { + getSystemInfo, + getExtendedSystemInfo, + getNpmVersion, + getSandboxEnv, + getIdeClientName, +} from './systemInfo.js'; +import type { CommandContext } from '../ui/commands/types.js'; +import { createMockCommandContext } from '../test-utils/mockCommandContext.js'; +import * as child_process from 'node:child_process'; +import os from 'node:os'; +import { IdeClient } from '@qwen-code/qwen-code-core'; +import * as versionUtils from './version.js'; +import type { ExecSyncOptions } from 'node:child_process'; + +vi.mock('node:child_process'); + +vi.mock('node:os', () => ({ + default: { + release: vi.fn(), + }, +})); + +vi.mock('./version.js', () => ({ + getCliVersion: vi.fn(), +})); + +vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { + const actual = + await importOriginal(); + return { + ...actual, + IdeClient: { + getInstance: vi.fn(), + }, + }; +}); + +describe('systemInfo', () => { + let mockContext: CommandContext; + const originalPlatform = process.platform; + const originalArch = process.arch; + const originalVersion = process.version; + const originalEnv = { ...process.env }; + + beforeEach(() => { + mockContext = createMockCommandContext({ + services: { + config: { + getModel: vi.fn().mockReturnValue('test-model'), + getIdeMode: vi.fn().mockReturnValue(true), + getSessionId: vi.fn().mockReturnValue('test-session-id'), + getContentGeneratorConfig: vi.fn().mockReturnValue({ + baseUrl: 'https://api.openai.com', + }), + }, + settings: { + merged: { + security: { + auth: { + selectedType: 'test-auth', + }, + }, + }, + }, + }, + } as unknown as CommandContext); + + vi.mocked(versionUtils.getCliVersion).mockResolvedValue('test-version'); + vi.mocked(child_process.execSync).mockImplementation( + (command: string, options?: ExecSyncOptions) => { + if ( + options && + typeof options === 'object' && + 'encoding' in options && + options.encoding === 'utf-8' + ) { + return '10.0.0'; + } + return Buffer.from('10.0.0', 'utf-8'); + }, + ); + vi.mocked(os.release).mockReturnValue('22.0.0'); + process.env['GOOGLE_CLOUD_PROJECT'] = 'test-gcp-project'; + Object.defineProperty(process, 'platform', { + value: 'test-os', + }); + Object.defineProperty(process, 'arch', { + value: 'x64', + }); + Object.defineProperty(process, 'version', { + value: 'v20.0.0', + }); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + Object.defineProperty(process, 'platform', { + value: originalPlatform, + }); + Object.defineProperty(process, 'arch', { + value: originalArch, + }); + Object.defineProperty(process, 'version', { + value: originalVersion, + }); + process.env = originalEnv; + vi.clearAllMocks(); + vi.resetAllMocks(); + }); + + describe('getNpmVersion', () => { + it('should return npm version when 
available', async () => { + vi.mocked(child_process.execSync).mockImplementation( + (command: string, options?: ExecSyncOptions) => { + if ( + options && + typeof options === 'object' && + 'encoding' in options && + options.encoding === 'utf-8' + ) { + return '10.0.0'; + } + return Buffer.from('10.0.0', 'utf-8'); + }, + ); + const version = await getNpmVersion(); + expect(version).toBe('10.0.0'); + }); + + it('should return unknown when npm command fails', async () => { + vi.mocked(child_process.execSync).mockImplementation(() => { + throw new Error('npm not found'); + }); + const version = await getNpmVersion(); + expect(version).toBe('unknown'); + }); + }); + + describe('getSandboxEnv', () => { + it('should return "no sandbox" when SANDBOX is not set', () => { + delete process.env['SANDBOX']; + expect(getSandboxEnv()).toBe('no sandbox'); + }); + + it('should return sandbox-exec info when SANDBOX is sandbox-exec', () => { + process.env['SANDBOX'] = 'sandbox-exec'; + process.env['SEATBELT_PROFILE'] = 'test-profile'; + expect(getSandboxEnv()).toBe('sandbox-exec (test-profile)'); + }); + + it('should return sandbox name without prefix when stripPrefix is true', () => { + process.env['SANDBOX'] = 'qwen-code-test-sandbox'; + expect(getSandboxEnv(true)).toBe('test-sandbox'); + }); + + it('should return sandbox name with prefix when stripPrefix is false', () => { + process.env['SANDBOX'] = 'qwen-code-test-sandbox'; + expect(getSandboxEnv(false)).toBe('qwen-code-test-sandbox'); + }); + + it('should handle qwen- prefix removal', () => { + process.env['SANDBOX'] = 'qwen-custom-sandbox'; + expect(getSandboxEnv(true)).toBe('custom-sandbox'); + }); + }); + + describe('getIdeClientName', () => { + it('should return IDE client name when IDE mode is enabled', async () => { + vi.mocked(IdeClient.getInstance).mockResolvedValue({ + getDetectedIdeDisplayName: vi.fn().mockReturnValue('test-ide'), + } as unknown as IdeClient); + + const ideClient = await getIdeClientName(mockContext); + expect(ideClient).toBe('test-ide'); + }); + + it('should return empty string when IDE mode is disabled', async () => { + vi.mocked(mockContext.services.config!.getIdeMode).mockReturnValue(false); + + const ideClient = await getIdeClientName(mockContext); + expect(ideClient).toBe(''); + }); + + it('should return empty string when IDE client detection fails', async () => { + vi.mocked(IdeClient.getInstance).mockRejectedValue( + new Error('IDE client error'), + ); + + const ideClient = await getIdeClientName(mockContext); + expect(ideClient).toBe(''); + }); + }); + + describe('getSystemInfo', () => { + it('should collect all system information', async () => { + // Ensure SANDBOX is not set for this test + delete process.env['SANDBOX']; + vi.mocked(IdeClient.getInstance).mockResolvedValue({ + getDetectedIdeDisplayName: vi.fn().mockReturnValue('test-ide'), + } as unknown as IdeClient); + vi.mocked(child_process.execSync).mockImplementation( + (command: string, options?: ExecSyncOptions) => { + if ( + options && + typeof options === 'object' && + 'encoding' in options && + options.encoding === 'utf-8' + ) { + return '10.0.0'; + } + return Buffer.from('10.0.0', 'utf-8'); + }, + ); + + const systemInfo = await getSystemInfo(mockContext); + + expect(systemInfo).toEqual({ + cliVersion: 'test-version', + osPlatform: 'test-os', + osArch: 'x64', + osRelease: '22.0.0', + nodeVersion: 'v20.0.0', + npmVersion: '10.0.0', + sandboxEnv: 'no sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + ideClient: 'test-ide', + 
sessionId: 'test-session-id', + }); + }); + + it('should handle missing config gracefully', async () => { + mockContext.services.config = null; + vi.mocked(IdeClient.getInstance).mockResolvedValue({ + getDetectedIdeDisplayName: vi.fn().mockReturnValue(''), + } as unknown as IdeClient); + + const systemInfo = await getSystemInfo(mockContext); + + expect(systemInfo.modelVersion).toBe('Unknown'); + expect(systemInfo.sessionId).toBe('unknown'); + }); + }); + + describe('getExtendedSystemInfo', () => { + it('should include memory usage and base URL', async () => { + vi.mocked(IdeClient.getInstance).mockResolvedValue({ + getDetectedIdeDisplayName: vi.fn().mockReturnValue('test-ide'), + } as unknown as IdeClient); + vi.mocked(child_process.execSync).mockImplementation( + (command: string, options?: ExecSyncOptions) => { + if ( + options && + typeof options === 'object' && + 'encoding' in options && + options.encoding === 'utf-8' + ) { + return '10.0.0'; + } + return Buffer.from('10.0.0', 'utf-8'); + }, + ); + + const { AuthType } = await import('@qwen-code/qwen-code-core'); + // Update the mock context to use OpenAI auth + mockContext.services.settings.merged.security!.auth!.selectedType = + AuthType.USE_OPENAI; + + const extendedInfo = await getExtendedSystemInfo(mockContext); + + expect(extendedInfo.memoryUsage).toBeDefined(); + expect(extendedInfo.memoryUsage).toMatch(/\d+\.\d+ (KB|MB|GB)/); + expect(extendedInfo.baseUrl).toBe('https://api.openai.com'); + }); + + it('should use sandbox env without prefix for bug reports', async () => { + process.env['SANDBOX'] = 'qwen-code-test-sandbox'; + vi.mocked(IdeClient.getInstance).mockResolvedValue({ + getDetectedIdeDisplayName: vi.fn().mockReturnValue(''), + } as unknown as IdeClient); + vi.mocked(child_process.execSync).mockImplementation( + (command: string, options?: ExecSyncOptions) => { + if ( + options && + typeof options === 'object' && + 'encoding' in options && + options.encoding === 'utf-8' + ) { + return '10.0.0'; + } + return Buffer.from('10.0.0', 'utf-8'); + }, + ); + + const extendedInfo = await getExtendedSystemInfo(mockContext); + + expect(extendedInfo.sandboxEnv).toBe('test-sandbox'); + }); + + it('should not include base URL for non-OpenAI auth', async () => { + vi.mocked(IdeClient.getInstance).mockResolvedValue({ + getDetectedIdeDisplayName: vi.fn().mockReturnValue(''), + } as unknown as IdeClient); + vi.mocked(child_process.execSync).mockImplementation( + (command: string, options?: ExecSyncOptions) => { + if ( + options && + typeof options === 'object' && + 'encoding' in options && + options.encoding === 'utf-8' + ) { + return '10.0.0'; + } + return Buffer.from('10.0.0', 'utf-8'); + }, + ); + + const extendedInfo = await getExtendedSystemInfo(mockContext); + + expect(extendedInfo.baseUrl).toBeUndefined(); + }); + }); +}); diff --git a/packages/cli/src/utils/systemInfo.ts b/packages/cli/src/utils/systemInfo.ts new file mode 100644 index 00000000..84927a95 --- /dev/null +++ b/packages/cli/src/utils/systemInfo.ts @@ -0,0 +1,173 @@ +/** + * @license + * Copyright 2025 Qwen + * SPDX-License-Identifier: Apache-2.0 + */ + +import process from 'node:process'; +import os from 'node:os'; +import { execSync } from 'node:child_process'; +import type { CommandContext } from '../ui/commands/types.js'; +import { getCliVersion } from './version.js'; +import { IdeClient, AuthType } from '@qwen-code/qwen-code-core'; +import { formatMemoryUsage } from '../ui/utils/formatters.js'; +import { GIT_COMMIT_INFO } from '../generated/git-commit.js'; + +/** 
+ * System information interface containing all system-related details + * that can be collected for debugging and reporting purposes. + */ +export interface SystemInfo { + cliVersion: string; + osPlatform: string; + osArch: string; + osRelease: string; + nodeVersion: string; + npmVersion: string; + sandboxEnv: string; + modelVersion: string; + selectedAuthType: string; + ideClient: string; + sessionId: string; +} + +/** + * Additional system information for bug reports + */ +export interface ExtendedSystemInfo extends SystemInfo { + memoryUsage: string; + baseUrl?: string; + gitCommit?: string; +} + +/** + * Gets the NPM version, handling cases where npm might not be available. + * Returns 'unknown' if npm command fails or is not found. + */ +export async function getNpmVersion(): Promise { + try { + return execSync('npm --version', { encoding: 'utf-8' }).trim(); + } catch { + return 'unknown'; + } +} + +/** + * Gets the IDE client name if IDE mode is enabled. + * Returns empty string if IDE mode is disabled or IDE client is not detected. + */ +export async function getIdeClientName( + context: CommandContext, +): Promise { + if (!context.services.config?.getIdeMode()) { + return ''; + } + try { + const ideClient = await IdeClient.getInstance(); + return ideClient?.getDetectedIdeDisplayName() ?? ''; + } catch { + return ''; + } +} + +/** + * Gets the sandbox environment information. + * Handles different sandbox types including sandbox-exec and custom sandbox environments. + * For bug reports, removes 'qwen-' or 'qwen-code-' prefixes from sandbox names. + * + * @param stripPrefix - Whether to strip 'qwen-' prefix (used for bug reports) + */ +export function getSandboxEnv(stripPrefix = false): string { + const sandbox = process.env['SANDBOX']; + + if (!sandbox || sandbox === 'sandbox-exec') { + if (sandbox === 'sandbox-exec') { + const profile = process.env['SEATBELT_PROFILE'] || 'unknown'; + return `sandbox-exec (${profile})`; + } + return 'no sandbox'; + } + + // For bug reports, remove qwen- prefix + if (stripPrefix) { + return sandbox.replace(/^qwen-(?:code-)?/, ''); + } + + return sandbox; +} + +/** + * Collects comprehensive system information for debugging and reporting. + * This function gathers all system-related details including OS, versions, + * sandbox environment, authentication, and session information. + * + * @param context - Command context containing config and settings + * @returns Promise resolving to SystemInfo object with all collected information + */ +export async function getSystemInfo( + context: CommandContext, +): Promise { + const osPlatform = process.platform; + const osArch = process.arch; + const osRelease = os.release(); + const nodeVersion = process.version; + const npmVersion = await getNpmVersion(); + const sandboxEnv = getSandboxEnv(); + const modelVersion = context.services.config?.getModel() || 'Unknown'; + const cliVersion = await getCliVersion(); + const selectedAuthType = + context.services.settings.merged.security?.auth?.selectedType || ''; + const ideClient = await getIdeClientName(context); + const sessionId = context.services.config?.getSessionId() || 'unknown'; + + return { + cliVersion, + osPlatform, + osArch, + osRelease, + nodeVersion, + npmVersion, + sandboxEnv, + modelVersion, + selectedAuthType, + ideClient, + sessionId, + }; +} + +/** + * Collects extended system information for bug reports. + * Includes all standard system info plus memory usage and optional base URL. 
+ * + * @param context - Command context containing config and settings + * @returns Promise resolving to ExtendedSystemInfo object + */ +export async function getExtendedSystemInfo( + context: CommandContext, +): Promise { + const baseInfo = await getSystemInfo(context); + const memoryUsage = formatMemoryUsage(process.memoryUsage().rss); + + // For bug reports, use sandbox name without prefix + const sandboxEnv = getSandboxEnv(true); + + // Get base URL if using OpenAI auth + const baseUrl = + baseInfo.selectedAuthType === AuthType.USE_OPENAI + ? context.services.config?.getContentGeneratorConfig()?.baseUrl + : undefined; + + // Get git commit info + const gitCommit = + GIT_COMMIT_INFO && !['N/A'].includes(GIT_COMMIT_INFO) + ? GIT_COMMIT_INFO + : undefined; + + return { + ...baseInfo, + sandboxEnv, + memoryUsage, + baseUrl, + gitCommit, + }; +} diff --git a/packages/cli/src/utils/systemInfoFields.ts b/packages/cli/src/utils/systemInfoFields.ts new file mode 100644 index 00000000..d4b959fb --- /dev/null +++ b/packages/cli/src/utils/systemInfoFields.ts @@ -0,0 +1,117 @@ +/** + * @license + * Copyright 2025 Qwen + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { ExtendedSystemInfo } from './systemInfo.js'; + +/** + * Field configuration for system information display + */ +export interface SystemInfoField { + label: string; + key: keyof ExtendedSystemInfo; +} + +/** + * Unified field configuration for system information display. + * This ensures consistent labeling between /about and /bug commands. + */ +export function getSystemInfoFields( + info: ExtendedSystemInfo, +): SystemInfoField[] { + const allFields: SystemInfoField[] = [ + { + label: 'CLI Version', + key: 'cliVersion', + }, + { + label: 'Git Commit', + key: 'gitCommit', + }, + { + label: 'Model', + key: 'modelVersion', + }, + { + label: 'Sandbox', + key: 'sandboxEnv', + }, + { + label: 'OS Platform', + key: 'osPlatform', + }, + { + label: 'OS Arch', + key: 'osArch', + }, + { + label: 'OS Release', + key: 'osRelease', + }, + { + label: 'Node.js Version', + key: 'nodeVersion', + }, + { + label: 'NPM Version', + key: 'npmVersion', + }, + { + label: 'Session ID', + key: 'sessionId', + }, + { + label: 'Auth Method', + key: 'selectedAuthType', + }, + { + label: 'Base URL', + key: 'baseUrl', + }, + { + label: 'Memory Usage', + key: 'memoryUsage', + }, + { + label: 'IDE Client', + key: 'ideClient', + }, + ]; + + // Filter out optional fields that are not present + return allFields.filter((field) => { + const value = info[field.key]; + // Optional fields: only show if they exist and are non-empty + if ( + field.key === 'baseUrl' || + field.key === 'gitCommit' || + field.key === 'ideClient' + ) { + return Boolean(value); + } + return true; + }); +} + +/** + * Get the value for a field from system info + */ +export function getFieldValue( + field: SystemInfoField, + info: ExtendedSystemInfo, +): string { + const value = info[field.key]; + + if (value === undefined || value === null) { + return ''; + } + + // Special formatting for selectedAuthType + if (field.key === 'selectedAuthType') { + return String(value).startsWith('oauth') ? 
'OAuth' : String(value); + } + + return String(value); +} From d17c37af7df27daa369fd4656166ad3fe5524f79 Mon Sep 17 00:00:00 2001 From: tanzhenxin Date: Thu, 6 Nov 2025 16:25:06 +0800 Subject: [PATCH 2/8] Feat: Simplify and Improve Search Tools (glob, grep, ripgrep) (#969) --- docs/tools/file-system.md | 58 ++-- packages/core/src/tools/glob.test.ts | 203 ++++++++------ packages/core/src/tools/glob.ts | 240 ++++++---------- packages/core/src/tools/grep.test.ts | 173 ++++-------- packages/core/src/tools/grep.ts | 351 ++++++++---------------- packages/core/src/tools/ripGrep.test.ts | 53 +++- packages/core/src/tools/ripGrep.ts | 165 +++-------- packages/core/src/utils/paths.test.ts | 286 ++++++++++++++++++- packages/core/src/utils/paths.ts | 93 +++++++ 9 files changed, 840 insertions(+), 782 deletions(-) diff --git a/docs/tools/file-system.md b/docs/tools/file-system.md index 7bf90d06..7ce38359 100644 --- a/docs/tools/file-system.md +++ b/docs/tools/file-system.md @@ -68,72 +68,66 @@ Qwen Code provides a comprehensive suite of tools for interacting with the local - **File:** `glob.ts` - **Parameters:** - `pattern` (string, required): The glob pattern to match against (e.g., `"*.py"`, `"src/**/*.js"`). - - `path` (string, optional): The absolute path to the directory to search within. If omitted, searches the tool's root directory. - - `case_sensitive` (boolean, optional): Whether the search should be case-sensitive. Defaults to `false`. - - `respect_git_ignore` (boolean, optional): Whether to respect .gitignore patterns when finding files. Defaults to `true`. + - `path` (string, optional): The directory to search in. If not specified, the current working directory will be used. - **Behavior:** - Searches for files matching the glob pattern within the specified directory. - Returns a list of absolute paths, sorted with the most recently modified files first. - - Ignores common nuisance directories like `node_modules` and `.git` by default. -- **Output (`llmContent`):** A message like: `Found 5 file(s) matching "*.ts" within src, sorted by modification time (newest first):\nsrc/file1.ts\nsrc/subdir/file2.ts...` + - Respects .gitignore and .qwenignore patterns by default. + - Limits results to 100 files to prevent context overflow. +- **Output (`llmContent`):** A message like: `Found 5 file(s) matching "*.ts" within /path/to/search/dir, sorted by modification time (newest first):\n---\n/path/to/file1.ts\n/path/to/subdir/file2.ts\n---\n[95 files truncated] ...` - **Confirmation:** No. -## 5. `search_file_content` (SearchText) +## 5. `grep_search` (Grep) -`search_file_content` searches for a regular expression pattern within the content of files in a specified directory. Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers. +`grep_search` searches for a regular expression pattern within the content of files in a specified directory. Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers. -- **Tool name:** `search_file_content` -- **Display name:** SearchText -- **File:** `grep.ts` +- **Tool name:** `grep_search` +- **Display name:** Grep +- **File:** `ripGrep.ts` (with `grep.ts` as fallback) - **Parameters:** - - `pattern` (string, required): The regular expression (regex) to search for (e.g., `"function\s+myFunction"`). - - `path` (string, optional): The absolute path to the directory to search within. Defaults to the current working directory. 
- - `include` (string, optional): A glob pattern to filter which files are searched (e.g., `"*.js"`, `"src/**/*.{ts,tsx}"`). If omitted, searches most files (respecting common ignores). - - `maxResults` (number, optional): Maximum number of matches to return to prevent context overflow (default: 20, max: 100). Use lower values for broad searches, higher for specific searches. + - `pattern` (string, required): The regular expression pattern to search for in file contents (e.g., `"function\\s+myFunction"`, `"log.*Error"`). + - `path` (string, optional): File or directory to search in. Defaults to current working directory. + - `glob` (string, optional): Glob pattern to filter files (e.g. `"*.js"`, `"src/**/*.{ts,tsx}"`). + - `limit` (number, optional): Limit output to first N matching lines. Optional - shows all matches if not specified. - **Behavior:** - - Uses `git grep` if available in a Git repository for speed; otherwise, falls back to system `grep` or a JavaScript-based search. - - Returns a list of matching lines, each prefixed with its file path (relative to the search directory) and line number. - - Limits results to a maximum of 20 matches by default to prevent context overflow. When results are truncated, shows a clear warning with guidance on refining searches. + - Uses ripgrep for fast search when available; otherwise falls back to a JavaScript-based search implementation. + - Returns matching lines with file paths and line numbers. + - Case-insensitive by default. + - Respects .gitignore and .qwenignore patterns. + - Limits output to prevent context overflow. - **Output (`llmContent`):** A formatted string of matches, e.g.: ``` Found 3 matches for pattern "myFunction" in path "." (filter: "*.ts"): --- - File: src/utils.ts - L15: export function myFunction() { - L22: myFunction.call(); - --- - File: src/index.ts - L5: import { myFunction } from './utils'; + src/utils.ts:15:export function myFunction() { + src/utils.ts:22: myFunction.call(); + src/index.ts:5:import { myFunction } from './utils'; --- - WARNING: Results truncated to prevent context overflow. To see more results: - - Use a more specific pattern to reduce matches - - Add file filters with the 'include' parameter (e.g., "*.js", "src/**") - - Specify a narrower 'path' to search in a subdirectory - - Increase 'maxResults' parameter if you need more matches (current: 20) + [0 lines truncated] ... ``` - **Confirmation:** No. -### `search_file_content` examples +### `grep_search` examples Search for a pattern with default result limiting: ``` -search_file_content(pattern="function\s+myFunction", path="src") +grep_search(pattern="function\\s+myFunction", path="src") ``` Search for a pattern with custom result limiting: ``` -search_file_content(pattern="function", path="src", maxResults=50) +grep_search(pattern="function", path="src", limit=50) ``` Search for a pattern with file filtering and custom result limiting: ``` -search_file_content(pattern="function", include="*.js", maxResults=10) +grep_search(pattern="function", glob="*.js", limit=10) ``` ## 6. 
`edit` (Edit) diff --git a/packages/core/src/tools/glob.test.ts b/packages/core/src/tools/glob.test.ts index 164eb7e0..0cf1f9e3 100644 --- a/packages/core/src/tools/glob.test.ts +++ b/packages/core/src/tools/glob.test.ts @@ -88,17 +88,6 @@ describe('GlobTool', () => { expect(result.returnDisplay).toBe('Found 2 matching file(s)'); }); - it('should find files case-sensitively when case_sensitive is true', async () => { - const params: GlobToolParams = { pattern: '*.txt', case_sensitive: true }; - const invocation = globTool.build(params); - const result = await invocation.execute(abortSignal); - expect(result.llmContent).toContain('Found 1 file(s)'); - expect(result.llmContent).toContain(path.join(tempRootDir, 'fileA.txt')); - expect(result.llmContent).not.toContain( - path.join(tempRootDir, 'FileB.TXT'), - ); - }); - it('should find files case-insensitively by default (pattern: *.TXT)', async () => { const params: GlobToolParams = { pattern: '*.TXT' }; const invocation = globTool.build(params); @@ -108,18 +97,6 @@ describe('GlobTool', () => { expect(result.llmContent).toContain(path.join(tempRootDir, 'FileB.TXT')); }); - it('should find files case-insensitively when case_sensitive is false (pattern: *.TXT)', async () => { - const params: GlobToolParams = { - pattern: '*.TXT', - case_sensitive: false, - }; - const invocation = globTool.build(params); - const result = await invocation.execute(abortSignal); - expect(result.llmContent).toContain('Found 2 file(s)'); - expect(result.llmContent).toContain(path.join(tempRootDir, 'fileA.txt')); - expect(result.llmContent).toContain(path.join(tempRootDir, 'FileB.TXT')); - }); - it('should find files using a pattern that includes a subdirectory', async () => { const params: GlobToolParams = { pattern: 'sub/*.md' }; const invocation = globTool.build(params); @@ -207,7 +184,7 @@ describe('GlobTool', () => { const filesListed = llmContent .trim() .split(/\r?\n/) - .slice(1) + .slice(2) .map((line) => line.trim()) .filter(Boolean); @@ -220,14 +197,13 @@ describe('GlobTool', () => { ); }); - it('should return a PATH_NOT_IN_WORKSPACE error if path is outside workspace', async () => { + it('should return error if path is outside workspace', async () => { // Bypassing validation to test execute method directly vi.spyOn(globTool, 'validateToolParams').mockReturnValue(null); const params: GlobToolParams = { pattern: '*.txt', path: '/etc' }; const invocation = globTool.build(params); const result = await invocation.execute(abortSignal); - expect(result.error?.type).toBe(ToolErrorType.PATH_NOT_IN_WORKSPACE); - expect(result.returnDisplay).toBe('Path is not within workspace'); + expect(result.returnDisplay).toBe('Error: Path is not within workspace'); }); it('should return a GLOB_EXECUTION_ERROR on glob failure', async () => { @@ -255,15 +231,6 @@ describe('GlobTool', () => { expect(globTool.validateToolParams(params)).toBeNull(); }); - it('should return null for valid parameters (pattern, path, and case_sensitive)', () => { - const params: GlobToolParams = { - pattern: '*.js', - path: 'sub', - case_sensitive: true, - }; - expect(globTool.validateToolParams(params)).toBeNull(); - }); - it('should return error if pattern is missing (schema validation)', () => { // Need to correctly define this as an object without pattern const params = { path: '.' 
}; @@ -297,16 +264,6 @@ describe('GlobTool', () => { ); }); - it('should return error if case_sensitive is provided but is not a boolean', () => { - const params = { - pattern: '*.ts', - case_sensitive: 'true', - } as unknown as GlobToolParams; // Force incorrect type - expect(globTool.validateToolParams(params)).toBe( - 'params/case_sensitive must be boolean', - ); - }); - it("should return error if search path resolves outside the tool's root directory", () => { // Create a globTool instance specifically for this test, with a deeper root tempRootDir = path.join(tempRootDir, 'sub'); @@ -319,7 +276,7 @@ describe('GlobTool', () => { path: '../../../../../../../../../../tmp', // Definitely outside }; expect(specificGlobTool.validateToolParams(paramsOutside)).toContain( - 'resolves outside the allowed workspace directories', + 'Path is not within workspace', ); }); @@ -329,14 +286,14 @@ describe('GlobTool', () => { path: 'nonexistent_subdir', }; expect(globTool.validateToolParams(params)).toContain( - 'Search path does not exist', + 'Path does not exist', ); }); it('should return error if specified search path is a file, not a directory', async () => { const params: GlobToolParams = { pattern: '*.txt', path: 'fileA.txt' }; expect(globTool.validateToolParams(params)).toContain( - 'Search path is not a directory', + 'Path is not a directory', ); }); }); @@ -348,20 +305,10 @@ describe('GlobTool', () => { expect(globTool.validateToolParams(validPath)).toBeNull(); expect(globTool.validateToolParams(invalidPath)).toContain( - 'resolves outside the allowed workspace directories', + 'Path is not within workspace', ); }); - it('should provide clear error messages when path is outside workspace', () => { - const invalidPath = { pattern: '*.ts', path: '/etc' }; - const error = globTool.validateToolParams(invalidPath); - - expect(error).toContain( - 'resolves outside the allowed workspace directories', - ); - expect(error).toContain(tempRootDir); - }); - it('should work with paths in workspace subdirectories', async () => { const params: GlobToolParams = { pattern: '*.md', path: 'sub' }; const invocation = globTool.build(params); @@ -417,47 +364,123 @@ describe('GlobTool', () => { expect(result.llmContent).toContain('Found 3 file(s)'); // fileA.txt, FileB.TXT, b.notignored.txt expect(result.llmContent).not.toContain('a.qwenignored.txt'); }); + }); - it('should not respect .gitignore when respect_git_ignore is false', async () => { - await fs.writeFile(path.join(tempRootDir, '.gitignore'), '*.ignored.txt'); - await fs.writeFile( - path.join(tempRootDir, 'a.ignored.txt'), - 'ignored content', - ); + describe('file count truncation', () => { + it('should truncate results when more than 100 files are found', async () => { + // Create 150 test files + for (let i = 1; i <= 150; i++) { + await fs.writeFile( + path.join(tempRootDir, `file${i}.trunctest`), + `content${i}`, + ); + } - const params: GlobToolParams = { - pattern: '*.txt', - respect_git_ignore: false, - }; + const params: GlobToolParams = { pattern: '*.trunctest' }; const invocation = globTool.build(params); const result = await invocation.execute(abortSignal); + const llmContent = partListUnionToString(result.llmContent); - expect(result.llmContent).toContain('Found 3 file(s)'); // fileA.txt, FileB.TXT, a.ignored.txt - expect(result.llmContent).toContain('a.ignored.txt'); + // Should report all 150 files found + expect(llmContent).toContain('Found 150 file(s)'); + + // Should include truncation notice + expect(llmContent).toContain('[50 files 
truncated] ...'); + + // Count the number of .trunctest files mentioned in the output + const fileMatches = llmContent.match(/file\d+\.trunctest/g); + expect(fileMatches).toBeDefined(); + expect(fileMatches?.length).toBe(100); + + // returnDisplay should indicate truncation + expect(result.returnDisplay).toBe( + 'Found 150 matching file(s) (truncated)', + ); }); - it('should not respect .qwenignore when respect_qwen_ignore is false', async () => { - await fs.writeFile( - path.join(tempRootDir, '.qwenignore'), - '*.qwenignored.txt', - ); - await fs.writeFile( - path.join(tempRootDir, 'a.qwenignored.txt'), - 'ignored content', - ); + it('should not truncate when exactly 100 files are found', async () => { + // Create exactly 100 test files + for (let i = 1; i <= 100; i++) { + await fs.writeFile( + path.join(tempRootDir, `exact${i}.trunctest`), + `content${i}`, + ); + } - // Recreate the tool to pick up the new .qwenignore file - globTool = new GlobTool(mockConfig); - - const params: GlobToolParams = { - pattern: '*.txt', - respect_qwen_ignore: false, - }; + const params: GlobToolParams = { pattern: '*.trunctest' }; const invocation = globTool.build(params); const result = await invocation.execute(abortSignal); - expect(result.llmContent).toContain('Found 3 file(s)'); // fileA.txt, FileB.TXT, a.qwenignored.txt - expect(result.llmContent).toContain('a.qwenignored.txt'); + // Should report all 100 files found + expect(result.llmContent).toContain('Found 100 file(s)'); + + // Should NOT include truncation notice + expect(result.llmContent).not.toContain('truncated'); + + // Should show all 100 files + expect(result.llmContent).toContain('exact1.trunctest'); + expect(result.llmContent).toContain('exact100.trunctest'); + + // returnDisplay should NOT indicate truncation + expect(result.returnDisplay).toBe('Found 100 matching file(s)'); + }); + + it('should not truncate when fewer than 100 files are found', async () => { + // Create 50 test files + for (let i = 1; i <= 50; i++) { + await fs.writeFile( + path.join(tempRootDir, `small${i}.trunctest`), + `content${i}`, + ); + } + + const params: GlobToolParams = { pattern: '*.trunctest' }; + const invocation = globTool.build(params); + const result = await invocation.execute(abortSignal); + + // Should report all 50 files found + expect(result.llmContent).toContain('Found 50 file(s)'); + + // Should NOT include truncation notice + expect(result.llmContent).not.toContain('truncated'); + + // returnDisplay should NOT indicate truncation + expect(result.returnDisplay).toBe('Found 50 matching file(s)'); + }); + + it('should use correct singular/plural in truncation message for 1 file truncated', async () => { + // Create 101 test files (will truncate 1 file) + for (let i = 1; i <= 101; i++) { + await fs.writeFile( + path.join(tempRootDir, `singular${i}.trunctest`), + `content${i}`, + ); + } + + const params: GlobToolParams = { pattern: '*.trunctest' }; + const invocation = globTool.build(params); + const result = await invocation.execute(abortSignal); + + // Should use singular "file" for 1 truncated file + expect(result.llmContent).toContain('[1 file truncated] ...'); + expect(result.llmContent).not.toContain('[1 files truncated]'); + }); + + it('should use correct plural in truncation message for multiple files truncated', async () => { + // Create 105 test files (will truncate 5 files) + for (let i = 1; i <= 105; i++) { + await fs.writeFile( + path.join(tempRootDir, `plural${i}.trunctest`), + `content${i}`, + ); + } + + const params: GlobToolParams = { 
pattern: '*.trunctest' }; + const invocation = globTool.build(params); + const result = await invocation.execute(abortSignal); + + // Should use plural "files" for multiple truncated files + expect(result.llmContent).toContain('[5 files truncated] ...'); }); }); }); diff --git a/packages/core/src/tools/glob.ts b/packages/core/src/tools/glob.ts index 4826c859..2e9fa58e 100644 --- a/packages/core/src/tools/glob.ts +++ b/packages/core/src/tools/glob.ts @@ -10,10 +10,17 @@ import { glob, escape } from 'glob'; import type { ToolInvocation, ToolResult } from './tools.js'; import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js'; import { ToolNames } from './tool-names.js'; -import { shortenPath, makeRelative } from '../utils/paths.js'; +import { resolveAndValidatePath } from '../utils/paths.js'; import { type Config } from '../config/config.js'; -import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js'; +import { + DEFAULT_FILE_FILTERING_OPTIONS, + type FileFilteringOptions, +} from '../config/constants.js'; import { ToolErrorType } from './tool-error.js'; +import { getErrorMessage } from '../utils/errors.js'; +import type { FileDiscoveryService } from '../services/fileDiscoveryService.js'; + +const MAX_FILE_COUNT = 100; // Subset of 'Path' interface provided by 'glob' that we can implement for testing export interface GlobPath { @@ -64,118 +71,68 @@ export interface GlobToolParams { * The directory to search in (optional, defaults to current directory) */ path?: string; - - /** - * Whether the search should be case-sensitive (optional, defaults to false) - */ - case_sensitive?: boolean; - - /** - * Whether to respect .gitignore patterns (optional, defaults to true) - */ - respect_git_ignore?: boolean; - - /** - * Whether to respect .qwenignore patterns (optional, defaults to true) - */ - respect_qwen_ignore?: boolean; } class GlobToolInvocation extends BaseToolInvocation< GlobToolParams, ToolResult > { + private fileService: FileDiscoveryService; + constructor( private config: Config, params: GlobToolParams, ) { super(params); + this.fileService = config.getFileService(); } getDescription(): string { let description = `'${this.params.pattern}'`; if (this.params.path) { - const searchDir = path.resolve( - this.config.getTargetDir(), - this.params.path || '.', - ); - const relativePath = makeRelative(searchDir, this.config.getTargetDir()); - description += ` within ${shortenPath(relativePath)}`; + description += ` in path '${this.params.path}'`; } + return description; } async execute(signal: AbortSignal): Promise { try { - const workspaceContext = this.config.getWorkspaceContext(); - const workspaceDirectories = workspaceContext.getDirectories(); + // Default to target directory if no path is provided + const searchDirAbs = resolveAndValidatePath( + this.config, + this.params.path, + ); + const searchLocationDescription = this.params.path + ? 
`within ${searchDirAbs}` + : `in the workspace directory`; - // If a specific path is provided, resolve it and check if it's within workspace - let searchDirectories: readonly string[]; - if (this.params.path) { - const searchDirAbsolute = path.resolve( - this.config.getTargetDir(), - this.params.path, - ); - if (!workspaceContext.isPathWithinWorkspace(searchDirAbsolute)) { - const rawError = `Error: Path "${this.params.path}" is not within any workspace directory`; - return { - llmContent: rawError, - returnDisplay: `Path is not within workspace`, - error: { - message: rawError, - type: ToolErrorType.PATH_NOT_IN_WORKSPACE, - }, - }; - } - searchDirectories = [searchDirAbsolute]; - } else { - // Search across all workspace directories - searchDirectories = workspaceDirectories; + // Collect entries from the search directory + let pattern = this.params.pattern; + const fullPath = path.join(searchDirAbs, pattern); + if (fs.existsSync(fullPath)) { + pattern = escape(pattern); } - // Get centralized file discovery service - const fileDiscovery = this.config.getFileService(); - - // Collect entries from all search directories - const allEntries: GlobPath[] = []; - for (const searchDir of searchDirectories) { - let pattern = this.params.pattern; - const fullPath = path.join(searchDir, pattern); - if (fs.existsSync(fullPath)) { - pattern = escape(pattern); - } - - const entries = (await glob(pattern, { - cwd: searchDir, - withFileTypes: true, - nodir: true, - stat: true, - nocase: !this.params.case_sensitive, - dot: true, - ignore: this.config.getFileExclusions().getGlobExcludes(), - follow: false, - signal, - })) as GlobPath[]; - - allEntries.push(...entries); - } + const allEntries = (await glob(pattern, { + cwd: searchDirAbs, + withFileTypes: true, + nodir: true, + stat: true, + nocase: true, + dot: true, + follow: false, + signal, + })) as GlobPath[]; const relativePaths = allEntries.map((p) => path.relative(this.config.getTargetDir(), p.fullpath()), ); - const { filteredPaths, gitIgnoredCount, qwenIgnoredCount } = - fileDiscovery.filterFilesWithReport(relativePaths, { - respectGitIgnore: - this.params?.respect_git_ignore ?? - this.config.getFileFilteringOptions().respectGitIgnore ?? - DEFAULT_FILE_FILTERING_OPTIONS.respectGitIgnore, - respectQwenIgnore: - this.params?.respect_qwen_ignore ?? - this.config.getFileFilteringOptions().respectQwenIgnore ?? 
- DEFAULT_FILE_FILTERING_OPTIONS.respectQwenIgnore, - }); + const { filteredPaths } = this.fileService.filterFilesWithReport( + relativePaths, + this.getFileFilteringOptions(), + ); const filteredAbsolutePaths = new Set( filteredPaths.map((p) => path.resolve(this.config.getTargetDir(), p)), @@ -186,20 +143,8 @@ class GlobToolInvocation extends BaseToolInvocation< ); if (!filteredEntries || filteredEntries.length === 0) { - let message = `No files found matching pattern "${this.params.pattern}"`; - if (searchDirectories.length === 1) { - message += ` within ${searchDirectories[0]}`; - } else { - message += ` within ${searchDirectories.length} workspace directories`; - } - if (gitIgnoredCount > 0) { - message += ` (${gitIgnoredCount} files were git-ignored)`; - } - if (qwenIgnoredCount > 0) { - message += ` (${qwenIgnoredCount} files were qwen-ignored)`; - } return { - llmContent: message, + llmContent: `No files found matching pattern "${this.params.pattern}" ${searchLocationDescription}`, returnDisplay: `No files found`, }; } @@ -215,29 +160,32 @@ class GlobToolInvocation extends BaseToolInvocation< oneDayInMs, ); - const sortedAbsolutePaths = sortedEntries.map((entry) => + const totalFileCount = sortedEntries.length; + const truncated = totalFileCount > MAX_FILE_COUNT; + + // Limit to MAX_FILE_COUNT if needed + const entriesToShow = truncated + ? sortedEntries.slice(0, MAX_FILE_COUNT) + : sortedEntries; + + const sortedAbsolutePaths = entriesToShow.map((entry) => entry.fullpath(), ); const fileListDescription = sortedAbsolutePaths.join('\n'); - const fileCount = sortedAbsolutePaths.length; - let resultMessage = `Found ${fileCount} file(s) matching "${this.params.pattern}"`; - if (searchDirectories.length === 1) { - resultMessage += ` within ${searchDirectories[0]}`; - } else { - resultMessage += ` across ${searchDirectories.length} workspace directories`; + let resultMessage = `Found ${totalFileCount} file(s) matching "${this.params.pattern}" ${searchLocationDescription}`; + resultMessage += `, sorted by modification time (newest first):\n---\n${fileListDescription}`; + + // Add truncation notice if needed + if (truncated) { + const omittedFiles = totalFileCount - MAX_FILE_COUNT; + const fileTerm = omittedFiles === 1 ? 'file' : 'files'; + resultMessage += `\n---\n[${omittedFiles} ${fileTerm} truncated] ...`; } - if (gitIgnoredCount > 0) { - resultMessage += ` (${gitIgnoredCount} additional files were git-ignored)`; - } - if (qwenIgnoredCount > 0) { - resultMessage += ` (${qwenIgnoredCount} additional files were qwen-ignored)`; - } - resultMessage += `, sorted by modification time (newest first):\n${fileListDescription}`; return { llmContent: resultMessage, - returnDisplay: `Found ${fileCount} matching file(s)`, + returnDisplay: `Found ${totalFileCount} matching file(s)${truncated ? ' (truncated)' : ''}`, }; } catch (error) { const errorMessage = @@ -246,7 +194,7 @@ class GlobToolInvocation extends BaseToolInvocation< const rawError = `Error during glob search operation: ${errorMessage}`; return { llmContent: rawError, - returnDisplay: `Error: An unexpected error occurred.`, + returnDisplay: `Error: ${errorMessage || 'An unexpected error occurred.'}`, error: { message: rawError, type: ToolErrorType.GLOB_EXECUTION_ERROR, @@ -254,6 +202,18 @@ class GlobToolInvocation extends BaseToolInvocation< }; } } + + private getFileFilteringOptions(): FileFilteringOptions { + const options = this.config.getFileFilteringOptions?.(); + return { + respectGitIgnore: + options?.respectGitIgnore ?? 
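// A minimal sketch of the result-list truncation introduced in GlobTool's execute()
// above, assuming the MAX_FILE_COUNT of 100 declared at the top of glob.ts. The
// helper name and return shape are illustrative only and do not appear in the patch.
const MAX_FILE_COUNT_SKETCH = 100;

function truncateFileListSketch(sortedPaths: string[]): {
  listing: string;
  display: string;
} {
  const total = sortedPaths.length;
  const truncated = total > MAX_FILE_COUNT_SKETCH;
  // Keep at most MAX_FILE_COUNT entries, then note how many were omitted.
  const shown = truncated ? sortedPaths.slice(0, MAX_FILE_COUNT_SKETCH) : sortedPaths;
  let listing = shown.join('\n');
  if (truncated) {
    const omitted = total - MAX_FILE_COUNT_SKETCH;
    listing += `\n---\n[${omitted} ${omitted === 1 ? 'file' : 'files'} truncated] ...`;
  }
  // Display string mirrors the tests: e.g. "Found 150 matching file(s) (truncated)".
  const display = `Found ${total} matching file(s)${truncated ? ' (truncated)' : ''}`;
  return { listing, display };
}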
+ DEFAULT_FILE_FILTERING_OPTIONS.respectGitIgnore, + respectQwenIgnore: + options?.respectQwenIgnore ?? + DEFAULT_FILE_FILTERING_OPTIONS.respectQwenIgnore, + }; + } } /** @@ -266,35 +226,19 @@ export class GlobTool extends BaseDeclarativeTool { super( GlobTool.Name, 'FindFiles', - 'Efficiently finds files matching specific glob patterns (e.g., `src/**/*.ts`, `**/*.md`), returning absolute paths sorted by modification time (newest first). Ideal for quickly locating files based on their name or path structure, especially in large codebases.', + 'Fast file pattern matching tool that works with any codebase size\n- Supports glob patterns like "**/*.js" or "src/**/*.ts"\n- Returns matching file paths sorted by modification time\n- Use this tool when you need to find files by name patterns\n- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Agent tool instead\n- You have the capability to call multiple tools in a single response. It is always better to speculatively perform multiple searches as a batch that are potentially useful.', Kind.Search, { properties: { pattern: { - description: - "The glob pattern to match against (e.g., '**/*.py', 'docs/*.md').", + description: 'The glob pattern to match files against', type: 'string', }, path: { description: - 'Optional: The absolute path to the directory to search within. If omitted, searches the root directory.', + 'The directory to search in. If not specified, the current working directory will be used. IMPORTANT: Omit this field to use the default directory. DO NOT enter "undefined" or "null" - simply omit it for the default behavior. Must be a valid directory path if provided.', type: 'string', }, - case_sensitive: { - description: - 'Optional: Whether the search should be case-sensitive. Defaults to false.', - type: 'boolean', - }, - respect_git_ignore: { - description: - 'Optional: Whether to respect .gitignore patterns when finding files. Only available in git repositories. Defaults to true.', - type: 'boolean', - }, - respect_qwen_ignore: { - description: - 'Optional: Whether to respect .qwenignore patterns when finding files. 
Defaults to true.', - type: 'boolean', - }, }, required: ['pattern'], type: 'object', @@ -308,29 +252,6 @@ export class GlobTool extends BaseDeclarativeTool { protected override validateToolParamValues( params: GlobToolParams, ): string | null { - const searchDirAbsolute = path.resolve( - this.config.getTargetDir(), - params.path || '.', - ); - - const workspaceContext = this.config.getWorkspaceContext(); - if (!workspaceContext.isPathWithinWorkspace(searchDirAbsolute)) { - const directories = workspaceContext.getDirectories(); - return `Search path ("${searchDirAbsolute}") resolves outside the allowed workspace directories: ${directories.join(', ')}`; - } - - const targetDir = searchDirAbsolute || this.config.getTargetDir(); - try { - if (!fs.existsSync(targetDir)) { - return `Search path does not exist ${targetDir}`; - } - if (!fs.statSync(targetDir).isDirectory()) { - return `Search path is not a directory: ${targetDir}`; - } - } catch (e: unknown) { - return `Error accessing search path: ${e}`; - } - if ( !params.pattern || typeof params.pattern !== 'string' || @@ -339,6 +260,15 @@ export class GlobTool extends BaseDeclarativeTool { return "The 'pattern' parameter cannot be empty."; } + // Only validate path if one is provided + if (params.path) { + try { + resolveAndValidatePath(this.config, params.path); + } catch (error) { + return getErrorMessage(error); + } + } + return null; } diff --git a/packages/core/src/tools/grep.test.ts b/packages/core/src/tools/grep.test.ts index f0707908..497fbb7d 100644 --- a/packages/core/src/tools/grep.test.ts +++ b/packages/core/src/tools/grep.test.ts @@ -84,11 +84,11 @@ describe('GrepTool', () => { expect(grepTool.validateToolParams(params)).toBeNull(); }); - it('should return null for valid params (pattern, path, and include)', () => { + it('should return null for valid params (pattern, path, and glob)', () => { const params: GrepToolParams = { pattern: 'hello', path: '.', - include: '*.txt', + glob: '*.txt', }; expect(grepTool.validateToolParams(params)).toBeNull(); }); @@ -111,7 +111,7 @@ describe('GrepTool', () => { const params: GrepToolParams = { pattern: 'hello', path: 'nonexistent' }; // Check for the core error message, as the full path might vary expect(grepTool.validateToolParams(params)).toContain( - 'Failed to access path stats for', + 'Path does not exist:', ); expect(grepTool.validateToolParams(params)).toContain('nonexistent'); }); @@ -155,8 +155,8 @@ describe('GrepTool', () => { expect(result.returnDisplay).toBe('Found 1 match'); }); - it('should find matches with an include glob', async () => { - const params: GrepToolParams = { pattern: 'hello', include: '*.js' }; + it('should find matches with a glob filter', async () => { + const params: GrepToolParams = { pattern: 'hello', glob: '*.js' }; const invocation = grepTool.build(params); const result = await invocation.execute(abortSignal); expect(result.llmContent).toContain( @@ -169,7 +169,7 @@ describe('GrepTool', () => { expect(result.returnDisplay).toBe('Found 1 match'); }); - it('should find matches with an include glob and path', async () => { + it('should find matches with a glob filter and path', async () => { await fs.writeFile( path.join(tempRootDir, 'sub', 'another.js'), 'const greeting = "hello";', @@ -177,7 +177,7 @@ describe('GrepTool', () => { const params: GrepToolParams = { pattern: 'hello', path: 'sub', - include: '*.js', + glob: '*.js', }; const invocation = grepTool.build(params); const result = await invocation.execute(abortSignal); @@ -244,59 +244,23 @@ 
describe('GrepTool', () => { describe('multi-directory workspace', () => { it('should search across all workspace directories when no path is specified', async () => { - // Create additional directory with test files - const secondDir = await fs.mkdtemp( - path.join(os.tmpdir(), 'grep-tool-second-'), - ); - await fs.writeFile( - path.join(secondDir, 'other.txt'), - 'hello from second directory\nworld in second', - ); - await fs.writeFile( - path.join(secondDir, 'another.js'), - 'function world() { return "test"; }', - ); - - // Create a mock config with multiple directories - const multiDirConfig = { - getTargetDir: () => tempRootDir, - getWorkspaceContext: () => - createMockWorkspaceContext(tempRootDir, [secondDir]), - getFileExclusions: () => ({ - getGlobExcludes: () => [], - }), - } as unknown as Config; - - const multiDirGrepTool = new GrepTool(multiDirConfig); + // The new implementation searches only in the target directory (first workspace directory) + // when no path is specified, not across all workspace directories const params: GrepToolParams = { pattern: 'world' }; - const invocation = multiDirGrepTool.build(params); + const invocation = grepTool.build(params); const result = await invocation.execute(abortSignal); - // Should find matches in both directories + // Should find matches in the target directory only expect(result.llmContent).toContain( - 'Found 5 matches for pattern "world"', + 'Found 3 matches for pattern "world" in the workspace directory', ); - // Matches from first directory + // Matches from target directory expect(result.llmContent).toContain('fileA.txt'); expect(result.llmContent).toContain('L1: hello world'); expect(result.llmContent).toContain('L2: second line with world'); expect(result.llmContent).toContain('fileC.txt'); expect(result.llmContent).toContain('L1: another world in sub dir'); - - // Matches from second directory (with directory name prefix) - const secondDirName = path.basename(secondDir); - expect(result.llmContent).toContain( - `File: ${path.join(secondDirName, 'other.txt')}`, - ); - expect(result.llmContent).toContain('L2: world in second'); - expect(result.llmContent).toContain( - `File: ${path.join(secondDirName, 'another.js')}`, - ); - expect(result.llmContent).toContain('L1: function world()'); - - // Clean up - await fs.rm(secondDir, { recursive: true, force: true }); }); it('should search only specified path within workspace directories', async () => { @@ -346,16 +310,18 @@ describe('GrepTool', () => { it('should generate correct description with pattern only', () => { const params: GrepToolParams = { pattern: 'testPattern' }; const invocation = grepTool.build(params); - expect(invocation.getDescription()).toBe("'testPattern'"); + expect(invocation.getDescription()).toBe("'testPattern' in path './'"); }); - it('should generate correct description with pattern and include', () => { + it('should generate correct description with pattern and glob', () => { const params: GrepToolParams = { pattern: 'testPattern', - include: '*.ts', + glob: '*.ts', }; const invocation = grepTool.build(params); - expect(invocation.getDescription()).toBe("'testPattern' in *.ts"); + expect(invocation.getDescription()).toBe( + "'testPattern' in path './' (filter: '*.ts')", + ); }); it('should generate correct description with pattern and path', async () => { @@ -366,49 +332,37 @@ describe('GrepTool', () => { path: path.join('src', 'app'), }; const invocation = grepTool.build(params); - // The path will be relative to the tempRootDir, so we check for 
containment. - expect(invocation.getDescription()).toContain("'testPattern' within"); - expect(invocation.getDescription()).toContain(path.join('src', 'app')); - }); - - it('should indicate searching across all workspace directories when no path specified', () => { - // Create a mock config with multiple directories - const multiDirConfig = { - getTargetDir: () => tempRootDir, - getWorkspaceContext: () => - createMockWorkspaceContext(tempRootDir, ['/another/dir']), - getFileExclusions: () => ({ - getGlobExcludes: () => [], - }), - } as unknown as Config; - - const multiDirGrepTool = new GrepTool(multiDirConfig); - const params: GrepToolParams = { pattern: 'testPattern' }; - const invocation = multiDirGrepTool.build(params); - expect(invocation.getDescription()).toBe( - "'testPattern' across all workspace directories", + expect(invocation.getDescription()).toContain( + "'testPattern' in path 'src", ); + expect(invocation.getDescription()).toContain("app'"); }); - it('should generate correct description with pattern, include, and path', async () => { + it('should indicate searching workspace directory when no path specified', () => { + const params: GrepToolParams = { pattern: 'testPattern' }; + const invocation = grepTool.build(params); + expect(invocation.getDescription()).toBe("'testPattern' in path './'"); + }); + + it('should generate correct description with pattern, glob, and path', async () => { const dirPath = path.join(tempRootDir, 'src', 'app'); await fs.mkdir(dirPath, { recursive: true }); const params: GrepToolParams = { pattern: 'testPattern', - include: '*.ts', + glob: '*.ts', path: path.join('src', 'app'), }; const invocation = grepTool.build(params); expect(invocation.getDescription()).toContain( - "'testPattern' in *.ts within", + "'testPattern' in path 'src", ); - expect(invocation.getDescription()).toContain(path.join('src', 'app')); + expect(invocation.getDescription()).toContain("(filter: '*.ts')"); }); it('should use ./ for root path in description', () => { const params: GrepToolParams = { pattern: 'testPattern', path: '.' 
}; const invocation = grepTool.build(params); - expect(invocation.getDescription()).toBe("'testPattern' within ./"); + expect(invocation.getDescription()).toBe("'testPattern' in path '.'"); }); }); @@ -422,67 +376,50 @@ describe('GrepTool', () => { } }); - it('should limit results to default 20 matches', async () => { + it('should show all results when no limit is specified', async () => { const params: GrepToolParams = { pattern: 'testword' }; const invocation = grepTool.build(params); const result = await invocation.execute(abortSignal); - expect(result.llmContent).toContain('Found 20 matches'); - expect(result.llmContent).toContain( - 'showing first 20 of 30+ total matches', - ); - expect(result.llmContent).toContain('WARNING: Results truncated'); - expect(result.returnDisplay).toContain( - 'Found 20 matches (truncated from 30+)', - ); + // New implementation shows all matches when limit is not specified + expect(result.llmContent).toContain('Found 30 matches'); + expect(result.llmContent).not.toContain('truncated'); + expect(result.returnDisplay).toBe('Found 30 matches'); }); - it('should respect custom maxResults parameter', async () => { - const params: GrepToolParams = { pattern: 'testword', maxResults: 5 }; + it('should respect custom limit parameter', async () => { + const params: GrepToolParams = { pattern: 'testword', limit: 5 }; const invocation = grepTool.build(params); const result = await invocation.execute(abortSignal); - expect(result.llmContent).toContain('Found 5 matches'); - expect(result.llmContent).toContain( - 'showing first 5 of 30+ total matches', - ); - expect(result.llmContent).toContain('current: 5'); - expect(result.returnDisplay).toContain( - 'Found 5 matches (truncated from 30+)', - ); + // Should find 30 total but limit to 5 + expect(result.llmContent).toContain('Found 30 matches'); + expect(result.llmContent).toContain('25 lines truncated'); + expect(result.returnDisplay).toContain('Found 30 matches (truncated)'); }); it('should not show truncation warning when all results fit', async () => { - const params: GrepToolParams = { pattern: 'testword', maxResults: 50 }; + const params: GrepToolParams = { pattern: 'testword', limit: 50 }; const invocation = grepTool.build(params); const result = await invocation.execute(abortSignal); expect(result.llmContent).toContain('Found 30 matches'); - expect(result.llmContent).not.toContain('WARNING: Results truncated'); - expect(result.llmContent).not.toContain('showing first'); + expect(result.llmContent).not.toContain('truncated'); expect(result.returnDisplay).toBe('Found 30 matches'); }); - it('should validate maxResults parameter', () => { - const invalidParams = [ - { pattern: 'test', maxResults: 0 }, - { pattern: 'test', maxResults: 101 }, - { pattern: 'test', maxResults: -1 }, - { pattern: 'test', maxResults: 1.5 }, - ]; - - invalidParams.forEach((params) => { - const error = grepTool.validateToolParams(params as GrepToolParams); - expect(error).toBeTruthy(); // Just check that validation fails - expect(error).toMatch(/maxResults|must be/); // Check it's about maxResults validation - }); + it('should not validate limit parameter', () => { + // limit parameter has no validation constraints in the new implementation + const params = { pattern: 'test', limit: 5 }; + const error = grepTool.validateToolParams(params as GrepToolParams); + expect(error).toBeNull(); }); - it('should accept valid maxResults parameter', () => { + it('should accept valid limit parameter', () => { const validParams = [ - { pattern: 'test', 
maxResults: 1 }, - { pattern: 'test', maxResults: 50 }, - { pattern: 'test', maxResults: 100 }, + { pattern: 'test', limit: 1 }, + { pattern: 'test', limit: 50 }, + { pattern: 'test', limit: 100 }, ]; validParams.forEach((params) => { diff --git a/packages/core/src/tools/grep.ts b/packages/core/src/tools/grep.ts index 08f651ac..1aed46c0 100644 --- a/packages/core/src/tools/grep.ts +++ b/packages/core/src/tools/grep.ts @@ -4,7 +4,6 @@ * SPDX-License-Identifier: Apache-2.0 */ -import fs from 'node:fs'; import fsPromises from 'node:fs/promises'; import path from 'node:path'; import { EOL } from 'node:os'; @@ -13,13 +12,15 @@ import { globStream } from 'glob'; import type { ToolInvocation, ToolResult } from './tools.js'; import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js'; import { ToolNames } from './tool-names.js'; -import { makeRelative, shortenPath } from '../utils/paths.js'; +import { resolveAndValidatePath } from '../utils/paths.js'; import { getErrorMessage, isNodeError } from '../utils/errors.js'; import { isGitRepository } from '../utils/gitUtils.js'; import type { Config } from '../config/config.js'; import type { FileExclusions } from '../utils/ignorePatterns.js'; import { ToolErrorType } from './tool-error.js'; +const MAX_LLM_CONTENT_LENGTH = 20_000; + // --- Interfaces --- /** @@ -37,14 +38,14 @@ export interface GrepToolParams { path?: string; /** - * File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}") + * Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}") */ - include?: string; + glob?: string; /** - * Maximum number of matches to return (optional, defaults to 20) + * Maximum number of matching lines to return (optional, shows all if not specified) */ - maxResults?: number; + limit?: number; } /** @@ -70,121 +71,57 @@ class GrepToolInvocation extends BaseToolInvocation< this.fileExclusions = config.getFileExclusions(); } - /** - * Checks if a path is within the root directory and resolves it. - * @param relativePath Path relative to the root directory (or undefined for root). - * @returns The absolute path if valid and exists, or null if no path specified (to search all directories). - * @throws {Error} If path is outside root, doesn't exist, or isn't a directory. 
- */ - private resolveAndValidatePath(relativePath?: string): string | null { - // If no path specified, return null to indicate searching all workspace directories - if (!relativePath) { - return null; - } - - const targetPath = path.resolve(this.config.getTargetDir(), relativePath); - - // Security Check: Ensure the resolved path is within workspace boundaries - const workspaceContext = this.config.getWorkspaceContext(); - if (!workspaceContext.isPathWithinWorkspace(targetPath)) { - const directories = workspaceContext.getDirectories(); - throw new Error( - `Path validation failed: Attempted path "${relativePath}" resolves outside the allowed workspace directories: ${directories.join(', ')}`, - ); - } - - // Check existence and type after resolving - try { - const stats = fs.statSync(targetPath); - if (!stats.isDirectory()) { - throw new Error(`Path is not a directory: ${targetPath}`); - } - } catch (error: unknown) { - if (isNodeError(error) && error.code !== 'ENOENT') { - throw new Error(`Path does not exist: ${targetPath}`); - } - throw new Error( - `Failed to access path stats for ${targetPath}: ${error}`, - ); - } - - return targetPath; - } - async execute(signal: AbortSignal): Promise { try { - const workspaceContext = this.config.getWorkspaceContext(); - const searchDirAbs = this.resolveAndValidatePath(this.params.path); + // Default to target directory if no path is provided + const searchDirAbs = resolveAndValidatePath( + this.config, + this.params.path, + ); const searchDirDisplay = this.params.path || '.'; - // Determine which directories to search - let searchDirectories: readonly string[]; - if (searchDirAbs === null) { - // No path specified - search all workspace directories - searchDirectories = workspaceContext.getDirectories(); - } else { - // Specific path provided - search only that directory - searchDirectories = [searchDirAbs]; - } + // Perform grep search + const rawMatches = await this.performGrepSearch({ + pattern: this.params.pattern, + path: searchDirAbs, + glob: this.params.glob, + signal, + }); - // Collect matches from all search directories - let allMatches: GrepMatch[] = []; - const maxResults = this.params.maxResults ?? 20; // Default to 20 results - let totalMatchesFound = 0; - let searchTruncated = false; + // Build search description + const searchLocationDescription = this.params.path + ? `in path "${searchDirDisplay}"` + : `in the workspace directory`; - for (const searchDir of searchDirectories) { - const matches = await this.performGrepSearch({ - pattern: this.params.pattern, - path: searchDir, - include: this.params.include, - signal, - }); + const filterDescription = this.params.glob + ? ` (filter: "${this.params.glob}")` + : ''; - totalMatchesFound += matches.length; - - // Add directory prefix if searching multiple directories - if (searchDirectories.length > 1) { - const dirName = path.basename(searchDir); - matches.forEach((match) => { - match.filePath = path.join(dirName, match.filePath); - }); - } - - // Apply result limiting - const remainingSlots = maxResults - allMatches.length; - if (remainingSlots <= 0) { - searchTruncated = true; - break; - } - - if (matches.length > remainingSlots) { - allMatches = allMatches.concat(matches.slice(0, remainingSlots)); - searchTruncated = true; - break; - } else { - allMatches = allMatches.concat(matches); - } - } - - let searchLocationDescription: string; - if (searchDirAbs === null) { - const numDirs = workspaceContext.getDirectories().length; - searchLocationDescription = - numDirs > 1 - ? 
`across ${numDirs} workspace directories` - : `in the workspace directory`; - } else { - searchLocationDescription = `in path "${searchDirDisplay}"`; - } - - if (allMatches.length === 0) { - const noMatchMsg = `No matches found for pattern "${this.params.pattern}" ${searchLocationDescription}${this.params.include ? ` (filter: "${this.params.include}")` : ''}.`; + // Check if we have any matches + if (rawMatches.length === 0) { + const noMatchMsg = `No matches found for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}.`; return { llmContent: noMatchMsg, returnDisplay: `No matches found` }; } + // Apply line limit if specified + let truncatedByLineLimit = false; + let matchesToInclude = rawMatches; + if ( + this.params.limit !== undefined && + rawMatches.length > this.params.limit + ) { + matchesToInclude = rawMatches.slice(0, this.params.limit); + truncatedByLineLimit = true; + } + + const totalMatches = rawMatches.length; + const matchTerm = totalMatches === 1 ? 'match' : 'matches'; + + // Build header + const header = `Found ${totalMatches} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}:\n---\n`; + // Group matches by file - const matchesByFile = allMatches.reduce( + const matchesByFile = matchesToInclude.reduce( (acc, match) => { const fileKey = match.filePath; if (!acc[fileKey]) { @@ -197,46 +134,51 @@ class GrepToolInvocation extends BaseToolInvocation< {} as Record, ); - const matchCount = allMatches.length; - const matchTerm = matchCount === 1 ? 'match' : 'matches'; - - // Build the header with truncation info if needed - let headerText = `Found ${matchCount} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${this.params.include ? ` (filter: "${this.params.include}")` : ''}`; - - if (searchTruncated) { - headerText += ` (showing first ${matchCount} of ${totalMatchesFound}+ total matches)`; - } - - let llmContent = `${headerText}: ---- -`; - + // Build grep output + let grepOutput = ''; for (const filePath in matchesByFile) { - llmContent += `File: ${filePath}\n`; + grepOutput += `File: ${filePath}\n`; matchesByFile[filePath].forEach((match) => { const trimmedLine = match.line.trim(); - llmContent += `L${match.lineNumber}: ${trimmedLine}\n`; + grepOutput += `L${match.lineNumber}: ${trimmedLine}\n`; }); - llmContent += '---\n'; + grepOutput += '---\n'; } - // Add truncation guidance if results were limited - if (searchTruncated) { - llmContent += `\nWARNING: Results truncated to prevent context overflow. 
To see more results: -- Use a more specific pattern to reduce matches -- Add file filters with the 'include' parameter (e.g., "*.js", "src/**") -- Specify a narrower 'path' to search in a subdirectory -- Increase 'maxResults' parameter if you need more matches (current: ${maxResults})`; + // Apply character limit as safety net + let truncatedByCharLimit = false; + if (grepOutput.length > MAX_LLM_CONTENT_LENGTH) { + grepOutput = grepOutput.slice(0, MAX_LLM_CONTENT_LENGTH) + '...'; + truncatedByCharLimit = true; } - let displayText = `Found ${matchCount} ${matchTerm}`; - if (searchTruncated) { - displayText += ` (truncated from ${totalMatchesFound}+)`; + // Count how many lines we actually included after character truncation + const finalLines = grepOutput + .split('\n') + .filter( + (line) => + line.trim() && !line.startsWith('File:') && !line.startsWith('---'), + ); + const includedLines = finalLines.length; + + // Build result + let llmContent = header + grepOutput; + + // Add truncation notice if needed + if (truncatedByLineLimit || truncatedByCharLimit) { + const omittedMatches = totalMatches - includedLines; + llmContent += ` [${omittedMatches} ${omittedMatches === 1 ? 'line' : 'lines'} truncated] ...`; + } + + // Build display message + let displayMessage = `Found ${totalMatches} ${matchTerm}`; + if (truncatedByLineLimit || truncatedByCharLimit) { + displayMessage += ` (truncated)`; } return { llmContent: llmContent.trim(), - returnDisplay: displayText, + returnDisplay: displayMessage, }; } catch (error) { console.error(`Error during GrepLogic execution: ${error}`); @@ -329,50 +271,26 @@ class GrepToolInvocation extends BaseToolInvocation< * @returns A string describing the grep */ getDescription(): string { - let description = `'${this.params.pattern}'`; - if (this.params.include) { - description += ` in ${this.params.include}`; - } - if (this.params.path) { - const resolvedPath = path.resolve( - this.config.getTargetDir(), - this.params.path, - ); - if ( - resolvedPath === this.config.getTargetDir() || - this.params.path === '.' - ) { - description += ` within ./`; - } else { - const relativePath = makeRelative( - resolvedPath, - this.config.getTargetDir(), - ); - description += ` within ${shortenPath(relativePath)}`; - } - } else { - // When no path is specified, indicate searching all workspace directories - const workspaceContext = this.config.getWorkspaceContext(); - const directories = workspaceContext.getDirectories(); - if (directories.length > 1) { - description += ` across all workspace directories`; - } + let description = `'${this.params.pattern}' in path '${this.params.path || './'}'`; + if (this.params.glob) { + description += ` (filter: '${this.params.glob}')`; } + return description; } /** * Performs the actual search using the prioritized strategies. - * @param options Search options including pattern, absolute path, and include glob. + * @param options Search options including pattern, absolute path, and glob filter. * @returns A promise resolving to an array of match objects. 
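// A minimal sketch of the two-stage truncation used in GrepTool's execute() above:
// an optional `limit` on matching lines first, then a character cap as a safety net,
// assuming the MAX_LLM_CONTENT_LENGTH of 20_000 declared at the top of grep.ts.
// The helper name is illustrative and the real code also groups matches by file.
const MAX_LLM_CONTENT_LENGTH_SKETCH = 20_000;

function buildGrepOutputSketch(
  header: string,
  matchLines: string[],
  limit?: number,
): string {
  const total = matchLines.length;
  // 1. Apply the optional line limit.
  const kept = limit !== undefined ? matchLines.slice(0, limit) : matchLines;
  let truncated = kept.length < total;
  // 2. Apply the character cap as a safety net.
  let body = kept.join('\n');
  if (body.length > MAX_LLM_CONTENT_LENGTH_SKETCH) {
    body = body.slice(0, MAX_LLM_CONTENT_LENGTH_SKETCH) + '...';
    truncated = true;
  }
  // Approximate count of lines that survived, so the notice can report omissions.
  const included = body.split('\n').filter((l) => l.trim()).length;
  const omitted = total - included;
  const notice = truncated
    ? ` [${omitted} ${omitted === 1 ? 'line' : 'lines'} truncated] ...`
    : '';
  return header + body + notice;
}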
*/ private async performGrepSearch(options: { pattern: string; path: string; // Expects absolute path - include?: string; + glob?: string; signal: AbortSignal; }): Promise { - const { pattern, path: absolutePath, include } = options; + const { pattern, path: absolutePath, glob } = options; let strategyUsed = 'none'; try { @@ -390,8 +308,8 @@ class GrepToolInvocation extends BaseToolInvocation< '--ignore-case', pattern, ]; - if (include) { - gitArgs.push('--', include); + if (glob) { + gitArgs.push('--', glob); } try { @@ -457,8 +375,8 @@ class GrepToolInvocation extends BaseToolInvocation< }) .filter((dir): dir is string => !!dir); commonExcludes.forEach((dir) => grepArgs.push(`--exclude-dir=${dir}`)); - if (include) { - grepArgs.push(`--include=${include}`); + if (glob) { + grepArgs.push(`--include=${glob}`); } grepArgs.push(pattern); grepArgs.push('.'); @@ -537,7 +455,7 @@ class GrepToolInvocation extends BaseToolInvocation< 'GrepLogic: Falling back to JavaScript grep implementation.', ); strategyUsed = 'javascript fallback'; - const globPattern = include ? include : '**/*'; + const globPattern = glob ? glob : '**/*'; const ignorePatterns = this.fileExclusions.getGlobExcludes(); const filesIterator = globStream(globPattern, { @@ -603,32 +521,30 @@ export class GrepTool extends BaseDeclarativeTool { constructor(private readonly config: Config) { super( GrepTool.Name, - 'SearchText', - 'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers.', + 'Grep', + 'A powerful search tool for finding patterns in files\n\n Usage:\n - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command. The Grep tool has been optimized for correct permissions and access.\n - Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")\n - Filter files with glob parameter (e.g., "*.js", "**/*.tsx")\n - Case-insensitive by default\n - Use Task tool for open-ended searches requiring multiple rounds\n', Kind.Search, { properties: { pattern: { - description: - "The regular expression (regex) pattern to search for within file contents (e.g., 'function\\s+myFunction', 'import\\s+\\{.*\\}\\s+from\\s+.*').", type: 'string', + description: + 'The regular expression pattern to search for in file contents', + }, + glob: { + type: 'string', + description: + 'Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}")', }, path: { - description: - 'Optional: The absolute path to the directory to search within. If omitted, searches the current working directory.', type: 'string', - }, - include: { description: - "Optional: A glob pattern to filter which files are searched (e.g., '*.js', '*.{ts,tsx}', 'src/**'). If omitted, searches all files (respecting potential global ignores).", - type: 'string', + 'File or directory to search in. Defaults to current working directory.', }, - maxResults: { - description: - 'Optional: Maximum number of matches to return to prevent context overflow (default: 20, max: 100). Use lower values for broad searches, higher for specific searches.', + limit: { type: 'number', - minimum: 1, - maximum: 100, + description: + 'Limit output to first N matching lines. Optional - shows all matches if not specified.', }, }, required: ['pattern'], @@ -637,47 +553,6 @@ export class GrepTool extends BaseDeclarativeTool { ); } - /** - * Checks if a path is within the root directory and resolves it. 
- * @param relativePath Path relative to the root directory (or undefined for root). - * @returns The absolute path if valid and exists, or null if no path specified (to search all directories). - * @throws {Error} If path is outside root, doesn't exist, or isn't a directory. - */ - private resolveAndValidatePath(relativePath?: string): string | null { - // If no path specified, return null to indicate searching all workspace directories - if (!relativePath) { - return null; - } - - const targetPath = path.resolve(this.config.getTargetDir(), relativePath); - - // Security Check: Ensure the resolved path is within workspace boundaries - const workspaceContext = this.config.getWorkspaceContext(); - if (!workspaceContext.isPathWithinWorkspace(targetPath)) { - const directories = workspaceContext.getDirectories(); - throw new Error( - `Path validation failed: Attempted path "${relativePath}" resolves outside the allowed workspace directories: ${directories.join(', ')}`, - ); - } - - // Check existence and type after resolving - try { - const stats = fs.statSync(targetPath); - if (!stats.isDirectory()) { - throw new Error(`Path is not a directory: ${targetPath}`); - } - } catch (error: unknown) { - if (isNodeError(error) && error.code !== 'ENOENT') { - throw new Error(`Path does not exist: ${targetPath}`); - } - throw new Error( - `Failed to access path stats for ${targetPath}: ${error}`, - ); - } - - return targetPath; - } - /** * Validates the parameters for the tool * @param params Parameters to validate @@ -686,27 +561,17 @@ export class GrepTool extends BaseDeclarativeTool { protected override validateToolParamValues( params: GrepToolParams, ): string | null { + // Validate pattern is a valid regex try { new RegExp(params.pattern); } catch (error) { - return `Invalid regular expression pattern provided: ${params.pattern}. Error: ${getErrorMessage(error)}`; - } - - // Validate maxResults if provided - if (params.maxResults !== undefined) { - if ( - !Number.isInteger(params.maxResults) || - params.maxResults < 1 || - params.maxResults > 100 - ) { - return `maxResults must be an integer between 1 and 100, got: ${params.maxResults}`; - } + return `Invalid regular expression pattern: ${params.pattern}. 
Error: ${getErrorMessage(error)}`; } // Only validate path if one is provided if (params.path) { try { - this.resolveAndValidatePath(params.path); + resolveAndValidatePath(this.config, params.path); } catch (error) { return getErrorMessage(error); } diff --git a/packages/core/src/tools/ripGrep.test.ts b/packages/core/src/tools/ripGrep.test.ts index 19ac5ce3..b8ed191f 100644 --- a/packages/core/src/tools/ripGrep.test.ts +++ b/packages/core/src/tools/ripGrep.test.ts @@ -184,17 +184,15 @@ describe('RipGrepTool', () => { }; // Check for the core error message, as the full path might vary expect(grepTool.validateToolParams(params)).toContain( - 'Failed to access path stats for', + 'Path does not exist:', ); expect(grepTool.validateToolParams(params)).toContain('nonexistent'); }); - it('should return error if path is a file, not a directory', async () => { + it('should allow path to be a file', () => { const filePath = path.join(tempRootDir, 'fileA.txt'); const params: RipGrepToolParams = { pattern: 'hello', path: filePath }; - expect(grepTool.validateToolParams(params)).toContain( - `Path is not a directory: ${filePath}`, - ); + expect(grepTool.validateToolParams(params)).toBeNull(); }); }); @@ -432,7 +430,7 @@ describe('RipGrepTool', () => { const invocation = grepTool.build(params); const result = await invocation.execute(abortSignal); - expect(String(result.llmContent).length).toBeLessThanOrEqual(20_000); + expect(String(result.llmContent).length).toBeLessThanOrEqual(21_000); expect(result.llmContent).toMatch(/\[\d+ lines? truncated\] \.\.\./); expect(result.returnDisplay).toContain('truncated'); }); @@ -567,6 +565,26 @@ describe('RipGrepTool', () => { ); }); + it('should search within a single file when path is a file', async () => { + mockSpawn.mockImplementationOnce( + createMockSpawn({ + outputData: `fileA.txt:1:hello world${EOL}fileA.txt:2:second line with world${EOL}`, + exitCode: 0, + }), + ); + + const params: RipGrepToolParams = { + pattern: 'world', + path: path.join(tempRootDir, 'fileA.txt'), + }; + const invocation = grepTool.build(params); + const result = await invocation.execute(abortSignal); + expect(result.llmContent).toContain('Found 2 matches'); + expect(result.llmContent).toContain('fileA.txt:1:hello world'); + expect(result.llmContent).toContain('fileA.txt:2:second line with world'); + expect(result.returnDisplay).toBe('Found 2 matches'); + }); + it('should throw an error if ripgrep is not available', async () => { // Make ensureRipgrepBinary throw (ensureRipgrepPath as Mock).mockRejectedValue( @@ -648,7 +666,9 @@ describe('RipGrepTool', () => { describe('error handling and edge cases', () => { it('should handle workspace boundary violations', () => { const params: RipGrepToolParams = { pattern: 'test', path: '../outside' }; - expect(() => grepTool.build(params)).toThrow(/Path validation failed/); + expect(() => grepTool.build(params)).toThrow( + /Path is not within workspace/, + ); }); it('should handle empty directories gracefully', async () => { @@ -1132,7 +1152,9 @@ describe('RipGrepTool', () => { glob: '*.ts', }; const invocation = grepTool.build(params); - expect(invocation.getDescription()).toBe("'testPattern' in *.ts"); + expect(invocation.getDescription()).toBe( + "'testPattern' (filter: '*.ts')", + ); }); it('should generate correct description with pattern and path', async () => { @@ -1143,9 +1165,10 @@ describe('RipGrepTool', () => { path: path.join('src', 'app'), }; const invocation = grepTool.build(params); - // The path will be relative to the tempRootDir, 
so we check for containment. - expect(invocation.getDescription()).toContain("'testPattern' within"); - expect(invocation.getDescription()).toContain(path.join('src', 'app')); + expect(invocation.getDescription()).toContain( + "'testPattern' in path 'src", + ); + expect(invocation.getDescription()).toContain("app'"); }); it('should generate correct description with default search path', () => { @@ -1164,15 +1187,15 @@ describe('RipGrepTool', () => { }; const invocation = grepTool.build(params); expect(invocation.getDescription()).toContain( - "'testPattern' in *.ts within", + "'testPattern' in path 'src", ); - expect(invocation.getDescription()).toContain(path.join('src', 'app')); + expect(invocation.getDescription()).toContain("(filter: '*.ts')"); }); - it('should use ./ for root path in description', () => { + it('should use path when specified in description', () => { const params: RipGrepToolParams = { pattern: 'testPattern', path: '.' }; const invocation = grepTool.build(params); - expect(invocation.getDescription()).toBe("'testPattern' within ./"); + expect(invocation.getDescription()).toBe("'testPattern' in path '.'"); }); }); }); diff --git a/packages/core/src/tools/ripGrep.ts b/packages/core/src/tools/ripGrep.ts index 073db8e9..c119de5b 100644 --- a/packages/core/src/tools/ripGrep.ts +++ b/packages/core/src/tools/ripGrep.ts @@ -11,8 +11,8 @@ import { spawn } from 'node:child_process'; import type { ToolInvocation, ToolResult } from './tools.js'; import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js'; import { ToolNames } from './tool-names.js'; -import { makeRelative, shortenPath } from '../utils/paths.js'; -import { getErrorMessage, isNodeError } from '../utils/errors.js'; +import { resolveAndValidatePath } from '../utils/paths.js'; +import { getErrorMessage } from '../utils/errors.js'; import type { Config } from '../config/config.js'; import { ensureRipgrepPath } from '../utils/ripgrepUtils.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; @@ -57,50 +57,13 @@ class GrepToolInvocation extends BaseToolInvocation< super(params); } - /** - * Checks if a path is within the root directory and resolves it. - * @param relativePath Path relative to the root directory (or undefined for root). - * @returns The absolute path to search within. - * @throws {Error} If path is outside root, doesn't exist, or isn't a directory. - */ - private resolveAndValidatePath(relativePath?: string): string { - const targetDir = this.config.getTargetDir(); - const targetPath = relativePath - ? 
path.resolve(targetDir, relativePath) - : targetDir; - - const workspaceContext = this.config.getWorkspaceContext(); - if (!workspaceContext.isPathWithinWorkspace(targetPath)) { - const directories = workspaceContext.getDirectories(); - throw new Error( - `Path validation failed: Attempted path "${relativePath}" resolves outside the allowed workspace directories: ${directories.join(', ')}`, - ); - } - - return this.ensureDirectory(targetPath); - } - - private ensureDirectory(targetPath: string): string { - try { - const stats = fs.statSync(targetPath); - if (!stats.isDirectory()) { - throw new Error(`Path is not a directory: ${targetPath}`); - } - } catch (error: unknown) { - if (isNodeError(error) && error.code !== 'ENOENT') { - throw new Error(`Path does not exist: ${targetPath}`); - } - throw new Error( - `Failed to access path stats for ${targetPath}: ${error}`, - ); - } - - return targetPath; - } - async execute(signal: AbortSignal): Promise { try { - const searchDirAbs = this.resolveAndValidatePath(this.params.path); + const searchDirAbs = resolveAndValidatePath( + this.config, + this.params.path, + { allowFiles: true }, + ); const searchDirDisplay = this.params.path || '.'; // Get raw ripgrep output @@ -133,9 +96,6 @@ class GrepToolInvocation extends BaseToolInvocation< // Build header early to calculate available space const header = `Found ${totalMatches} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}:\n---\n`; - const maxTruncationNoticeLength = 100; // "[... N more matches truncated]" - const maxGrepOutputLength = - MAX_LLM_CONTENT_LENGTH - header.length - maxTruncationNoticeLength; // Apply line limit first (if specified) let truncatedByLineLimit = false; @@ -148,19 +108,32 @@ class GrepToolInvocation extends BaseToolInvocation< truncatedByLineLimit = true; } - // Join lines back into grep output - let grepOutput = linesToInclude.join(EOL); - - // Apply character limit as safety net + // Build output and track how many lines we include, respecting character limit + const parts: string[] = []; + let includedLines = 0; let truncatedByCharLimit = false; - if (grepOutput.length > maxGrepOutputLength) { - grepOutput = grepOutput.slice(0, maxGrepOutputLength) + '...'; - truncatedByCharLimit = true; + let currentLength = 0; + + for (const line of linesToInclude) { + const sep = includedLines > 0 ? 1 : 0; + + includedLines++; + + if (currentLength + line.length <= MAX_LLM_CONTENT_LENGTH) { + parts.push(line); + currentLength = currentLength + line.length + sep; + } else { + const remaining = Math.max( + MAX_LLM_CONTENT_LENGTH - currentLength - sep, + 10, + ); + parts.push(line.slice(0, remaining) + '...'); + truncatedByCharLimit = true; + break; + } } - // Count how many lines we actually included after character truncation - const finalLines = grepOutput.split(EOL).filter((line) => line.trim()); - const includedLines = finalLines.length; + const grepOutput = parts.join('\n'); // Build result let llmContent = header + grepOutput; @@ -168,7 +141,7 @@ class GrepToolInvocation extends BaseToolInvocation< // Add truncation notice if needed if (truncatedByLineLimit || truncatedByCharLimit) { const omittedMatches = totalMatches - includedLines; - llmContent += ` [${omittedMatches} ${omittedMatches === 1 ? 'line' : 'lines'} truncated] ...`; + llmContent += `\n---\n[${omittedMatches} ${omittedMatches === 1 ? 
'line' : 'lines'} truncated] ...`; } // Build display message (show real count, not truncated) @@ -193,7 +166,7 @@ class GrepToolInvocation extends BaseToolInvocation< private async performRipgrepSearch(options: { pattern: string; - path: string; + path: string; // Can be a file or directory glob?: string; signal: AbortSignal; }): Promise { @@ -302,34 +275,13 @@ class GrepToolInvocation extends BaseToolInvocation< */ getDescription(): string { let description = `'${this.params.pattern}'`; - if (this.params.glob) { - description += ` in ${this.params.glob}`; - } if (this.params.path) { - const resolvedPath = path.resolve( - this.config.getTargetDir(), - this.params.path, - ); - if ( - resolvedPath === this.config.getTargetDir() || - this.params.path === '.' - ) { - description += ` within ./`; - } else { - const relativePath = makeRelative( - resolvedPath, - this.config.getTargetDir(), - ); - description += ` within ${shortenPath(relativePath)}`; - } - } else { - // When no path is specified, indicate searching all workspace directories - const workspaceContext = this.config.getWorkspaceContext(); - const directories = workspaceContext.getDirectories(); - if (directories.length > 1) { - description += ` across all workspace directories`; - } + description += ` in path '${this.params.path}'`; } + if (this.params.glob) { + description += ` (filter: '${this.params.glob}')`; + } + return description; } } @@ -378,47 +330,6 @@ export class RipGrepTool extends BaseDeclarativeTool< ); } - /** - * Checks if a path is within the root directory and resolves it. - * @param relativePath Path relative to the root directory (or undefined for root). - * @returns The absolute path to search within. - * @throws {Error} If path is outside root, doesn't exist, or isn't a directory. 
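// A minimal sketch of the shared resolveAndValidatePath helper that replaces the
// private per-tool copies this patch removes, based on the behaviour exercised in
// packages/core/src/utils/paths.test.ts. The actual implementation in
// packages/core/src/utils/paths.ts may differ; only the observable behaviour
// (default to the target dir, tilde expansion, workspace/existence/directory
// checks, `allowFiles` opt-out) is taken from the patch.
import fs from 'node:fs';
import os from 'node:os';
import path from 'node:path';
import type { Config } from '../config/config.js';

function resolveAndValidatePathSketch(
  config: Config,
  relativePath?: string,
  options?: { allowFiles?: boolean },
): string {
  // No path provided: search the target (first workspace) directory.
  if (!relativePath) {
    return config.getTargetDir();
  }
  // Expand '~' / '~/...' to the home directory; otherwise resolve against the
  // target directory (absolute paths pass through path.resolve unchanged).
  const resolved =
    relativePath === '~' || relativePath.startsWith('~/')
      ? path.join(os.homedir(), relativePath.slice(1))
      : path.resolve(config.getTargetDir(), relativePath);
  if (!config.getWorkspaceContext().isPathWithinWorkspace(resolved)) {
    throw new Error(`Path is not within workspace: ${resolved}`);
  }
  if (!fs.existsSync(resolved)) {
    throw new Error(`Path does not exist: ${resolved}`);
  }
  if (!options?.allowFiles && !fs.statSync(resolved).isDirectory()) {
    throw new Error(`Path is not a directory: ${resolved}`);
  }
  return resolved;
}
// Example (mirrors ripGrep.ts above): resolveAndValidatePathSketch(config, params.path, { allowFiles: true })
// lets ripgrep accept a single file as the search target, while glob/grep keep the directory-only default.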
- */ - private resolveAndValidatePath(relativePath?: string): string { - // If no path specified, search within the workspace root directory - if (!relativePath) { - return this.config.getTargetDir(); - } - - const targetPath = path.resolve(this.config.getTargetDir(), relativePath); - - // Security Check: Ensure the resolved path is within workspace boundaries - const workspaceContext = this.config.getWorkspaceContext(); - if (!workspaceContext.isPathWithinWorkspace(targetPath)) { - const directories = workspaceContext.getDirectories(); - throw new Error( - `Path validation failed: Attempted path "${relativePath}" resolves outside the allowed workspace directories: ${directories.join(', ')}`, - ); - } - - // Check existence and type after resolving - try { - const stats = fs.statSync(targetPath); - if (!stats.isDirectory()) { - throw new Error(`Path is not a directory: ${targetPath}`); - } - } catch (error: unknown) { - if (isNodeError(error) && error.code !== 'ENOENT') { - throw new Error(`Path does not exist: ${targetPath}`); - } - throw new Error( - `Failed to access path stats for ${targetPath}: ${error}`, - ); - } - - return targetPath; - } - /** * Validates the parameters for the tool * @param params Parameters to validate @@ -445,7 +356,7 @@ export class RipGrepTool extends BaseDeclarativeTool< // Only validate path if one is provided if (params.path) { try { - this.resolveAndValidatePath(params.path); + resolveAndValidatePath(this.config, params.path, { allowFiles: true }); } catch (error) { return getErrorMessage(error); } diff --git a/packages/core/src/utils/paths.test.ts b/packages/core/src/utils/paths.test.ts index 0e964672..6359ba81 100644 --- a/packages/core/src/utils/paths.test.ts +++ b/packages/core/src/utils/paths.test.ts @@ -4,8 +4,53 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { describe, it, expect, beforeAll, afterAll } from 'vitest'; -import { escapePath, unescapePath, isSubpath } from './paths.js'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest'; +import { + escapePath, + resolvePath, + validatePath, + resolveAndValidatePath, + unescapePath, + isSubpath, +} from './paths.js'; +import type { Config } from '../config/config.js'; + +function createConfigStub({ + targetDir, + allowedDirectories, +}: { + targetDir: string; + allowedDirectories: string[]; +}): Config { + const resolvedTargetDir = path.resolve(targetDir); + const resolvedDirectories = allowedDirectories.map((dir) => + path.resolve(dir), + ); + + const workspaceContext = { + isPathWithinWorkspace(testPath: string) { + const resolvedPath = path.resolve(testPath); + return resolvedDirectories.some((dir) => { + const relative = path.relative(dir, resolvedPath); + return ( + relative === '' || + (!relative.startsWith('..') && !path.isAbsolute(relative)) + ); + }); + }, + getDirectories() { + return resolvedDirectories; + }, + }; + + return { + getTargetDir: () => resolvedTargetDir, + getWorkspaceContext: () => workspaceContext, + } as unknown as Config; +} describe('escapePath', () => { it('should escape spaces', () => { @@ -314,3 +359,240 @@ describe('isSubpath on Windows', () => { expect(isSubpath('Users\\Test\\file.txt', 'Users\\Test')).toBe(false); }); }); + +describe('resolvePath', () => { + it('resolves relative paths against the provided base directory', () => { + const result = resolvePath('/home/user/project', 'src/main.ts'); + expect(result).toBe(path.resolve('/home/user/project', 
'src/main.ts')); + }); + + it('resolves relative paths against cwd when baseDir is undefined', () => { + const cwd = process.cwd(); + const result = resolvePath(undefined, 'src/main.ts'); + expect(result).toBe(path.resolve(cwd, 'src/main.ts')); + }); + + it('returns absolute paths unchanged', () => { + const absolutePath = '/absolute/path/to/file.ts'; + const result = resolvePath('/some/base', absolutePath); + expect(result).toBe(absolutePath); + }); + + it('expands tilde to home directory', () => { + const homeDir = os.homedir(); + const result = resolvePath(undefined, '~'); + expect(result).toBe(homeDir); + }); + + it('expands tilde-prefixed paths to home directory', () => { + const homeDir = os.homedir(); + const result = resolvePath(undefined, '~/documents/file.txt'); + expect(result).toBe(path.join(homeDir, 'documents/file.txt')); + }); + + it('uses baseDir when provided for relative paths', () => { + const baseDir = '/custom/base'; + const result = resolvePath(baseDir, './relative/path'); + expect(result).toBe(path.resolve(baseDir, './relative/path')); + }); + + it('handles tilde expansion regardless of baseDir', () => { + const homeDir = os.homedir(); + const result = resolvePath('/some/base', '~/file.txt'); + expect(result).toBe(path.join(homeDir, 'file.txt')); + }); + + it('handles dot paths correctly', () => { + const result = resolvePath('/base/dir', '.'); + expect(result).toBe(path.resolve('/base/dir', '.')); + }); + + it('handles parent directory references', () => { + const result = resolvePath('/base/dir/subdir', '..'); + expect(result).toBe(path.resolve('/base/dir/subdir', '..')); + }); +}); + +describe('validatePath', () => { + let workspaceRoot: string; + let config: Config; + + beforeAll(() => { + workspaceRoot = fs.mkdtempSync( + path.join(os.tmpdir(), 'validate-path-test-'), + ); + fs.mkdirSync(path.join(workspaceRoot, 'subdir')); + config = createConfigStub({ + targetDir: workspaceRoot, + allowedDirectories: [workspaceRoot], + }); + }); + + afterAll(() => { + fs.rmSync(workspaceRoot, { recursive: true, force: true }); + }); + + it('validates paths within workspace boundaries', () => { + const validPath = path.join(workspaceRoot, 'subdir'); + expect(() => validatePath(config, validPath)).not.toThrow(); + }); + + it('throws when path is outside workspace boundaries', () => { + const outsidePath = path.join(os.tmpdir(), 'outside'); + expect(() => validatePath(config, outsidePath)).toThrowError( + /Path is not within workspace/, + ); + }); + + it('throws when path does not exist', () => { + const nonExistentPath = path.join(workspaceRoot, 'nonexistent'); + expect(() => validatePath(config, nonExistentPath)).toThrowError( + /Path does not exist:/, + ); + }); + + it('throws when path is a file, not a directory (default behavior)', () => { + const filePath = path.join(workspaceRoot, 'test-file.txt'); + fs.writeFileSync(filePath, 'content'); + try { + expect(() => validatePath(config, filePath)).toThrowError( + /Path is not a directory/, + ); + } finally { + fs.rmSync(filePath); + } + }); + + it('allows files when allowFiles option is true', () => { + const filePath = path.join(workspaceRoot, 'test-file.txt'); + fs.writeFileSync(filePath, 'content'); + try { + expect(() => + validatePath(config, filePath, { allowFiles: true }), + ).not.toThrow(); + } finally { + fs.rmSync(filePath); + } + }); + + it('validates paths at workspace root', () => { + expect(() => validatePath(config, workspaceRoot)).not.toThrow(); + }); + + it('validates paths in allowed directories', () => { + 
const extraDir = fs.mkdtempSync(path.join(os.tmpdir(), 'validate-extra-')); + try { + const configWithExtra = createConfigStub({ + targetDir: workspaceRoot, + allowedDirectories: [workspaceRoot, extraDir], + }); + expect(() => validatePath(configWithExtra, extraDir)).not.toThrow(); + } finally { + fs.rmSync(extraDir, { recursive: true, force: true }); + } + }); +}); + +describe('resolveAndValidatePath', () => { + let workspaceRoot: string; + let config: Config; + + beforeAll(() => { + workspaceRoot = fs.mkdtempSync( + path.join(os.tmpdir(), 'resolve-and-validate-'), + ); + fs.mkdirSync(path.join(workspaceRoot, 'subdir')); + config = createConfigStub({ + targetDir: workspaceRoot, + allowedDirectories: [workspaceRoot], + }); + }); + + afterAll(() => { + fs.rmSync(workspaceRoot, { recursive: true, force: true }); + }); + + it('returns the target directory when no path is provided', () => { + expect(resolveAndValidatePath(config)).toBe(workspaceRoot); + }); + + it('resolves relative paths within the workspace', () => { + const expected = path.join(workspaceRoot, 'subdir'); + expect(resolveAndValidatePath(config, 'subdir')).toBe(expected); + }); + + it('allows absolute paths that are permitted by the workspace context', () => { + const extraDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'resolve-and-validate-extra-'), + ); + try { + const configWithExtra = createConfigStub({ + targetDir: workspaceRoot, + allowedDirectories: [workspaceRoot, extraDir], + }); + expect(resolveAndValidatePath(configWithExtra, extraDir)).toBe(extraDir); + } finally { + fs.rmSync(extraDir, { recursive: true, force: true }); + } + }); + + it('expands tilde-prefixed paths using the home directory', () => { + const fakeHome = fs.mkdtempSync( + path.join(os.tmpdir(), 'resolve-and-validate-home-'), + ); + const homeSubdir = path.join(fakeHome, 'project'); + fs.mkdirSync(homeSubdir); + + const homedirSpy = vi.spyOn(os, 'homedir').mockReturnValue(fakeHome); + try { + const configWithHome = createConfigStub({ + targetDir: workspaceRoot, + allowedDirectories: [workspaceRoot, fakeHome], + }); + expect(resolveAndValidatePath(configWithHome, '~/project')).toBe( + homeSubdir, + ); + expect(resolveAndValidatePath(configWithHome, '~')).toBe(fakeHome); + } finally { + homedirSpy.mockRestore(); + fs.rmSync(fakeHome, { recursive: true, force: true }); + } + }); + + it('throws when the path resolves outside of the workspace', () => { + expect(() => resolveAndValidatePath(config, '../outside')).toThrowError( + /Path is not within workspace/, + ); + }); + + it('throws when the path does not exist', () => { + expect(() => resolveAndValidatePath(config, 'missing')).toThrowError( + /Path does not exist:/, + ); + }); + + it('throws when the path points to a file (default behavior)', () => { + const filePath = path.join(workspaceRoot, 'file.txt'); + fs.writeFileSync(filePath, 'content'); + try { + expect(() => resolveAndValidatePath(config, 'file.txt')).toThrowError( + `Path is not a directory: ${filePath}`, + ); + } finally { + fs.rmSync(filePath); + } + }); + + it('allows file paths when allowFiles option is true', () => { + const filePath = path.join(workspaceRoot, 'file.txt'); + fs.writeFileSync(filePath, 'content'); + try { + const result = resolveAndValidatePath(config, 'file.txt', { + allowFiles: true, + }); + expect(result).toBe(filePath); + } finally { + fs.rmSync(filePath); + } + }); +}); diff --git a/packages/core/src/utils/paths.ts b/packages/core/src/utils/paths.ts index 3bf301c8..c5986c68 100644 --- 
a/packages/core/src/utils/paths.ts +++ b/packages/core/src/utils/paths.ts @@ -4,9 +4,12 @@ * SPDX-License-Identifier: Apache-2.0 */ +import fs from 'node:fs'; import path from 'node:path'; import os from 'node:os'; import * as crypto from 'node:crypto'; +import type { Config } from '../config/config.js'; +import { isNodeError } from './errors.js'; export const QWEN_DIR = '.qwen'; export const GOOGLE_ACCOUNTS_FILENAME = 'google_accounts.json'; @@ -191,3 +194,93 @@ export function isSubpath(parentPath: string, childPath: string): boolean { !pathModule.isAbsolute(relative) ); } + +/** + * Resolves a path with tilde (~) expansion and relative path resolution. + * Handles tilde expansion for home directory and resolves relative paths + * against the provided base directory or current working directory. + * + * @param baseDir The base directory to resolve relative paths against (defaults to current working directory) + * @param relativePath The path to resolve (can be relative, absolute, or tilde-prefixed) + * @returns The resolved absolute path + */ +export function resolvePath( + baseDir: string | undefined = process.cwd(), + relativePath: string, +): string { + const homeDir = os.homedir(); + + if (relativePath === '~') { + return homeDir; + } else if (relativePath.startsWith('~/')) { + return path.join(homeDir, relativePath.slice(2)); + } else if (path.isAbsolute(relativePath)) { + return relativePath; + } else { + return path.resolve(baseDir, relativePath); + } +} + +export interface PathValidationOptions { + /** + * If true, allows both files and directories. If false (default), only allows directories. + */ + allowFiles?: boolean; +} + +/** + * Validates that a resolved path exists within the workspace boundaries. + * + * @param config The configuration object containing workspace context + * @param resolvedPath The absolute path to validate + * @param options Validation options + * @throws Error if the path is outside workspace boundaries, doesn't exist, or is not a directory (when allowFiles is false) + */ +export function validatePath( + config: Config, + resolvedPath: string, + options: PathValidationOptions = {}, +): void { + const { allowFiles = false } = options; + const workspaceContext = config.getWorkspaceContext(); + + if (!workspaceContext.isPathWithinWorkspace(resolvedPath)) { + throw new Error('Path is not within workspace'); + } + + try { + const stats = fs.statSync(resolvedPath); + if (!allowFiles && !stats.isDirectory()) { + throw new Error(`Path is not a directory: ${resolvedPath}`); + } + } catch (error: unknown) { + if (isNodeError(error) && error.code === 'ENOENT') { + throw new Error(`Path does not exist: ${resolvedPath}`); + } + throw error; + } +} + +/** + * Resolves a path relative to the workspace root and verifies that it exists + * within the workspace boundaries defined in the config. 
+ * + * @param config The configuration object + * @param relativePath The relative path to resolve (optional, defaults to target directory) + * @param options Validation options (e.g., allowFiles to permit file paths) + */ +export function resolveAndValidatePath( + config: Config, + relativePath?: string, + options: PathValidationOptions = {}, +): string { + const targetDir = config.getTargetDir(); + + if (!relativePath) { + return targetDir; + } + + const resolvedPath = resolvePath(targetDir, relativePath); + validatePath(config, resolvedPath, options); + return resolvedPath; +} From c3d427730e24a1571e933a38d41d9c56df835092 Mon Sep 17 00:00:00 2001 From: tanzhenxin Date: Fri, 7 Nov 2025 17:28:16 +0800 Subject: [PATCH 3/8] =?UTF-8?q?=F0=9F=8E=AF=20Feature:=20Customizable=20Mo?= =?UTF-8?q?del=20Training=20and=20Tool=20Output=20Management=20(#981)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/cli/configuration.md | 42 ++- packages/cli/src/config/config.ts | 1 + packages/cli/src/config/settings.ts | 1 + packages/cli/src/config/settingsSchema.ts | 10 + .../src/ui/hooks/atCommandProcessor.test.ts | 2 + packages/core/src/config/config.test.ts | 14 +- packages/core/src/config/config.ts | 20 +- .../core/src/core/coreToolScheduler.test.ts | 270 +++++++++++++++++- packages/core/src/core/coreToolScheduler.ts | 210 +++++++------- packages/core/src/core/geminiChat.test.ts | 255 ----------------- packages/core/src/core/geminiChat.ts | 77 +---- .../services/chatCompressionService.test.ts | 50 ++++ .../src/services/chatCompressionService.ts | 9 +- packages/core/src/tools/glob.test.ts | 1 + packages/core/src/tools/glob.ts | 12 +- packages/core/src/tools/grep.test.ts | 4 + packages/core/src/tools/grep.ts | 19 +- packages/core/src/tools/read-file.test.ts | 13 +- packages/core/src/tools/read-file.ts | 14 +- .../core/src/tools/read-many-files.test.ts | 13 +- packages/core/src/tools/read-many-files.ts | 17 +- packages/core/src/tools/ripGrep.test.ts | 6 +- packages/core/src/tools/ripGrep.ts | 58 ++-- packages/core/src/tools/tool-names.ts | 2 + packages/core/src/tools/web-fetch.ts | 3 +- packages/core/src/tools/web-search/index.ts | 3 +- .../core/src/utils/environmentContext.test.ts | 101 +++++++ packages/core/src/utils/environmentContext.ts | 4 + packages/core/src/utils/fileUtils.test.ts | 70 ++--- packages/core/src/utils/fileUtils.ts | 87 ++++-- packages/core/src/utils/pathReader.test.ts | 2 + packages/core/src/utils/pathReader.ts | 12 +- 32 files changed, 795 insertions(+), 607 deletions(-) diff --git a/docs/cli/configuration.md b/docs/cli/configuration.md index bc7fce20..d42c24dd 100644 --- a/docs/cli/configuration.md +++ b/docs/cli/configuration.md @@ -160,9 +160,30 @@ Settings are organized into categories. All settings should be placed within the - **Default:** `undefined` - **`model.chatCompression.contextPercentageThreshold`** (number): - - **Description:** Sets the threshold for chat history compression as a percentage of the model's total token limit. This is a value between 0 and 1 that applies to both automatic compression and the manual `/compress` command. For example, a value of `0.6` will trigger compression when the chat history exceeds 60% of the token limit. + - **Description:** Sets the threshold for chat history compression as a percentage of the model's total token limit. This is a value between 0 and 1 that applies to both automatic compression and the manual `/compress` command. 
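A minimal usage sketch for the path helpers added in `paths.ts` above; the `config` object is assumed to implement the core `Config` interface, and `resolveSearchTarget` / `userSuppliedPath` are hypothetical names used only for illustration:

```ts
import { resolvePath, resolveAndValidatePath } from './paths.js';
import type { Config } from '../config/config.js';

// Pure resolution (no filesystem checks): '~/...' expands to os.homedir(),
// absolute paths pass through, and relative paths resolve against the base dir.
const absolute = resolvePath('/home/user/project', '~/notes/todo.md');

// Resolution plus workspace validation: with no path the workspace target dir is
// returned; otherwise the path must exist inside the workspace, and by default it
// must be a directory unless { allowFiles: true } is passed. Failures throw
// 'Path is not within workspace', 'Path does not exist: ...', or
// 'Path is not a directory: ...'.
function resolveSearchTarget(config: Config, userSuppliedPath?: string): string {
  return resolveAndValidatePath(config, userSuppliedPath, { allowFiles: true });
}
```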
For example, a value of `0.6` will trigger compression when the chat history exceeds 60% of the token limit. Use `0` to disable compression entirely. - **Default:** `0.7` +- **`model.generationConfig`** (object): + - **Description:** Advanced overrides passed to the underlying content generator. Supports request controls such as `timeout`, `maxRetries`, and `disableCacheControl`, along with fine-tuning knobs under `samplingParams` (for example `temperature`, `top_p`, `max_tokens`). Leave unset to rely on provider defaults. + - **Default:** `undefined` + - **Example:** + + ```json + { + "model": { + "generationConfig": { + "timeout": 60000, + "disableCacheControl": false, + "samplingParams": { + "temperature": 0.2, + "top_p": 0.8, + "max_tokens": 1024 + } + } + } + } + ``` + - **`model.skipNextSpeakerCheck`** (boolean): - **Description:** Skip the next speaker check. - **Default:** `false` @@ -171,6 +192,10 @@ Settings are organized into categories. All settings should be placed within the - **Description:** Disables loop detection checks. Loop detection prevents infinite loops in AI responses but can generate false positives that interrupt legitimate workflows. Enable this option if you experience frequent false positive loop detection interruptions. - **Default:** `false` +- **`model.skipStartupContext`** (boolean): + - **Description:** Skips sending the startup workspace context (environment summary and acknowledgement) at the beginning of each session. Enable this if you prefer to provide context manually or want to save tokens on startup. + - **Default:** `false` + - **`model.enableOpenAILogging`** (boolean): - **Description:** Enables logging of OpenAI API calls for debugging and analysis. When enabled, API requests and responses are logged to JSON files. - **Default:** `false` @@ -266,6 +291,21 @@ Settings are organized into categories. All settings should be placed within the - **Description:** Use the bundled ripgrep binary. When set to `false`, the system-level `rg` command will be used instead. This setting is only effective when `tools.useRipgrep` is `true`. - **Default:** `true` +- **`tools.enableToolOutputTruncation`** (boolean): + - **Description:** Enable truncation of large tool outputs. + - **Default:** `true` + - **Requires restart:** Yes + +- **`tools.truncateToolOutputThreshold`** (number): + - **Description:** Truncate tool output if it is larger than this many characters. Applies to Shell, Grep, Glob, ReadFile and ReadManyFiles tools. + - **Default:** `25000` + - **Requires restart:** Yes + +- **`tools.truncateToolOutputLines`** (number): + - **Description:** Maximum lines or entries kept when truncating tool output. Applies to Shell, Grep, Glob, ReadFile and ReadManyFiles tools. + - **Default:** `1000` + - **Requires restart:** Yes + #### `mcp` - **`mcp.serverCommand`** (string): diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index d747e128..50a11991 100755 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -791,6 +791,7 @@ export async function loadCliConfig( skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck, enablePromptCompletion: settings.general?.enablePromptCompletion ?? false, skipLoopDetection: settings.model?.skipLoopDetection ?? false, + skipStartupContext: settings.model?.skipStartupContext ?? 
false, vlmSwitchMode, truncateToolOutputThreshold: settings.tools?.truncateToolOutputThreshold, truncateToolOutputLines: settings.tools?.truncateToolOutputLines, diff --git a/packages/cli/src/config/settings.ts b/packages/cli/src/config/settings.ts index 65e73668..edc7709f 100644 --- a/packages/cli/src/config/settings.ts +++ b/packages/cli/src/config/settings.ts @@ -131,6 +131,7 @@ const MIGRATION_MAP: Record = { sessionTokenLimit: 'model.sessionTokenLimit', contentGenerator: 'model.generationConfig', skipLoopDetection: 'model.skipLoopDetection', + skipStartupContext: 'model.skipStartupContext', enableOpenAILogging: 'model.enableOpenAILogging', tavilyApiKey: 'advanced.tavilyApiKey', vlmSwitchMode: 'experimental.vlmSwitchMode', diff --git a/packages/cli/src/config/settingsSchema.ts b/packages/cli/src/config/settingsSchema.ts index da504c29..b36ee397 100644 --- a/packages/cli/src/config/settingsSchema.ts +++ b/packages/cli/src/config/settingsSchema.ts @@ -549,6 +549,16 @@ const SETTINGS_SCHEMA = { description: 'Disable all loop detection checks (streaming and LLM).', showInDialog: true, }, + skipStartupContext: { + type: 'boolean', + label: 'Skip Startup Context', + category: 'Model', + requiresRestart: true, + default: false, + description: + 'Avoid sending the workspace startup context at the beginning of each session.', + showInDialog: true, + }, enableOpenAILogging: { type: 'boolean', label: 'Enable OpenAI Logging', diff --git a/packages/cli/src/ui/hooks/atCommandProcessor.test.ts b/packages/cli/src/ui/hooks/atCommandProcessor.test.ts index 32876b32..d8634028 100644 --- a/packages/cli/src/ui/hooks/atCommandProcessor.test.ts +++ b/packages/cli/src/ui/hooks/atCommandProcessor.test.ts @@ -80,6 +80,8 @@ describe('handleAtCommand', () => { getReadManyFilesExcludes: () => [], }), getUsageStatisticsEnabled: () => false, + getTruncateToolOutputThreshold: () => 2500, + getTruncateToolOutputLines: () => 500, } as unknown as Config; const registry = new ToolRegistry(mockConfig); diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts index 72ecdc80..6f07de53 100644 --- a/packages/core/src/config/config.test.ts +++ b/packages/core/src/config/config.test.ts @@ -738,13 +738,13 @@ describe('Server Config (config.ts)', () => { it('should return the calculated threshold when it is smaller than the default', () => { const config = new Config(baseParams); - vi.mocked(tokenLimit).mockReturnValue(32000); + vi.mocked(tokenLimit).mockReturnValue(8000); vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue( - 1000, + 2000, ); - // 4 * (32000 - 1000) = 4 * 31000 = 124000 - // default is 4_000_000 - expect(config.getTruncateToolOutputThreshold()).toBe(124000); + // 4 * (8000 - 2000) = 4 * 6000 = 24000 + // default is 25_000 + expect(config.getTruncateToolOutputThreshold()).toBe(24000); }); it('should return the default threshold when the calculated value is larger', () => { @@ -754,8 +754,8 @@ describe('Server Config (config.ts)', () => { 500_000, ); // 4 * (2_000_000 - 500_000) = 4 * 1_500_000 = 6_000_000 - // default is 4_000_000 - expect(config.getTruncateToolOutputThreshold()).toBe(4_000_000); + // default is 25_000 + expect(config.getTruncateToolOutputThreshold()).toBe(25_000); }); it('should use a custom truncateToolOutputThreshold if provided', () => { diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index 754551b4..68e9521a 100644 --- a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ 
-161,7 +161,7 @@ export interface ExtensionInstallMetadata { autoUpdate?: boolean; } -export const DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD = 4_000_000; +export const DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD = 25_000; export const DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES = 1000; export class MCPServerConfig { @@ -288,6 +288,7 @@ export interface ConfigParameters { eventEmitter?: EventEmitter; useSmartEdit?: boolean; output?: OutputSettings; + skipStartupContext?: boolean; } export class Config { @@ -377,6 +378,7 @@ export class Config { private readonly extensionManagement: boolean = true; private readonly enablePromptCompletion: boolean = false; private readonly skipLoopDetection: boolean; + private readonly skipStartupContext: boolean; private readonly vlmSwitchMode: string | undefined; private initialized: boolean = false; readonly storage: Storage; @@ -469,6 +471,7 @@ export class Config { this.interactive = params.interactive ?? false; this.trustedFolder = params.trustedFolder; this.skipLoopDetection = params.skipLoopDetection ?? false; + this.skipStartupContext = params.skipStartupContext ?? false; // Web search this.webSearch = params.webSearch; @@ -1041,6 +1044,10 @@ export class Config { return this.skipLoopDetection; } + getSkipStartupContext(): boolean { + return this.skipStartupContext; + } + getVlmSwitchMode(): string | undefined { return this.vlmSwitchMode; } @@ -1050,6 +1057,13 @@ export class Config { } getTruncateToolOutputThreshold(): number { + if ( + !this.enableToolOutputTruncation || + this.truncateToolOutputThreshold <= 0 + ) { + return Number.POSITIVE_INFINITY; + } + return Math.min( // Estimate remaining context window in characters (1 token ~= 4 chars). 4 * @@ -1060,6 +1074,10 @@ export class Config { } getTruncateToolOutputLines(): number { + if (!this.enableToolOutputTruncation || this.truncateToolOutputLines <= 0) { + return Number.POSITIVE_INFINITY; + } + return this.truncateToolOutputLines; } diff --git a/packages/core/src/core/coreToolScheduler.test.ts b/packages/core/src/core/coreToolScheduler.test.ts index a4cdde4e..715dfd8f 100644 --- a/packages/core/src/core/coreToolScheduler.test.ts +++ b/packages/core/src/core/coreToolScheduler.test.ts @@ -1540,6 +1540,268 @@ describe('CoreToolScheduler request queueing', () => { }); }); +describe('CoreToolScheduler Sequential Execution', () => { + it('should execute tool calls in a batch sequentially', async () => { + // Arrange + let firstCallFinished = false; + const executeFn = vi + .fn() + .mockImplementation(async (args: { call: number }) => { + if (args.call === 1) { + // First call, wait for a bit to simulate work + await new Promise((resolve) => setTimeout(resolve, 50)); + firstCallFinished = true; + return { llmContent: 'First call done' }; + } + if (args.call === 2) { + // Second call, should only happen after the first is finished + if (!firstCallFinished) { + throw new Error( + 'Second tool call started before the first one finished!', + ); + } + return { llmContent: 'Second call done' }; + } + return { llmContent: 'default' }; + }); + + const mockTool = new MockTool({ name: 'mockTool', execute: executeFn }); + const declarativeTool = mockTool; + + const mockToolRegistry = { + getTool: () => declarativeTool, + getToolByName: () => declarativeTool, + getFunctionDeclarations: () => [], + tools: new Map(), + discovery: {}, + registerTool: () => {}, + getToolByDisplayName: () => declarativeTool, + getTools: () => [], + discoverTools: async () => {}, + getAllTools: () => [], + getToolsByServer: () => [], + } as unknown 
as ToolRegistry; + + const onAllToolCallsComplete = vi.fn(); + const onToolCallsUpdate = vi.fn(); + + const mockConfig = { + getSessionId: () => 'test-session-id', + getUsageStatisticsEnabled: () => true, + getDebugMode: () => false, + getApprovalMode: () => ApprovalMode.YOLO, // Use YOLO to avoid confirmation prompts + getAllowedTools: () => [], + getContentGeneratorConfig: () => ({ + model: 'test-model', + authType: 'oauth-personal', + }), + getShellExecutionConfig: () => ({ + terminalWidth: 90, + terminalHeight: 30, + }), + storage: { + getProjectTempDir: () => '/tmp', + }, + getToolRegistry: () => mockToolRegistry, + getTruncateToolOutputThreshold: () => + DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD, + getTruncateToolOutputLines: () => DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES, + getUseSmartEdit: () => false, + getUseModelRouter: () => false, + getGeminiClient: () => null, + } as unknown as Config; + + const scheduler = new CoreToolScheduler({ + config: mockConfig, + onAllToolCallsComplete, + onToolCallsUpdate, + getPreferredEditor: () => 'vscode', + onEditorClose: vi.fn(), + }); + + const abortController = new AbortController(); + const requests = [ + { + callId: '1', + name: 'mockTool', + args: { call: 1 }, + isClientInitiated: false, + prompt_id: 'prompt-1', + }, + { + callId: '2', + name: 'mockTool', + args: { call: 2 }, + isClientInitiated: false, + prompt_id: 'prompt-1', + }, + ]; + + // Act + await scheduler.schedule(requests, abortController.signal); + + // Assert + await vi.waitFor(() => { + expect(onAllToolCallsComplete).toHaveBeenCalled(); + }); + + // Check that execute was called twice + expect(executeFn).toHaveBeenCalledTimes(2); + + // Check the order of calls + const calls = executeFn.mock.calls; + expect(calls[0][0]).toEqual({ call: 1 }); + expect(calls[1][0]).toEqual({ call: 2 }); + + // The onAllToolCallsComplete should be called once with both results + const completedCalls = onAllToolCallsComplete.mock + .calls[0][0] as ToolCall[]; + expect(completedCalls).toHaveLength(2); + expect(completedCalls[0].status).toBe('success'); + expect(completedCalls[1].status).toBe('success'); + }); + + it('should cancel subsequent tools when the signal is aborted.', async () => { + // Arrange + const abortController = new AbortController(); + let secondCallStarted = false; + + const executeFn = vi + .fn() + .mockImplementation(async (args: { call: number }) => { + if (args.call === 1) { + return { llmContent: 'First call done' }; + } + if (args.call === 2) { + secondCallStarted = true; + // This call will be cancelled while it's "running". + await new Promise((resolve) => setTimeout(resolve, 100)); + // It should not return a value because it will be cancelled. 
+ return { llmContent: 'Second call should not complete' }; + } + if (args.call === 3) { + return { llmContent: 'Third call done' }; + } + return { llmContent: 'default' }; + }); + + const mockTool = new MockTool({ name: 'mockTool', execute: executeFn }); + const declarativeTool = mockTool; + + const mockToolRegistry = { + getTool: () => declarativeTool, + getToolByName: () => declarativeTool, + getFunctionDeclarations: () => [], + tools: new Map(), + discovery: {}, + registerTool: () => {}, + getToolByDisplayName: () => declarativeTool, + getTools: () => [], + discoverTools: async () => {}, + getAllTools: () => [], + getToolsByServer: () => [], + } as unknown as ToolRegistry; + + const onAllToolCallsComplete = vi.fn(); + const onToolCallsUpdate = vi.fn(); + + const mockConfig = { + getSessionId: () => 'test-session-id', + getUsageStatisticsEnabled: () => true, + getDebugMode: () => false, + getApprovalMode: () => ApprovalMode.YOLO, + getAllowedTools: () => [], + getContentGeneratorConfig: () => ({ + model: 'test-model', + authType: 'oauth-personal', + }), + getShellExecutionConfig: () => ({ + terminalWidth: 90, + terminalHeight: 30, + }), + storage: { + getProjectTempDir: () => '/tmp', + }, + getToolRegistry: () => mockToolRegistry, + getTruncateToolOutputThreshold: () => + DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD, + getTruncateToolOutputLines: () => DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES, + getUseSmartEdit: () => false, + getUseModelRouter: () => false, + getGeminiClient: () => null, + } as unknown as Config; + + const scheduler = new CoreToolScheduler({ + config: mockConfig, + onAllToolCallsComplete, + onToolCallsUpdate, + getPreferredEditor: () => 'vscode', + onEditorClose: vi.fn(), + }); + + const requests = [ + { + callId: '1', + name: 'mockTool', + args: { call: 1 }, + isClientInitiated: false, + prompt_id: 'prompt-1', + }, + { + callId: '2', + name: 'mockTool', + args: { call: 2 }, + isClientInitiated: false, + prompt_id: 'prompt-1', + }, + { + callId: '3', + name: 'mockTool', + args: { call: 3 }, + isClientInitiated: false, + prompt_id: 'prompt-1', + }, + ]; + + // Act + const schedulePromise = scheduler.schedule( + requests, + abortController.signal, + ); + + // Wait for the second call to start, then abort. 
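+    // The scheduler now drives scheduled calls with `for...of` + `await` rather than
+    // `forEach` + `.then()`, so each call starts only after the previous one settles,
+    // and calls still pending when the signal aborts are reported as 'cancelled'.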
+ await vi.waitFor(() => { + expect(secondCallStarted).toBe(true); + }); + abortController.abort(); + + await schedulePromise; + + // Assert + await vi.waitFor(() => { + expect(onAllToolCallsComplete).toHaveBeenCalled(); + }); + + // Check that execute was called for all three tools initially + expect(executeFn).toHaveBeenCalledTimes(3); + expect(executeFn).toHaveBeenCalledWith({ call: 1 }); + expect(executeFn).toHaveBeenCalledWith({ call: 2 }); + expect(executeFn).toHaveBeenCalledWith({ call: 3 }); + + const completedCalls = onAllToolCallsComplete.mock + .calls[0][0] as ToolCall[]; + expect(completedCalls).toHaveLength(3); + + const call1 = completedCalls.find((c) => c.request.callId === '1'); + const call2 = completedCalls.find((c) => c.request.callId === '2'); + const call3 = completedCalls.find((c) => c.request.callId === '3'); + + expect(call1?.status).toBe('success'); + expect(call2?.status).toBe('cancelled'); + expect(call3?.status).toBe('cancelled'); + }); +}); + describe('truncateAndSaveToFile', () => { const mockWriteFile = vi.mocked(fs.writeFile); const THRESHOLD = 40_000; @@ -1719,14 +1981,14 @@ describe('truncateAndSaveToFile', () => { ); expect(result.content).toContain( - 'read_file tool with the absolute file path above', + 'Tool output was too large and has been truncated', ); - expect(result.content).toContain('read_file tool with offset=0, limit=100'); + expect(result.content).toContain('The full output has been saved to:'); expect(result.content).toContain( - 'read_file tool with offset=N to skip N lines', + 'To read the complete output, use the read_file tool with the absolute file path above', ); expect(result.content).toContain( - 'read_file tool with limit=M to read only M lines', + 'The truncated output below shows the beginning and end of the content', ); }); diff --git a/packages/core/src/core/coreToolScheduler.ts b/packages/core/src/core/coreToolScheduler.ts index beec2a1c..f4a26706 100644 --- a/packages/core/src/core/coreToolScheduler.ts +++ b/packages/core/src/core/coreToolScheduler.ts @@ -299,10 +299,7 @@ export async function truncateAndSaveToFile( return { content: `Tool output was too large and has been truncated. The full output has been saved to: ${outputFile} -To read the complete output, use the ${ReadFileTool.Name} tool with the absolute file path above. For large files, you can use the offset and limit parameters to read specific sections: -- ${ReadFileTool.Name} tool with offset=0, limit=100 to see the first 100 lines -- ${ReadFileTool.Name} tool with offset=N to skip N lines from the beginning -- ${ReadFileTool.Name} tool with limit=M to read only M lines at a time +To read the complete output, use the ${ReadFileTool.Name} tool with the absolute file path above. The truncated output below shows the beginning and end of the content. The marker '... [CONTENT TRUNCATED] ...' indicates where content was removed. This allows you to efficiently examine different parts of the output without loading the entire file. 
Truncated part of the output: @@ -846,7 +843,7 @@ export class CoreToolScheduler { ); } } - this.attemptExecutionOfScheduledCalls(signal); + await this.attemptExecutionOfScheduledCalls(signal); void this.checkAndNotifyCompletion(); } finally { this.isScheduling = false; @@ -921,7 +918,7 @@ export class CoreToolScheduler { } this.setStatusInternal(callId, 'scheduled'); } - this.attemptExecutionOfScheduledCalls(signal); + await this.attemptExecutionOfScheduledCalls(signal); } /** @@ -967,7 +964,9 @@ export class CoreToolScheduler { }); } - private attemptExecutionOfScheduledCalls(signal: AbortSignal): void { + private async attemptExecutionOfScheduledCalls( + signal: AbortSignal, + ): Promise { const allCallsFinalOrScheduled = this.toolCalls.every( (call) => call.status === 'scheduled' || @@ -981,8 +980,8 @@ export class CoreToolScheduler { (call) => call.status === 'scheduled', ); - callsToExecute.forEach((toolCall) => { - if (toolCall.status !== 'scheduled') return; + for (const toolCall of callsToExecute) { + if (toolCall.status !== 'scheduled') continue; const scheduledCall = toolCall; const { callId, name: toolName } = scheduledCall.request; @@ -1033,107 +1032,106 @@ export class CoreToolScheduler { ); } - promise - .then(async (toolResult: ToolResult) => { - if (signal.aborted) { - this.setStatusInternal( - callId, - 'cancelled', - 'User cancelled tool execution.', - ); - return; - } + try { + const toolResult: ToolResult = await promise; + if (signal.aborted) { + this.setStatusInternal( + callId, + 'cancelled', + 'User cancelled tool execution.', + ); + continue; + } - if (toolResult.error === undefined) { - let content = toolResult.llmContent; - let outputFile: string | undefined = undefined; - const contentLength = - typeof content === 'string' ? content.length : undefined; - if ( - typeof content === 'string' && - toolName === ShellTool.Name && - this.config.getEnableToolOutputTruncation() && - this.config.getTruncateToolOutputThreshold() > 0 && - this.config.getTruncateToolOutputLines() > 0 - ) { - const originalContentLength = content.length; - const threshold = this.config.getTruncateToolOutputThreshold(); - const lines = this.config.getTruncateToolOutputLines(); - const truncatedResult = await truncateAndSaveToFile( - content, - callId, - this.config.storage.getProjectTempDir(), - threshold, - lines, - ); - content = truncatedResult.content; - outputFile = truncatedResult.outputFile; - - if (outputFile) { - logToolOutputTruncated( - this.config, - new ToolOutputTruncatedEvent( - scheduledCall.request.prompt_id, - { - toolName, - originalContentLength, - truncatedContentLength: content.length, - threshold, - lines, - }, - ), - ); - } - } - - const response = convertToFunctionResponse( - toolName, - callId, + if (toolResult.error === undefined) { + let content = toolResult.llmContent; + let outputFile: string | undefined = undefined; + const contentLength = + typeof content === 'string' ? 
content.length : undefined; + if ( + typeof content === 'string' && + toolName === ShellTool.Name && + this.config.getEnableToolOutputTruncation() && + this.config.getTruncateToolOutputThreshold() > 0 && + this.config.getTruncateToolOutputLines() > 0 + ) { + const originalContentLength = content.length; + const threshold = this.config.getTruncateToolOutputThreshold(); + const lines = this.config.getTruncateToolOutputLines(); + const truncatedResult = await truncateAndSaveToFile( content, - ); - const successResponse: ToolCallResponseInfo = { callId, - responseParts: response, - resultDisplay: toolResult.returnDisplay, - error: undefined, - errorType: undefined, - outputFile, - contentLength, - }; - this.setStatusInternal(callId, 'success', successResponse); - } else { - // It is a failure - const error = new Error(toolResult.error.message); - const errorResponse = createErrorResponse( + this.config.storage.getProjectTempDir(), + threshold, + lines, + ); + content = truncatedResult.content; + outputFile = truncatedResult.outputFile; + + if (outputFile) { + logToolOutputTruncated( + this.config, + new ToolOutputTruncatedEvent( + scheduledCall.request.prompt_id, + { + toolName, + originalContentLength, + truncatedContentLength: content.length, + threshold, + lines, + }, + ), + ); + } + } + + const response = convertToFunctionResponse( + toolName, + callId, + content, + ); + const successResponse: ToolCallResponseInfo = { + callId, + responseParts: response, + resultDisplay: toolResult.returnDisplay, + error: undefined, + errorType: undefined, + outputFile, + contentLength, + }; + this.setStatusInternal(callId, 'success', successResponse); + } else { + // It is a failure + const error = new Error(toolResult.error.message); + const errorResponse = createErrorResponse( + scheduledCall.request, + error, + toolResult.error.type, + ); + this.setStatusInternal(callId, 'error', errorResponse); + } + } catch (executionError: unknown) { + if (signal.aborted) { + this.setStatusInternal( + callId, + 'cancelled', + 'User cancelled tool execution.', + ); + } else { + this.setStatusInternal( + callId, + 'error', + createErrorResponse( scheduledCall.request, - error, - toolResult.error.type, - ); - this.setStatusInternal(callId, 'error', errorResponse); - } - }) - .catch((executionError: Error) => { - if (signal.aborted) { - this.setStatusInternal( - callId, - 'cancelled', - 'User cancelled tool execution.', - ); - } else { - this.setStatusInternal( - callId, - 'error', - createErrorResponse( - scheduledCall.request, - executionError instanceof Error - ? executionError - : new Error(String(executionError)), - ToolErrorType.UNHANDLED_EXCEPTION, - ), - ); - } - }); - }); + executionError instanceof Error + ? 
executionError + : new Error(String(executionError)), + ToolErrorType.UNHANDLED_EXCEPTION, + ), + ); + } + } + } } } diff --git a/packages/core/src/core/geminiChat.test.ts b/packages/core/src/core/geminiChat.test.ts index 7d4314b7..94ef927d 100644 --- a/packages/core/src/core/geminiChat.test.ts +++ b/packages/core/src/core/geminiChat.test.ts @@ -23,8 +23,6 @@ import { setSimulate429 } from '../utils/testUtils.js'; import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js'; import { AuthType } from './contentGenerator.js'; import { type RetryOptions } from '../utils/retry.js'; -import type { ToolRegistry } from '../tools/tool-registry.js'; -import { Kind } from '../tools/tools.js'; import { uiTelemetryService } from '../telemetry/uiTelemetry.js'; // Mock fs module to prevent actual file system operations during tests @@ -1305,259 +1303,6 @@ describe('GeminiChat', () => { expect(turn4.parts[0].text).toBe('second response'); }); - describe('stopBeforeSecondMutator', () => { - beforeEach(() => { - // Common setup for these tests: mock the tool registry. - const mockToolRegistry = { - getTool: vi.fn((toolName: string) => { - if (toolName === 'edit') { - return { kind: Kind.Edit }; - } - return { kind: Kind.Other }; - }), - } as unknown as ToolRegistry; - vi.mocked(mockConfig.getToolRegistry).mockReturnValue(mockToolRegistry); - }); - - it('should stop streaming before a second mutator tool call', async () => { - const responses = [ - { - candidates: [ - { content: { role: 'model', parts: [{ text: 'First part. ' }] } }, - ], - }, - { - candidates: [ - { - content: { - role: 'model', - parts: [{ functionCall: { name: 'edit', args: {} } }], - }, - }, - ], - }, - { - candidates: [ - { - content: { - role: 'model', - parts: [{ functionCall: { name: 'fetch', args: {} } }], - }, - }, - ], - }, - // This chunk contains the second mutator and should be clipped. - { - candidates: [ - { - content: { - role: 'model', - parts: [ - { functionCall: { name: 'edit', args: {} } }, - { text: 'some trailing text' }, - ], - }, - }, - ], - }, - // This chunk should never be reached. - { - candidates: [ - { - content: { - role: 'model', - parts: [{ text: 'This should not appear.' }], - }, - }, - ], - }, - ] as unknown as GenerateContentResponse[]; - - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - (async function* () { - for (const response of responses) { - yield response; - } - })(), - ); - - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test message' }, - 'prompt-id-mutator-test', - ); - for await (const _ of stream) { - // Consume the stream to trigger history recording. - } - - const history = chat.getHistory(); - expect(history.length).toBe(2); - - const modelTurn = history[1]!; - expect(modelTurn.role).toBe('model'); - expect(modelTurn?.parts?.length).toBe(3); - expect(modelTurn?.parts![0]!.text).toBe('First part. '); - expect(modelTurn.parts![1]!.functionCall?.name).toBe('edit'); - expect(modelTurn.parts![2]!.functionCall?.name).toBe('fetch'); - }); - - it('should not stop streaming if only one mutator is present', async () => { - const responses = [ - { - candidates: [ - { content: { role: 'model', parts: [{ text: 'Part 1. ' }] } }, - ], - }, - { - candidates: [ - { - content: { - role: 'model', - parts: [{ functionCall: { name: 'edit', args: {} } }], - }, - }, - ], - }, - { - candidates: [ - { - content: { - role: 'model', - parts: [{ text: 'Part 2.' 
}], - }, - finishReason: 'STOP', - }, - ], - }, - ] as unknown as GenerateContentResponse[]; - - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - (async function* () { - for (const response of responses) { - yield response; - } - })(), - ); - - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test message' }, - 'prompt-id-one-mutator', - ); - for await (const _ of stream) { - /* consume */ - } - - const history = chat.getHistory(); - const modelTurn = history[1]!; - expect(modelTurn?.parts?.length).toBe(3); - expect(modelTurn.parts![1]!.functionCall?.name).toBe('edit'); - expect(modelTurn.parts![2]!.text).toBe('Part 2.'); - }); - - it('should clip the chunk containing the second mutator, preserving prior parts', async () => { - const responses = [ - { - candidates: [ - { - content: { - role: 'model', - parts: [{ functionCall: { name: 'edit', args: {} } }], - }, - }, - ], - }, - // This chunk has a valid part before the second mutator. - // The valid part should be kept, the rest of the chunk discarded. - { - candidates: [ - { - content: { - role: 'model', - parts: [ - { text: 'Keep this text. ' }, - { functionCall: { name: 'edit', args: {} } }, - { text: 'Discard this text.' }, - ], - }, - finishReason: 'STOP', - }, - ], - }, - ] as unknown as GenerateContentResponse[]; - - const stream = (async function* () { - for (const response of responses) { - yield response; - } - })(); - - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - stream, - ); - - const resultStream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-clip-chunk', - ); - for await (const _ of resultStream) { - /* consume */ - } - - const history = chat.getHistory(); - const modelTurn = history[1]!; - expect(modelTurn?.parts?.length).toBe(2); - expect(modelTurn.parts![0]!.functionCall?.name).toBe('edit'); - expect(modelTurn.parts![1]!.text).toBe('Keep this text. '); - }); - - it('should handle two mutators in the same chunk (parallel call scenario)', async () => { - const responses = [ - { - candidates: [ - { - content: { - role: 'model', - parts: [ - { text: 'Some text. ' }, - { functionCall: { name: 'edit', args: {} } }, - { functionCall: { name: 'edit', args: {} } }, - ], - }, - finishReason: 'STOP', - }, - ], - }, - ] as unknown as GenerateContentResponse[]; - - const stream = (async function* () { - for (const response of responses) { - yield response; - } - })(); - - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - stream, - ); - - const resultStream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-parallel-mutators', - ); - for await (const _ of resultStream) { - /* consume */ - } - - const history = chat.getHistory(); - const modelTurn = history[1]!; - expect(modelTurn?.parts?.length).toBe(2); - expect(modelTurn.parts![0]!.text).toBe('Some text. 
'); - expect(modelTurn.parts![1]!.functionCall?.name).toBe('edit'); - }); - }); - describe('Model Resolution', () => { const mockResponse = { candidates: [ diff --git a/packages/core/src/core/geminiChat.ts b/packages/core/src/core/geminiChat.ts index b21a7023..79249733 100644 --- a/packages/core/src/core/geminiChat.ts +++ b/packages/core/src/core/geminiChat.ts @@ -7,16 +7,15 @@ // DISCLAIMER: This is a copied version of https://github.com/googleapis/js-genai/blob/main/src/chats.ts with the intention of working around a key bug // where function responses are not treated as "valid" responses: https://b.corp.google.com/issues/420354090 -import { +import type { GenerateContentResponse, - type Content, - type GenerateContentConfig, - type SendMessageParameters, - type Part, - type Tool, - FinishReason, - ApiError, + Content, + GenerateContentConfig, + SendMessageParameters, + Part, + Tool, } from '@google/genai'; +import { ApiError } from '@google/genai'; import { toParts } from '../code_assist/converter.js'; import { createUserContent } from '@google/genai'; import { retryWithBackoff } from '../utils/retry.js'; @@ -25,7 +24,7 @@ import { DEFAULT_GEMINI_FLASH_MODEL, getEffectiveModel, } from '../config/models.js'; -import { hasCycleInSchema, MUTATOR_KINDS } from '../tools/tools.js'; +import { hasCycleInSchema } from '../tools/tools.js'; import type { StructuredError } from './turn.js'; import { logContentRetry, @@ -511,7 +510,7 @@ export class GeminiChat { let hasToolCall = false; let hasFinishReason = false; - for await (const chunk of this.stopBeforeSecondMutator(streamResponse)) { + for await (const chunk of streamResponse) { hasFinishReason = chunk?.candidates?.some((candidate) => candidate.finishReason) ?? false; if (isValidResponse(chunk)) { @@ -629,64 +628,6 @@ export class GeminiChat { }); } } - - /** - * Truncates the chunkStream right before the second function call to a - * function that mutates state. This may involve trimming parts from a chunk - * as well as omtting some chunks altogether. - * - * We do this because it improves tool call quality if the model gets - * feedback from one mutating function call before it makes the next one. - */ - private async *stopBeforeSecondMutator( - chunkStream: AsyncGenerator, - ): AsyncGenerator { - let foundMutatorFunctionCall = false; - - for await (const chunk of chunkStream) { - const candidate = chunk.candidates?.[0]; - const content = candidate?.content; - if (!candidate || !content?.parts) { - yield chunk; - continue; - } - - const truncatedParts: Part[] = []; - for (const part of content.parts) { - if (this.isMutatorFunctionCall(part)) { - if (foundMutatorFunctionCall) { - // This is the second mutator call. - // Truncate and return immedaitely. 
- const newChunk = new GenerateContentResponse(); - newChunk.candidates = [ - { - ...candidate, - content: { - ...content, - parts: truncatedParts, - }, - finishReason: FinishReason.STOP, - }, - ]; - yield newChunk; - return; - } - foundMutatorFunctionCall = true; - } - truncatedParts.push(part); - } - - yield chunk; - } - } - - private isMutatorFunctionCall(part: Part): boolean { - if (!part?.functionCall?.name) { - return false; - } - const tool = this.config.getToolRegistry().getTool(part.functionCall.name); - return !!tool && MUTATOR_KINDS.includes(tool.kind); - } } /** Visible for Testing */ diff --git a/packages/core/src/services/chatCompressionService.test.ts b/packages/core/src/services/chatCompressionService.test.ts index f5c7c8a0..5e1c75c5 100644 --- a/packages/core/src/services/chatCompressionService.test.ts +++ b/packages/core/src/services/chatCompressionService.test.ts @@ -181,6 +181,56 @@ describe('ChatCompressionService', () => { expect(result.newHistory).toBeNull(); }); + it('should return NOOP when contextPercentageThreshold is 0', async () => { + const history: Content[] = [ + { role: 'user', parts: [{ text: 'msg1' }] }, + { role: 'model', parts: [{ text: 'msg2' }] }, + ]; + vi.mocked(mockChat.getHistory).mockReturnValue(history); + vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(800); + vi.mocked(mockConfig.getChatCompression).mockReturnValue({ + contextPercentageThreshold: 0, + }); + + const mockGenerateContent = vi.fn(); + vi.mocked(mockConfig.getContentGenerator).mockReturnValue({ + generateContent: mockGenerateContent, + } as unknown as ContentGenerator); + + const result = await service.compress( + mockChat, + mockPromptId, + false, + mockModel, + mockConfig, + false, + ); + + expect(result.info).toMatchObject({ + compressionStatus: CompressionStatus.NOOP, + originalTokenCount: 0, + newTokenCount: 0, + }); + expect(mockGenerateContent).not.toHaveBeenCalled(); + expect(tokenLimit).not.toHaveBeenCalled(); + + const forcedResult = await service.compress( + mockChat, + mockPromptId, + true, + mockModel, + mockConfig, + false, + ); + expect(forcedResult.info).toMatchObject({ + compressionStatus: CompressionStatus.NOOP, + originalTokenCount: 0, + newTokenCount: 0, + }); + expect(mockGenerateContent).not.toHaveBeenCalled(); + expect(tokenLimit).not.toHaveBeenCalled(); + }); + it('should compress if over token threshold', async () => { const history: Content[] = [ { role: 'user', parts: [{ text: 'msg1' }] }, diff --git a/packages/core/src/services/chatCompressionService.ts b/packages/core/src/services/chatCompressionService.ts index 68761fa2..f692be3e 100644 --- a/packages/core/src/services/chatCompressionService.ts +++ b/packages/core/src/services/chatCompressionService.ts @@ -86,10 +86,14 @@ export class ChatCompressionService { hasFailedCompressionAttempt: boolean, ): Promise<{ newHistory: Content[] | null; info: ChatCompressionInfo }> { const curatedHistory = chat.getHistory(true); + const threshold = + config.getChatCompression()?.contextPercentageThreshold ?? + COMPRESSION_TOKEN_THRESHOLD; // Regardless of `force`, don't do anything if the history is empty. 
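    // A non-positive contextPercentageThreshold disables compression entirely, so it
    // is also treated as a NOOP here, even when compression is forced.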
if ( curatedHistory.length === 0 || + threshold <= 0 || (hasFailedCompressionAttempt && !force) ) { return { @@ -104,13 +108,8 @@ export class ChatCompressionService { const originalTokenCount = uiTelemetryService.getLastPromptTokenCount(); - const contextPercentageThreshold = - config.getChatCompression()?.contextPercentageThreshold; - // Don't compress if not forced and we are under the limit. if (!force) { - const threshold = - contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD; if (originalTokenCount < threshold * tokenLimit(model)) { return { newHistory: null, diff --git a/packages/core/src/tools/glob.test.ts b/packages/core/src/tools/glob.test.ts index 0cf1f9e3..3729c251 100644 --- a/packages/core/src/tools/glob.test.ts +++ b/packages/core/src/tools/glob.test.ts @@ -37,6 +37,7 @@ describe('GlobTool', () => { getFileExclusions: () => ({ getGlobExcludes: () => [], }), + getTruncateToolOutputLines: () => 1000, } as unknown as Config; beforeEach(async () => { diff --git a/packages/core/src/tools/glob.ts b/packages/core/src/tools/glob.ts index 2e9fa58e..b717ca3e 100644 --- a/packages/core/src/tools/glob.ts +++ b/packages/core/src/tools/glob.ts @@ -161,11 +161,15 @@ class GlobToolInvocation extends BaseToolInvocation< ); const totalFileCount = sortedEntries.length; - const truncated = totalFileCount > MAX_FILE_COUNT; + const fileLimit = Math.min( + MAX_FILE_COUNT, + this.config.getTruncateToolOutputLines(), + ); + const truncated = totalFileCount > fileLimit; - // Limit to MAX_FILE_COUNT if needed + // Limit to fileLimit if needed const entriesToShow = truncated - ? sortedEntries.slice(0, MAX_FILE_COUNT) + ? sortedEntries.slice(0, fileLimit) : sortedEntries; const sortedAbsolutePaths = entriesToShow.map((entry) => @@ -178,7 +182,7 @@ class GlobToolInvocation extends BaseToolInvocation< // Add truncation notice if needed if (truncated) { - const omittedFiles = totalFileCount - MAX_FILE_COUNT; + const omittedFiles = totalFileCount - fileLimit; const fileTerm = omittedFiles === 1 ? 
'file' : 'files'; resultMessage += `\n---\n[${omittedFiles} ${fileTerm} truncated] ...`; } diff --git a/packages/core/src/tools/grep.test.ts b/packages/core/src/tools/grep.test.ts index 497fbb7d..d613ff03 100644 --- a/packages/core/src/tools/grep.test.ts +++ b/packages/core/src/tools/grep.test.ts @@ -43,6 +43,8 @@ describe('GrepTool', () => { getFileExclusions: () => ({ getGlobExcludes: () => [], }), + getTruncateToolOutputThreshold: () => 25000, + getTruncateToolOutputLines: () => 1000, } as unknown as Config; beforeEach(async () => { @@ -282,6 +284,8 @@ describe('GrepTool', () => { getFileExclusions: () => ({ getGlobExcludes: () => [], }), + getTruncateToolOutputThreshold: () => 25000, + getTruncateToolOutputLines: () => 1000, } as unknown as Config; const multiDirGrepTool = new GrepTool(multiDirConfig); diff --git a/packages/core/src/tools/grep.ts b/packages/core/src/tools/grep.ts index 1aed46c0..f8e19451 100644 --- a/packages/core/src/tools/grep.ts +++ b/packages/core/src/tools/grep.ts @@ -19,8 +19,6 @@ import type { Config } from '../config/config.js'; import type { FileExclusions } from '../utils/ignorePatterns.js'; import { ToolErrorType } from './tool-error.js'; -const MAX_LLM_CONTENT_LENGTH = 20_000; - // --- Interfaces --- /** @@ -103,14 +101,17 @@ class GrepToolInvocation extends BaseToolInvocation< return { llmContent: noMatchMsg, returnDisplay: `No matches found` }; } + const charLimit = this.config.getTruncateToolOutputThreshold(); + const lineLimit = Math.min( + this.config.getTruncateToolOutputLines(), + this.params.limit ?? Number.POSITIVE_INFINITY, + ); + // Apply line limit if specified let truncatedByLineLimit = false; let matchesToInclude = rawMatches; - if ( - this.params.limit !== undefined && - rawMatches.length > this.params.limit - ) { - matchesToInclude = rawMatches.slice(0, this.params.limit); + if (rawMatches.length > lineLimit) { + matchesToInclude = rawMatches.slice(0, lineLimit); truncatedByLineLimit = true; } @@ -147,8 +148,8 @@ class GrepToolInvocation extends BaseToolInvocation< // Apply character limit as safety net let truncatedByCharLimit = false; - if (grepOutput.length > MAX_LLM_CONTENT_LENGTH) { - grepOutput = grepOutput.slice(0, MAX_LLM_CONTENT_LENGTH) + '...'; + if (Number.isFinite(charLimit) && grepOutput.length > charLimit) { + grepOutput = grepOutput.slice(0, charLimit) + '...'; truncatedByCharLimit = true; } diff --git a/packages/core/src/tools/read-file.test.ts b/packages/core/src/tools/read-file.test.ts index dfb12c94..a7aa6648 100644 --- a/packages/core/src/tools/read-file.test.ts +++ b/packages/core/src/tools/read-file.test.ts @@ -41,6 +41,8 @@ describe('ReadFileTool', () => { storage: { getProjectTempDir: () => path.join(tempRootDir, '.temp'), }, + getTruncateToolOutputThreshold: () => 2500, + getTruncateToolOutputLines: () => 500, } as unknown as Config; tool = new ReadFileTool(mockConfigInstance); }); @@ -281,11 +283,9 @@ describe('ReadFileTool', () => { >; const result = await invocation.execute(abortSignal); - expect(result.llmContent).toContain( - 'IMPORTANT: The file content has been truncated', + expect(result.returnDisplay).toContain( + 'Read lines 1-2 of 3 from longlines.txt (truncated)', ); - expect(result.llmContent).toContain('--- FILE CONTENT (truncated) ---'); - expect(result.returnDisplay).toContain('some lines were shortened'); }); it('should handle image file and return appropriate content', async () => { @@ -417,10 +417,7 @@ describe('ReadFileTool', () => { const result = await invocation.execute(abortSignal); 
expect(result.llmContent).toContain( - 'IMPORTANT: The file content has been truncated', - ); - expect(result.llmContent).toContain( - 'Status: Showing lines 6-8 of 20 total lines', + 'Showing lines 6-8 of 20 total lines', ); expect(result.llmContent).toContain('Line 6'); expect(result.llmContent).toContain('Line 7'); diff --git a/packages/core/src/tools/read-file.ts b/packages/core/src/tools/read-file.ts index fa26b3c6..99982ca6 100644 --- a/packages/core/src/tools/read-file.ts +++ b/packages/core/src/tools/read-file.ts @@ -67,8 +67,7 @@ class ReadFileToolInvocation extends BaseToolInvocation< async execute(): Promise { const result = await processSingleFileContent( this.params.absolute_path, - this.config.getTargetDir(), - this.config.getFileSystemService(), + this.config, this.params.offset, this.params.limit, ); @@ -88,16 +87,7 @@ class ReadFileToolInvocation extends BaseToolInvocation< if (result.isTruncated) { const [start, end] = result.linesShown!; const total = result.originalLineCount!; - const nextOffset = this.params.offset - ? this.params.offset + end - start + 1 - : end; - llmContent = ` -IMPORTANT: The file content has been truncated. -Status: Showing lines ${start}-${end} of ${total} total lines. -Action: To read more of the file, you can use the 'offset' and 'limit' parameters in a subsequent 'read_file' call. For example, to read the next section of the file, use offset: ${nextOffset}. - ---- FILE CONTENT (truncated) --- -${result.llmContent}`; + llmContent = `Showing lines ${start}-${end} of ${total} total lines.\n\n---\n\n${result.llmContent}`; } else { llmContent = result.llmContent || ''; } diff --git a/packages/core/src/tools/read-many-files.test.ts b/packages/core/src/tools/read-many-files.test.ts index 0b4fefb5..758fb5d6 100644 --- a/packages/core/src/tools/read-many-files.test.ts +++ b/packages/core/src/tools/read-many-files.test.ts @@ -88,6 +88,8 @@ describe('ReadManyFilesTool', () => { buildExcludePatterns: () => DEFAULT_FILE_EXCLUDES, getReadManyFilesExcludes: () => DEFAULT_FILE_EXCLUDES, }), + getTruncateToolOutputThreshold: () => 2500, + getTruncateToolOutputLines: () => 500, } as Partial as Config; tool = new ReadManyFilesTool(mockConfig); @@ -500,6 +502,8 @@ describe('ReadManyFilesTool', () => { buildExcludePatterns: () => [], getReadManyFilesExcludes: () => [], }), + getTruncateToolOutputThreshold: () => 2500, + getTruncateToolOutputLines: () => 500, } as Partial as Config; tool = new ReadManyFilesTool(mockConfig); @@ -552,15 +556,10 @@ describe('ReadManyFilesTool', () => { c.includes('large-file.txt'), ); - expect(normalFileContent).not.toContain( - '[WARNING: This file was truncated.', - ); + expect(normalFileContent).not.toContain('Showing lines'); expect(truncatedFileContent).toContain( - "[WARNING: This file was truncated. 
To view the full content, use the 'read_file' tool on this specific file.]", + 'Showing lines 1-250 of 2500 total lines.', ); - // Check that the actual content is still there but truncated - expect(truncatedFileContent).toContain('L200'); - expect(truncatedFileContent).not.toContain('L2400'); }); it('should read files with special characters like [] and () in the path', async () => { diff --git a/packages/core/src/tools/read-many-files.ts b/packages/core/src/tools/read-many-files.ts index 63fcf78a..c262cf4b 100644 --- a/packages/core/src/tools/read-many-files.ts +++ b/packages/core/src/tools/read-many-files.ts @@ -17,7 +17,6 @@ import { processSingleFileContent, DEFAULT_ENCODING, getSpecificMimeType, - DEFAULT_MAX_LINES_TEXT_FILE, } from '../utils/fileUtils.js'; import type { PartListUnion } from '@google/genai'; import { @@ -278,8 +277,10 @@ ${finalExclusionPatternsForDescription } const sortedFiles = Array.from(filesToConsider).sort(); - const file_line_limit = - DEFAULT_MAX_LINES_TEXT_FILE / Math.max(1, sortedFiles.length); + const truncateToolOutputLines = this.config.getTruncateToolOutputLines(); + const file_line_limit = Number.isFinite(truncateToolOutputLines) + ? Math.floor(truncateToolOutputLines / Math.max(1, sortedFiles.length)) + : undefined; const fileProcessingPromises = sortedFiles.map( async (filePath): Promise => { @@ -316,8 +317,7 @@ ${finalExclusionPatternsForDescription // Use processSingleFileContent for all file types now const fileReadResult = await processSingleFileContent( filePath, - this.config.getTargetDir(), - this.config.getFileSystemService(), + this.config, 0, file_line_limit, ); @@ -376,9 +376,12 @@ ${finalExclusionPatternsForDescription ); let fileContentForLlm = ''; if (fileReadResult.isTruncated) { - fileContentForLlm += `[WARNING: This file was truncated. To view the full content, use the 'read_file' tool on this specific file.]\n\n`; + const [start, end] = fileReadResult.linesShown!; + const total = fileReadResult.originalLineCount!; + fileContentForLlm = `Showing lines ${start}-${end} of ${total} total lines.\n---\n${fileReadResult.llmContent}`; + } else { + fileContentForLlm = fileReadResult.llmContent; } - fileContentForLlm += fileReadResult.llmContent; contentParts.push(`${separator}\n\n${fileContentForLlm}\n\n`); } else { // This is a Part for image/pdf, which we don't add the separator to. diff --git a/packages/core/src/tools/ripGrep.test.ts b/packages/core/src/tools/ripGrep.test.ts index b8ed191f..a2f813f4 100644 --- a/packages/core/src/tools/ripGrep.test.ts +++ b/packages/core/src/tools/ripGrep.test.ts @@ -103,6 +103,8 @@ describe('RipGrepTool', () => { getWorkingDir: () => tempRootDir, getDebugMode: () => false, getUseBuiltinRipgrep: () => true, + getTruncateToolOutputThreshold: () => 25000, + getTruncateToolOutputLines: () => 1000, } as unknown as Config; beforeEach(async () => { @@ -417,7 +419,7 @@ describe('RipGrepTool', () => { }); it('should truncate llm content when exceeding maximum length', async () => { - const longMatch = 'fileA.txt:1:' + 'a'.repeat(25_000); + const longMatch = 'fileA.txt:1:' + 'a'.repeat(30_000); mockSpawn.mockImplementationOnce( createMockSpawn({ @@ -430,7 +432,7 @@ describe('RipGrepTool', () => { const invocation = grepTool.build(params); const result = await invocation.execute(abortSignal); - expect(String(result.llmContent).length).toBeLessThanOrEqual(21_000); + expect(String(result.llmContent).length).toBeLessThanOrEqual(26_000); expect(result.llmContent).toMatch(/\[\d+ lines? 
truncated\] \.\.\./); expect(result.returnDisplay).toContain('truncated'); }); diff --git a/packages/core/src/tools/ripGrep.ts b/packages/core/src/tools/ripGrep.ts index c119de5b..80273f31 100644 --- a/packages/core/src/tools/ripGrep.ts +++ b/packages/core/src/tools/ripGrep.ts @@ -19,8 +19,6 @@ import { SchemaValidator } from '../utils/schemaValidator.js'; import type { FileFilteringOptions } from '../config/constants.js'; import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js'; -const MAX_LLM_CONTENT_LENGTH = 20_000; - /** * Parameters for the GrepTool (Simplified) */ @@ -97,43 +95,49 @@ class GrepToolInvocation extends BaseToolInvocation< // Build header early to calculate available space const header = `Found ${totalMatches} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}:\n---\n`; + const charLimit = this.config.getTruncateToolOutputThreshold(); + const lineLimit = Math.min( + this.config.getTruncateToolOutputLines(), + this.params.limit ?? Number.POSITIVE_INFINITY, + ); + // Apply line limit first (if specified) let truncatedByLineLimit = false; let linesToInclude = allLines; - if ( - this.params.limit !== undefined && - allLines.length > this.params.limit - ) { - linesToInclude = allLines.slice(0, this.params.limit); + if (allLines.length > lineLimit) { + linesToInclude = allLines.slice(0, lineLimit); truncatedByLineLimit = true; } // Build output and track how many lines we include, respecting character limit - const parts: string[] = []; - let includedLines = 0; + let grepOutput = ''; let truncatedByCharLimit = false; - let currentLength = 0; + let includedLines = 0; + if (Number.isFinite(charLimit)) { + const parts: string[] = []; + let currentLength = 0; - for (const line of linesToInclude) { - const sep = includedLines > 0 ? 1 : 0; + for (const line of linesToInclude) { + const sep = includedLines > 0 ? 
1 : 0; + includedLines++; - includedLines++; - - if (currentLength + line.length <= MAX_LLM_CONTENT_LENGTH) { - parts.push(line); - currentLength = currentLength + line.length + sep; - } else { - const remaining = Math.max( - MAX_LLM_CONTENT_LENGTH - currentLength - sep, - 10, - ); - parts.push(line.slice(0, remaining) + '...'); - truncatedByCharLimit = true; - break; + const projectedLength = currentLength + line.length + sep; + if (projectedLength <= charLimit) { + parts.push(line); + currentLength = projectedLength; + } else { + const remaining = Math.max(charLimit - currentLength - sep, 10); + parts.push(line.slice(0, remaining) + '...'); + truncatedByCharLimit = true; + break; + } } - } - const grepOutput = parts.join('\n'); + grepOutput = parts.join('\n'); + } else { + grepOutput = linesToInclude.join('\n'); + includedLines = linesToInclude.length; + } // Build result let llmContent = header + grepOutput; diff --git a/packages/core/src/tools/tool-names.ts b/packages/core/src/tools/tool-names.ts index 09109205..799d0d72 100644 --- a/packages/core/src/tools/tool-names.ts +++ b/packages/core/src/tools/tool-names.ts @@ -21,4 +21,6 @@ export const ToolNames = { MEMORY: 'save_memory', TASK: 'task', EXIT_PLAN_MODE: 'exit_plan_mode', + WEB_FETCH: 'web_fetch', + WEB_SEARCH: 'web_search', } as const; diff --git a/packages/core/src/tools/web-fetch.ts b/packages/core/src/tools/web-fetch.ts index 0d253d00..2927e891 100644 --- a/packages/core/src/tools/web-fetch.ts +++ b/packages/core/src/tools/web-fetch.ts @@ -23,6 +23,7 @@ import { ToolConfirmationOutcome, } from './tools.js'; import { DEFAULT_QWEN_MODEL } from '../config/models.js'; +import { ToolNames } from './tool-names.js'; const URL_FETCH_TIMEOUT_MS = 10000; const MAX_CONTENT_LENGTH = 100000; @@ -190,7 +191,7 @@ export class WebFetchTool extends BaseDeclarativeTool< WebFetchToolParams, ToolResult > { - static readonly Name: string = 'web_fetch'; + static readonly Name: string = ToolNames.WEB_FETCH; constructor(private readonly config: Config) { super( diff --git a/packages/core/src/tools/web-search/index.ts b/packages/core/src/tools/web-search/index.ts index f9962b52..2860a333 100644 --- a/packages/core/src/tools/web-search/index.ts +++ b/packages/core/src/tools/web-search/index.ts @@ -30,6 +30,7 @@ import type { WebSearchProviderConfig, DashScopeProviderConfig, } from './types.js'; +import { ToolNames } from '../tool-names.js'; class WebSearchToolInvocation extends BaseToolInvocation< WebSearchToolParams, @@ -274,7 +275,7 @@ export class WebSearchTool extends BaseDeclarativeTool< WebSearchToolParams, WebSearchToolResult > { - static readonly Name: string = 'web_search'; + static readonly Name: string = ToolNames.WEB_SEARCH; constructor(private readonly config: Config) { super( diff --git a/packages/core/src/utils/environmentContext.test.ts b/packages/core/src/utils/environmentContext.test.ts index aa436c6e..944e0906 100644 --- a/packages/core/src/utils/environmentContext.test.ts +++ b/packages/core/src/utils/environmentContext.test.ts @@ -13,9 +13,11 @@ import { afterEach, type Mock, } from 'vitest'; +import type { Content } from '@google/genai'; import { getEnvironmentContext, getDirectoryContextString, + getInitialChatHistory, } from './environmentContext.js'; import type { Config } from '../config/config.js'; import { getFolderStructure } from './getFolderStructure.js'; @@ -213,3 +215,102 @@ describe('getEnvironmentContext', () => { expect(parts[1].text).toBe('\n--- Error reading full file context ---'); }); }); + 
+describe('getInitialChatHistory', () => { + let mockConfig: Partial; + + beforeEach(() => { + vi.mocked(getFolderStructure).mockResolvedValue('Mock Folder Structure'); + mockConfig = { + getSkipStartupContext: vi.fn().mockReturnValue(false), + getWorkspaceContext: vi.fn().mockReturnValue({ + getDirectories: vi.fn().mockReturnValue(['/test/dir']), + }), + getFileService: vi.fn(), + getFullContext: vi.fn().mockReturnValue(false), + getToolRegistry: vi.fn().mockReturnValue({ getTool: vi.fn() }), + }; + }); + + afterEach(() => { + vi.clearAllMocks(); + vi.restoreAllMocks(); + }); + + it('includes startup context when skipStartupContext is false', async () => { + const history = await getInitialChatHistory(mockConfig as Config); + + expect(mockConfig.getSkipStartupContext).toHaveBeenCalled(); + expect(history).toHaveLength(2); + expect(history).toEqual([ + expect.objectContaining({ + role: 'user', + parts: [ + expect.objectContaining({ + text: expect.stringContaining( + "I'm currently working in the directory", + ), + }), + ], + }), + { + role: 'model', + parts: [{ text: 'Got it. Thanks for the context!' }], + }, + ]); + }); + + it('returns only extra history when skipStartupContext is true', async () => { + mockConfig.getSkipStartupContext = vi.fn().mockReturnValue(true); + mockConfig.getWorkspaceContext = vi.fn(() => { + throw new Error( + 'getWorkspaceContext should not be called when skipping startup context', + ); + }); + mockConfig.getFullContext = vi.fn(() => { + throw new Error( + 'getFullContext should not be called when skipping startup context', + ); + }); + mockConfig.getToolRegistry = vi.fn(() => { + throw new Error( + 'getToolRegistry should not be called when skipping startup context', + ); + }); + const extraHistory: Content[] = [ + { role: 'user', parts: [{ text: 'custom context' }] }, + ]; + + const history = await getInitialChatHistory( + mockConfig as Config, + extraHistory, + ); + + expect(mockConfig.getSkipStartupContext).toHaveBeenCalled(); + expect(history).toEqual(extraHistory); + expect(history).not.toBe(extraHistory); + }); + + it('returns empty history when skipping startup context without extras', async () => { + mockConfig.getSkipStartupContext = vi.fn().mockReturnValue(true); + mockConfig.getWorkspaceContext = vi.fn(() => { + throw new Error( + 'getWorkspaceContext should not be called when skipping startup context', + ); + }); + mockConfig.getFullContext = vi.fn(() => { + throw new Error( + 'getFullContext should not be called when skipping startup context', + ); + }); + mockConfig.getToolRegistry = vi.fn(() => { + throw new Error( + 'getToolRegistry should not be called when skipping startup context', + ); + }); + + const history = await getInitialChatHistory(mockConfig as Config); + + expect(history).toEqual([]); + }); +}); diff --git a/packages/core/src/utils/environmentContext.ts b/packages/core/src/utils/environmentContext.ts index 48840734..2bbe12dd 100644 --- a/packages/core/src/utils/environmentContext.ts +++ b/packages/core/src/utils/environmentContext.ts @@ -112,6 +112,10 @@ export async function getInitialChatHistory( config: Config, extraHistory?: Content[], ): Promise { + if (config.getSkipStartupContext()) { + return extraHistory ? 
[...extraHistory] : []; + } + const envParts = await getEnvironmentContext(config); const envContextString = envParts.map((part) => part.text || '').join('\n\n'); diff --git a/packages/core/src/utils/fileUtils.test.ts b/packages/core/src/utils/fileUtils.test.ts index c3500cdd..92af55e4 100644 --- a/packages/core/src/utils/fileUtils.test.ts +++ b/packages/core/src/utils/fileUtils.test.ts @@ -30,7 +30,7 @@ import { readFileWithEncoding, fileExists, } from './fileUtils.js'; -import { StandardFileSystemService } from '../services/fileSystemService.js'; +import type { Config } from '../config/config.js'; vi.mock('mime/lite', () => ({ default: { getType: vi.fn() }, @@ -50,6 +50,12 @@ describe('fileUtils', () => { let nonexistentFilePath: string; let directoryPath: string; + const mockConfig = { + getTruncateToolOutputThreshold: () => 2500, + getTruncateToolOutputLines: () => 500, + getTargetDir: () => tempRootDir, + } as unknown as Config; + beforeEach(() => { vi.resetAllMocks(); // Reset all mocks, including mime.getType @@ -664,8 +670,7 @@ describe('fileUtils', () => { actualNodeFs.writeFileSync(testTextFilePath, content); const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect(result.llmContent).toBe(content); expect(result.returnDisplay).toBe(''); @@ -675,8 +680,7 @@ describe('fileUtils', () => { it('should handle file not found', async () => { const result = await processSingleFileContent( nonexistentFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect(result.error).toContain('File not found'); expect(result.returnDisplay).toContain('File not found'); @@ -689,8 +693,7 @@ describe('fileUtils', () => { const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect(result.error).toContain('Simulated read error'); expect(result.returnDisplay).toContain('Simulated read error'); @@ -704,8 +707,7 @@ describe('fileUtils', () => { const result = await processSingleFileContent( testImageFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect(result.error).toContain('Simulated image read error'); expect(result.returnDisplay).toContain('Simulated image read error'); @@ -717,8 +719,7 @@ describe('fileUtils', () => { mockMimeGetType.mockReturnValue('image/png'); const result = await processSingleFileContent( testImageFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect( (result.llmContent as { inlineData: unknown }).inlineData, @@ -739,8 +740,7 @@ describe('fileUtils', () => { mockMimeGetType.mockReturnValue('application/pdf'); const result = await processSingleFileContent( testPdfFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect( (result.llmContent as { inlineData: unknown }).inlineData, @@ -768,8 +768,7 @@ describe('fileUtils', () => { const result = await processSingleFileContent( testSvgFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect(result.llmContent).toBe(svgContent); @@ -786,8 +785,7 @@ describe('fileUtils', () => { const result = await processSingleFileContent( testBinaryFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect(result.llmContent).toContain( 'Cannot display content of binary file', @@ -796,11 +794,7 @@ describe('fileUtils', () => { }); it('should handle path being a directory', async () => { - const result = await processSingleFileContent( - 
directoryPath, - tempRootDir, - new StandardFileSystemService(), - ); + const result = await processSingleFileContent(directoryPath, mockConfig); expect(result.error).toContain('Path is a directory'); expect(result.returnDisplay).toContain('Path is a directory'); }); @@ -811,8 +805,7 @@ describe('fileUtils', () => { const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, 5, 5, ); // Read lines 6-10 @@ -832,8 +825,7 @@ describe('fileUtils', () => { // Read from line 11 to 20. The start is not 0, so it's truncated. const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, 10, 10, ); @@ -852,8 +844,7 @@ describe('fileUtils', () => { const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, 0, 10, ); @@ -875,17 +866,16 @@ describe('fileUtils', () => { const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect(result.llmContent).toContain('Short line'); expect(result.llmContent).toContain( longLine.substring(0, 2000) + '... [truncated]', ); - expect(result.llmContent).toContain('Another short line'); + expect(result.llmContent).not.toContain('Another short line'); expect(result.returnDisplay).toBe( - 'Read all 3 lines from test.txt (some lines were shortened)', + 'Read lines 1-2 of 3 from test.txt (truncated)', ); expect(result.isTruncated).toBe(true); }); @@ -897,8 +887,7 @@ describe('fileUtils', () => { // Read 5 lines, but there are 11 total const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, 0, 5, ); @@ -916,15 +905,14 @@ describe('fileUtils', () => { // Read all 11 lines, including the long one const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, 0, 11, ); expect(result.isTruncated).toBe(true); expect(result.returnDisplay).toBe( - 'Read all 11 lines from test.txt (some lines were shortened)', + 'Read lines 1-11 of 11 from test.txt (truncated)', ); }); @@ -942,14 +930,13 @@ describe('fileUtils', () => { // Read 10 lines out of 20, including the long line const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, 0, 10, ); expect(result.isTruncated).toBe(true); expect(result.returnDisplay).toBe( - 'Read lines 1-10 of 20 from test.txt (some lines were shortened)', + 'Read lines 1-5 of 20 from test.txt (truncated)', ); }); @@ -966,8 +953,7 @@ describe('fileUtils', () => { try { const result = await processSingleFileContent( testTextFilePath, - tempRootDir, - new StandardFileSystemService(), + mockConfig, ); expect(result.error).toContain('File size exceeds the 20MB limit'); diff --git a/packages/core/src/utils/fileUtils.ts b/packages/core/src/utils/fileUtils.ts index b321ac54..940e9794 100644 --- a/packages/core/src/utils/fileUtils.ts +++ b/packages/core/src/utils/fileUtils.ts @@ -9,13 +9,9 @@ import fsPromises from 'node:fs/promises'; import path from 'node:path'; import type { PartUnion } from '@google/genai'; import mime from 'mime/lite'; -import type { FileSystemService } from '../services/fileSystemService.js'; import { ToolErrorType } from '../tools/tool-error.js'; import { BINARY_EXTENSIONS } from './ignorePatterns.js'; - -// Constants for text file processing -export const 
DEFAULT_MAX_LINES_TEXT_FILE = 2000; -const MAX_LINE_LENGTH_TEXT_FILE = 2000; +import type { Config } from '../config/config.js'; // Default values for encoding and separator format export const DEFAULT_ENCODING: BufferEncoding = 'utf-8'; @@ -306,18 +302,18 @@ export interface ProcessedFileReadResult { /** * Reads and processes a single file, handling text, images, and PDFs. * @param filePath Absolute path to the file. - * @param rootDirectory Absolute path to the project root for relative path display. + * @param config Config instance for truncation settings. * @param offset Optional offset for text files (0-based line number). * @param limit Optional limit for text files (number of lines to read). * @returns ProcessedFileReadResult object. */ export async function processSingleFileContent( filePath: string, - rootDirectory: string, - fileSystemService: FileSystemService, + config: Config, offset?: number, limit?: number, ): Promise { + const rootDirectory = config.getTargetDir(); try { if (!fs.existsSync(filePath)) { // Sync check is acceptable before async read @@ -379,45 +375,76 @@ export async function processSingleFileContent( case 'text': { // Use BOM-aware reader to avoid leaving a BOM character in content and to support UTF-16/32 transparently const content = await readFileWithEncoding(filePath); - const lines = content.split('\n'); + const lines = content.split('\n').map((line) => line.trimEnd()); const originalLineCount = lines.length; const startLine = offset || 0; - const effectiveLimit = - limit === undefined ? DEFAULT_MAX_LINES_TEXT_FILE : limit; + const configLineLimit = config.getTruncateToolOutputLines(); + const configCharLimit = config.getTruncateToolOutputThreshold(); + const effectiveLimit = limit === undefined ? configLineLimit : limit; + // Ensure endLine does not exceed originalLineCount const endLine = Math.min(startLine + effectiveLimit, originalLineCount); // Ensure selectedLines doesn't try to slice beyond array bounds if startLine is too high const actualStartLine = Math.min(startLine, originalLineCount); const selectedLines = lines.slice(actualStartLine, endLine); - let linesWereTruncatedInLength = false; - const formattedLines = selectedLines.map((line) => { - if (line.length > MAX_LINE_LENGTH_TEXT_FILE) { - linesWereTruncatedInLength = true; - return ( - line.substring(0, MAX_LINE_LENGTH_TEXT_FILE) + '... [truncated]' - ); + // Apply character limit truncation + let llmContent = ''; + let contentLengthTruncated = false; + let linesIncluded = 0; + + if (Number.isFinite(configCharLimit)) { + const formattedLines: string[] = []; + let currentLength = 0; + + for (const line of selectedLines) { + const sep = linesIncluded > 0 ? 1 : 0; // newline separator + linesIncluded++; + + const projectedLength = currentLength + line.length + sep; + if (projectedLength <= configCharLimit) { + formattedLines.push(line); + currentLength = projectedLength; + } else { + // Truncate the current line to fit + const remaining = Math.max( + configCharLimit - currentLength - sep, + 10, + ); + formattedLines.push( + line.substring(0, remaining) + '... [truncated]', + ); + contentLengthTruncated = true; + break; + } } - return line; - }); + + llmContent = formattedLines.join('\n'); + } else { + // No character limit, use all selected lines + llmContent = selectedLines.join('\n'); + linesIncluded = selectedLines.length; + } + + // Calculate actual end line shown + const actualEndLine = contentLengthTruncated + ? 
actualStartLine + linesIncluded + : endLine; const contentRangeTruncated = - startLine > 0 || endLine < originalLineCount; - const isTruncated = contentRangeTruncated || linesWereTruncatedInLength; - const llmContent = formattedLines.join('\n'); + startLine > 0 || actualEndLine < originalLineCount; + const isTruncated = contentRangeTruncated || contentLengthTruncated; // By default, return nothing to streamline the common case of a successful read_file. let returnDisplay = ''; - if (contentRangeTruncated) { + if (isTruncated) { returnDisplay = `Read lines ${ actualStartLine + 1 - }-${endLine} of ${originalLineCount} from ${relativePathForDisplay}`; - if (linesWereTruncatedInLength) { - returnDisplay += ' (some lines were shortened)'; + }-${actualEndLine} of ${originalLineCount} from ${relativePathForDisplay}`; + if (contentLengthTruncated) { + returnDisplay += ' (truncated)'; } - } else if (linesWereTruncatedInLength) { - returnDisplay = `Read all ${originalLineCount} lines from ${relativePathForDisplay} (some lines were shortened)`; } return { @@ -425,7 +452,7 @@ export async function processSingleFileContent( returnDisplay, isTruncated, originalLineCount, - linesShown: [actualStartLine + 1, endLine], + linesShown: [actualStartLine + 1, actualEndLine], }; } case 'image': diff --git a/packages/core/src/utils/pathReader.test.ts b/packages/core/src/utils/pathReader.test.ts index 8a1bb52c..fd6ff224 100644 --- a/packages/core/src/utils/pathReader.test.ts +++ b/packages/core/src/utils/pathReader.test.ts @@ -29,6 +29,8 @@ const createMockConfig = ( getTargetDir: () => cwd, getFileSystemService: () => fileSystemService, getFileService: () => mockFileService, + getTruncateToolOutputThreshold: () => 2500, + getTruncateToolOutputLines: () => 500, } as unknown as Config; }; diff --git a/packages/core/src/utils/pathReader.ts b/packages/core/src/utils/pathReader.ts index bf60a1a1..37cbb629 100644 --- a/packages/core/src/utils/pathReader.ts +++ b/packages/core/src/utils/pathReader.ts @@ -83,11 +83,7 @@ export async function readPathFromWorkspace( for (const filePath of finalFiles) { const relativePathForDisplay = path.relative(absolutePath, filePath); allParts.push({ text: `--- ${relativePathForDisplay} ---\n` }); - const result = await processSingleFileContent( - filePath, - config.getTargetDir(), - config.getFileSystemService(), - ); + const result = await processSingleFileContent(filePath, config); allParts.push(result.llmContent); allParts.push({ text: '\n' }); // Add a newline for separation } @@ -108,11 +104,7 @@ export async function readPathFromWorkspace( } // It's a single file, process it directly. 
- const result = await processSingleFileContent( - absolutePath, - config.getTargetDir(), - config.getFileSystemService(), - ); + const result = await processSingleFileContent(absolutePath, config); return [result.llmContent]; } } From 5390f662fcabc8c702c06888a6ce7b9e31084e2d Mon Sep 17 00:00:00 2001 From: pomelo Date: Fri, 7 Nov 2025 17:28:37 +0800 Subject: [PATCH 4/8] fix: VSCode detection null check and debug message optimization (#983) --- packages/cli/src/ui/AppContainer.tsx | 7 ++++++- packages/core/src/ide/detect-ide.ts | 5 ++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/packages/cli/src/ui/AppContainer.tsx b/packages/cli/src/ui/AppContainer.tsx index 059d1dc4..2e66610a 100644 --- a/packages/cli/src/ui/AppContainer.tsx +++ b/packages/cli/src/ui/AppContainer.tsx @@ -551,6 +551,11 @@ export const AppContainer = (props: AppContainerProps) => { [visionSwitchResolver], ); + // onDebugMessage should log to console, not update footer debugMessage + const onDebugMessage = useCallback((message: string) => { + console.debug(message); + }, []); + const performMemoryRefresh = useCallback(async () => { historyManager.addItem( { @@ -628,7 +633,7 @@ export const AppContainer = (props: AppContainerProps) => { historyManager.addItem, config, settings, - setDebugMessage, + onDebugMessage, handleSlashCommand, shellModeActive, () => settings.merged.general?.preferredEditor as EditorType, diff --git a/packages/core/src/ide/detect-ide.ts b/packages/core/src/ide/detect-ide.ts index 1f3e805d..c00d9a62 100644 --- a/packages/core/src/ide/detect-ide.ts +++ b/packages/core/src/ide/detect-ide.ts @@ -60,7 +60,10 @@ function verifyVSCode( if (ide.name !== IDE_DEFINITIONS.vscode.name) { return ide; } - if (ideProcessInfo.command.toLowerCase().includes('code')) { + if ( + ideProcessInfo.command && + ideProcessInfo.command.toLowerCase().includes('code') + ) { return IDE_DEFINITIONS.vscode; } return IDE_DEFINITIONS.vscodefork; From 7a472e4fcf8c4042aa6d71bb1ce67b7c5d66bb36 Mon Sep 17 00:00:00 2001 From: tanzhenxin Date: Fri, 7 Nov 2025 17:34:38 +0800 Subject: [PATCH 5/8] chore: pump version to 0.2.0 (#991) --- package-lock.json | 55 +++++++++------------- package.json | 4 +- packages/cli/package.json | 4 +- packages/core/package.json | 2 +- packages/test-utils/package.json | 2 +- packages/vscode-ide-companion/package.json | 2 +- 6 files changed, 30 insertions(+), 39 deletions(-) diff --git a/package-lock.json b/package-lock.json index 6f68eccf..98bdf73e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@qwen-code/qwen-code", - "version": "0.1.5", + "version": "0.2.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@qwen-code/qwen-code", - "version": "0.1.5", + "version": "0.2.0", "workspaces": [ "packages/*" ], @@ -555,7 +555,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" }, @@ -579,7 +578,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" } @@ -2120,7 +2118,6 @@ "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", "license": "Apache-2.0", - "peer": true, "engines": { "node": ">=8.0.0" } @@ -3282,7 +3279,6 @@ "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==", "license": "MIT", - "peer": true, "dependencies": { 
"@babel/code-frame": "^7.10.4", "@babel/runtime": "^7.12.5", @@ -3721,7 +3717,6 @@ "integrity": "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==", "devOptional": true, "license": "MIT", - "peer": true, "dependencies": { "csstype": "^3.0.2" } @@ -3732,7 +3727,6 @@ "integrity": "sha512-4hOiT/dwO8Ko0gV1m/TJZYk3y0KBnY9vzDh7W+DH17b2HFSOGgdj33dhihPeuy3l0q23+4e+hoXHV6hCC4dCXw==", "dev": true, "license": "MIT", - "peer": true, "peerDependencies": { "@types/react": "^19.0.0" } @@ -3938,7 +3932,6 @@ "integrity": "sha512-6sMvZePQrnZH2/cJkwRpkT7DxoAWh+g6+GFRK6bV3YQo7ogi3SX5rgF6099r5Q53Ma5qeT7LGmOmuIutF4t3lA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.35.0", "@typescript-eslint/types": "8.35.0", @@ -4707,7 +4700,6 @@ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -5062,7 +5054,8 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/array-includes": { "version": "3.1.9", @@ -6227,6 +6220,7 @@ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", "license": "MIT", + "peer": true, "dependencies": { "safe-buffer": "5.2.1" }, @@ -7260,7 +7254,6 @@ "integrity": "sha512-GsGizj2Y1rCWDu6XoEekL3RLilp0voSePurjZIkxL3wlm5o5EC9VpgaP7lrCvjnkuLvzFBQWB3vWB3K5KQTveQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.12.1", @@ -7730,6 +7723,7 @@ "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", "license": "MIT", + "peer": true, "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", @@ -7791,6 +7785,7 @@ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", "license": "MIT", + "peer": true, "engines": { "node": ">= 0.6" } @@ -7800,6 +7795,7 @@ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "license": "MIT", + "peer": true, "dependencies": { "ms": "2.0.0" } @@ -7809,6 +7805,7 @@ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", "license": "MIT", + "peer": true, "engines": { "node": ">= 0.8" } @@ -7975,6 +7972,7 @@ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", "license": "MIT", + "peer": true, "dependencies": { "debug": "2.6.9", "encodeurl": "~2.0.0", @@ -7993,6 +7991,7 @@ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "license": "MIT", + "peer": true, "dependencies": { "ms": "2.0.0" } @@ -8001,13 +8000,15 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/finalhandler/node_modules/statuses": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", "license": "MIT", + "peer": true, "engines": { "node": ">= 0.8" } @@ -9046,7 +9047,6 @@ "resolved": "https://registry.npmjs.org/ink/-/ink-6.2.3.tgz", "integrity": "sha512-fQkfEJjKbLXIcVWEE3MvpYSnwtbbmRsmeNDNz1pIuOFlwE+UF2gsy228J36OXKZGWJWZJKUigphBSqCNMcARtg==", "license": "MIT", - "peer": true, "dependencies": { "@alcalzone/ansi-tokenize": "^0.2.0", "ansi-escapes": "^7.0.0", @@ -10950,6 +10950,7 @@ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", "license": "MIT", + "peer": true, "engines": { "node": ">= 0.6" } @@ -12157,7 +12158,8 @@ "version": "0.1.12", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/path-type": { "version": "3.0.0", @@ -12661,7 +12663,6 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz", "integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==", "license": "MIT", - "peer": true, "engines": { "node": ">=0.10.0" } @@ -12672,7 +12673,6 @@ "integrity": "sha512-cq/o30z9W2Wb4rzBefjv5fBalHU0rJGZCHAkf/RHSBWSSYwh8PlQTqqOJmgIIbBtpj27T6FIPXeomIjZtCNVqA==", "devOptional": true, "license": "MIT", - "peer": true, "dependencies": { "shell-quote": "^1.6.1", "ws": "^7" @@ -12706,7 +12706,6 @@ "integrity": "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "scheduler": "^0.26.0" }, @@ -14516,7 +14515,6 @@ "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -14690,8 +14688,7 @@ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "dev": true, - "license": "0BSD", - "peer": true + "license": "0BSD" }, "node_modules/tsx": { "version": "4.20.3", @@ -14699,7 +14696,6 @@ "integrity": "sha512-qjbnuR9Tr+FJOMBqJCW5ehvIo/buZq7vH7qD7JziU98h6l3qGy0a/yPFjwO+y0/T7GFpNgNAvEcPPVfyT8rrPQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "esbuild": "~0.25.0", "get-tsconfig": "^4.7.5" @@ -14884,7 +14880,6 @@ "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", "dev": true, "license": "Apache-2.0", - "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -15154,6 +15149,7 @@ "resolved": 
"https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", "license": "MIT", + "peer": true, "engines": { "node": ">= 0.4.0" } @@ -15209,7 +15205,6 @@ "integrity": "sha512-ixXJB1YRgDIw2OszKQS9WxGHKwLdCsbQNkpJN171udl6szi/rIySHL6/Os3s2+oE4P/FLD4dxg4mD7Wust+u5g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.6", @@ -15323,7 +15318,6 @@ "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -15337,7 +15331,6 @@ "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@types/chai": "^5.2.2", "@vitest/expect": "3.2.4", @@ -16016,7 +16009,6 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", "license": "MIT", - "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } @@ -16032,7 +16024,7 @@ }, "packages/cli": { "name": "@qwen-code/qwen-code", - "version": "0.1.5", + "version": "0.2.0", "dependencies": { "@google/genai": "1.16.0", "@iarna/toml": "^2.2.5", @@ -16147,7 +16139,7 @@ }, "packages/core": { "name": "@qwen-code/qwen-code-core", - "version": "0.1.5", + "version": "0.2.0", "hasInstallScript": true, "dependencies": { "@google/genai": "1.16.0", @@ -16277,7 +16269,6 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -16287,7 +16278,7 @@ }, "packages/test-utils": { "name": "@qwen-code/qwen-code-test-utils", - "version": "0.1.5", + "version": "0.2.0", "dev": true, "license": "Apache-2.0", "devDependencies": { @@ -16299,7 +16290,7 @@ }, "packages/vscode-ide-companion": { "name": "qwen-code-vscode-ide-companion", - "version": "0.1.5", + "version": "0.2.0", "license": "LICENSE", "dependencies": { "@modelcontextprotocol/sdk": "^1.15.1", diff --git a/package.json b/package.json index 0208d435..a681f83b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code", - "version": "0.1.5", + "version": "0.2.0", "engines": { "node": ">=20.0.0" }, @@ -13,7 +13,7 @@ "url": "git+https://github.com/QwenLM/qwen-code.git" }, "config": { - "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.5" + "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.0" }, "scripts": { "start": "cross-env node scripts/start.js", diff --git a/packages/cli/package.json b/packages/cli/package.json index 3456a365..6bd719d1 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code", - "version": "0.1.5", + "version": "0.2.0", "description": "Qwen Code", "repository": { "type": "git", @@ -25,7 +25,7 @@ "dist" ], "config": { - "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.5" + "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.0" }, "dependencies": { "@google/genai": "1.16.0", diff --git a/packages/core/package.json b/packages/core/package.json index 3b864724..e9e3b349 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,6 +1,6 @@ { "name": 
"@qwen-code/qwen-code-core", - "version": "0.1.5", + "version": "0.2.0", "description": "Qwen Code Core", "repository": { "type": "git", diff --git a/packages/test-utils/package.json b/packages/test-utils/package.json index 089b883c..9ea3aba4 100644 --- a/packages/test-utils/package.json +++ b/packages/test-utils/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code-test-utils", - "version": "0.1.5", + "version": "0.2.0", "private": true, "main": "src/index.ts", "license": "Apache-2.0", diff --git a/packages/vscode-ide-companion/package.json b/packages/vscode-ide-companion/package.json index f76d4113..f5d40a73 100644 --- a/packages/vscode-ide-companion/package.json +++ b/packages/vscode-ide-companion/package.json @@ -2,7 +2,7 @@ "name": "qwen-code-vscode-ide-companion", "displayName": "Qwen Code Companion", "description": "Enable Qwen Code with direct access to your VS Code workspace.", - "version": "0.1.5", + "version": "0.2.0", "publisher": "qwenlm", "icon": "assets/icon.png", "repository": { From 3c01c7153bb523dca258418557ad3ce8a18d9e29 Mon Sep 17 00:00:00 2001 From: Mingholy Date: Fri, 7 Nov 2025 19:55:23 +0800 Subject: [PATCH 6/8] feat: enhance zed integration with TodoWriteTool and TaskTool support (#992) - Implemented detection and handling for TodoWriteTool to route updates as plan entries instead of tool call events. - Added sub-agent tool tracking for TaskTool, allowing for event emission and cleanup. - Updated event listeners to manage sub-agent tool calls and approval requests effectively. --- .../cli/src/zed-integration/zedIntegration.ts | 437 ++++++++++++++++-- packages/core/src/index.ts | 2 + packages/core/src/subagents/index.ts | 3 +- 3 files changed, 413 insertions(+), 29 deletions(-) diff --git a/packages/cli/src/zed-integration/zedIntegration.ts b/packages/cli/src/zed-integration/zedIntegration.ts index 49e73991..4a01ed7e 100644 --- a/packages/cli/src/zed-integration/zedIntegration.ts +++ b/packages/cli/src/zed-integration/zedIntegration.ts @@ -12,6 +12,12 @@ import type { GeminiChat, ToolCallConfirmationDetails, ToolResult, + SubAgentEventEmitter, + SubAgentToolCallEvent, + SubAgentToolResultEvent, + SubAgentApprovalRequestEvent, + AnyDeclarativeTool, + AnyToolInvocation, } from '@qwen-code/qwen-code-core'; import { AuthType, @@ -28,6 +34,10 @@ import { getErrorStatus, isWithinRoot, isNodeError, + SubAgentEventType, + TaskTool, + Kind, + TodoWriteTool, } from '@qwen-code/qwen-code-core'; import * as acp from './acp.js'; import { AcpFileSystemService } from './fileSystemService.js'; @@ -403,9 +413,34 @@ class Session { ); } + // Detect TodoWriteTool early - route to plan updates instead of tool_call events + const isTodoWriteTool = + fc.name === TodoWriteTool.Name || tool.name === TodoWriteTool.Name; + + // Declare subAgentToolEventListeners outside try block for cleanup in catch + let subAgentToolEventListeners: Array<() => void> = []; + try { const invocation = tool.build(args); + // Detect TaskTool and set up sub-agent tool tracking + const isTaskTool = tool.name === TaskTool.Name; + + if (isTaskTool && 'eventEmitter' in invocation) { + // Access eventEmitter from TaskTool invocation + const taskEventEmitter = ( + invocation as { + eventEmitter: SubAgentEventEmitter; + } + ).eventEmitter; + + // Set up sub-agent tool tracking + subAgentToolEventListeners = this.setupSubAgentToolTracking( + taskEventEmitter, + abortSignal, + ); + } + const confirmationDetails = await invocation.shouldConfirmExecute(abortSignal); @@ -460,7 +495,8 @@ class Session { throw new 
Error(`Unexpected: ${resultOutcome}`); } } - } else { + } else if (!isTodoWriteTool) { + // Skip tool_call event for TodoWriteTool await this.sendUpdate({ sessionUpdate: 'tool_call', toolCallId: callId, @@ -473,14 +509,61 @@ class Session { } const toolResult: ToolResult = await invocation.execute(abortSignal); - const content = toToolCallContent(toolResult); - await this.sendUpdate({ - sessionUpdate: 'tool_call_update', - toolCallId: callId, - status: 'completed', - content: content ? [content] : [], - }); + // Clean up event listeners + subAgentToolEventListeners.forEach((cleanup) => cleanup()); + + // Handle TodoWriteTool: extract todos and send plan update + if (isTodoWriteTool) { + // Extract todos from args (initial state) + let todos: Array<{ + id: string; + content: string; + status: 'pending' | 'in_progress' | 'completed'; + }> = []; + + if (Array.isArray(args['todos'])) { + todos = args['todos'] as Array<{ + id: string; + content: string; + status: 'pending' | 'in_progress' | 'completed'; + }>; + } + + // If returnDisplay has todos (e.g., modified by user), use those instead + if ( + toolResult.returnDisplay && + typeof toolResult.returnDisplay === 'object' && + 'type' in toolResult.returnDisplay && + toolResult.returnDisplay.type === 'todo_list' && + 'todos' in toolResult.returnDisplay && + Array.isArray(toolResult.returnDisplay.todos) + ) { + todos = toolResult.returnDisplay.todos; + } + + // Convert todos to plan entries and send plan update + if (todos.length > 0 || Array.isArray(args['todos'])) { + const planEntries = convertTodosToPlanEntries(todos); + await this.sendUpdate({ + sessionUpdate: 'plan', + entries: planEntries, + }); + } + + // Skip tool_call_update event for TodoWriteTool + // Still log and return function response for LLM + } else { + // Normal tool handling: send tool_call_update + const content = toToolCallContent(toolResult); + + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'completed', + content: content ? [content] : [], + }); + } const durationMs = Date.now() - startTime; logToolCall(this.config, { @@ -500,6 +583,9 @@ class Session { return convertToFunctionResponse(fc.name, callId, toolResult.llmContent); } catch (e) { + // Ensure cleanup on error + subAgentToolEventListeners.forEach((cleanup) => cleanup()); + const error = e instanceof Error ? e : new Error(String(e)); await this.sendUpdate({ @@ -515,6 +601,300 @@ class Session { } } + /** + * Sets up event listeners to track sub-agent tool calls within a TaskTool execution. + * Converts subagent tool call events into zedIntegration session updates. 
+ * + * @param eventEmitter - The SubAgentEventEmitter from TaskTool + * @param abortSignal - Signal to abort tracking if parent is cancelled + * @returns Array of cleanup functions to remove event listeners + */ + private setupSubAgentToolTracking( + eventEmitter: SubAgentEventEmitter, + abortSignal: AbortSignal, + ): Array<() => void> { + const cleanupFunctions: Array<() => void> = []; + const toolRegistry = this.config.getToolRegistry(); + + // Track subagent tool call states + const subAgentToolStates = new Map< + string, + { + tool?: AnyDeclarativeTool; + invocation?: AnyToolInvocation; + args?: Record; + } + >(); + + // Listen for tool call start + const onToolCall = (...args: unknown[]) => { + const event = args[0] as SubAgentToolCallEvent; + if (abortSignal.aborted) return; + + const subAgentTool = toolRegistry.getTool(event.name); + let subAgentInvocation: AnyToolInvocation | undefined; + let toolKind: acp.ToolKind = 'other'; + let locations: acp.ToolCallLocation[] = []; + + if (subAgentTool) { + try { + subAgentInvocation = subAgentTool.build(event.args); + toolKind = this.mapToolKind(subAgentTool.kind); + locations = subAgentInvocation.toolLocations().map((loc) => ({ + path: loc.path, + line: loc.line ?? null, + })); + } catch (e) { + // If building fails, continue with defaults + console.warn(`Failed to build subagent tool ${event.name}:`, e); + } + } + + // Save state for subsequent updates + subAgentToolStates.set(event.callId, { + tool: subAgentTool, + invocation: subAgentInvocation, + args: event.args, + }); + + // Check if this is TodoWriteTool - if so, skip sending tool_call event + // Plan update will be sent in onToolResult when we have the final state + if (event.name === TodoWriteTool.Name) { + return; + } + + // Send tool call start update with rawInput + void this.sendUpdate({ + sessionUpdate: 'tool_call', + toolCallId: event.callId, + status: 'in_progress', + title: event.description || event.name, + content: [], + locations, + kind: toolKind, + rawInput: event.args, + }); + }; + + // Listen for tool call result + const onToolResult = (...args: unknown[]) => { + const event = args[0] as SubAgentToolResultEvent; + if (abortSignal.aborted) return; + + const state = subAgentToolStates.get(event.callId); + + // Check if this is TodoWriteTool - if so, route to plan updates + if (event.name === TodoWriteTool.Name) { + let todos: + | Array<{ + id: string; + content: string; + status: 'pending' | 'in_progress' | 'completed'; + }> + | undefined; + + // Try to extract todos from resultDisplay first (final state) + if (event.resultDisplay) { + try { + // resultDisplay might be a JSON stringified object + const parsed = + typeof event.resultDisplay === 'string' + ? 
JSON.parse(event.resultDisplay) + : event.resultDisplay; + + if ( + typeof parsed === 'object' && + parsed !== null && + 'type' in parsed && + parsed.type === 'todo_list' && + 'todos' in parsed && + Array.isArray(parsed.todos) + ) { + todos = parsed.todos; + } + } catch { + // If parsing fails, ignore - resultDisplay might not be JSON + } + } + + // Fallback to args if resultDisplay doesn't have todos + if (!todos && state?.args && Array.isArray(state.args['todos'])) { + todos = state.args['todos'] as Array<{ + id: string; + content: string; + status: 'pending' | 'in_progress' | 'completed'; + }>; + } + + // Send plan update if we have todos + if (todos) { + const planEntries = convertTodosToPlanEntries(todos); + void this.sendUpdate({ + sessionUpdate: 'plan', + entries: planEntries, + }); + } + + // Skip sending tool_call_update event for TodoWriteTool + // Clean up state + subAgentToolStates.delete(event.callId); + return; + } + + let content: acp.ToolCallContent[] = []; + + // If there's a result display, try to convert to ToolCallContent + if (event.resultDisplay && state?.invocation) { + // resultDisplay is typically a string + if (typeof event.resultDisplay === 'string') { + content = [ + { + type: 'content', + content: { + type: 'text', + text: event.resultDisplay, + }, + }, + ]; + } + } + + // Send tool call completion update + void this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: event.callId, + status: event.success ? 'completed' : 'failed', + content: content.length > 0 ? content : [], + title: state?.invocation?.getDescription() ?? event.name, + kind: state?.tool ? this.mapToolKind(state.tool.kind) : null, + locations: + state?.invocation?.toolLocations().map((loc) => ({ + path: loc.path, + line: loc.line ?? null, + })) ?? null, + rawInput: state?.args, + }); + + // Clean up state + subAgentToolStates.delete(event.callId); + }; + + // Listen for permission requests + const onToolWaitingApproval = async (...args: unknown[]) => { + const event = args[0] as SubAgentApprovalRequestEvent; + if (abortSignal.aborted) return; + + const state = subAgentToolStates.get(event.callId); + const content: acp.ToolCallContent[] = []; + + // Handle different confirmation types + if (event.confirmationDetails.type === 'edit') { + const editDetails = event.confirmationDetails as unknown as { + type: 'edit'; + fileName: string; + originalContent: string | null; + newContent: string; + }; + content.push({ + type: 'diff', + path: editDetails.fileName, + oldText: editDetails.originalContent ?? '', + newText: editDetails.newContent, + }); + } + + // Build permission request options from confirmation details + // event.confirmationDetails already contains all fields except onConfirm, + // which we add here to satisfy the type requirement for toPermissionOptions + const fullConfirmationDetails = { + ...event.confirmationDetails, + onConfirm: async () => { + // This is a placeholder - the actual response is handled via event.respond + }, + } as unknown as ToolCallConfirmationDetails; + + const params: acp.RequestPermissionRequest = { + sessionId: this.id, + options: toPermissionOptions(fullConfirmationDetails), + toolCall: { + toolCallId: event.callId, + status: 'pending', + title: event.description || event.name, + content, + locations: + state?.invocation?.toolLocations().map((loc) => ({ + path: loc.path, + line: loc.line ?? null, + })) ?? [], + kind: state?.tool ? 
this.mapToolKind(state.tool.kind) : 'other', + rawInput: state?.args, + }, + }; + + try { + // Request permission from zed client + const output = await this.client.requestPermission(params); + const outcome = + output.outcome.outcome === 'cancelled' + ? ToolConfirmationOutcome.Cancel + : z + .nativeEnum(ToolConfirmationOutcome) + .parse(output.outcome.optionId); + + // Respond to subagent with the outcome + await event.respond(outcome); + } catch (error) { + // If permission request fails, cancel the tool call + console.error( + `Permission request failed for subagent tool ${event.name}:`, + error, + ); + await event.respond(ToolConfirmationOutcome.Cancel); + } + }; + + // Register event listeners + eventEmitter.on(SubAgentEventType.TOOL_CALL, onToolCall); + eventEmitter.on(SubAgentEventType.TOOL_RESULT, onToolResult); + eventEmitter.on( + SubAgentEventType.TOOL_WAITING_APPROVAL, + onToolWaitingApproval, + ); + + // Return cleanup functions + cleanupFunctions.push(() => { + eventEmitter.off(SubAgentEventType.TOOL_CALL, onToolCall); + eventEmitter.off(SubAgentEventType.TOOL_RESULT, onToolResult); + eventEmitter.off( + SubAgentEventType.TOOL_WAITING_APPROVAL, + onToolWaitingApproval, + ); + }); + + return cleanupFunctions; + } + + /** + * Maps core Tool Kind enum to ACP ToolKind string literals. + * + * @param kind - The core Kind enum value + * @returns The corresponding ACP ToolKind string literal + */ + private mapToolKind(kind: Kind): acp.ToolKind { + const kindMap: Record = { + [Kind.Read]: 'read', + [Kind.Edit]: 'edit', + [Kind.Delete]: 'delete', + [Kind.Move]: 'move', + [Kind.Search]: 'search', + [Kind.Execute]: 'execute', + [Kind.Think]: 'think', + [Kind.Fetch]: 'fetch', + [Kind.Other]: 'other', + }; + return kindMap[kind] ?? 'other'; + } + async #resolvePrompt( message: acp.ContentBlock[], abortSignal: AbortSignal, @@ -859,6 +1239,27 @@ class Session { } } +/** + * Converts todo items to plan entries format for zed integration. + * Maps todo status to plan status and assigns a default priority. 
+ * + * @param todos - Array of todo items with id, content, and status + * @returns Array of plan entries with content, priority, and status + */ +function convertTodosToPlanEntries( + todos: Array<{ + id: string; + content: string; + status: 'pending' | 'in_progress' | 'completed'; + }>, +): acp.PlanEntry[] { + return todos.map((todo) => ({ + content: todo.content, + priority: 'medium' as const, // Default priority since todos don't have priority + status: todo.status, + })); +} + function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null { if (toolResult.error?.message) { throw new Error(toolResult.error.message); @@ -870,26 +1271,6 @@ function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null { type: 'content', content: { type: 'text', text: toolResult.returnDisplay }, }; - } else if ( - 'type' in toolResult.returnDisplay && - toolResult.returnDisplay.type === 'todo_list' - ) { - // Handle TodoResultDisplay - convert to text representation - const todoText = toolResult.returnDisplay.todos - .map((todo) => { - const statusIcon = { - pending: '○', - in_progress: '◐', - completed: '●', - }[todo.status]; - return `${statusIcon} ${todo.content}`; - }) - .join('\n'); - - return { - type: 'content', - content: { type: 'text', text: todoText }, - }; } else if ( 'type' in toolResult.returnDisplay && toolResult.returnDisplay.type === 'plan_summary' diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index dd675380..883fb114 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -102,6 +102,8 @@ export * from './tools/web-search/index.js'; export * from './tools/read-many-files.js'; export * from './tools/mcp-client.js'; export * from './tools/mcp-tool.js'; +export * from './tools/task.js'; +export * from './tools/todoWrite.js'; // MCP OAuth export { MCPOAuthProvider } from './mcp/oauth-provider.js'; diff --git a/packages/core/src/subagents/index.ts b/packages/core/src/subagents/index.ts index 2b0468a9..5560b4fd 100644 --- a/packages/core/src/subagents/index.ts +++ b/packages/core/src/subagents/index.ts @@ -62,9 +62,10 @@ export type { SubAgentToolResultEvent, SubAgentFinishEvent, SubAgentErrorEvent, + SubAgentApprovalRequestEvent, } from './subagent-events.js'; -export { SubAgentEventEmitter } from './subagent-events.js'; +export { SubAgentEventEmitter, SubAgentEventType } from './subagent-events.js'; // Statistics and formatting export type { From 6aaac12d70e6cd589c75a6070290016ad13377c6 Mon Sep 17 00:00:00 2001 From: Matthieu Beaumont Date: Sat, 8 Nov 2025 14:54:43 +0100 Subject: [PATCH 7/8] fix(acp): replace EOL with newline for content splitting - Replace `EOL` from `node:os` with `\n` for consistent line splitting in ACP connection output processing - This ensures cross-platform compatibility since `EOL` is platform-specific while `\n` is universally used in text decoding - The change maintains the same behavior on all platforms by using standard newline characters --- packages/cli/src/zed-integration/acp.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/cli/src/zed-integration/acp.ts b/packages/cli/src/zed-integration/acp.ts index 74f97cc6..a260c61e 100644 --- a/packages/cli/src/zed-integration/acp.ts +++ b/packages/cli/src/zed-integration/acp.ts @@ -7,7 +7,6 @@ /* ACP defines a schema for a simple (experimental) JSON-RPC protocol that allows GUI applications to interact with agents. 
*/ import { z } from 'zod'; -import { EOL } from 'node:os'; import * as schema from './schema.js'; export * from './schema.js'; @@ -173,7 +172,7 @@ class Connection { const decoder = new TextDecoder(); for await (const chunk of output) { content += decoder.decode(chunk, { stream: true }); - const lines = content.split(EOL); + const lines = content.split('\n'); content = lines.pop() || ''; for (const line of lines) { From 22edef0cb95d0c215db5d99bdca1d6d8c3767514 Mon Sep 17 00:00:00 2001 From: tanzhenxin Date: Mon, 10 Nov 2025 15:18:59 +0800 Subject: [PATCH 8/8] chore: pump version to 0.2.1 (#1005) --- package-lock.json | 12 ++++++------ package.json | 4 ++-- packages/cli/package.json | 4 ++-- packages/core/package.json | 2 +- packages/test-utils/package.json | 2 +- packages/vscode-ide-companion/package.json | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/package-lock.json b/package-lock.json index 98bdf73e..6cbcac5b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@qwen-code/qwen-code", - "version": "0.2.0", + "version": "0.2.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@qwen-code/qwen-code", - "version": "0.2.0", + "version": "0.2.1", "workspaces": [ "packages/*" ], @@ -16024,7 +16024,7 @@ }, "packages/cli": { "name": "@qwen-code/qwen-code", - "version": "0.2.0", + "version": "0.2.1", "dependencies": { "@google/genai": "1.16.0", "@iarna/toml": "^2.2.5", @@ -16139,7 +16139,7 @@ }, "packages/core": { "name": "@qwen-code/qwen-code-core", - "version": "0.2.0", + "version": "0.2.1", "hasInstallScript": true, "dependencies": { "@google/genai": "1.16.0", @@ -16278,7 +16278,7 @@ }, "packages/test-utils": { "name": "@qwen-code/qwen-code-test-utils", - "version": "0.2.0", + "version": "0.2.1", "dev": true, "license": "Apache-2.0", "devDependencies": { @@ -16290,7 +16290,7 @@ }, "packages/vscode-ide-companion": { "name": "qwen-code-vscode-ide-companion", - "version": "0.2.0", + "version": "0.2.1", "license": "LICENSE", "dependencies": { "@modelcontextprotocol/sdk": "^1.15.1", diff --git a/package.json b/package.json index a681f83b..465eef63 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code", - "version": "0.2.0", + "version": "0.2.1", "engines": { "node": ">=20.0.0" }, @@ -13,7 +13,7 @@ "url": "git+https://github.com/QwenLM/qwen-code.git" }, "config": { - "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.0" + "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.1" }, "scripts": { "start": "cross-env node scripts/start.js", diff --git a/packages/cli/package.json b/packages/cli/package.json index 6bd719d1..9924d7d1 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code", - "version": "0.2.0", + "version": "0.2.1", "description": "Qwen Code", "repository": { "type": "git", @@ -25,7 +25,7 @@ "dist" ], "config": { - "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.0" + "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.1" }, "dependencies": { "@google/genai": "1.16.0", diff --git a/packages/core/package.json b/packages/core/package.json index e9e3b349..5b70b7db 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code-core", - "version": "0.2.0", + "version": "0.2.1", "description": "Qwen Code Core", "repository": { "type": "git", diff --git a/packages/test-utils/package.json b/packages/test-utils/package.json index 9ea3aba4..90b9cb27 100644 --- 
a/packages/test-utils/package.json +++ b/packages/test-utils/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code-test-utils", - "version": "0.2.0", + "version": "0.2.1", "private": true, "main": "src/index.ts", "license": "Apache-2.0", diff --git a/packages/vscode-ide-companion/package.json b/packages/vscode-ide-companion/package.json index f5d40a73..10f8df20 100644 --- a/packages/vscode-ide-companion/package.json +++ b/packages/vscode-ide-companion/package.json @@ -2,7 +2,7 @@ "name": "qwen-code-vscode-ide-companion", "displayName": "Qwen Code Companion", "description": "Enable Qwen Code with direct access to your VS Code workspace.", - "version": "0.2.0", + "version": "0.2.1", "publisher": "qwenlm", "icon": "assets/icon.png", "repository": {