Merge remote-tracking branch 'origin' into feature/stream-json-migration

Author: mingholy.lmh
Date: 2025-11-02 21:44:04 +08:00
36 changed files with 1765 additions and 758 deletions

View File

@@ -246,6 +246,14 @@ Settings are organized into categories. All settings should be placed within the
  - It must return function output as JSON on `stdout`, analogous to [`functionResponse.response.content`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functionresponse).
  - **Default:** `undefined`
- **`tools.useRipgrep`** (boolean):
  - **Description:** Use ripgrep for file content search instead of the fallback implementation. Provides faster search performance.
  - **Default:** `true`
- **`tools.useBuiltinRipgrep`** (boolean):
  - **Description:** Use the bundled ripgrep binary. When set to `false`, the system-level `rg` command will be used instead. This setting is only effective when `tools.useRipgrep` is `true`.
  - **Default:** `true`
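
Together, these two flags select the search backend. As a minimal sketch (not taken from the docs themselves), the three resulting modes can be expressed with the `Settings` shape used by this change's CLI tests; the import path is assumed:

```ts
import type { Settings } from './config/settings.js'; // path assumed for illustration

// Default: search with the bundled ripgrep binary.
const bundled: Settings = { tools: { useRipgrep: true, useBuiltinRipgrep: true } };

// Use a system-level `rg` found on PATH instead of the bundled binary.
const systemRg: Settings = { tools: { useRipgrep: true, useBuiltinRipgrep: false } };

// Use the fallback search implementation; `useBuiltinRipgrep` is ignored here.
const fallback: Settings = { tools: { useRipgrep: false } };
```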
#### `mcp`
- **`mcp.serverCommand`** (string):

package-lock.json (generated; 12 changed lines)
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.1.2",
"version": "0.1.3",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.1.2",
"version": "0.1.3",
"workspaces": [
"packages/*"
],
@@ -16024,7 +16024,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.1.2",
"version": "0.1.3",
"dependencies": {
"@google/genai": "1.16.0",
"@iarna/toml": "^2.2.5",
@@ -16139,7 +16139,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.1.2",
"version": "0.1.3",
"hasInstallScript": true,
"dependencies": {
"@google/genai": "1.16.0",
@@ -16278,7 +16278,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.1.2",
"version": "0.1.3",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -16290,7 +16290,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.1.2",
"version": "0.1.3",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.1.2",
"version": "0.1.3",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.2"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.3"
},
"scripts": {
"start": "cross-env node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.1.2",
"version": "0.1.3",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -36,7 +36,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.2"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.3"
},
"dependencies": {
"@google/genai": "1.16.0",

View File

@@ -2470,6 +2470,73 @@ describe('loadCliConfig useRipgrep', () => {
});
});
describe('loadCliConfig useBuiltinRipgrep', () => {
const originalArgv = process.argv;
beforeEach(() => {
vi.resetAllMocks();
vi.mocked(os.homedir).mockReturnValue('/mock/home/user');
vi.stubEnv('GEMINI_API_KEY', 'test-api-key');
});
afterEach(() => {
process.argv = originalArgv;
vi.unstubAllEnvs();
vi.restoreAllMocks();
});
it('should be true by default when useBuiltinRipgrep is not set in settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments({} as Settings);
const settings: Settings = {};
const config = await loadCliConfig(
settings,
[],
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
'test-session',
argv,
);
expect(config.getUseBuiltinRipgrep()).toBe(true);
});
it('should be false when useBuiltinRipgrep is set to false in settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments({} as Settings);
const settings: Settings = { tools: { useBuiltinRipgrep: false } };
const config = await loadCliConfig(
settings,
[],
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
'test-session',
argv,
);
expect(config.getUseBuiltinRipgrep()).toBe(false);
});
it('should be true when useBuiltinRipgrep is explicitly set to true in settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments({} as Settings);
const settings: Settings = { tools: { useBuiltinRipgrep: true } };
const config = await loadCliConfig(
settings,
[],
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
'test-session',
argv,
);
expect(config.getUseBuiltinRipgrep()).toBe(true);
});
});
describe('screenReader configuration', () => {
const originalArgv = process.argv;

View File

@@ -217,7 +217,7 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
'proxy',
'Use the "proxy" setting in settings.json instead. This flag will be removed in a future version.',
)
.command('$0 [query..]', 'Launch Gemini CLI', (yargsInstance: Argv) =>
.command('$0 [query..]', 'Launch Qwen Code CLI', (yargsInstance: Argv) =>
yargsInstance
.positional('query', {
description:
@@ -817,6 +817,7 @@ export async function loadCliConfig(
interactive,
trustedFolder,
useRipgrep: settings.tools?.useRipgrep,
useBuiltinRipgrep: settings.tools?.useBuiltinRipgrep,
shouldUseNodePtyShell: settings.tools?.shell?.enableInteractiveShell,
skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck,
enablePromptCompletion: settings.general?.enablePromptCompletion ?? false,

View File

@@ -66,6 +66,8 @@ import {
loadEnvironment,
migrateDeprecatedSettings,
SettingScope,
SETTINGS_VERSION,
SETTINGS_VERSION_KEY,
} from './settings.js';
import { FatalConfigError, QWEN_DIR } from '@qwen-code/qwen-code-core';
@@ -94,6 +96,7 @@ vi.mock('fs', async (importOriginal) => {
existsSync: vi.fn(),
readFileSync: vi.fn(),
writeFileSync: vi.fn(),
renameSync: vi.fn(),
mkdirSync: vi.fn(),
realpathSync: (p: string) => p,
};
@@ -171,11 +174,15 @@ describe('Settings Loading and Merging', () => {
getSystemSettingsPath(),
'utf-8',
);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.system.settings).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.user.settings).toEqual({});
expect(settings.workspace.settings).toEqual({});
expect(settings.merged).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
});
@@ -207,10 +214,14 @@ describe('Settings Loading and Merging', () => {
expectedUserSettingsPath,
'utf-8',
);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.user.settings).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.workspace.settings).toEqual({});
expect(settings.merged).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
});
@@ -241,9 +252,13 @@ describe('Settings Loading and Merging', () => {
'utf-8',
);
expect(settings.user.settings).toEqual({});
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.workspace.settings).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
});
@@ -304,10 +319,20 @@ describe('Settings Loading and Merging', () => {
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.system.settings).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.user.settings).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.workspace.settings).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged).toEqual({
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
ui: {
theme: 'system-theme',
},
@@ -361,6 +386,7 @@ describe('Settings Loading and Merging', () => {
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.merged).toEqual({
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
ui: {
theme: 'legacy-dark',
},
@@ -413,6 +439,132 @@ describe('Settings Loading and Merging', () => {
expect((settings.merged as TestSettings)['allowedTools']).toBeUndefined();
});
it('should add version field to migrated settings file', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const legacySettingsContent = {
theme: 'dark',
model: 'qwen-coder',
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(legacySettingsContent);
return '{}';
},
);
loadSettings(MOCK_WORKSPACE_DIR);
// Verify that fs.writeFileSync was called with migrated settings including version
expect(fs.writeFileSync).toHaveBeenCalled();
const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
const writtenContent = JSON.parse(writeCall[1] as string);
expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
});
it('should not re-migrate settings that have version field', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const migratedSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
ui: {
theme: 'dark',
},
model: {
name: 'qwen-coder',
},
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(migratedSettingsContent);
return '{}';
},
);
loadSettings(MOCK_WORKSPACE_DIR);
// Verify that fs.renameSync and fs.writeFileSync were NOT called
// (because no migration was needed)
expect(fs.renameSync).not.toHaveBeenCalled();
expect(fs.writeFileSync).not.toHaveBeenCalled();
});
it('should add version field to V2 settings without version and write to disk', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
// V2 format but no version field
const v2SettingsWithoutVersion = {
ui: {
theme: 'dark',
},
model: {
name: 'qwen-coder',
},
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(v2SettingsWithoutVersion);
return '{}';
},
);
loadSettings(MOCK_WORKSPACE_DIR);
// Verify that fs.writeFileSync was called (to add version)
// but NOT fs.renameSync (no backup needed, just adding version)
expect(fs.renameSync).not.toHaveBeenCalled();
expect(fs.writeFileSync).toHaveBeenCalledTimes(1);
const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
const writtenPath = writeCall[0];
const writtenContent = JSON.parse(writeCall[1] as string);
expect(writtenPath).toBe(USER_SETTINGS_PATH);
expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
expect(writtenContent.ui?.theme).toBe('dark');
expect(writtenContent.model?.name).toBe('qwen-coder');
});
it('should correctly handle partially migrated settings without version field', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
// Edge case: model already in V2 format (object), but autoAccept in V1 format
const partiallyMigratedContent = {
model: {
name: 'qwen-coder',
},
autoAccept: false, // V1 key
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(partiallyMigratedContent);
return '{}';
},
);
loadSettings(MOCK_WORKSPACE_DIR);
// Verify that the migrated settings preserve the model object correctly
expect(fs.writeFileSync).toHaveBeenCalled();
const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
const writtenContent = JSON.parse(writeCall[1] as string);
// Model should remain as an object, not double-nested
expect(writtenContent.model).toEqual({ name: 'qwen-coder' });
// autoAccept should be migrated to tools.autoAccept
expect(writtenContent.tools?.autoAccept).toBe(false);
// Version field should be added
expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
});
it('should correctly merge and migrate legacy array properties from multiple scopes', () => {
(mockFsExistsSync as Mock).mockReturnValue(true);
const legacyUserSettings = {
@@ -515,11 +667,24 @@ describe('Settings Loading and Merging', () => {
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.systemDefaults.settings).toEqual(systemDefaultsContent);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.systemDefaults.settings).toEqual({
...systemDefaultsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.system.settings).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.user.settings).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.workspace.settings).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged).toEqual({
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
context: {
fileName: 'WORKSPACE_CONTEXT.md',
includeDirectories: [
@@ -866,8 +1031,14 @@ describe('Settings Loading and Merging', () => {
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.user.settings).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.workspace.settings).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged.mcpServers).toEqual({
'user-server': {
command: 'user-command',
@@ -1696,9 +1867,13 @@ describe('Settings Loading and Merging', () => {
'utf-8',
);
expect(settings.system.path).toBe(MOCK_ENV_SYSTEM_SETTINGS_PATH);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.system.settings).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
});
});
@@ -2248,6 +2423,44 @@ describe('Settings Loading and Merging', () => {
customWittyPhrases: ['test phrase'],
});
});
it('should remove version field when migrating to V1', () => {
const v2Settings = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
ui: {
theme: 'dark',
},
model: {
name: 'qwen-coder',
},
};
const v1Settings = migrateSettingsToV1(v2Settings);
// Version field should not be present in V1 settings
expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined();
// Other fields should be properly migrated
expect(v1Settings).toEqual({
theme: 'dark',
model: 'qwen-coder',
});
});
it('should handle version field in unrecognized properties', () => {
const v2Settings = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
general: {
vimMode: true,
},
someUnrecognizedKey: 'value',
};
const v1Settings = migrateSettingsToV1(v2Settings);
// Version field should be filtered out
expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined();
// Unrecognized keys should be preserved
expect(v1Settings['someUnrecognizedKey']).toBe('value');
expect(v1Settings['vimMode']).toBe(true);
});
});
describe('loadEnvironment', () => {
@@ -2368,6 +2581,73 @@ describe('Settings Loading and Merging', () => {
};
expect(needsMigration(settings)).toBe(false);
});
describe('with version field', () => {
it('should return false when version field indicates current or newer version', () => {
const settingsWithVersion = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
theme: 'dark', // Even though this is a V1 key, version field takes precedence
};
expect(needsMigration(settingsWithVersion)).toBe(false);
});
it('should return false when version field indicates a newer version', () => {
const settingsWithNewerVersion = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION + 1,
theme: 'dark',
};
expect(needsMigration(settingsWithNewerVersion)).toBe(false);
});
it('should return true when version field indicates an older version', () => {
const settingsWithOldVersion = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION - 1,
theme: 'dark',
};
expect(needsMigration(settingsWithOldVersion)).toBe(true);
});
it('should use fallback logic when version field is not a number', () => {
const settingsWithInvalidVersion = {
[SETTINGS_VERSION_KEY]: 'not-a-number',
theme: 'dark',
};
expect(needsMigration(settingsWithInvalidVersion)).toBe(true);
});
it('should use fallback logic when version field is missing', () => {
const settingsWithoutVersion = {
theme: 'dark',
};
expect(needsMigration(settingsWithoutVersion)).toBe(true);
});
});
describe('edge case: partially migrated settings', () => {
it('should return true for partially migrated settings without version field', () => {
// This simulates the dangerous edge case: model already in V2 format,
// but other fields in V1 format
const partiallyMigrated = {
model: {
name: 'qwen-coder',
},
autoAccept: false, // V1 key
};
expect(needsMigration(partiallyMigrated)).toBe(true);
});
it('should return false for partially migrated settings WITH version field', () => {
// With version field, we trust that it's been properly migrated
const partiallyMigratedWithVersion = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
model: {
name: 'qwen-coder',
},
autoAccept: false, // This would look like V1 but version says it's V2
};
expect(needsMigration(partiallyMigratedWithVersion)).toBe(false);
});
});
});
describe('migrateDeprecatedSettings', () => {

View File

@@ -56,6 +56,10 @@ export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE'];
const MIGRATE_V2_OVERWRITE = true;
// Settings version to track migration state
export const SETTINGS_VERSION = 2;
export const SETTINGS_VERSION_KEY = '$version';
const MIGRATION_MAP: Record<string, string> = {
accessibility: 'ui.accessibility',
allowedTools: 'tools.allowed',
@@ -216,8 +220,16 @@ function setNestedProperty(
}
export function needsMigration(settings: Record<string, unknown>): boolean {
// A file needs migration if it contains any top-level key that is moved to a
// nested location in V2.
// Check the version field first - if it is present and at least the current version, no migration is needed
if (SETTINGS_VERSION_KEY in settings) {
const version = settings[SETTINGS_VERSION_KEY];
if (typeof version === 'number' && version >= SETTINGS_VERSION) {
return false;
}
}
// Fallback to legacy detection: A file needs migration if it contains any
// top-level key that is moved to a nested location in V2.
const hasV1Keys = Object.entries(MIGRATION_MAP).some(([v1Key, v2Path]) => {
if (v1Key === v2Path || !(v1Key in settings)) {
return false;
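
Since the version check short-circuits the key scan, the observable behavior lines up with the `needsMigration` tests elsewhere in this change; a sketch:

```ts
import { needsMigration, SETTINGS_VERSION, SETTINGS_VERSION_KEY } from './settings.js';

needsMigration({ [SETTINGS_VERSION_KEY]: SETTINGS_VERSION, theme: 'dark' });     // false: version field wins
needsMigration({ [SETTINGS_VERSION_KEY]: SETTINGS_VERSION - 1, theme: 'dark' }); // true: older version
needsMigration({ [SETTINGS_VERSION_KEY]: 'not-a-number', theme: 'dark' });       // true: non-numeric, falls back to key scan
needsMigration({ theme: 'dark' });                                               // true: legacy V1 key, no version field
```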
@@ -250,6 +262,21 @@ function migrateSettingsToV2(
for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) {
if (flatKeys.has(oldKey)) {
// Safety check: If this key is a V2 container (like 'model') and it's
// already an object, it's likely already in V2 format. Skip migration
// to prevent double-nesting (e.g., model.name.name).
if (
KNOWN_V2_CONTAINERS.has(oldKey) &&
typeof flatSettings[oldKey] === 'object' &&
flatSettings[oldKey] !== null &&
!Array.isArray(flatSettings[oldKey])
) {
// This is already a V2 container, carry it over as-is
v2Settings[oldKey] = flatSettings[oldKey];
flatKeys.delete(oldKey);
continue;
}
setNestedProperty(v2Settings, newPath, flatSettings[oldKey]);
flatKeys.delete(oldKey);
}
@@ -287,6 +314,9 @@ function migrateSettingsToV2(
}
}
// Set version field to indicate this is a V2 settings file
v2Settings[SETTINGS_VERSION_KEY] = SETTINGS_VERSION;
return v2Settings;
}
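
This container guard is what the "partially migrated" test earlier in this change exercises; sketched with the same shapes:

```ts
// Input mixes formats: 'model' is already a V2 container, 'autoAccept' is still a flat V1 key.
const input = { model: { name: 'qwen-coder' }, autoAccept: false };
// Per the test expectations, migrateSettingsToV2 (module-private) yields:
//   {
//     "$version": 2,
//     "model": { "name": "qwen-coder" },  // carried over as-is, not re-nested
//     "tools": { "autoAccept": false }    // V1 key moved to its V2 path
//   }
```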
@@ -336,6 +366,11 @@ export function migrateSettingsToV1(
// Carry over any unrecognized keys
for (const remainingKey of v2Keys) {
// Skip the version field - it's only for V2 format
if (remainingKey === SETTINGS_VERSION_KEY) {
continue;
}
const value = v2Settings[remainingKey];
if (value === undefined) {
continue;
@@ -621,6 +656,22 @@ export function loadSettings(
}
settingsObject = migratedSettings;
}
} else if (!(SETTINGS_VERSION_KEY in settingsObject)) {
// No migration needed, but the version field is missing - add it so future loads can skip the legacy-key scan
settingsObject[SETTINGS_VERSION_KEY] = SETTINGS_VERSION;
if (MIGRATE_V2_OVERWRITE) {
try {
fs.writeFileSync(
filePath,
JSON.stringify(settingsObject, null, 2),
'utf-8',
);
} catch (e) {
console.error(
`Error adding version to settings file: ${getErrorMessage(e)}`,
);
}
}
}
return { settings: settingsObject as Settings, rawJson: content };
}
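
The practical effect is a one-time stamp: an already-V2 but unversioned file is rewritten once with the version field, and later loads skip both the migration check and the write. Illustrative file contents, with shapes taken from the tests above:

```ts
// Before the first load (V2 shape, no version field):
//   { "ui": { "theme": "dark" } }
// After one loadSettings() pass with MIGRATE_V2_OVERWRITE enabled:
//   { "ui": { "theme": "dark" }, "$version": 2 }
// Subsequent loads see the stamp, needsMigration() returns false, and no write occurs.
```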

View File

@@ -847,6 +847,16 @@ const SETTINGS_SCHEMA = {
'Use ripgrep for file content search instead of the fallback implementation. Provides faster search performance.',
showInDialog: true,
},
useBuiltinRipgrep: {
type: 'boolean',
label: 'Use Builtin Ripgrep',
category: 'Tools',
requiresRestart: false,
default: true,
description:
'Use the bundled ripgrep binary. When set to false, the system-level "rg" command will be used instead. This setting is only effective when useRipgrep is true.',
showInDialog: true,
},
enableToolOutputTruncation: {
type: 'boolean',
label: 'Enable Tool Output Truncation',

View File

@@ -389,7 +389,11 @@ export async function main() {
let input = config.getQuestion();
const startupWarnings = [
...(await getStartupWarnings()),
...(await getUserStartupWarnings()),
...(await getUserStartupWarnings({
workspaceRoot: process.cwd(),
useRipgrep: settings.merged.tools?.useRipgrep ?? true,
useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
})),
];
// Render UI, passing necessary config values. Check that there is no command line question.

View File

@@ -27,7 +27,6 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
getDetectedIdeDisplayName: vi.fn().mockReturnValue('VSCode'),
}),
},
sessionId: 'test-session-id',
};
});
vi.mock('node:process', () => ({
@@ -59,6 +58,7 @@ describe('bugCommand', () => {
getModel: () => 'qwen3-coder-plus',
getBugCommand: () => undefined,
getIdeMode: () => true,
getSessionId: () => 'test-session-id',
},
settings: {
merged: {
@@ -102,6 +102,7 @@ describe('bugCommand', () => {
getModel: () => 'qwen3-coder-plus',
getBugCommand: () => ({ urlTemplate: customTemplate }),
getIdeMode: () => true,
getSessionId: () => 'test-session-id',
},
settings: {
merged: {
@@ -143,6 +144,7 @@ describe('bugCommand', () => {
getModel: () => 'qwen3-coder-plus',
getBugCommand: () => undefined,
getIdeMode: () => true,
getSessionId: () => 'test-session-id',
getContentGeneratorConfig: () => ({
baseUrl: 'https://api.openai.com/v1',
}),

View File

@@ -15,7 +15,7 @@ import { MessageType } from '../types.js';
import { GIT_COMMIT_INFO } from '../../generated/git-commit.js';
import { formatMemoryUsage } from '../utils/formatters.js';
import { getCliVersion } from '../../utils/version.js';
import { IdeClient, sessionId, AuthType } from '@qwen-code/qwen-code-core';
import { IdeClient, AuthType } from '@qwen-code/qwen-code-core';
export const bugCommand: SlashCommand = {
name: 'bug',
@@ -48,7 +48,7 @@ export const bugCommand: SlashCommand = {
let info = `
* **CLI Version:** ${cliVersion}
* **Git Commit:** ${GIT_COMMIT_INFO}
* **Session ID:** ${sessionId}
* **Session ID:** ${config?.getSessionId() || 'unknown'}
* **Operating System:** ${osVersion}
* **Sandbox Environment:** ${sandboxEnv}
* **Auth Type:** ${selectedAuthType}`;

View File

@@ -23,7 +23,7 @@ export const ToolsList: React.FC<ToolsListProps> = ({
}) => (
<Box flexDirection="column" marginBottom={1}>
<Text bold color={theme.text.primary}>
Available Gemini CLI tools:
Available Qwen Code CLI tools:
</Text>
<Box height={1} />
{tools.length > 0 ? (

View File

@@ -1,7 +1,7 @@
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
exports[`<ToolsList /> > renders correctly with descriptions 1`] = `
"Available Gemini CLI tools:
"Available Qwen Code CLI tools:
- Test Tool One (test-tool-one)
This is the first test tool.
@@ -16,14 +16,14 @@ exports[`<ToolsList /> > renders correctly with descriptions 1`] = `
`;
exports[`<ToolsList /> > renders correctly with no tools 1`] = `
"Available Gemini CLI tools:
"Available Qwen Code CLI tools:
No tools available
"
`;
exports[`<ToolsList /> > renders correctly without descriptions 1`] = `
"Available Gemini CLI tools:
"Available Qwen Code CLI tools:
- Test Tool One
- Test Tool Two

View File

@@ -22,12 +22,22 @@ vi.mock('os', async (importOriginal) => {
describe('getUserStartupWarnings', () => {
let testRootDir: string;
let homeDir: string;
let startupOptions: {
workspaceRoot: string;
useRipgrep: boolean;
useBuiltinRipgrep: boolean;
};
beforeEach(async () => {
testRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'warnings-test-'));
homeDir = path.join(testRootDir, 'home');
await fs.mkdir(homeDir, { recursive: true });
vi.mocked(os.homedir).mockReturnValue(homeDir);
startupOptions = {
workspaceRoot: testRootDir,
useRipgrep: true,
useBuiltinRipgrep: true,
};
});
afterEach(async () => {
@@ -37,7 +47,10 @@ describe('getUserStartupWarnings', () => {
describe('home directory check', () => {
it('should return a warning when running in home directory', async () => {
const warnings = await getUserStartupWarnings(homeDir);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: homeDir,
});
expect(warnings).toContainEqual(
expect.stringContaining('home directory'),
);
@@ -46,7 +59,10 @@ describe('getUserStartupWarnings', () => {
it('should not return a warning when running in a project directory', async () => {
const projectDir = path.join(testRootDir, 'project');
await fs.mkdir(projectDir);
const warnings = await getUserStartupWarnings(projectDir);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: projectDir,
});
expect(warnings).not.toContainEqual(
expect.stringContaining('home directory'),
);
@@ -56,7 +72,10 @@ describe('getUserStartupWarnings', () => {
describe('root directory check', () => {
it('should return a warning when running in a root directory', async () => {
const rootDir = path.parse(testRootDir).root;
const warnings = await getUserStartupWarnings(rootDir);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: rootDir,
});
expect(warnings).toContainEqual(
expect.stringContaining('root directory'),
);
@@ -68,7 +87,10 @@ describe('getUserStartupWarnings', () => {
it('should not return a warning when running in a non-root directory', async () => {
const projectDir = path.join(testRootDir, 'project');
await fs.mkdir(projectDir);
const warnings = await getUserStartupWarnings(projectDir);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: projectDir,
});
expect(warnings).not.toContainEqual(
expect.stringContaining('root directory'),
);
@@ -78,7 +100,10 @@ describe('getUserStartupWarnings', () => {
describe('error handling', () => {
it('should handle errors when checking directory', async () => {
const nonExistentPath = path.join(testRootDir, 'non-existent');
const warnings = await getUserStartupWarnings(nonExistentPath);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: nonExistentPath,
});
const expectedWarning =
'Could not verify the current directory due to a file system error.';
expect(warnings).toEqual([expectedWarning, expectedWarning]);

View File

@@ -7,19 +7,26 @@
import fs from 'node:fs/promises';
import * as os from 'node:os';
import path from 'node:path';
import { canUseRipgrep } from '@qwen-code/qwen-code-core';
type WarningCheckOptions = {
workspaceRoot: string;
useRipgrep: boolean;
useBuiltinRipgrep: boolean;
};
type WarningCheck = {
id: string;
check: (workspaceRoot: string) => Promise<string | null>;
check: (options: WarningCheckOptions) => Promise<string | null>;
};
// Individual warning checks
const homeDirectoryCheck: WarningCheck = {
id: 'home-directory',
check: async (workspaceRoot: string) => {
check: async (options: WarningCheckOptions) => {
try {
const [workspaceRealPath, homeRealPath] = await Promise.all([
fs.realpath(workspaceRoot),
fs.realpath(options.workspaceRoot),
fs.realpath(os.homedir()),
]);
@@ -35,9 +42,9 @@ const homeDirectoryCheck: WarningCheck = {
const rootDirectoryCheck: WarningCheck = {
id: 'root-directory',
check: async (workspaceRoot: string) => {
check: async (options: WarningCheckOptions) => {
try {
const workspaceRealPath = await fs.realpath(workspaceRoot);
const workspaceRealPath = await fs.realpath(options.workspaceRoot);
const errorMessage =
'Warning: You are running Qwen Code in the root directory. Your entire folder structure will be used for context. It is strongly recommended to run in a project-specific directory.';
@@ -53,17 +60,33 @@ const rootDirectoryCheck: WarningCheck = {
},
};
const ripgrepAvailabilityCheck: WarningCheck = {
id: 'ripgrep-availability',
check: async (options: WarningCheckOptions) => {
if (!options.useRipgrep) {
return null;
}
const isAvailable = await canUseRipgrep(options.useBuiltinRipgrep);
if (!isAvailable) {
return 'Ripgrep not available: Please install ripgrep globally to enable faster file content search. Falling back to built-in grep.';
}
return null;
},
};
// All warning checks
const WARNING_CHECKS: readonly WarningCheck[] = [
homeDirectoryCheck,
rootDirectoryCheck,
ripgrepAvailabilityCheck,
];
export async function getUserStartupWarnings(
workspaceRoot: string = process.cwd(),
options: WarningCheckOptions,
): Promise<string[]> {
const results = await Promise.all(
WARNING_CHECKS.map((check) => check.check(workspaceRoot)),
WARNING_CHECKS.map((check) => check.check(options)),
);
return results.filter((msg) => msg !== null);
}
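
With the options object, call sites now pass the workspace root and ripgrep flags explicitly; a minimal usage sketch mirroring the `main()` call site in this change:

```ts
const warnings = await getUserStartupWarnings({
  workspaceRoot: process.cwd(),
  useRipgrep: true,
  useBuiltinRipgrep: false, // probe for a system-level `rg` instead of the bundled binary
});
warnings.forEach((warning) => console.warn(warning));
```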

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.1.2",
"version": "0.1.3",
"description": "Qwen Code Core",
"repository": {
"type": "git",

View File

@@ -154,6 +154,11 @@ vi.mock('../core/tokenLimits.js', () => ({
describe('Server Config (config.ts)', () => {
const MODEL = 'qwen3-coder-plus';
// Default mock for canUseRipgrep to return true (tests that care about ripgrep will override this)
beforeEach(() => {
vi.mocked(canUseRipgrep).mockResolvedValue(true);
});
const SANDBOX: SandboxConfig = {
command: 'docker',
image: 'qwen-code-sandbox',
@@ -578,6 +583,40 @@ describe('Server Config (config.ts)', () => {
});
});
describe('UseBuiltinRipgrep Configuration', () => {
it('should default useBuiltinRipgrep to true when not provided', () => {
const config = new Config(baseParams);
expect(config.getUseBuiltinRipgrep()).toBe(true);
});
it('should set useBuiltinRipgrep to false when provided as false', () => {
const paramsWithBuiltinRipgrep: ConfigParameters = {
...baseParams,
useBuiltinRipgrep: false,
};
const config = new Config(paramsWithBuiltinRipgrep);
expect(config.getUseBuiltinRipgrep()).toBe(false);
});
it('should set useBuiltinRipgrep to true when explicitly provided as true', () => {
const paramsWithBuiltinRipgrep: ConfigParameters = {
...baseParams,
useBuiltinRipgrep: true,
};
const config = new Config(paramsWithBuiltinRipgrep);
expect(config.getUseBuiltinRipgrep()).toBe(true);
});
it('should default useBuiltinRipgrep to true when undefined', () => {
const paramsWithUndefinedBuiltinRipgrep: ConfigParameters = {
...baseParams,
useBuiltinRipgrep: undefined,
};
const config = new Config(paramsWithUndefinedBuiltinRipgrep);
expect(config.getUseBuiltinRipgrep()).toBe(true);
});
});
describe('createToolRegistry', () => {
it('should register a tool if coreTools contains an argument-specific pattern', async () => {
const params: ConfigParameters = {
@@ -825,10 +864,60 @@ describe('setApprovalMode with folder trust', () => {
expect(wasRipGrepRegistered).toBe(true);
expect(wasGrepRegistered).toBe(false);
expect(logRipgrepFallback).not.toHaveBeenCalled();
expect(canUseRipgrep).toHaveBeenCalledWith(true);
});
it('should register GrepTool as a fallback when useRipgrep is true but it is not available', async () => {
it('should register RipGrepTool with system ripgrep when useBuiltinRipgrep is false', async () => {
(canUseRipgrep as Mock).mockResolvedValue(true);
const config = new Config({
...baseParams,
useRipgrep: true,
useBuiltinRipgrep: false,
});
await config.initialize();
const calls = (ToolRegistry.prototype.registerTool as Mock).mock.calls;
const wasRipGrepRegistered = calls.some(
(call) => call[0] instanceof vi.mocked(RipGrepTool),
);
const wasGrepRegistered = calls.some(
(call) => call[0] instanceof vi.mocked(GrepTool),
);
expect(wasRipGrepRegistered).toBe(true);
expect(wasGrepRegistered).toBe(false);
expect(canUseRipgrep).toHaveBeenCalledWith(false);
});
it('should fall back to GrepTool and log error when useBuiltinRipgrep is false but system ripgrep is not available', async () => {
(canUseRipgrep as Mock).mockResolvedValue(false);
const config = new Config({
...baseParams,
useRipgrep: true,
useBuiltinRipgrep: false,
});
await config.initialize();
const calls = (ToolRegistry.prototype.registerTool as Mock).mock.calls;
const wasRipGrepRegistered = calls.some(
(call) => call[0] instanceof vi.mocked(RipGrepTool),
);
const wasGrepRegistered = calls.some(
(call) => call[0] instanceof vi.mocked(GrepTool),
);
expect(wasRipGrepRegistered).toBe(false);
expect(wasGrepRegistered).toBe(true);
expect(canUseRipgrep).toHaveBeenCalledWith(false);
expect(logRipgrepFallback).toHaveBeenCalledWith(
config,
expect.any(RipgrepFallbackEvent),
);
const event = (logRipgrepFallback as Mock).mock.calls[0][1];
expect(event.error).toContain('Ripgrep is not available');
});
it('should fall back to GrepTool and log error when useRipgrep is true and builtin ripgrep is not available', async () => {
(canUseRipgrep as Mock).mockResolvedValue(false);
const config = new Config({ ...baseParams, useRipgrep: true });
await config.initialize();
@@ -843,15 +932,16 @@ describe('setApprovalMode with folder trust', () => {
expect(wasRipGrepRegistered).toBe(false);
expect(wasGrepRegistered).toBe(true);
expect(canUseRipgrep).toHaveBeenCalledWith(true);
expect(logRipgrepFallback).toHaveBeenCalledWith(
config,
expect.any(RipgrepFallbackEvent),
);
const event = (logRipgrepFallback as Mock).mock.calls[0][1];
expect(event.error).toBeUndefined();
expect(event.error).toContain('Ripgrep is not available');
});
it('should register GrepTool as a fallback when canUseRipgrep throws an error', async () => {
it('should fall back to GrepTool and log error when canUseRipgrep throws an error', async () => {
const error = new Error('ripGrep check failed');
(canUseRipgrep as Mock).mockRejectedValue(error);
const config = new Config({ ...baseParams, useRipgrep: true });
@@ -890,7 +980,6 @@ describe('setApprovalMode with folder trust', () => {
expect(wasRipGrepRegistered).toBe(false);
expect(wasGrepRegistered).toBe(true);
expect(canUseRipgrep).not.toHaveBeenCalled();
expect(logRipgrepFallback).not.toHaveBeenCalled();
});
});
});

View File

@@ -268,6 +268,7 @@ export interface ConfigParameters {
interactive?: boolean;
trustedFolder?: boolean;
useRipgrep?: boolean;
useBuiltinRipgrep?: boolean;
shouldUseNodePtyShell?: boolean;
skipNextSpeakerCheck?: boolean;
shellExecutionConfig?: ShellExecutionConfig;
@@ -380,6 +381,7 @@ export class Config {
private readonly interactive: boolean;
private readonly trustedFolder: boolean | undefined;
private readonly useRipgrep: boolean;
private readonly useBuiltinRipgrep: boolean;
private readonly shouldUseNodePtyShell: boolean;
private readonly skipNextSpeakerCheck: boolean;
private shellExecutionConfig: ShellExecutionConfig;
@@ -482,13 +484,12 @@ export class Config {
this.chatCompression = params.chatCompression;
this.interactive = params.interactive ?? false;
this.trustedFolder = params.trustedFolder;
this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? false;
this.skipLoopDetection = params.skipLoopDetection ?? false;
// Web search
this.tavilyApiKey = params.tavilyApiKey;
this.useRipgrep = params.useRipgrep ?? true;
this.useBuiltinRipgrep = params.useBuiltinRipgrep ?? true;
this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? true;
this.shellExecutionConfig = {
@@ -1023,6 +1024,10 @@ export class Config {
return this.useRipgrep;
}
getUseBuiltinRipgrep(): boolean {
return this.useBuiltinRipgrep;
}
getShouldUseNodePtyShell(): boolean {
return this.shouldUseNodePtyShell;
}
@@ -1148,13 +1153,18 @@ export class Config {
let useRipgrep = false;
let errorString: undefined | string = undefined;
try {
useRipgrep = await canUseRipgrep();
useRipgrep = await canUseRipgrep(this.getUseBuiltinRipgrep());
} catch (error: unknown) {
errorString = String(error);
}
if (useRipgrep) {
registerCoreTool(RipGrepTool, this);
} else {
errorString =
errorString ||
'Ripgrep is not available. Please install ripgrep globally.';
// Log for telemetry
logRipgrepFallback(this, new RipgrepFallbackEvent(errorString));
registerCoreTool(GrepTool, this);
}
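
Restating the registration outcome as a standalone sketch (not the actual `Config` method; the names come from this change):

```ts
async function selectSearchTool(
  useRipgrep: boolean,
  useBuiltinRipgrep: boolean,
  canUseRipgrep: (useBuiltin: boolean) => Promise<boolean>,
): Promise<'RipGrepTool' | 'GrepTool'> {
  if (!useRipgrep) {
    return 'GrepTool'; // availability is never probed; no fallback event is logged
  }
  try {
    if (await canUseRipgrep(useBuiltinRipgrep)) {
      return 'RipGrepTool'; // bundled binary or system `rg`, per useBuiltinRipgrep
    }
  } catch {
    // a failed probe is treated the same as unavailability
  }
  return 'GrepTool'; // the real code logs a RipgrepFallbackEvent on this path
}
```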

View File

@@ -69,7 +69,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -288,7 +288,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -517,7 +517,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -731,7 +731,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -945,7 +945,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -1159,7 +1159,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -1373,7 +1373,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -1587,7 +1587,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -1801,7 +1801,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and, if so, how.
@@ -2015,7 +2015,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and, if so, how.
@@ -2252,7 +2252,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and, if so, how.
@@ -2549,7 +2549,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and, if so, how.
@@ -2786,7 +2786,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and, if so, how.
@@ -3079,7 +3079,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and, if so, how.
@@ -3293,7 +3293,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and, if so, how.

View File

@@ -16,11 +16,11 @@ import {
import type { Content, GenerateContentResponse, Part } from '@google/genai';
import {
findCompressSplitPoint,
isThinkingDefault,
isThinkingSupported,
GeminiClient,
} from './client.js';
import { findCompressSplitPoint } from '../services/chatCompressionService.js';
import {
AuthType,
type ContentGenerator,
@@ -42,7 +42,6 @@ import { setSimulate429 } from '../utils/testUtils.js';
import { tokenLimit } from './tokenLimits.js';
import { ideContextStore } from '../ide/ideContext.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
import { QwenLogger } from '../telemetry/index.js';
// Mock fs module to prevent actual file system operations during tests
const mockFileSystem = new Map<string, string>();
@@ -101,6 +100,22 @@ vi.mock('../utils/errorReporting', () => ({ reportError: vi.fn() }));
vi.mock('../utils/nextSpeakerChecker', () => ({
checkNextSpeaker: vi.fn().mockResolvedValue(null),
}));
vi.mock('../utils/environmentContext', () => ({
getEnvironmentContext: vi
.fn()
.mockResolvedValue([{ text: 'Mocked env context' }]),
getInitialChatHistory: vi.fn(async (_config, extraHistory) => [
{
role: 'user',
parts: [{ text: 'Mocked env context' }],
},
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the context!' }],
},
...(extraHistory ?? []),
]),
}));
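For orientation, the real getInitialChatHistory implementation is not shown in this diff. Based on the mock above and the call sites later in this commit, a minimal sketch of the helper could look like the following (shape inferred from the diff, not verbatim from the source):

async function getInitialChatHistory(
  config: Config,
  extraHistory?: Content[],
): Promise<Content[]> {
  // The environment context becomes the opening user turn,
  // acknowledged by a canned model turn, followed by any extra history.
  const envParts = await getEnvironmentContext(config);
  return [
    { role: 'user', parts: envParts },
    { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
    ...(extraHistory ?? []),
  ];
}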
vi.mock('../utils/generateContentResponseUtilities', () => ({
getResponseText: (result: GenerateContentResponse) =>
result.candidates?.[0]?.content?.parts?.map((part) => part.text).join('') ||
@@ -136,6 +151,10 @@ vi.mock('../ide/ideContext.js');
vi.mock('../telemetry/uiTelemetry.js', () => ({
uiTelemetryService: mockUiTelemetryService,
}));
vi.mock('../telemetry/loggers.js', () => ({
logChatCompression: vi.fn(),
logNextSpeakerCheck: vi.fn(),
}));
/**
* Array.fromAsync ponyfill, which will be available in ES2024.
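For reference, a minimal sketch of such a ponyfill (the function body falls outside this hunk, so this is an assumed implementation, not the file's verbatim code):

async function arrayFromAsync<T>(iterable: AsyncIterable<T>): Promise<T[]> {
  const result: T[] = [];
  // Collect every value yielded by the async iterable, preserving order.
  for await (const item of iterable) {
    result.push(item);
  }
  return result;
}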
@@ -619,7 +638,8 @@ describe('Gemini Client (client.ts)', () => {
});
it('logs a telemetry event when compressing', async () => {
vi.spyOn(QwenLogger.prototype, 'logChatCompressionEvent');
const { logChatCompression } = await import('../telemetry/loggers.js');
vi.mocked(logChatCompression).mockClear();
const MOCKED_TOKEN_LIMIT = 1000;
const MOCKED_CONTEXT_PERCENTAGE_THRESHOLD = 0.5;
@@ -627,19 +647,37 @@ describe('Gemini Client (client.ts)', () => {
vi.spyOn(client['config'], 'getChatCompression').mockReturnValue({
contextPercentageThreshold: MOCKED_CONTEXT_PERCENTAGE_THRESHOLD,
});
const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
// Need multiple history items so there's something to compress
const history = [
{ role: 'user', parts: [{ text: '...history 1...' }] },
{ role: 'model', parts: [{ text: '...history 2...' }] },
{ role: 'user', parts: [{ text: '...history 3...' }] },
{ role: 'model', parts: [{ text: '...history 4...' }] },
];
mockGetHistory.mockReturnValue(history);
// Token count needs to be ABOVE the threshold to trigger compression
const originalTokenCount =
MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD;
MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD + 1;
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
originalTokenCount,
);
// We need to control the estimated new token count.
// We mock startChat to return a chat with a known history.
// Mock the summary response from the chat
const summaryText = 'This is a summary.';
mockGenerateContentFn.mockResolvedValue({
candidates: [
{
content: {
role: 'model',
parts: [{ text: summaryText }],
},
},
],
} as unknown as GenerateContentResponse);
// Mock startChat to complete the compression flow
const splitPoint = findCompressSplitPoint(history, 0.7);
const historyToKeep = history.slice(splitPoint);
const newCompressedHistory: Content[] = [
@@ -659,52 +697,36 @@ describe('Gemini Client (client.ts)', () => {
.fn()
.mockResolvedValue(mockNewChat as GeminiChat);
const totalChars = newCompressedHistory.reduce(
(total, content) => total + JSON.stringify(content).length,
0,
);
const newTokenCount = Math.floor(totalChars / 4);
// Mock the summary response from the chat
mockGenerateContentFn.mockResolvedValue({
candidates: [
{
content: {
role: 'model',
parts: [{ text: summaryText }],
},
},
],
} as unknown as GenerateContentResponse);
await client.tryCompressChat('prompt-id-3', false);
expect(QwenLogger.prototype.logChatCompressionEvent).toHaveBeenCalledWith(
expect(logChatCompression).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
tokens_before: originalTokenCount,
tokens_after: newTokenCount,
}),
);
expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalledWith(
newTokenCount,
);
expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalledTimes(
1,
);
expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalled();
});
it('should trigger summarization if token count is at threshold with contextPercentageThreshold setting', async () => {
it('should trigger summarization if token count is above threshold with contextPercentageThreshold setting', async () => {
const MOCKED_TOKEN_LIMIT = 1000;
const MOCKED_CONTEXT_PERCENTAGE_THRESHOLD = 0.5;
vi.mocked(tokenLimit).mockReturnValue(MOCKED_TOKEN_LIMIT);
vi.spyOn(client['config'], 'getChatCompression').mockReturnValue({
contextPercentageThreshold: MOCKED_CONTEXT_PERCENTAGE_THRESHOLD,
});
const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
// Need multiple history items so there's something to compress
const history = [
{ role: 'user', parts: [{ text: '...history 1...' }] },
{ role: 'model', parts: [{ text: '...history 2...' }] },
{ role: 'user', parts: [{ text: '...history 3...' }] },
{ role: 'model', parts: [{ text: '...history 4...' }] },
];
mockGetHistory.mockReturnValue(history);
// Token count needs to be ABOVE the threshold to trigger compression
const originalTokenCount =
MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD;
MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD + 1;
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
originalTokenCount,
@@ -864,7 +886,13 @@ describe('Gemini Client (client.ts)', () => {
});
it('should always trigger summarization when force is true, regardless of token count', async () => {
const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
// Need multiple history items so there's something to compress
const history = [
{ role: 'user', parts: [{ text: '...history 1...' }] },
{ role: 'model', parts: [{ text: '...history 2...' }] },
{ role: 'user', parts: [{ text: '...history 3...' }] },
{ role: 'model', parts: [{ text: '...history 4...' }] },
];
mockGetHistory.mockReturnValue(history);
const originalTokenCount = 100; // Well below threshold, but > estimated new count

View File

@@ -25,13 +25,11 @@ import {
import type { ContentGenerator } from './contentGenerator.js';
import { GeminiChat } from './geminiChat.js';
import {
getCompressionPrompt,
getCoreSystemPrompt,
getCustomSystemPrompt,
getPlanModeSystemReminder,
getSubagentSystemReminder,
} from './prompts.js';
import { tokenLimit } from './tokenLimits.js';
import {
CompressionStatus,
GeminiEventType,
@@ -42,6 +40,11 @@ import {
// Services
import { type ChatRecordingService } from '../services/chatRecordingService.js';
import {
ChatCompressionService,
COMPRESSION_PRESERVE_THRESHOLD,
COMPRESSION_TOKEN_THRESHOLD,
} from '../services/chatCompressionService.js';
import { LoopDetectionService } from '../services/loopDetectionService.js';
// Tools
@@ -50,21 +53,18 @@ import { TaskTool } from '../tools/task.js';
// Telemetry
import {
NextSpeakerCheckEvent,
logChatCompression,
logNextSpeakerCheck,
makeChatCompressionEvent,
uiTelemetryService,
} from '../telemetry/index.js';
// Utilities
import {
getDirectoryContextString,
getEnvironmentContext,
getInitialChatHistory,
} from '../utils/environmentContext.js';
import { reportError } from '../utils/errorReporting.js';
import { getErrorMessage } from '../utils/errors.js';
import { checkNextSpeaker } from '../utils/nextSpeakerChecker.js';
import { flatMapTextParts, getResponseText } from '../utils/partUtils.js';
import { flatMapTextParts } from '../utils/partUtils.js';
import { retryWithBackoff } from '../utils/retry.js';
// IDE integration
@@ -85,68 +85,8 @@ export function isThinkingDefault(model: string) {
return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
}
/**
* Returns the index of the oldest item to keep when compressing. May return
* contents.length which indicates that everything should be compressed.
*
* Exported for testing purposes.
*/
export function findCompressSplitPoint(
contents: Content[],
fraction: number,
): number {
if (fraction <= 0 || fraction >= 1) {
throw new Error('Fraction must be between 0 and 1');
}
const charCounts = contents.map((content) => JSON.stringify(content).length);
const totalCharCount = charCounts.reduce((a, b) => a + b, 0);
const targetCharCount = totalCharCount * fraction;
let lastSplitPoint = 0; // 0 is always valid (compress nothing)
let cumulativeCharCount = 0;
for (let i = 0; i < contents.length; i++) {
const content = contents[i];
if (
content.role === 'user' &&
!content.parts?.some((part) => !!part.functionResponse)
) {
if (cumulativeCharCount >= targetCharCount) {
return i;
}
lastSplitPoint = i;
}
cumulativeCharCount += charCounts[i];
}
// We found no split points after targetCharCount.
// Check if it's safe to compress everything.
const lastContent = contents[contents.length - 1];
if (
lastContent?.role === 'model' &&
!lastContent?.parts?.some((part) => part.functionCall)
) {
return contents.length;
}
// Can't compress everything, so just compress at the last split point.
return lastSplitPoint;
}
const MAX_TURNS = 100;
/**
* Threshold for compression token count as a fraction of the model's token limit.
* If the chat history exceeds this threshold, it will be compressed.
*/
const COMPRESSION_TOKEN_THRESHOLD = 0.7;
/**
* The fraction of the latest chat history to keep. A value of 0.3
* means that only the last 30% of the chat history will be kept after compression.
*/
const COMPRESSION_PRESERVE_THRESHOLD = 0.3;
export class GeminiClient {
private chat?: GeminiChat;
private readonly generateContentConfig: GenerateContentConfig = {
@@ -243,23 +183,13 @@ export class GeminiClient {
async startChat(extraHistory?: Content[]): Promise<GeminiChat> {
this.forceFullIdeContext = true;
this.hasFailedCompressionAttempt = false;
const envParts = await getEnvironmentContext(this.config);
const toolRegistry = this.config.getToolRegistry();
const toolDeclarations = toolRegistry.getFunctionDeclarations();
const tools: Tool[] = [{ functionDeclarations: toolDeclarations }];
const history: Content[] = [
{
role: 'user',
parts: envParts,
},
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the context!' }],
},
...(extraHistory ?? []),
];
const history = await getInitialChatHistory(this.config, extraHistory);
try {
const userMemory = this.config.getUserMemory();
const model = this.config.getModel();
@@ -503,14 +433,15 @@ export class GeminiClient {
userMemory,
this.config.getModel(),
);
const environment = await getEnvironmentContext(this.config);
const initialHistory = await getInitialChatHistory(this.config);
// Create a mock request content to count total tokens
const mockRequestContent = [
{
role: 'system' as const,
parts: [{ text: systemPrompt }, ...environment],
parts: [{ text: systemPrompt }],
},
...initialHistory,
...currentHistory,
];
@@ -732,127 +663,37 @@ export class GeminiClient {
prompt_id: string,
force: boolean = false,
): Promise<ChatCompressionInfo> {
const model = this.config.getModel();
const compressionService = new ChatCompressionService();
const curatedHistory = this.getChat().getHistory(true);
const { newHistory, info } = await compressionService.compress(
this.getChat(),
prompt_id,
force,
this.config.getModel(),
this.config,
this.hasFailedCompressionAttempt,
);
// Regardless of `force`, don't do anything if the history is empty.
if (
curatedHistory.length === 0 ||
(this.hasFailedCompressionAttempt && !force)
// Handle compression result
if (info.compressionStatus === CompressionStatus.COMPRESSED) {
// Success: update chat with new compressed history
if (newHistory) {
this.chat = await this.startChat(newHistory);
this.forceFullIdeContext = true;
}
} else if (
info.compressionStatus ===
CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT ||
info.compressionStatus ===
CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY
) {
return {
originalTokenCount: 0,
newTokenCount: 0,
compressionStatus: CompressionStatus.NOOP,
};
}
const originalTokenCount = uiTelemetryService.getLastPromptTokenCount();
const contextPercentageThreshold =
this.config.getChatCompression()?.contextPercentageThreshold;
// Don't compress if not forced and we are under the limit.
if (!force) {
const threshold =
contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD;
if (originalTokenCount < threshold * tokenLimit(model)) {
return {
originalTokenCount,
newTokenCount: originalTokenCount,
compressionStatus: CompressionStatus.NOOP,
};
// Track failed attempts (only mark as failed if not forced)
if (!force) {
this.hasFailedCompressionAttempt = true;
}
}
const splitPoint = findCompressSplitPoint(
curatedHistory,
1 - COMPRESSION_PRESERVE_THRESHOLD,
);
const historyToCompress = curatedHistory.slice(0, splitPoint);
const historyToKeep = curatedHistory.slice(splitPoint);
const summaryResponse = await this.config
.getContentGenerator()
.generateContent(
{
model,
contents: [
...historyToCompress,
{
role: 'user',
parts: [
{
text: 'First, reason in your scratchpad. Then, generate the <state_snapshot>.',
},
],
},
],
config: {
systemInstruction: { text: getCompressionPrompt() },
},
},
prompt_id,
);
const summary = getResponseText(summaryResponse) ?? '';
const chat = await this.startChat([
{
role: 'user',
parts: [{ text: summary }],
},
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the additional context!' }],
},
...historyToKeep,
]);
this.forceFullIdeContext = true;
// Estimate token count: 1 token ≈ 4 characters
const newTokenCount = Math.floor(
chat
.getHistory()
.reduce((total, content) => total + JSON.stringify(content).length, 0) /
4,
);
logChatCompression(
this.config,
makeChatCompressionEvent({
tokens_before: originalTokenCount,
tokens_after: newTokenCount,
}),
);
if (newTokenCount > originalTokenCount) {
this.hasFailedCompressionAttempt = !force && true;
return {
originalTokenCount,
newTokenCount,
compressionStatus:
CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
};
} else {
this.chat = chat; // Chat compression successful, set new state.
uiTelemetryService.setLastPromptTokenCount(newTokenCount);
}
logChatCompression(
this.config,
makeChatCompressionEvent({
tokens_before: originalTokenCount,
tokens_after: newTokenCount,
}),
);
return {
originalTokenCount,
newTokenCount,
compressionStatus: CompressionStatus.COMPRESSED,
};
return info;
}
}

View File

@@ -153,6 +153,9 @@ export enum CompressionStatus {
/** The compression failed due to an error counting tokens */
COMPRESSION_FAILED_TOKEN_COUNT_ERROR,
/** The compression failed due to receiving an empty or null summary */
COMPRESSION_FAILED_EMPTY_SUMMARY,
/** The compression was not necessary and no action was taken */
NOOP,
}
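The two new statuses let callers distinguish "nothing to do" from genuine failures. A sketch of the branching a consumer might write (illustrative only; compare the rewritten tryCompressChat above):

function describeCompression(info: ChatCompressionInfo): string {
  switch (info.compressionStatus) {
    case CompressionStatus.COMPRESSED:
      return `compressed ${info.originalTokenCount} -> ${info.newTokenCount} tokens`;
    case CompressionStatus.NOOP:
      return 'no compression was necessary';
    case CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY:
      return 'failed: the model returned an empty summary';
    case CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT:
      return 'failed: the summary was larger than the original history';
    default:
      return 'failed: token count error';
  }
}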

View File

@@ -48,6 +48,7 @@ export * from './utils/systemEncoding.js';
export * from './utils/textUtils.js';
export * from './utils/formatters.js';
export * from './utils/generateContentResponseUtilities.js';
export * from './utils/ripgrepUtils.js';
export * from './utils/filesearch/fileSearch.js';
export * from './utils/errorParsing.js';
export * from './utils/workspaceContext.js';

View File

@@ -0,0 +1,372 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
ChatCompressionService,
findCompressSplitPoint,
} from './chatCompressionService.js';
import type { Content, GenerateContentResponse } from '@google/genai';
import { CompressionStatus } from '../core/turn.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
import { tokenLimit } from '../core/tokenLimits.js';
import type { GeminiChat } from '../core/geminiChat.js';
import type { Config } from '../config/config.js';
import { getInitialChatHistory } from '../utils/environmentContext.js';
import type { ContentGenerator } from '../core/contentGenerator.js';
vi.mock('../telemetry/uiTelemetry.js');
vi.mock('../core/tokenLimits.js');
vi.mock('../telemetry/loggers.js');
vi.mock('../utils/environmentContext.js');
describe('findCompressSplitPoint', () => {
it('should throw an error for non-positive numbers', () => {
expect(() => findCompressSplitPoint([], 0)).toThrow(
'Fraction must be between 0 and 1',
);
});
it('should throw an error for a fraction greater than or equal to 1', () => {
expect(() => findCompressSplitPoint([], 1)).toThrow(
'Fraction must be between 0 and 1',
);
});
it('should handle an empty history', () => {
expect(findCompressSplitPoint([], 0.5)).toBe(0);
});
it('should handle a fraction in the middle', () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (19%)
{ role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (40%)
{ role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (60%)
{ role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (80%)
{ role: 'user', parts: [{ text: 'This is the fifth message.' }] }, // JSON length: 65 (100%)
];
expect(findCompressSplitPoint(history, 0.5)).toBe(4);
});
it('should handle a fraction at the last index', () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (19%)
{ role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (40%)
{ role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (60%)
{ role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (80%)
{ role: 'user', parts: [{ text: 'This is the fifth message.' }] }, // JSON length: 65 (100%)
];
expect(findCompressSplitPoint(history, 0.9)).toBe(4);
});
it('should handle a fraction past the last index', () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (24%)
{ role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (50%)
{ role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (74%)
{ role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (100%)
];
expect(findCompressSplitPoint(history, 0.8)).toBe(4);
});
it('should return an earlier split point if no valid ones are after the threshold', () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'This is the first message.' }] },
{ role: 'model', parts: [{ text: 'This is the second message.' }] },
{ role: 'user', parts: [{ text: 'This is the third message.' }] },
{ role: 'model', parts: [{ functionCall: { name: 'foo', args: {} } }] },
];
// Can't return 4 (i.e., compress everything) because the last item contains a function call.
expect(findCompressSplitPoint(history, 0.99)).toBe(2);
});
it('should handle a history with only one item', () => {
const historyWithEmptyParts: Content[] = [
{ role: 'user', parts: [{ text: 'Message 1' }] },
];
expect(findCompressSplitPoint(historyWithEmptyParts, 0.5)).toBe(0);
});
it('should handle history with weird parts', () => {
const historyWithEmptyParts: Content[] = [
{ role: 'user', parts: [{ text: 'Message 1' }] },
{
role: 'model',
parts: [{ fileData: { fileUri: 'derp', mimeType: 'text/plain' } }],
},
{ role: 'user', parts: [{ text: 'Message 2' }] },
];
expect(findCompressSplitPoint(historyWithEmptyParts, 0.5)).toBe(2);
});
});
describe('ChatCompressionService', () => {
let service: ChatCompressionService;
let mockChat: GeminiChat;
let mockConfig: Config;
const mockModel = 'gemini-pro';
const mockPromptId = 'test-prompt-id';
beforeEach(() => {
service = new ChatCompressionService();
mockChat = {
getHistory: vi.fn(),
} as unknown as GeminiChat;
mockConfig = {
getChatCompression: vi.fn(),
getContentGenerator: vi.fn(),
} as unknown as Config;
vi.mocked(tokenLimit).mockReturnValue(1000);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(500);
vi.mocked(getInitialChatHistory).mockImplementation(
async (_config, extraHistory) => extraHistory || [],
);
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should return NOOP if history is empty', async () => {
vi.mocked(mockChat.getHistory).mockReturnValue([]);
const result = await service.compress(
mockChat,
mockPromptId,
false,
mockModel,
mockConfig,
false,
);
expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
expect(result.newHistory).toBeNull();
});
it('should return NOOP if previously failed and not forced', async () => {
vi.mocked(mockChat.getHistory).mockReturnValue([
{ role: 'user', parts: [{ text: 'hi' }] },
]);
const result = await service.compress(
mockChat,
mockPromptId,
false,
mockModel,
mockConfig,
true,
);
expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
expect(result.newHistory).toBeNull();
});
it('should return NOOP if under token threshold and not forced', async () => {
vi.mocked(mockChat.getHistory).mockReturnValue([
{ role: 'user', parts: [{ text: 'hi' }] },
]);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(600);
vi.mocked(tokenLimit).mockReturnValue(1000);
// Threshold is 0.7 * 1000 = 700. 600 < 700, so NOOP.
const result = await service.compress(
mockChat,
mockPromptId,
false,
mockModel,
mockConfig,
false,
);
expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
expect(result.newHistory).toBeNull();
});
it('should compress if over token threshold', async () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'msg1' }] },
{ role: 'model', parts: [{ text: 'msg2' }] },
{ role: 'user', parts: [{ text: 'msg3' }] },
{ role: 'model', parts: [{ text: 'msg4' }] },
];
vi.mocked(mockChat.getHistory).mockReturnValue(history);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(800);
vi.mocked(tokenLimit).mockReturnValue(1000);
const mockGenerateContent = vi.fn().mockResolvedValue({
candidates: [
{
content: {
parts: [{ text: 'Summary' }],
},
},
],
} as unknown as GenerateContentResponse);
vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
generateContent: mockGenerateContent,
} as unknown as ContentGenerator);
const result = await service.compress(
mockChat,
mockPromptId,
false,
mockModel,
mockConfig,
false,
);
expect(result.info.compressionStatus).toBe(CompressionStatus.COMPRESSED);
expect(result.newHistory).not.toBeNull();
expect(result.newHistory![0].parts![0].text).toBe('Summary');
expect(mockGenerateContent).toHaveBeenCalled();
});
it('should force compress even if under threshold', async () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'msg1' }] },
{ role: 'model', parts: [{ text: 'msg2' }] },
{ role: 'user', parts: [{ text: 'msg3' }] },
{ role: 'model', parts: [{ text: 'msg4' }] },
];
vi.mocked(mockChat.getHistory).mockReturnValue(history);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
vi.mocked(tokenLimit).mockReturnValue(1000);
const mockGenerateContent = vi.fn().mockResolvedValue({
candidates: [
{
content: {
parts: [{ text: 'Summary' }],
},
},
],
} as unknown as GenerateContentResponse);
vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
generateContent: mockGenerateContent,
} as unknown as ContentGenerator);
const result = await service.compress(
mockChat,
mockPromptId,
true, // forced
mockModel,
mockConfig,
false,
);
expect(result.info.compressionStatus).toBe(CompressionStatus.COMPRESSED);
expect(result.newHistory).not.toBeNull();
});
it('should return FAILED if new token count is inflated', async () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'msg1' }] },
{ role: 'model', parts: [{ text: 'msg2' }] },
];
vi.mocked(mockChat.getHistory).mockReturnValue(history);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(10);
vi.mocked(tokenLimit).mockReturnValue(1000);
const longSummary = 'a'.repeat(1000); // Long summary to inflate token count
const mockGenerateContent = vi.fn().mockResolvedValue({
candidates: [
{
content: {
parts: [{ text: longSummary }],
},
},
],
} as unknown as GenerateContentResponse);
vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
generateContent: mockGenerateContent,
} as unknown as ContentGenerator);
const result = await service.compress(
mockChat,
mockPromptId,
true,
mockModel,
mockConfig,
false,
);
expect(result.info.compressionStatus).toBe(
CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
);
expect(result.newHistory).toBeNull();
});
it('should return FAILED if summary is empty string', async () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'msg1' }] },
{ role: 'model', parts: [{ text: 'msg2' }] },
];
vi.mocked(mockChat.getHistory).mockReturnValue(history);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
vi.mocked(tokenLimit).mockReturnValue(1000);
const mockGenerateContent = vi.fn().mockResolvedValue({
candidates: [
{
content: {
parts: [{ text: '' }], // Empty summary
},
},
],
} as unknown as GenerateContentResponse);
vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
generateContent: mockGenerateContent,
} as unknown as ContentGenerator);
const result = await service.compress(
mockChat,
mockPromptId,
true,
mockModel,
mockConfig,
false,
);
expect(result.info.compressionStatus).toBe(
CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
);
expect(result.newHistory).toBeNull();
expect(result.info.originalTokenCount).toBe(100);
expect(result.info.newTokenCount).toBe(100);
});
it('should return FAILED if summary is only whitespace', async () => {
const history: Content[] = [
{ role: 'user', parts: [{ text: 'msg1' }] },
{ role: 'model', parts: [{ text: 'msg2' }] },
];
vi.mocked(mockChat.getHistory).mockReturnValue(history);
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
vi.mocked(tokenLimit).mockReturnValue(1000);
const mockGenerateContent = vi.fn().mockResolvedValue({
candidates: [
{
content: {
parts: [{ text: ' \n\t ' }], // Only whitespace
},
},
],
} as unknown as GenerateContentResponse);
vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
generateContent: mockGenerateContent,
} as unknown as ContentGenerator);
const result = await service.compress(
mockChat,
mockPromptId,
true,
mockModel,
mockConfig,
false,
);
expect(result.info.compressionStatus).toBe(
CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
);
expect(result.newHistory).toBeNull();
});
});

View File

@@ -0,0 +1,235 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { Content } from '@google/genai';
import type { Config } from '../config/config.js';
import type { GeminiChat } from '../core/geminiChat.js';
import { type ChatCompressionInfo, CompressionStatus } from '../core/turn.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
import { tokenLimit } from '../core/tokenLimits.js';
import { getCompressionPrompt } from '../core/prompts.js';
import { getResponseText } from '../utils/partUtils.js';
import { logChatCompression } from '../telemetry/loggers.js';
import { makeChatCompressionEvent } from '../telemetry/types.js';
import { getInitialChatHistory } from '../utils/environmentContext.js';
/**
* Threshold for compression token count as a fraction of the model's token limit.
* If the chat history exceeds this threshold, it will be compressed.
*/
export const COMPRESSION_TOKEN_THRESHOLD = 0.7;
/**
* The fraction of the latest chat history to keep. A value of 0.3
* means that only the last 30% of the chat history will be kept after compression.
*/
export const COMPRESSION_PRESERVE_THRESHOLD = 0.3;
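Concretely, with these defaults and an illustrative 32,000-token model limit (the limit is assumed for the example and is not taken from this diff):

// Compression triggers once the last prompt exceeds 70% of the model's limit.
const modelTokenLimit = 32_000; // illustrative value
const triggerPoint = COMPRESSION_TOKEN_THRESHOLD * modelTokenLimit; // 22,400 tokens
// After compression, roughly the newest 30% of history (by character count)
// is kept verbatim; the older 70% is replaced by a single summary turn.
const fractionToCompress = 1 - COMPRESSION_PRESERVE_THRESHOLD; // 0.7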
/**
* Returns the index of the oldest item to keep when compressing. May return
* contents.length which indicates that everything should be compressed.
*
* Exported for testing purposes.
*/
export function findCompressSplitPoint(
contents: Content[],
fraction: number,
): number {
if (fraction <= 0 || fraction >= 1) {
throw new Error('Fraction must be between 0 and 1');
}
const charCounts = contents.map((content) => JSON.stringify(content).length);
const totalCharCount = charCounts.reduce((a, b) => a + b, 0);
const targetCharCount = totalCharCount * fraction;
let lastSplitPoint = 0; // 0 is always valid (compress nothing)
let cumulativeCharCount = 0;
for (let i = 0; i < contents.length; i++) {
const content = contents[i];
if (
content.role === 'user' &&
!content.parts?.some((part) => !!part.functionResponse)
) {
if (cumulativeCharCount >= targetCharCount) {
return i;
}
lastSplitPoint = i;
}
cumulativeCharCount += charCounts[i];
}
// We found no split points after targetCharCount.
// Check if it's safe to compress everything.
const lastContent = contents[contents.length - 1];
if (
lastContent?.role === 'model' &&
!lastContent?.parts?.some((part) => part.functionCall)
) {
return contents.length;
}
// Can't compress everything, so just compress at the last split point.
return lastSplitPoint;
}
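A short worked example of the split-point rule (the history and message sizes are invented for illustration):

const history: Content[] = [
  { role: 'user', parts: [{ text: 'x'.repeat(400) }] }, // holds the bulk of the characters
  { role: 'model', parts: [{ text: 'short reply' }] },
  { role: 'user', parts: [{ text: 'follow-up' }] },     // first user turn past the target
  { role: 'model', parts: [{ text: 'another reply' }] },
];
// With fraction 0.7 the cumulative character count crosses the target inside
// the first turn, so the next plain user turn (index 2) becomes the split point.
const splitPoint = findCompressSplitPoint(history, 0.7); // 2
const historyToCompress = history.slice(0, splitPoint);  // summarized
const historyToKeep = history.slice(splitPoint);         // kept verbatim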
export class ChatCompressionService {
async compress(
chat: GeminiChat,
promptId: string,
force: boolean,
model: string,
config: Config,
hasFailedCompressionAttempt: boolean,
): Promise<{ newHistory: Content[] | null; info: ChatCompressionInfo }> {
const curatedHistory = chat.getHistory(true);
// Regardless of `force`, don't do anything if the history is empty.
if (
curatedHistory.length === 0 ||
(hasFailedCompressionAttempt && !force)
) {
return {
newHistory: null,
info: {
originalTokenCount: 0,
newTokenCount: 0,
compressionStatus: CompressionStatus.NOOP,
},
};
}
const originalTokenCount = uiTelemetryService.getLastPromptTokenCount();
const contextPercentageThreshold =
config.getChatCompression()?.contextPercentageThreshold;
// Don't compress if not forced and we are under the limit.
if (!force) {
const threshold =
contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD;
if (originalTokenCount < threshold * tokenLimit(model)) {
return {
newHistory: null,
info: {
originalTokenCount,
newTokenCount: originalTokenCount,
compressionStatus: CompressionStatus.NOOP,
},
};
}
}
const splitPoint = findCompressSplitPoint(
curatedHistory,
1 - COMPRESSION_PRESERVE_THRESHOLD,
);
const historyToCompress = curatedHistory.slice(0, splitPoint);
const historyToKeep = curatedHistory.slice(splitPoint);
if (historyToCompress.length === 0) {
return {
newHistory: null,
info: {
originalTokenCount,
newTokenCount: originalTokenCount,
compressionStatus: CompressionStatus.NOOP,
},
};
}
const summaryResponse = await config.getContentGenerator().generateContent(
{
model,
contents: [
...historyToCompress,
{
role: 'user',
parts: [
{
text: 'First, reason in your scratchpad. Then, generate the <state_snapshot>.',
},
],
},
],
config: {
systemInstruction: getCompressionPrompt(),
},
},
promptId,
);
const summary = getResponseText(summaryResponse) ?? '';
const isSummaryEmpty = !summary || summary.trim().length === 0;
let newTokenCount = originalTokenCount;
let extraHistory: Content[] = [];
if (!isSummaryEmpty) {
extraHistory = [
{
role: 'user',
parts: [{ text: summary }],
},
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the additional context!' }],
},
...historyToKeep,
];
// Use a shared utility to construct the initial history for an accurate token count.
const fullNewHistory = await getInitialChatHistory(config, extraHistory);
// Estimate token count: 1 token ≈ 4 characters
newTokenCount = Math.floor(
fullNewHistory.reduce(
(total, content) => total + JSON.stringify(content).length,
0,
) / 4,
);
}
logChatCompression(
config,
makeChatCompressionEvent({
tokens_before: originalTokenCount,
tokens_after: newTokenCount,
}),
);
if (isSummaryEmpty) {
return {
newHistory: null,
info: {
originalTokenCount,
newTokenCount: originalTokenCount,
compressionStatus: CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
},
};
} else if (newTokenCount > originalTokenCount) {
return {
newHistory: null,
info: {
originalTokenCount,
newTokenCount,
compressionStatus:
CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
},
};
} else {
uiTelemetryService.setLastPromptTokenCount(newTokenCount);
return {
newHistory: extraHistory,
info: {
originalTokenCount,
newTokenCount,
compressionStatus: CompressionStatus.COMPRESSED,
},
};
}
}
}
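How a caller consumes the service (compare the rewritten tryCompressChat earlier in this commit; `chat`, `config`, and `startChatWith` below are stand-ins for the caller's own state and restart logic):

const service = new ChatCompressionService();
const { newHistory, info } = await service.compress(
  chat,           // GeminiChat whose curated history may be summarized
  'prompt-id-1',  // prompt id, threaded through to generateContent
  false,          // force: when true, the token thresholds are bypassed
  config.getModel(),
  config,
  false,          // hasFailedCompressionAttempt: suppresses retries after a failure
);
if (info.compressionStatus === CompressionStatus.COMPRESSED && newHistory) {
  // The service only computes the new history; replacing the chat is the caller's job.
  chat = await startChatWith(newHistory);
}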

View File

@@ -32,7 +32,6 @@ import { GeminiChat } from '../core/geminiChat.js';
import { executeToolCall } from '../core/nonInteractiveToolExecutor.js';
import type { ToolRegistry } from '../tools/tool-registry.js';
import { type AnyDeclarativeTool } from '../tools/tools.js';
import { getEnvironmentContext } from '../utils/environmentContext.js';
import { ContextState, SubAgentScope } from './subagent.js';
import type {
ModelConfig,
@@ -44,7 +43,20 @@ import { SubagentTerminateMode } from './types.js';
vi.mock('../core/geminiChat.js');
vi.mock('../core/contentGenerator.js');
vi.mock('../utils/environmentContext.js');
vi.mock('../utils/environmentContext.js', () => ({
getEnvironmentContext: vi.fn().mockResolvedValue([{ text: 'Env Context' }]),
getInitialChatHistory: vi.fn(async (_config, extraHistory) => [
{
role: 'user',
parts: [{ text: 'Env Context' }],
},
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the context!' }],
},
...(extraHistory ?? []),
]),
}));
vi.mock('../core/nonInteractiveToolExecutor.js');
vi.mock('../ide/ide-client.js');
vi.mock('../core/client.js');
@@ -174,9 +186,6 @@ describe('subagent.ts', () => {
beforeEach(async () => {
vi.clearAllMocks();
vi.mocked(getEnvironmentContext).mockResolvedValue([
{ text: 'Env Context' },
]);
vi.mocked(createContentGenerator).mockResolvedValue({
getGenerativeModel: vi.fn(),
// eslint-disable-next-line @typescript-eslint/no-explicit-any

View File

@@ -16,7 +16,7 @@ import type {
ToolConfirmationOutcome,
ToolCallConfirmationDetails,
} from '../tools/tools.js';
import { getEnvironmentContext } from '../utils/environmentContext.js';
import { getInitialChatHistory } from '../utils/environmentContext.js';
import type {
Content,
Part,
@@ -807,11 +807,7 @@ export class SubAgentScope {
);
}
const envParts = await getEnvironmentContext(this.runtimeContext);
const envHistory: Content[] = [
{ role: 'user', parts: envParts },
{ role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
];
const envHistory = await getInitialChatHistory(this.runtimeContext);
const start_history = [
...envHistory,

View File

@@ -23,6 +23,7 @@ import { createMockWorkspaceContext } from '../test-utils/mockWorkspaceContext.j
import type { ChildProcess } from 'node:child_process';
import { spawn } from 'node:child_process';
import { ensureRipgrepPath } from '../utils/ripgrepUtils.js';
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';
// Mock ripgrepUtils
vi.mock('../utils/ripgrepUtils.js', () => ({
@@ -42,11 +43,17 @@ function createMockSpawn(
outputData?: string;
exitCode?: number;
signal?: string;
onCall?: (
command: string,
args: readonly string[],
spawnOptions?: unknown,
) => void;
} = {},
) {
const { outputData, exitCode = 0, signal } = options;
const { outputData, exitCode = 0, signal, onCall } = options;
return () => {
return (command: string, args: readonly string[], spawnOptions?: unknown) => {
onCall?.(command, args, spawnOptions);
const mockProcess = {
stdout: {
on: vi.fn(),
@@ -87,19 +94,29 @@ function createMockSpawn(
describe('RipGrepTool', () => {
let tempRootDir: string;
let grepTool: RipGrepTool;
let fileExclusionsMock: { getGlobExcludes: () => string[] };
const abortSignal = new AbortController().signal;
const mockConfig = {
getTargetDir: () => tempRootDir,
getWorkspaceContext: () => createMockWorkspaceContext(tempRootDir),
getWorkingDir: () => tempRootDir,
getDebugMode: () => false,
getUseBuiltinRipgrep: () => true,
} as unknown as Config;
beforeEach(async () => {
vi.clearAllMocks();
(ensureRipgrepPath as Mock).mockResolvedValue('/mock/path/to/rg');
mockSpawn.mockClear();
mockSpawn.mockReset();
tempRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'grep-tool-root-'));
fileExclusionsMock = {
getGlobExcludes: vi.fn().mockReturnValue([]),
};
Object.assign(mockConfig, {
getFileExclusions: () => fileExclusionsMock,
getFileFilteringOptions: () => DEFAULT_FILE_FILTERING_OPTIONS,
});
grepTool = new RipGrepTool(mockConfig);
// Create some test files and directories
@@ -137,11 +154,11 @@ describe('RipGrepTool', () => {
expect(grepTool.validateToolParams(params)).toBeNull();
});
it('should return null for valid params (pattern, path, and include)', () => {
it('should return null for valid params (pattern, path, and glob)', () => {
const params: RipGrepToolParams = {
pattern: 'hello',
path: '.',
include: '*.txt',
glob: '*.txt',
};
expect(grepTool.validateToolParams(params)).toBeNull();
});
@@ -153,9 +170,11 @@ describe('RipGrepTool', () => {
);
});
it('should return null for what would be an invalid regex pattern', () => {
it('should surface an error for invalid regex pattern', () => {
const params: RipGrepToolParams = { pattern: '[[' };
expect(grepTool.validateToolParams(params)).toBeNull();
expect(grepTool.validateToolParams(params)).toContain(
'Invalid regular expression pattern: [[',
);
});
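This test now expects validateToolParams to reject a malformed regex up front instead of deferring to ripgrep. A plausible check behind it (assumed; the tool's implementation is not part of this hunk) is simply attempting to construct a RegExp:

function validateRegexPattern(pattern: string): string | null {
  try {
    // JS regex parsing is a close-enough early sanity check for ripgrep patterns.
    new RegExp(pattern);
    return null;
  } catch {
    return `Invalid regular expression pattern: ${pattern}`;
  }
}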
it('should return error if path does not exist', () => {
@@ -194,13 +213,11 @@ describe('RipGrepTool', () => {
expect(result.llmContent).toContain(
'Found 3 matches for pattern "world" in the workspace directory',
);
expect(result.llmContent).toContain('File: fileA.txt');
expect(result.llmContent).toContain('L1: hello world');
expect(result.llmContent).toContain('L2: second line with world');
expect(result.llmContent).toContain('fileA.txt:1:hello world');
expect(result.llmContent).toContain('fileA.txt:2:second line with world');
expect(result.llmContent).toContain(
`File: ${path.join('sub', 'fileC.txt')}`,
'sub/fileC.txt:1:another world in sub dir',
);
expect(result.llmContent).toContain('L1: another world in sub dir');
expect(result.returnDisplay).toBe('Found 3 matches');
});
@@ -219,12 +236,33 @@ describe('RipGrepTool', () => {
expect(result.llmContent).toContain(
'Found 1 match for pattern "world" in path "sub"',
);
expect(result.llmContent).toContain('File: fileC.txt'); // Path relative to 'sub'
expect(result.llmContent).toContain('L1: another world in sub dir');
expect(result.llmContent).toContain(
'fileC.txt:1:another world in sub dir',
);
expect(result.returnDisplay).toBe('Found 1 match');
});
it('should find matches with an include glob', async () => {
it('should use target directory when path is not provided', async () => {
mockSpawn.mockImplementationOnce(
createMockSpawn({
outputData: `fileA.txt:1:hello world${EOL}`,
exitCode: 0,
onCall: (_, args) => {
// Should search in the target directory (tempRootDir)
expect(args[args.length - 1]).toBe(tempRootDir);
},
}),
);
const params: RipGrepToolParams = { pattern: 'world' };
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toContain(
'Found 1 match for pattern "world" in the workspace directory',
);
});
it('should find matches with a glob filter', async () => {
// Setup specific mock for this test
mockSpawn.mockImplementationOnce(
createMockSpawn({
@@ -233,20 +271,19 @@ describe('RipGrepTool', () => {
}),
);
const params: RipGrepToolParams = { pattern: 'hello', include: '*.js' };
const params: RipGrepToolParams = { pattern: 'hello', glob: '*.js' };
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toContain(
'Found 1 match for pattern "hello" in the workspace directory (filter: "*.js"):',
);
expect(result.llmContent).toContain('File: fileB.js');
expect(result.llmContent).toContain(
'L2: function baz() { return "hello"; }',
'fileB.js:2:function baz() { return "hello"; }',
);
expect(result.returnDisplay).toBe('Found 1 match');
});
it('should find matches with an include glob and path', async () => {
it('should find matches with a glob filter and path', async () => {
await fs.writeFile(
path.join(tempRootDir, 'sub', 'another.js'),
'const greeting = "hello";',
@@ -291,18 +328,115 @@ describe('RipGrepTool', () => {
const params: RipGrepToolParams = {
pattern: 'hello',
path: 'sub',
include: '*.js',
glob: '*.js',
};
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toContain(
'Found 1 match for pattern "hello" in path "sub" (filter: "*.js")',
);
expect(result.llmContent).toContain('File: another.js');
expect(result.llmContent).toContain('L1: const greeting = "hello";');
expect(result.llmContent).toContain(
'another.js:1:const greeting = "hello";',
);
expect(result.returnDisplay).toBe('Found 1 match');
});
it('should pass .qwenignore to ripgrep when respected', async () => {
await fs.writeFile(
path.join(tempRootDir, '.qwenignore'),
'ignored.txt\n',
);
mockSpawn.mockImplementationOnce(
createMockSpawn({
exitCode: 1,
onCall: (_, args) => {
expect(args).toContain('--ignore-file');
expect(args).toContain(path.join(tempRootDir, '.qwenignore'));
},
}),
);
const params: RipGrepToolParams = { pattern: 'secret' };
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toContain(
'No matches found for pattern "secret" in the workspace directory.',
);
expect(result.returnDisplay).toBe('No matches found');
});
it('should include .qwenignore matches when disabled in config', async () => {
await fs.writeFile(path.join(tempRootDir, '.qwenignore'), 'kept.txt\n');
await fs.writeFile(path.join(tempRootDir, 'kept.txt'), 'keep me');
Object.assign(mockConfig, {
getFileFilteringOptions: () => ({
respectGitIgnore: true,
respectQwenIgnore: false,
}),
});
mockSpawn.mockImplementationOnce(
createMockSpawn({
outputData: `kept.txt:1:keep me${EOL}`,
exitCode: 0,
onCall: (_, args) => {
expect(args).not.toContain('--ignore-file');
expect(args).not.toContain(path.join(tempRootDir, '.qwenignore'));
},
}),
);
const params: RipGrepToolParams = { pattern: 'keep' };
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toContain(
'Found 1 match for pattern "keep" in the workspace directory:',
);
expect(result.llmContent).toContain('kept.txt:1:keep me');
expect(result.returnDisplay).toBe('Found 1 match');
});
it('should disable gitignore when configured', async () => {
Object.assign(mockConfig, {
getFileFilteringOptions: () => ({
respectGitIgnore: false,
respectQwenIgnore: true,
}),
});
mockSpawn.mockImplementationOnce(
createMockSpawn({
exitCode: 1,
onCall: (_, args) => {
expect(args).toContain('--no-ignore-vcs');
},
}),
);
const params: RipGrepToolParams = { pattern: 'ignored' };
const invocation = grepTool.build(params);
await invocation.execute(abortSignal);
});
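Taken together, the three ignore-handling tests above pin down the flags the tool forwards to ripgrep. A condensed sketch of that wiring (names are taken from the implementation later in this diff; the helper itself is hypothetical):
// Sketch: ignore-related rg flags implied by the tests above (assumed wiring).
function ignoreFlags(
  opts: { respectGitIgnore: boolean; respectQwenIgnore: boolean },
  qwenIgnorePath: string,
  exists: (p: string) => boolean,
): string[] {
  const flags: string[] = [];
  if (!opts.respectGitIgnore) flags.push('--no-ignore-vcs');
  if (opts.respectQwenIgnore && exists(qwenIgnorePath)) {
    flags.push('--ignore-file', qwenIgnorePath);
  }
  return flags;
}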
it('should truncate llm content when exceeding maximum length', async () => {
const longMatch = 'fileA.txt:1:' + 'a'.repeat(25_000);
mockSpawn.mockImplementationOnce(
createMockSpawn({
outputData: `${longMatch}${EOL}`,
exitCode: 0,
}),
);
const params: RipGrepToolParams = { pattern: 'a+' };
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
expect(String(result.llmContent).length).toBeLessThanOrEqual(20_000);
expect(result.llmContent).toMatch(/\[\d+ lines? truncated\] \.\.\./);
expect(result.returnDisplay).toContain('truncated');
});
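The cap and notice format asserted here come from a two-stage truncation: an optional line limit first, then a hard character cap as a safety net. A condensed, hypothetical sketch of that logic (the real implementation appears later in this diff):
// Sketch: two-stage truncation pinned down by the test above.
function truncateOutput(
  lines: string[],
  limit: number | undefined,
  maxChars: number,
): string {
  // Stage 1: optional line limit.
  const kept =
    limit !== undefined && lines.length > limit ? lines.slice(0, limit) : lines;
  let out = kept.join('\n');
  // Stage 2: hard character cap.
  const cappedByChars = out.length > maxChars;
  if (cappedByChars) out = out.slice(0, maxChars) + '...';
  if (kept.length < lines.length || cappedByChars) {
    const omitted = lines.length - out.split('\n').length;
    out += ` [${omitted} ${omitted === 1 ? 'line' : 'lines'} truncated] ...`;
  }
  return out;
}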
it('should return "No matches found" when pattern does not exist', async () => {
// Setup specific mock for no matches
mockSpawn.mockImplementationOnce(
@@ -320,19 +454,10 @@ describe('RipGrepTool', () => {
expect(result.returnDisplay).toBe('No matches found');
});
it('should return an error from ripgrep for invalid regex pattern', async () => {
mockSpawn.mockImplementationOnce(
createMockSpawn({
exitCode: 2,
}),
);
it('should throw validation error for invalid regex pattern', async () => {
const params: RipGrepToolParams = { pattern: '[[' };
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toContain('ripgrep exited with code 2');
expect(result.returnDisplay).toContain(
'Error: ripgrep exited with code 2',
expect(() => grepTool.build(params)).toThrow(
'Invalid regular expression pattern: [[',
);
});
@@ -379,8 +504,7 @@ describe('RipGrepTool', () => {
expect(result.llmContent).toContain(
'Found 1 match for pattern "foo.*bar" in the workspace directory:',
);
expect(result.llmContent).toContain('File: fileB.js');
expect(result.llmContent).toContain('L1: const foo = "bar";');
expect(result.llmContent).toContain('fileB.js:1:const foo = "bar";');
});
it('should be case-insensitive by default (JS fallback)', async () => {
@@ -430,11 +554,9 @@ describe('RipGrepTool', () => {
expect(result.llmContent).toContain(
'Found 2 matches for pattern "HELLO" in the workspace directory:',
);
expect(result.llmContent).toContain('File: fileA.txt');
expect(result.llmContent).toContain('L1: hello world');
expect(result.llmContent).toContain('File: fileB.js');
expect(result.llmContent).toContain('fileA.txt:1:hello world');
expect(result.llmContent).toContain(
'L2: function baz() { return "hello"; }',
'fileB.js:2:function baz() { return "hello"; }',
);
});
@@ -462,191 +584,6 @@ describe('RipGrepTool', () => {
});
});
describe('multi-directory workspace', () => {
it('should search across all workspace directories when no path is specified', async () => {
// Create additional directory with test files
const secondDir = await fs.mkdtemp(
path.join(os.tmpdir(), 'grep-tool-second-'),
);
await fs.writeFile(
path.join(secondDir, 'other.txt'),
'hello from second directory\nworld in second',
);
await fs.writeFile(
path.join(secondDir, 'another.js'),
'function world() { return "test"; }',
);
// Create a mock config with multiple directories
const multiDirConfig = {
getTargetDir: () => tempRootDir,
getWorkspaceContext: () =>
createMockWorkspaceContext(tempRootDir, [secondDir]),
getDebugMode: () => false,
} as unknown as Config;
// Setup specific mock for this test - multi-directory search for 'world'
// Mock will be called twice - once for each directory
let callCount = 0;
mockSpawn.mockImplementation(() => {
callCount++;
const mockProcess = {
stdout: {
on: vi.fn(),
removeListener: vi.fn(),
},
stderr: {
on: vi.fn(),
removeListener: vi.fn(),
},
on: vi.fn(),
removeListener: vi.fn(),
kill: vi.fn(),
};
setTimeout(() => {
const stdoutDataHandler = mockProcess.stdout.on.mock.calls.find(
(call) => call[0] === 'data',
)?.[1];
const closeHandler = mockProcess.on.mock.calls.find(
(call) => call[0] === 'close',
)?.[1];
let outputData = '';
if (callCount === 1) {
// First directory (tempRootDir)
outputData =
[
'fileA.txt:1:hello world',
'fileA.txt:2:second line with world',
'sub/fileC.txt:1:another world in sub dir',
].join(EOL) + EOL;
} else if (callCount === 2) {
// Second directory (secondDir)
outputData =
[
'other.txt:2:world in second',
'another.js:1:function world() { return "test"; }',
].join(EOL) + EOL;
}
if (stdoutDataHandler && outputData) {
stdoutDataHandler(Buffer.from(outputData));
}
if (closeHandler) {
closeHandler(0);
}
}, 0);
return mockProcess as unknown as ChildProcess;
});
const multiDirGrepTool = new RipGrepTool(multiDirConfig);
const params: RipGrepToolParams = { pattern: 'world' };
const invocation = multiDirGrepTool.build(params);
const result = await invocation.execute(abortSignal);
// Should find matches in both directories
expect(result.llmContent).toContain(
'Found 5 matches for pattern "world"',
);
// Matches from first directory
expect(result.llmContent).toContain('fileA.txt');
expect(result.llmContent).toContain('L1: hello world');
expect(result.llmContent).toContain('L2: second line with world');
expect(result.llmContent).toContain('fileC.txt');
expect(result.llmContent).toContain('L1: another world in sub dir');
// Matches from both directories
expect(result.llmContent).toContain('other.txt');
expect(result.llmContent).toContain('L2: world in second');
expect(result.llmContent).toContain('another.js');
expect(result.llmContent).toContain('L1: function world()');
// Clean up
await fs.rm(secondDir, { recursive: true, force: true });
mockSpawn.mockClear();
});
it('should search only specified path within workspace directories', async () => {
// Create additional directory
const secondDir = await fs.mkdtemp(
path.join(os.tmpdir(), 'grep-tool-second-'),
);
await fs.mkdir(path.join(secondDir, 'sub'));
await fs.writeFile(
path.join(secondDir, 'sub', 'test.txt'),
'hello from second sub directory',
);
// Create a mock config with multiple directories
const multiDirConfig = {
getTargetDir: () => tempRootDir,
getWorkspaceContext: () =>
createMockWorkspaceContext(tempRootDir, [secondDir]),
getDebugMode: () => false,
} as unknown as Config;
// Setup specific mock for this test - searching in 'sub' should only return matches from that directory
mockSpawn.mockImplementationOnce(() => {
const mockProcess = {
stdout: {
on: vi.fn(),
removeListener: vi.fn(),
},
stderr: {
on: vi.fn(),
removeListener: vi.fn(),
},
on: vi.fn(),
removeListener: vi.fn(),
kill: vi.fn(),
};
setTimeout(() => {
const onData = mockProcess.stdout.on.mock.calls.find(
(call) => call[0] === 'data',
)?.[1];
const onClose = mockProcess.on.mock.calls.find(
(call) => call[0] === 'close',
)?.[1];
if (onData) {
onData(Buffer.from(`fileC.txt:1:another world in sub dir${EOL}`));
}
if (onClose) {
onClose(0);
}
}, 0);
return mockProcess as unknown as ChildProcess;
});
const multiDirGrepTool = new RipGrepTool(multiDirConfig);
// Search only in the 'sub' directory of the first workspace
const params: RipGrepToolParams = { pattern: 'world', path: 'sub' };
const invocation = multiDirGrepTool.build(params);
const result = await invocation.execute(abortSignal);
// Should only find matches in the specified sub directory
expect(result.llmContent).toContain(
'Found 1 match for pattern "world" in path "sub"',
);
expect(result.llmContent).toContain('File: fileC.txt');
expect(result.llmContent).toContain('L1: another world in sub dir');
// Should not contain matches from second directory
expect(result.llmContent).not.toContain('test.txt');
// Clean up
await fs.rm(secondDir, { recursive: true, force: true });
});
});
describe('abort signal handling', () => {
it('should handle AbortSignal during search', async () => {
const controller = new AbortController();
@@ -1062,8 +999,8 @@ describe('RipGrepTool', () => {
});
});
describe('include pattern filtering', () => {
it('should handle multiple file extensions in include pattern', async () => {
describe('glob pattern filtering', () => {
it('should handle multiple file extensions in glob pattern', async () => {
await fs.writeFile(
path.join(tempRootDir, 'test.ts'),
'typescript content',
@@ -1075,7 +1012,7 @@ describe('RipGrepTool', () => {
);
await fs.writeFile(path.join(tempRootDir, 'test.txt'), 'text content');
// Setup specific mock for this test - include pattern should filter to only ts/tsx files
// Setup specific mock for this test - glob pattern should filter to only ts/tsx files
mockSpawn.mockImplementationOnce(() => {
const mockProcess = {
stdout: {
@@ -1116,7 +1053,7 @@ describe('RipGrepTool', () => {
const params: RipGrepToolParams = {
pattern: 'content',
include: '*.{ts,tsx}',
glob: '*.{ts,tsx}',
};
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
@@ -1127,7 +1064,7 @@ describe('RipGrepTool', () => {
expect(result.llmContent).not.toContain('test.txt');
});
it('should handle directory patterns in include', async () => {
it('should handle directory patterns in glob', async () => {
await fs.mkdir(path.join(tempRootDir, 'src'), { recursive: true });
await fs.writeFile(
path.join(tempRootDir, 'src', 'main.ts'),
@@ -1135,7 +1072,7 @@ describe('RipGrepTool', () => {
);
await fs.writeFile(path.join(tempRootDir, 'other.ts'), 'other code');
// Setup specific mock for this test - include pattern should filter to only src/** files
// Setup specific mock for this test - glob pattern should filter to only src/** files
mockSpawn.mockImplementationOnce(() => {
const mockProcess = {
stdout: {
@@ -1172,7 +1109,7 @@ describe('RipGrepTool', () => {
const params: RipGrepToolParams = {
pattern: 'code',
include: 'src/**',
glob: 'src/**',
};
const invocation = grepTool.build(params);
const result = await invocation.execute(abortSignal);
@@ -1189,10 +1126,10 @@ describe('RipGrepTool', () => {
expect(invocation.getDescription()).toBe("'testPattern'");
});
it('should generate correct description with pattern and include', () => {
it('should generate correct description with pattern and glob', () => {
const params: RipGrepToolParams = {
pattern: 'testPattern',
include: '*.ts',
glob: '*.ts',
};
const invocation = grepTool.build(params);
expect(invocation.getDescription()).toBe("'testPattern' in *.ts");
@@ -1211,29 +1148,18 @@ describe('RipGrepTool', () => {
expect(invocation.getDescription()).toContain(path.join('src', 'app'));
});
it('should indicate searching across all workspace directories when no path specified', () => {
// Create a mock config with multiple directories
const multiDirConfig = {
getTargetDir: () => tempRootDir,
getWorkspaceContext: () =>
createMockWorkspaceContext(tempRootDir, ['/another/dir']),
getDebugMode: () => false,
} as unknown as Config;
const multiDirGrepTool = new RipGrepTool(multiDirConfig);
it('should generate correct description with default search path', () => {
const params: RipGrepToolParams = { pattern: 'testPattern' };
const invocation = multiDirGrepTool.build(params);
expect(invocation.getDescription()).toBe(
"'testPattern' across all workspace directories",
);
const invocation = grepTool.build(params);
expect(invocation.getDescription()).toBe("'testPattern'");
});
it('should generate correct description with pattern, include, and path', async () => {
it('should generate correct description with pattern, glob, and path', async () => {
const dirPath = path.join(tempRootDir, 'src', 'app');
await fs.mkdir(dirPath, { recursive: true });
const params: RipGrepToolParams = {
pattern: 'testPattern',
include: '*.ts',
glob: '*.ts',
path: path.join('src', 'app'),
};
const invocation = grepTool.build(params);

View File

@@ -10,16 +10,19 @@ import { EOL } from 'node:os';
import { spawn } from 'node:child_process';
import type { ToolInvocation, ToolResult } from './tools.js';
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { ToolNames } from './tool-names.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
import { getErrorMessage, isNodeError } from '../utils/errors.js';
import type { Config } from '../config/config.js';
import { ensureRipgrepPath } from '../utils/ripgrepUtils.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import type { FileFilteringOptions } from '../config/constants.js';
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';
const DEFAULT_TOTAL_MAX_MATCHES = 20000;
const MAX_LLM_CONTENT_LENGTH = 20_000;
/**
* Parameters for the GrepTool
* Parameters for the GrepTool (Simplified)
*/
export interface RipGrepToolParams {
/**
@@ -33,18 +36,14 @@ export interface RipGrepToolParams {
path?: string;
/**
* File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")
* Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}")
*/
include?: string;
}
glob?: string;
/**
* Result object for a single grep match
*/
interface GrepMatch {
filePath: string;
lineNumber: number;
line: string;
/**
* Maximum number of matching lines to return (optional, shows all if not specified)
*/
limit?: number;
}
class GrepToolInvocation extends BaseToolInvocation<
@@ -61,18 +60,15 @@ class GrepToolInvocation extends BaseToolInvocation<
/**
* Checks if a path is within the root directory and resolves it.
* @param relativePath Path relative to the root directory (or undefined for root).
* @returns The absolute path if valid and exists, or null if no path specified (to search all directories).
* @returns The absolute path to search within.
* @throws {Error} If path is outside root, doesn't exist, or isn't a directory.
*/
private resolveAndValidatePath(relativePath?: string): string | null {
// If no path specified, return null to indicate searching all workspace directories
if (!relativePath) {
return null;
}
private resolveAndValidatePath(relativePath?: string): string {
const targetDir = this.config.getTargetDir();
const targetPath = relativePath
? path.resolve(targetDir, relativePath)
: targetDir;
const targetPath = path.resolve(this.config.getTargetDir(), relativePath);
// Security Check: Ensure the resolved path is within workspace boundaries
const workspaceContext = this.config.getWorkspaceContext();
if (!workspaceContext.isPathWithinWorkspace(targetPath)) {
const directories = workspaceContext.getDirectories();
@@ -81,7 +77,10 @@ class GrepToolInvocation extends BaseToolInvocation<
);
}
// Check existence and type after resolving
return this.ensureDirectory(targetPath);
}
private ensureDirectory(targetPath: string): string {
try {
const stats = fs.statSync(targetPath);
if (!stats.isDirectory()) {
@@ -101,104 +100,81 @@ class GrepToolInvocation extends BaseToolInvocation<
async execute(signal: AbortSignal): Promise<ToolResult> {
try {
const workspaceContext = this.config.getWorkspaceContext();
const searchDirAbs = this.resolveAndValidatePath(this.params.path);
const searchDirDisplay = this.params.path || '.';
// Determine which directories to search
let searchDirectories: readonly string[];
if (searchDirAbs === null) {
// No path specified - search all workspace directories
searchDirectories = workspaceContext.getDirectories();
} else {
// Specific path provided - search only that directory
searchDirectories = [searchDirAbs];
}
// Get raw ripgrep output
const rawOutput = await this.performRipgrepSearch({
pattern: this.params.pattern,
path: searchDirAbs,
glob: this.params.glob,
signal,
});
let allMatches: GrepMatch[] = [];
const totalMaxMatches = DEFAULT_TOTAL_MAX_MATCHES;
// Build search description
const searchLocationDescription = this.params.path
? `in path "${searchDirDisplay}"`
: `in the workspace directory`;
if (this.config.getDebugMode()) {
console.log(`[GrepTool] Total result limit: ${totalMaxMatches}`);
}
const filterDescription = this.params.glob
? ` (filter: "${this.params.glob}")`
: '';
for (const searchDir of searchDirectories) {
const searchResult = await this.performRipgrepSearch({
pattern: this.params.pattern,
path: searchDir,
include: this.params.include,
signal,
});
if (searchDirectories.length > 1) {
const dirName = path.basename(searchDir);
searchResult.forEach((match) => {
match.filePath = path.join(dirName, match.filePath);
});
}
allMatches = allMatches.concat(searchResult);
if (allMatches.length >= totalMaxMatches) {
allMatches = allMatches.slice(0, totalMaxMatches);
break;
}
}
let searchLocationDescription: string;
if (searchDirAbs === null) {
const numDirs = workspaceContext.getDirectories().length;
searchLocationDescription =
numDirs > 1
? `across ${numDirs} workspace directories`
: `in the workspace directory`;
} else {
searchLocationDescription = `in path "${searchDirDisplay}"`;
}
if (allMatches.length === 0) {
const noMatchMsg = `No matches found for pattern "${this.params.pattern}" ${searchLocationDescription}${this.params.include ? ` (filter: "${this.params.include}")` : ''}.`;
// Check if we have any matches
if (!rawOutput.trim()) {
const noMatchMsg = `No matches found for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}.`;
return { llmContent: noMatchMsg, returnDisplay: `No matches found` };
}
const wasTruncated = allMatches.length >= totalMaxMatches;
// Split into lines and count total matches
const allLines = rawOutput.split(EOL).filter((line) => line.trim());
const totalMatches = allLines.length;
const matchTerm = totalMatches === 1 ? 'match' : 'matches';
const matchesByFile = allMatches.reduce(
(acc, match) => {
const fileKey = match.filePath;
if (!acc[fileKey]) {
acc[fileKey] = [];
}
acc[fileKey].push(match);
acc[fileKey].sort((a, b) => a.lineNumber - b.lineNumber);
return acc;
},
{} as Record<string, GrepMatch[]>,
);
// Build header early to calculate available space
const header = `Found ${totalMatches} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}:\n---\n`;
const maxTruncationNoticeLength = 100; // " [N lines truncated] ..."
const maxGrepOutputLength =
MAX_LLM_CONTENT_LENGTH - header.length - maxTruncationNoticeLength;
const matchCount = allMatches.length;
const matchTerm = matchCount === 1 ? 'match' : 'matches';
let llmContent = `Found ${matchCount} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${this.params.include ? ` (filter: "${this.params.include}")` : ''}`;
if (wasTruncated) {
llmContent += ` (results limited to ${totalMaxMatches} matches for performance)`;
// Apply line limit first (if specified)
let truncatedByLineLimit = false;
let linesToInclude = allLines;
if (
this.params.limit !== undefined &&
allLines.length > this.params.limit
) {
linesToInclude = allLines.slice(0, this.params.limit);
truncatedByLineLimit = true;
}
llmContent += `:\n---\n`;
// Join lines back into grep output
let grepOutput = linesToInclude.join(EOL);
for (const filePath in matchesByFile) {
llmContent += `File: ${filePath}\n`;
matchesByFile[filePath].forEach((match) => {
const trimmedLine = match.line.trim();
llmContent += `L${match.lineNumber}: ${trimmedLine}\n`;
});
llmContent += '---\n';
// Apply character limit as safety net
let truncatedByCharLimit = false;
if (grepOutput.length > maxGrepOutputLength) {
grepOutput = grepOutput.slice(0, maxGrepOutputLength) + '...';
truncatedByCharLimit = true;
}
let displayMessage = `Found ${matchCount} ${matchTerm}`;
if (wasTruncated) {
displayMessage += ` (limited)`;
// Count how many lines we actually included after character truncation
const finalLines = grepOutput.split(EOL).filter((line) => line.trim());
const includedLines = finalLines.length;
// Build result
let llmContent = header + grepOutput;
// Add truncation notice if needed
if (truncatedByLineLimit || truncatedByCharLimit) {
const omittedMatches = totalMatches - includedLines;
llmContent += ` [${omittedMatches} ${omittedMatches === 1 ? 'line' : 'lines'} truncated] ...`;
}
// Build display message (show the real match count, not the truncated one)
let displayMessage = `Found ${totalMatches} ${matchTerm}`;
if (truncatedByLineLimit || truncatedByCharLimit) {
displayMessage += ` (truncated)`;
}
return {
@@ -215,53 +191,15 @@ class GrepToolInvocation extends BaseToolInvocation<
}
}
private parseRipgrepOutput(output: string, basePath: string): GrepMatch[] {
const results: GrepMatch[] = [];
if (!output) return results;
const lines = output.split(EOL);
for (const line of lines) {
if (!line.trim()) continue;
const firstColonIndex = line.indexOf(':');
if (firstColonIndex === -1) continue;
const secondColonIndex = line.indexOf(':', firstColonIndex + 1);
if (secondColonIndex === -1) continue;
const filePathRaw = line.substring(0, firstColonIndex);
const lineNumberStr = line.substring(
firstColonIndex + 1,
secondColonIndex,
);
const lineContent = line.substring(secondColonIndex + 1);
const lineNumber = parseInt(lineNumberStr, 10);
if (!isNaN(lineNumber)) {
const absoluteFilePath = path.resolve(basePath, filePathRaw);
const relativeFilePath = path.relative(basePath, absoluteFilePath);
results.push({
filePath: relativeFilePath || path.basename(absoluteFilePath),
lineNumber,
line: lineContent,
});
}
}
return results;
}
private async performRipgrepSearch(options: {
pattern: string;
path: string;
include?: string;
glob?: string;
signal: AbortSignal;
}): Promise<GrepMatch[]> {
const { pattern, path: absolutePath, include } = options;
}): Promise<string> {
const { pattern, path: absolutePath, glob } = options;
const rgArgs = [
const rgArgs: string[] = [
'--line-number',
'--no-heading',
'--with-filename',
@@ -270,29 +208,34 @@ class GrepToolInvocation extends BaseToolInvocation<
pattern,
];
if (include) {
rgArgs.push('--glob', include);
// Add file exclusions from .gitignore and .qwenignore
const filteringOptions = this.getFileFilteringOptions();
if (!filteringOptions.respectGitIgnore) {
rgArgs.push('--no-ignore-vcs');
}
const excludes = [
'.git',
'node_modules',
'bower_components',
'*.log',
'*.tmp',
'build',
'dist',
'coverage',
];
excludes.forEach((exclude) => {
rgArgs.push('--glob', `!${exclude}`);
});
if (filteringOptions.respectQwenIgnore) {
const qwenIgnorePath = path.join(
this.config.getTargetDir(),
'.qwenignore',
);
if (fs.existsSync(qwenIgnorePath)) {
rgArgs.push('--ignore-file', qwenIgnorePath);
}
}
// Add glob pattern if provided
if (glob) {
rgArgs.push('--glob', glob);
}
rgArgs.push('--threads', '4');
rgArgs.push(absolutePath);
try {
const rgPath = await ensureRipgrepPath();
const rgPath = this.config.getUseBuiltinRipgrep()
? await ensureRipgrepPath()
: 'rg';
const output = await new Promise<string>((resolve, reject) => {
const child = spawn(rgPath, rgArgs, {
windowsHide: true,
@@ -334,22 +277,33 @@ class GrepToolInvocation extends BaseToolInvocation<
});
});
return this.parseRipgrepOutput(output, absolutePath);
return output;
} catch (error: unknown) {
console.error(`GrepLogic: ripgrep failed: ${getErrorMessage(error)}`);
throw error;
}
}
private getFileFilteringOptions(): FileFilteringOptions {
const options = this.config.getFileFilteringOptions?.();
return {
respectGitIgnore:
options?.respectGitIgnore ??
DEFAULT_FILE_FILTERING_OPTIONS.respectGitIgnore,
respectQwenIgnore:
options?.respectQwenIgnore ??
DEFAULT_FILE_FILTERING_OPTIONS.respectQwenIgnore,
};
}
/**
* Gets a description of the grep operation
* @param params Parameters for the grep operation
* @returns A string describing the grep
*/
getDescription(): string {
let description = `'${this.params.pattern}'`;
if (this.params.include) {
description += ` in ${this.params.include}`;
if (this.params.glob) {
description += ` in ${this.params.glob}`;
}
if (this.params.path) {
const resolvedPath = path.resolve(
@@ -381,36 +335,41 @@ class GrepToolInvocation extends BaseToolInvocation<
}
/**
* Implementation of the Grep tool logic (moved from CLI)
* Implementation of the Grep tool logic
*/
export class RipGrepTool extends BaseDeclarativeTool<
RipGrepToolParams,
ToolResult
> {
static readonly Name = 'search_file_content';
static readonly Name = ToolNames.GREP;
constructor(private readonly config: Config) {
super(
RipGrepTool.Name,
'SearchText',
'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers. Total results limited to 20,000 matches like VSCode.',
'Grep',
'A powerful search tool built on ripgrep\n\n Usage:\n - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command. The Grep tool has been optimized for correct permissions and access.\n - Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")\n - Filter files with glob parameter (e.g., "*.js", "**/*.tsx")\n - Use Task tool for open-ended searches requiring multiple rounds\n - Pattern syntax: Uses ripgrep (not grep) - special regex characters need escaping (use `interface\\{\\}` to find `interface{}` in Go code)\n',
Kind.Search,
{
properties: {
pattern: {
description:
"The regular expression (regex) pattern to search for within file contents (e.g., 'function\\s+myFunction', 'import\\s+\\{.*\\}\\s+from\\s+.*').",
type: 'string',
description:
'The regular expression pattern to search for in file contents',
},
glob: {
type: 'string',
description:
'Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}") - maps to rg --glob',
},
path: {
description:
'Optional: The absolute path to the directory to search within. If omitted, searches the current working directory.',
type: 'string',
description:
'File or directory to search in (rg PATH). Defaults to current working directory.',
},
include: {
limit: {
type: 'number',
description:
"Optional: A glob pattern to filter which files are searched (e.g., '*.js', '*.{ts,tsx}', 'src/**'). If omitted, searches all files (respecting potential global ignores).",
type: 'string',
'Limit output to first N lines/entries. Optional - shows all matches if not specified.',
},
},
required: ['pattern'],
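For illustration, a call against the renamed schema might look like this (a sketch reusing names from the tests earlier in this diff; the regex and glob values are made up):
// Hypothetical invocation using the renamed parameters.
const params: RipGrepToolParams = {
  pattern: 'TODO\\(\\w+\\)', // full regex syntax, validated via new RegExp()
  glob: 'src/**/*.{ts,tsx}', // replaces the old `include` parameter
  limit: 50, // return only the first 50 matching lines
};
const invocation = grepTool.build(params); // throws if `pattern` is not a valid regex
const result = await invocation.execute(abortSignal);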
@@ -422,13 +381,13 @@ export class RipGrepTool extends BaseDeclarativeTool<
/**
* Checks if a path is within the root directory and resolves it.
* @param relativePath Path relative to the root directory (or undefined for root).
* @returns The absolute path if valid and exists, or null if no path specified (to search all directories).
* @returns The absolute path to search within.
* @throws {Error} If path is outside root, doesn't exist, or isn't a directory.
*/
private resolveAndValidatePath(relativePath?: string): string | null {
// If no path specified, return null to indicate searching all workspace directories
private resolveAndValidatePath(relativePath?: string): string {
// If no path specified, search within the workspace root directory
if (!relativePath) {
return null;
return this.config.getTargetDir();
}
const targetPath = path.resolve(this.config.getTargetDir(), relativePath);
@@ -465,7 +424,9 @@ export class RipGrepTool extends BaseDeclarativeTool<
* @param params Parameters to validate
* @returns An error message string if invalid, null otherwise
*/
override validateToolParams(params: RipGrepToolParams): string | null {
protected override validateToolParamValues(
params: RipGrepToolParams,
): string | null {
const errors = SchemaValidator.validate(
this.schema.parametersJsonSchema,
params,
@@ -474,6 +435,13 @@ export class RipGrepTool extends BaseDeclarativeTool<
return errors;
}
// Validate pattern is a valid regex
try {
new RegExp(params.pattern);
} catch (error) {
return `Invalid regular expression pattern: ${params.pattern}. Error: ${getErrorMessage(error)}`;
}
// Only validate path if one is provided
if (params.path) {
try {

View File

@@ -14,7 +14,7 @@ export const ToolNames = {
WRITE_FILE: 'write_file',
READ_FILE: 'read_file',
READ_MANY_FILES: 'read_many_files',
GREP: 'search_file_content',
GREP: 'grep_search',
GLOB: 'glob',
SHELL: 'run_shell_command',
TODO_WRITE: 'todo_write',

View File

@@ -4,7 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Part } from '@google/genai';
import type { Content, Part } from '@google/genai';
import type { Config } from '../config/config.js';
import { getFolderStructure } from './getFolderStructure.js';
@@ -107,3 +107,23 @@ ${directoryContext}
return initialParts;
}
export async function getInitialChatHistory(
config: Config,
extraHistory?: Content[],
): Promise<Content[]> {
const envParts = await getEnvironmentContext(config);
const envContextString = envParts.map((part) => part.text || '').join('\n\n');
return [
{
role: 'user',
parts: [{ text: envContextString }],
},
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the context!' }],
},
...(extraHistory ?? []),
];
}
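A minimal sketch of how a caller might use this helper to seed a session (the extra turn is made up; actual consumers are outside this diff):
// Hypothetical caller: seed a chat with environment context plus one extra turn.
const history = await getInitialChatHistory(config, [
  { role: 'user', parts: [{ text: 'Summarize the repo layout.' }] },
]);
// `history` starts with the env-context user turn, the canned model ack,
// then the extra turns in order.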

View File

@@ -152,7 +152,16 @@ describe('ripgrepUtils', () => {
});
describe('canUseRipgrep', () => {
it('should return true if ripgrep binary exists', async () => {
it('should return true if ripgrep binary exists (builtin)', async () => {
(fileExists as Mock).mockResolvedValue(true);
const result = await canUseRipgrep(true);
expect(result).toBe(true);
expect(fileExists).toHaveBeenCalledOnce();
});
it('should return true if ripgrep binary exists (default)', async () => {
(fileExists as Mock).mockResolvedValue(true);
const result = await canUseRipgrep();
@@ -161,15 +170,26 @@ describe('ripgrepUtils', () => {
expect(fileExists).toHaveBeenCalledOnce();
});
it('should return false if ripgrep binary does not exist', async () => {
it('should fall back to system rg if bundled ripgrep binary does not exist', async () => {
(fileExists as Mock).mockResolvedValue(false);
// When useBuiltin is true but the bundled binary doesn't exist, the
// function falls back to probing system `rg` by spawning a process.
const result = await canUseRipgrep();
expect(result).toBe(false);
// System `rg` availability varies by environment, so assert only what is
// deterministic: the bundled binary is checked first, and the fallback
// still resolves to a boolean.
expect(fileExists).toHaveBeenCalledOnce();
expect(typeof result).toBe('boolean');
});
// Note: Tests for system ripgrep detection (useBuiltin=false) would require mocking
// the child_process spawn function, which is complex in ESM. These cases are tested
// indirectly through integration tests.
it('should return false if platform is unsupported', async () => {
const originalPlatform = process.platform;

View File

@@ -85,13 +85,31 @@ export function getRipgrepPath(): string {
/**
* Checks if ripgrep binary is available
* @param useBuiltin If true, tries bundled ripgrep first, then falls back to system ripgrep.
* If false, only checks for system ripgrep.
*/
export async function canUseRipgrep(): Promise<boolean> {
export async function canUseRipgrep(
useBuiltin: boolean = true,
): Promise<boolean> {
try {
const rgPath = getRipgrepPath();
return await fileExists(rgPath);
if (useBuiltin) {
// Try bundled ripgrep first
const rgPath = getRipgrepPath();
if (await fileExists(rgPath)) {
return true;
}
// Fallback to system rg if bundled binary is not available
}
// Check for system ripgrep by trying to spawn 'rg --version'
const { spawn } = await import('node:child_process');
return await new Promise<boolean>((resolve) => {
const proc = spawn('rg', ['--version']);
proc.on('error', () => resolve(false));
proc.on('exit', (code) => resolve(code === 0));
});
} catch (_error) {
// Unsupported platform/arch
// Unsupported platform/arch or other error
return false;
}
}
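A quick sketch of how the flag is expected to be wired (assuming the `getUseBuiltinRipgrep()` accessor shown earlier in this diff; the fallback branch is illustrative):
// Hypothetical wiring: choose bundled vs. system ripgrep from settings.
const useBuiltin = config.getUseBuiltinRipgrep(); // `tools.useBuiltinRipgrep`, default true
if (await canUseRipgrep(useBuiltin)) {
  // bundled binary, or system `rg` when the flag is false / the binary is missing
} else {
  // fall back to the pure-JS search implementation
}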

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.1.2",
"version": "0.1.3",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.1.2",
"version": "0.1.3",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {