Sync upstream Gemini-CLI v0.8.2 (#838)

This commit is contained in:
tanzhenxin
2025-10-23 09:27:04 +08:00
committed by GitHub
parent 096fabb5d6
commit eb95c131be
644 changed files with 70389 additions and 23709 deletions

View File

@@ -130,14 +130,14 @@ describe('bfsFileSearch', () => {
fileService,
fileFilteringOptions: {
respectGitIgnore: true,
respectGeminiIgnore: true,
respectQwenIgnore: true,
},
});
expect(result).toEqual([targetFilePath]);
});
it('should ignore geminiignored files', async () => {
it('should ignore qwenignored files', async () => {
await createTestFile('node_modules/', 'project', '.qwenignore');
await createTestFile('content', 'project', 'node_modules', 'target.txt');
const targetFilePath = await createTestFile(
@@ -153,7 +153,7 @@ describe('bfsFileSearch', () => {
fileService,
fileFilteringOptions: {
respectGitIgnore: false,
respectGeminiIgnore: true,
respectQwenIgnore: true,
},
});
@@ -182,7 +182,7 @@ describe('bfsFileSearch', () => {
fileService,
fileFilteringOptions: {
respectGitIgnore: false,
respectGeminiIgnore: false,
respectQwenIgnore: false,
},
});

View File

@@ -7,7 +7,7 @@
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import type { FileDiscoveryService } from '../services/fileDiscoveryService.js';
import type { FileFilteringOptions } from '../config/config.js';
import type { FileFilteringOptions } from '../config/constants.js';
// Simple console logger for now.
// TODO: Integrate with a more robust server-side logger.
const logger = {
@@ -99,21 +99,28 @@ export async function bfsFileSearch(
for (const { currentDir, entries } of results) {
for (const entry of entries) {
const fullPath = path.join(currentDir, entry.name);
const isDirectory = entry.isDirectory();
const isMatchingFile = entry.isFile() && entry.name === fileName;
if (!isDirectory && !isMatchingFile) {
continue;
}
if (isDirectory && ignoreDirsSet.has(entry.name)) {
continue;
}
if (
fileService?.shouldIgnoreFile(fullPath, {
respectGitIgnore: options.fileFilteringOptions?.respectGitIgnore,
respectGeminiIgnore:
options.fileFilteringOptions?.respectGeminiIgnore,
respectQwenIgnore: options.fileFilteringOptions?.respectQwenIgnore,
})
) {
continue;
}
if (entry.isDirectory()) {
if (!ignoreDirsSet.has(entry.name)) {
queue.push(fullPath);
}
} else if (entry.isFile() && entry.name === fileName) {
if (isDirectory) {
queue.push(fullPath);
} else {
foundFiles.push(fullPath);
}
}

View File

@@ -21,11 +21,12 @@ import {
isEditorAvailable,
type EditorType,
} from './editor.js';
import { execSync, spawn } from 'node:child_process';
import { execSync, spawn, spawnSync } from 'node:child_process';
vi.mock('child_process', () => ({
execSync: vi.fn(),
spawn: vi.fn(),
spawnSync: vi.fn(() => ({ error: null, status: 0 })),
}));
const originalPlatform = process.platform;
@@ -314,23 +315,23 @@ describe('editor utils', () => {
});
describe('openDiff', () => {
const spawnEditors: EditorType[] = [
const guiEditors: EditorType[] = [
'vscode',
'vscodium',
'windsurf',
'cursor',
'zed',
];
for (const editor of spawnEditors) {
for (const editor of guiEditors) {
it(`should call spawn for ${editor}`, async () => {
const mockSpawn = {
on: vi.fn((event, cb) => {
if (event === 'close') {
cb(0);
}
}),
};
(spawn as Mock).mockReturnValue(mockSpawn);
const mockSpawnOn = vi.fn((event, cb) => {
if (event === 'close') {
cb(0);
}
});
(spawn as Mock).mockReturnValue({ on: mockSpawnOn });
await openDiff('old.txt', 'new.txt', editor, () => {});
const diffCommand = getDiffCommand('old.txt', 'new.txt', editor)!;
expect(spawn).toHaveBeenCalledWith(
@@ -338,77 +339,53 @@ describe('editor utils', () => {
diffCommand.args,
{
stdio: 'inherit',
shell: true,
},
);
expect(mockSpawn.on).toHaveBeenCalledWith(
'close',
expect.any(Function),
);
expect(mockSpawn.on).toHaveBeenCalledWith(
'error',
expect.any(Function),
);
expect(mockSpawnOn).toHaveBeenCalledWith('close', expect.any(Function));
expect(mockSpawnOn).toHaveBeenCalledWith('error', expect.any(Function));
});
it(`should reject if spawn for ${editor} fails`, async () => {
const mockError = new Error('spawn error');
const mockSpawn = {
on: vi.fn((event, cb) => {
if (event === 'error') {
cb(mockError);
}
}),
};
(spawn as Mock).mockReturnValue(mockSpawn);
const mockSpawnOn = vi.fn((event, cb) => {
if (event === 'error') {
cb(mockError);
}
});
(spawn as Mock).mockReturnValue({ on: mockSpawnOn });
await expect(
openDiff('old.txt', 'new.txt', editor, () => {}),
).rejects.toThrow('spawn error');
});
it(`should reject if ${editor} exits with non-zero code`, async () => {
const mockSpawn = {
on: vi.fn((event, cb) => {
if (event === 'close') {
cb(1);
}
}),
};
(spawn as Mock).mockReturnValue(mockSpawn);
const mockSpawnOn = vi.fn((event, cb) => {
if (event === 'close') {
cb(1);
}
});
(spawn as Mock).mockReturnValue({ on: mockSpawnOn });
await expect(
openDiff('old.txt', 'new.txt', editor, () => {}),
).rejects.toThrow(`${editor} exited with code 1`);
});
}
const execSyncEditors: EditorType[] = ['vim', 'neovim', 'emacs'];
for (const editor of execSyncEditors) {
it(`should call execSync for ${editor} on non-windows`, async () => {
Object.defineProperty(process, 'platform', { value: 'linux' });
await openDiff('old.txt', 'new.txt', editor, () => {});
expect(execSync).toHaveBeenCalledTimes(1);
const diffCommand = getDiffCommand('old.txt', 'new.txt', editor)!;
const expectedCommand = `${
diffCommand.command
} ${diffCommand.args.map((arg) => `"${arg}"`).join(' ')}`;
expect(execSync).toHaveBeenCalledWith(expectedCommand, {
stdio: 'inherit',
encoding: 'utf8',
});
});
const terminalEditors: EditorType[] = ['vim', 'neovim', 'emacs'];
it(`should call execSync for ${editor} on windows`, async () => {
Object.defineProperty(process, 'platform', { value: 'win32' });
for (const editor of terminalEditors) {
it(`should call spawnSync for ${editor}`, async () => {
await openDiff('old.txt', 'new.txt', editor, () => {});
expect(execSync).toHaveBeenCalledTimes(1);
const diffCommand = getDiffCommand('old.txt', 'new.txt', editor)!;
const expectedCommand = `${diffCommand.command} ${diffCommand.args.join(
' ',
)}`;
expect(execSync).toHaveBeenCalledWith(expectedCommand, {
stdio: 'inherit',
encoding: 'utf8',
});
expect(spawnSync).toHaveBeenCalledWith(
diffCommand.command,
diffCommand.args,
{
stdio: 'inherit',
},
);
});
}
@@ -424,38 +401,48 @@ describe('editor utils', () => {
});
describe('onEditorClose callback', () => {
it('should call onEditorClose for execSync editors', async () => {
(execSync as Mock).mockReturnValue(Buffer.from(`/usr/bin/`));
const onEditorClose = vi.fn();
await openDiff('old.txt', 'new.txt', 'vim', onEditorClose);
expect(execSync).toHaveBeenCalledTimes(1);
expect(onEditorClose).toHaveBeenCalledTimes(1);
});
it('should call onEditorClose for execSync editors when an error is thrown', async () => {
(execSync as Mock).mockImplementation(() => {
throw new Error('test error');
const terminalEditors: EditorType[] = ['vim', 'neovim', 'emacs'];
for (const editor of terminalEditors) {
it(`should call onEditorClose for ${editor} on close`, async () => {
const onEditorClose = vi.fn();
await openDiff('old.txt', 'new.txt', editor, onEditorClose);
expect(onEditorClose).toHaveBeenCalledTimes(1);
});
const onEditorClose = vi.fn();
openDiff('old.txt', 'new.txt', 'vim', onEditorClose);
expect(execSync).toHaveBeenCalledTimes(1);
expect(onEditorClose).toHaveBeenCalledTimes(1);
});
it('should not call onEditorClose for spawn editors', async () => {
const onEditorClose = vi.fn();
const mockSpawn = {
on: vi.fn((event, cb) => {
it(`should call onEditorClose for ${editor} on error`, async () => {
const onEditorClose = vi.fn();
const mockError = new Error('spawn error');
(spawnSync as Mock).mockImplementation(() => {
throw mockError;
});
await expect(
openDiff('old.txt', 'new.txt', editor, onEditorClose),
).rejects.toThrow('spawn error');
expect(onEditorClose).toHaveBeenCalledTimes(1);
});
}
const guiEditors: EditorType[] = [
'vscode',
'vscodium',
'windsurf',
'cursor',
'zed',
];
for (const editor of guiEditors) {
it(`should not call onEditorClose for ${editor}`, async () => {
const onEditorClose = vi.fn();
const mockSpawnOn = vi.fn((event, cb) => {
if (event === 'close') {
cb(0);
}
}),
};
(spawn as Mock).mockReturnValue(mockSpawn);
await openDiff('old.txt', 'new.txt', 'vscode', onEditorClose);
expect(spawn).toHaveBeenCalledTimes(1);
expect(onEditorClose).not.toHaveBeenCalled();
});
});
(spawn as Mock).mockReturnValue({ on: mockSpawnOn });
await openDiff('old.txt', 'new.txt', editor, onEditorClose);
expect(onEditorClose).not.toHaveBeenCalled();
});
}
});
});

View File

@@ -4,7 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { execSync, spawn } from 'node:child_process';
import { execSync, spawn, spawnSync } from 'node:child_process';
export type EditorType =
| 'vscode'
@@ -173,57 +173,44 @@ export async function openDiff(
}
try {
switch (editor) {
case 'vscode':
case 'vscodium':
case 'windsurf':
case 'cursor':
case 'zed':
// Use spawn for GUI-based editors to avoid blocking the entire process
return new Promise((resolve, reject) => {
const childProcess = spawn(diffCommand.command, diffCommand.args, {
stdio: 'inherit',
shell: true,
});
const isTerminalEditor = ['vim', 'emacs', 'neovim'].includes(editor);
childProcess.on('close', (code) => {
if (code === 0) {
resolve();
} else {
reject(new Error(`${editor} exited with code ${code}`));
}
});
childProcess.on('error', (error) => {
reject(error);
});
if (isTerminalEditor) {
try {
const result = spawnSync(diffCommand.command, diffCommand.args, {
stdio: 'inherit',
});
case 'vim':
case 'emacs':
case 'neovim': {
// Use execSync for terminal-based editors
const command =
process.platform === 'win32'
? `${diffCommand.command} ${diffCommand.args.join(' ')}`
: `${diffCommand.command} ${diffCommand.args.map((arg) => `"${arg}"`).join(' ')}`;
try {
execSync(command, {
stdio: 'inherit',
encoding: 'utf8',
});
} catch (e) {
console.error('Error in onEditorClose callback:', e);
} finally {
onEditorClose();
if (result.error) {
throw result.error;
}
break;
if (result.status !== 0) {
throw new Error(`${editor} exited with code ${result.status}`);
}
} finally {
onEditorClose();
}
default:
throw new Error(`Unsupported editor: ${editor}`);
return;
}
return new Promise<void>((resolve, reject) => {
const childProcess = spawn(diffCommand.command, diffCommand.args, {
stdio: 'inherit',
});
childProcess.on('close', (code) => {
if (code === 0) {
resolve();
} else {
reject(new Error(`${editor} exited with code ${code}`));
}
});
childProcess.on('error', (error) => {
reject(error);
});
});
} catch (error) {
console.error(error);
throw error;
}
}

View File

@@ -149,9 +149,7 @@ describe('parseAndFormatApiError', () => {
expect(result).toContain(
'You have reached your daily gemini-2.5-pro quota limit',
);
expect(result).toContain(
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
);
expect(result).toContain('upgrade to get higher limits');
});
it('should format a regular 429 API error with standard message for Google auth', () => {
@@ -230,9 +228,7 @@ describe('parseAndFormatApiError', () => {
expect(result).toContain(
'We appreciate you for choosing Gemini Code Assist and the Gemini CLI',
);
expect(result).not.toContain(
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
);
expect(result).not.toContain('upgrade to get higher limits');
});
it('should format a 429 API error with Pro quota exceeded message for Google auth (Legacy tier)', () => {
@@ -254,9 +250,7 @@ describe('parseAndFormatApiError', () => {
expect(result).toContain(
'We appreciate you for choosing Gemini Code Assist and the Gemini CLI',
);
expect(result).not.toContain(
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
);
expect(result).not.toContain('upgrade to get higher limits');
});
it('should handle different Gemini 2.5 version strings in Pro quota exceeded errors', () => {
@@ -286,12 +280,8 @@ describe('parseAndFormatApiError', () => {
expect(resultPreview).toContain(
'You have reached your daily gemini-2.5-preview-pro quota limit',
);
expect(result25).toContain(
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
);
expect(resultPreview).toContain(
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
);
expect(result25).toContain('upgrade to get higher limits');
expect(resultPreview).toContain('upgrade to get higher limits');
});
it('should not match non-Pro models with similar version strings', () => {
@@ -349,9 +339,7 @@ describe('parseAndFormatApiError', () => {
expect(result).toContain(
'We appreciate you for choosing Gemini Code Assist and the Gemini CLI',
);
expect(result).not.toContain(
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
);
expect(result).not.toContain('upgrade to get higher limits');
});
it('should format a regular 429 API error with standard message for Google auth (Standard tier)', () => {
@@ -368,8 +356,6 @@ describe('parseAndFormatApiError', () => {
expect(result).toContain(
'We appreciate you for choosing Gemini Code Assist and the Gemini CLI',
);
expect(result).not.toContain(
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
);
expect(result).not.toContain('upgrade to get higher limits');
});
});

View File

@@ -27,10 +27,10 @@ const getRateLimitErrorMessageGoogleProQuotaFree = (
currentModel: string = DEFAULT_GEMINI_MODEL,
fallbackModel: string = DEFAULT_GEMINI_FLASH_MODEL,
) =>
`\nYou have reached your daily ${currentModel} quota limit. You will be switched to the ${fallbackModel} model for the rest of this session. To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist, or use /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
`\nYou have reached your daily ${currentModel} quota limit. You will be switched to the ${fallbackModel} model for the rest of this session. To increase your limits, upgrade to get higher limits at https://goo.gle/set-up-gemini-code-assist, or use /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
const getRateLimitErrorMessageGoogleGenericQuotaFree = () =>
`\nYou have reached your daily quota limit. To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist, or use /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
`\nYou have reached your daily quota limit. To increase your limits, upgrade to get higher limits at https://goo.gle/set-up-gemini-code-assist, or use /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
// Legacy/Standard Tier message functions
const getRateLimitErrorMessageGooglePaid = (

View File

@@ -20,7 +20,7 @@ describe('reportError', () => {
beforeEach(async () => {
// Create a temporary directory for logs
testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'gemini-report-test-'));
testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'qwen-report-test-'));
vi.resetAllMocks();
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
vi.spyOn(Date.prototype, 'toISOString').mockReturnValue(MOCK_TIMESTAMP);
@@ -33,7 +33,7 @@ describe('reportError', () => {
});
const getExpectedReportPath = (type: string) =>
path.join(testDir, `gemini-client-error-${type}-${MOCK_TIMESTAMP}.json`);
path.join(testDir, `qwen-client-error-${type}-${MOCK_TIMESTAMP}.json`);
it('should generate a report and log the path', async () => {
const error = new Error('Test error');

View File

@@ -30,7 +30,7 @@ export async function reportError(
reportingDir = os.tmpdir(), // for testing
): Promise<void> {
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
const reportFileName = `gemini-client-error-${type}-${timestamp}.json`;
const reportFileName = `qwen-client-error-${type}-${timestamp}.json`;
const reportPath = path.join(reportingDir, reportFileName);
let errorToReport: { message: string; stack?: string };

View File

@@ -59,6 +59,16 @@ export class FatalTurnLimitedError extends FatalError {
super(message, 53);
}
}
export class FatalToolExecutionError extends FatalError {
constructor(message: string) {
super(message, 54);
}
}
export class FatalCancellationError extends FatalError {
constructor(message: string) {
super(message, 130); // Standard exit code for SIGINT
}
}
export class ForbiddenError extends Error {}
export class UnauthorizedError extends Error {}

View File

@@ -15,25 +15,29 @@ import {
} from 'vitest';
import * as actualNodeFs from 'node:fs'; // For setup/teardown
import fs from 'node:fs';
import fsPromises from 'node:fs/promises';
import path from 'node:path';
import os from 'node:os';
import mime from 'mime-types';
import mime from 'mime/lite';
import {
isWithinRoot,
isBinaryFile,
detectFileType,
processSingleFileContent,
detectBOM,
readFileWithEncoding,
fileExists,
} from './fileUtils.js';
import { StandardFileSystemService } from '../services/fileSystemService.js';
vi.mock('mime-types', () => ({
default: { lookup: vi.fn() },
lookup: vi.fn(),
vi.mock('mime/lite', () => ({
default: { getType: vi.fn() },
getType: vi.fn(),
}));
const mockMimeLookup = mime.lookup as Mock;
const mockMimeGetType = mime.getType as Mock;
describe('fileUtils', () => {
let tempRootDir: string;
@@ -47,7 +51,7 @@ describe('fileUtils', () => {
let directoryPath: string;
beforeEach(() => {
vi.resetAllMocks(); // Reset all mocks, including mime.lookup
vi.resetAllMocks(); // Reset all mocks, including mime.getType
tempRootDir = actualNodeFs.mkdtempSync(
path.join(os.tmpdir(), 'fileUtils-test-'),
@@ -130,6 +134,25 @@ describe('fileUtils', () => {
});
});
describe('fileExists', () => {
it('should return true if the file exists', async () => {
const testFile = path.join(tempRootDir, 'exists.txt');
actualNodeFs.writeFileSync(testFile, 'content');
await expect(fileExists(testFile)).resolves.toBe(true);
});
it('should return false if the file does not exist', async () => {
const testFile = path.join(tempRootDir, 'does-not-exist.txt');
await expect(fileExists(testFile)).resolves.toBe(false);
});
it('should return true for a directory that exists', async () => {
const testDir = path.join(tempRootDir, 'exists-dir');
actualNodeFs.mkdirSync(testDir);
await expect(fileExists(testDir)).resolves.toBe(true);
});
});
describe('isBinaryFile', () => {
let filePathForBinaryTest: string;
@@ -181,6 +204,367 @@ describe('fileUtils', () => {
});
});
describe('BOM detection and encoding', () => {
let testDir: string;
beforeEach(async () => {
testDir = await fsPromises.mkdtemp(
path.join(
await fsPromises.realpath(os.tmpdir()),
'fileUtils-bom-test-',
),
);
});
afterEach(async () => {
if (testDir) {
await fsPromises.rm(testDir, { recursive: true, force: true });
}
});
describe('detectBOM', () => {
it('should detect UTF-8 BOM', () => {
const buf = Buffer.from([
0xef, 0xbb, 0xbf, 0x48, 0x65, 0x6c, 0x6c, 0x6f,
]);
const result = detectBOM(buf);
expect(result).toEqual({ encoding: 'utf8', bomLength: 3 });
});
it('should detect UTF-16 LE BOM', () => {
const buf = Buffer.from([0xff, 0xfe, 0x48, 0x00, 0x65, 0x00]);
const result = detectBOM(buf);
expect(result).toEqual({ encoding: 'utf16le', bomLength: 2 });
});
it('should detect UTF-16 BE BOM', () => {
const buf = Buffer.from([0xfe, 0xff, 0x00, 0x48, 0x00, 0x65]);
const result = detectBOM(buf);
expect(result).toEqual({ encoding: 'utf16be', bomLength: 2 });
});
it('should detect UTF-32 LE BOM', () => {
const buf = Buffer.from([
0xff, 0xfe, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00,
]);
const result = detectBOM(buf);
expect(result).toEqual({ encoding: 'utf32le', bomLength: 4 });
});
it('should detect UTF-32 BE BOM', () => {
const buf = Buffer.from([
0x00, 0x00, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x48,
]);
const result = detectBOM(buf);
expect(result).toEqual({ encoding: 'utf32be', bomLength: 4 });
});
it('should return null for no BOM', () => {
const buf = Buffer.from([0x48, 0x65, 0x6c, 0x6c, 0x6f]);
const result = detectBOM(buf);
expect(result).toBeNull();
});
it('should return null for empty buffer', () => {
const buf = Buffer.alloc(0);
const result = detectBOM(buf);
expect(result).toBeNull();
});
it('should return null for partial BOM', () => {
const buf = Buffer.from([0xef, 0xbb]); // Incomplete UTF-8 BOM
const result = detectBOM(buf);
expect(result).toBeNull();
});
});
describe('readFileWithEncoding', () => {
it('should read UTF-8 BOM file correctly', async () => {
const content = 'Hello, 世界! 🌍';
const utf8Bom = Buffer.from([0xef, 0xbb, 0xbf]);
const utf8Content = Buffer.from(content, 'utf8');
const fullBuffer = Buffer.concat([utf8Bom, utf8Content]);
const filePath = path.join(testDir, 'utf8-bom.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await readFileWithEncoding(filePath);
expect(result).toBe(content);
});
it('should read UTF-16 LE BOM file correctly', async () => {
const content = 'Hello, 世界! 🌍';
const utf16leBom = Buffer.from([0xff, 0xfe]);
const utf16leContent = Buffer.from(content, 'utf16le');
const fullBuffer = Buffer.concat([utf16leBom, utf16leContent]);
const filePath = path.join(testDir, 'utf16le-bom.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await readFileWithEncoding(filePath);
expect(result).toBe(content);
});
it('should read UTF-16 BE BOM file correctly', async () => {
const content = 'Hello, 世界! 🌍';
// Manually encode UTF-16 BE: each char as big-endian 16-bit
const utf16beBom = Buffer.from([0xfe, 0xff]);
const chars = Array.from(content);
const utf16beBytes: number[] = [];
for (const char of chars) {
const code = char.codePointAt(0)!;
if (code > 0xffff) {
// Surrogate pair for emoji
const surrogate1 = 0xd800 + ((code - 0x10000) >> 10);
const surrogate2 = 0xdc00 + ((code - 0x10000) & 0x3ff);
utf16beBytes.push((surrogate1 >> 8) & 0xff, surrogate1 & 0xff);
utf16beBytes.push((surrogate2 >> 8) & 0xff, surrogate2 & 0xff);
} else {
utf16beBytes.push((code >> 8) & 0xff, code & 0xff);
}
}
const utf16beContent = Buffer.from(utf16beBytes);
const fullBuffer = Buffer.concat([utf16beBom, utf16beContent]);
const filePath = path.join(testDir, 'utf16be-bom.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await readFileWithEncoding(filePath);
expect(result).toBe(content);
});
it('should read UTF-32 LE BOM file correctly', async () => {
const content = 'Hello, 世界! 🌍';
const utf32leBom = Buffer.from([0xff, 0xfe, 0x00, 0x00]);
const utf32leBytes: number[] = [];
for (const char of Array.from(content)) {
const code = char.codePointAt(0)!;
utf32leBytes.push(
code & 0xff,
(code >> 8) & 0xff,
(code >> 16) & 0xff,
(code >> 24) & 0xff,
);
}
const utf32leContent = Buffer.from(utf32leBytes);
const fullBuffer = Buffer.concat([utf32leBom, utf32leContent]);
const filePath = path.join(testDir, 'utf32le-bom.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await readFileWithEncoding(filePath);
expect(result).toBe(content);
});
it('should read UTF-32 BE BOM file correctly', async () => {
const content = 'Hello, 世界! 🌍';
const utf32beBom = Buffer.from([0x00, 0x00, 0xfe, 0xff]);
const utf32beBytes: number[] = [];
for (const char of Array.from(content)) {
const code = char.codePointAt(0)!;
utf32beBytes.push(
(code >> 24) & 0xff,
(code >> 16) & 0xff,
(code >> 8) & 0xff,
code & 0xff,
);
}
const utf32beContent = Buffer.from(utf32beBytes);
const fullBuffer = Buffer.concat([utf32beBom, utf32beContent]);
const filePath = path.join(testDir, 'utf32be-bom.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await readFileWithEncoding(filePath);
expect(result).toBe(content);
});
it('should read file without BOM as UTF-8', async () => {
const content = 'Hello, 世界!';
const filePath = path.join(testDir, 'no-bom.txt');
await fsPromises.writeFile(filePath, content, 'utf8');
const result = await readFileWithEncoding(filePath);
expect(result).toBe(content);
});
it('should handle empty file', async () => {
const filePath = path.join(testDir, 'empty.txt');
await fsPromises.writeFile(filePath, '');
const result = await readFileWithEncoding(filePath);
expect(result).toBe('');
});
});
describe('isBinaryFile with BOM awareness', () => {
it('should not treat UTF-8 BOM file as binary', async () => {
const content = 'Hello, world!';
const utf8Bom = Buffer.from([0xef, 0xbb, 0xbf]);
const utf8Content = Buffer.from(content, 'utf8');
const fullBuffer = Buffer.concat([utf8Bom, utf8Content]);
const filePath = path.join(testDir, 'utf8-bom-test.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await isBinaryFile(filePath);
expect(result).toBe(false);
});
it('should not treat UTF-16 LE BOM file as binary', async () => {
const content = 'Hello, world!';
const utf16leBom = Buffer.from([0xff, 0xfe]);
const utf16leContent = Buffer.from(content, 'utf16le');
const fullBuffer = Buffer.concat([utf16leBom, utf16leContent]);
const filePath = path.join(testDir, 'utf16le-bom-test.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await isBinaryFile(filePath);
expect(result).toBe(false);
});
it('should not treat UTF-16 BE BOM file as binary', async () => {
const utf16beBom = Buffer.from([0xfe, 0xff]);
// Simple ASCII in UTF-16 BE
const utf16beContent = Buffer.from([
0x00,
0x48, // H
0x00,
0x65, // e
0x00,
0x6c, // l
0x00,
0x6c, // l
0x00,
0x6f, // o
0x00,
0x2c, // ,
0x00,
0x20, // space
0x00,
0x77, // w
0x00,
0x6f, // o
0x00,
0x72, // r
0x00,
0x6c, // l
0x00,
0x64, // d
0x00,
0x21, // !
]);
const fullBuffer = Buffer.concat([utf16beBom, utf16beContent]);
const filePath = path.join(testDir, 'utf16be-bom-test.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await isBinaryFile(filePath);
expect(result).toBe(false);
});
it('should not treat UTF-32 LE BOM file as binary', async () => {
const utf32leBom = Buffer.from([0xff, 0xfe, 0x00, 0x00]);
const utf32leContent = Buffer.from([
0x48,
0x00,
0x00,
0x00, // H
0x65,
0x00,
0x00,
0x00, // e
0x6c,
0x00,
0x00,
0x00, // l
0x6c,
0x00,
0x00,
0x00, // l
0x6f,
0x00,
0x00,
0x00, // o
]);
const fullBuffer = Buffer.concat([utf32leBom, utf32leContent]);
const filePath = path.join(testDir, 'utf32le-bom-test.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await isBinaryFile(filePath);
expect(result).toBe(false);
});
it('should not treat UTF-32 BE BOM file as binary', async () => {
const utf32beBom = Buffer.from([0x00, 0x00, 0xfe, 0xff]);
const utf32beContent = Buffer.from([
0x00,
0x00,
0x00,
0x48, // H
0x00,
0x00,
0x00,
0x65, // e
0x00,
0x00,
0x00,
0x6c, // l
0x00,
0x00,
0x00,
0x6c, // l
0x00,
0x00,
0x00,
0x6f, // o
]);
const fullBuffer = Buffer.concat([utf32beBom, utf32beContent]);
const filePath = path.join(testDir, 'utf32be-bom-test.txt');
await fsPromises.writeFile(filePath, fullBuffer);
const result = await isBinaryFile(filePath);
expect(result).toBe(false);
});
it('should still treat actual binary file as binary', async () => {
// PNG header + some binary data with null bytes
const pngHeader = Buffer.from([
0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
]);
const binaryData = Buffer.from([
0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
]); // IHDR chunk with nulls
const fullContent = Buffer.concat([pngHeader, binaryData]);
const filePath = path.join(testDir, 'test.png');
await fsPromises.writeFile(filePath, fullContent);
const result = await isBinaryFile(filePath);
expect(result).toBe(true);
});
it('should treat file with null bytes (no BOM) as binary', async () => {
const content = Buffer.from([
0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x00, 0x77, 0x6f, 0x72, 0x6c, 0x64,
]);
const filePath = path.join(testDir, 'null-bytes.bin');
await fsPromises.writeFile(filePath, content);
const result = await isBinaryFile(filePath);
expect(result).toBe(true);
});
});
});
describe('detectFileType', () => {
let filePathForDetectTest: string;
@@ -207,12 +591,12 @@ describe('fileUtils', () => {
});
it('should detect image type by extension (png)', async () => {
mockMimeLookup.mockReturnValueOnce('image/png');
mockMimeGetType.mockReturnValueOnce('image/png');
expect(await detectFileType('file.png')).toBe('image');
});
it('should detect image type by extension (jpeg)', async () => {
mockMimeLookup.mockReturnValueOnce('image/jpeg');
mockMimeGetType.mockReturnValueOnce('image/jpeg');
expect(await detectFileType('file.jpg')).toBe('image');
});
@@ -222,31 +606,31 @@ describe('fileUtils', () => {
});
it('should detect pdf type by extension', async () => {
mockMimeLookup.mockReturnValueOnce('application/pdf');
mockMimeGetType.mockReturnValueOnce('application/pdf');
expect(await detectFileType('file.pdf')).toBe('pdf');
});
it('should detect audio type by extension', async () => {
mockMimeLookup.mockReturnValueOnce('audio/mpeg');
mockMimeGetType.mockReturnValueOnce('audio/mpeg');
expect(await detectFileType('song.mp3')).toBe('audio');
});
it('should detect video type by extension', async () => {
mockMimeLookup.mockReturnValueOnce('video/mp4');
mockMimeGetType.mockReturnValueOnce('video/mp4');
expect(await detectFileType('movie.mp4')).toBe('video');
});
it('should detect known binary extensions as binary (e.g. .zip)', async () => {
mockMimeLookup.mockReturnValueOnce('application/zip');
mockMimeGetType.mockReturnValueOnce('application/zip');
expect(await detectFileType('archive.zip')).toBe('binary');
});
it('should detect known binary extensions as binary (e.g. .exe)', async () => {
mockMimeLookup.mockReturnValueOnce('application/octet-stream'); // Common for .exe
mockMimeGetType.mockReturnValueOnce('application/octet-stream'); // Common for .exe
expect(await detectFileType('app.exe')).toBe('binary');
});
it('should use isBinaryFile for unknown extensions and detect as binary', async () => {
mockMimeLookup.mockReturnValueOnce(false); // Unknown mime type
mockMimeGetType.mockReturnValueOnce(false); // Unknown mime type
// Create a file that isBinaryFile will identify as binary
const binaryContent = Buffer.from([
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a,
@@ -256,7 +640,7 @@ describe('fileUtils', () => {
});
it('should default to text if mime type is unknown and content is not binary', async () => {
mockMimeLookup.mockReturnValueOnce(false); // Unknown mime type
mockMimeGetType.mockReturnValueOnce(false); // Unknown mime type
// filePathForDetectTest is already a text file by default from beforeEach
expect(await detectFileType(filePathForDetectTest)).toBe('text');
});
@@ -314,7 +698,7 @@ describe('fileUtils', () => {
it('should handle read errors for image/pdf files', async () => {
actualNodeFs.writeFileSync(testImageFilePath, 'content'); // File must exist
mockMimeLookup.mockReturnValue('image/png');
mockMimeGetType.mockReturnValue('image/png');
const readError = new Error('Simulated image read error');
vi.spyOn(fsPromises, 'readFile').mockRejectedValueOnce(readError);
@@ -330,7 +714,7 @@ describe('fileUtils', () => {
it('should process an image file', async () => {
const fakePngData = Buffer.from('fake png data');
actualNodeFs.writeFileSync(testImageFilePath, fakePngData);
mockMimeLookup.mockReturnValue('image/png');
mockMimeGetType.mockReturnValue('image/png');
const result = await processSingleFileContent(
testImageFilePath,
tempRootDir,
@@ -352,7 +736,7 @@ describe('fileUtils', () => {
it('should process a PDF file', async () => {
const fakePdfData = Buffer.from('fake pdf data');
actualNodeFs.writeFileSync(testPdfFilePath, fakePdfData);
mockMimeLookup.mockReturnValue('application/pdf');
mockMimeGetType.mockReturnValue('application/pdf');
const result = await processSingleFileContent(
testPdfFilePath,
tempRootDir,
@@ -380,7 +764,7 @@ describe('fileUtils', () => {
const testSvgFilePath = path.join(tempRootDir, 'test.svg');
actualNodeFs.writeFileSync(testSvgFilePath, svgContent, 'utf-8');
mockMimeLookup.mockReturnValue('image/svg+xml');
mockMimeGetType.mockReturnValue('image/svg+xml');
const result = await processSingleFileContent(
testSvgFilePath,
@@ -397,7 +781,7 @@ describe('fileUtils', () => {
testBinaryFilePath,
Buffer.from([0x00, 0x01, 0x02]),
);
mockMimeLookup.mockReturnValueOnce('application/octet-stream');
mockMimeGetType.mockReturnValueOnce('application/octet-stream');
// isBinaryFile will operate on the real file.
const result = await processSingleFileContent(
@@ -570,22 +954,30 @@ describe('fileUtils', () => {
});
it('should return an error if the file size exceeds 20MB', async () => {
// Create a file just over 20MB
const twentyOneMB = 21 * 1024 * 1024;
const buffer = Buffer.alloc(twentyOneMB, 0x61); // Fill with 'a'
actualNodeFs.writeFileSync(testTextFilePath, buffer);
// Create a small test file
actualNodeFs.writeFileSync(testTextFilePath, 'test content');
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
);
// Spy on fs.promises.stat to return a large file size
const statSpy = vi.spyOn(fs.promises, 'stat').mockResolvedValueOnce({
size: 21 * 1024 * 1024,
isDirectory: () => false,
} as fs.Stats);
expect(result.error).toContain('File size exceeds the 20MB limit');
expect(result.returnDisplay).toContain(
'File size exceeds the 20MB limit',
);
expect(result.llmContent).toContain('File size exceeds the 20MB limit');
try {
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
new StandardFileSystemService(),
);
expect(result.error).toContain('File size exceeds the 20MB limit');
expect(result.returnDisplay).toContain(
'File size exceeds the 20MB limit',
);
expect(result.llmContent).toContain('File size exceeds the 20MB limit');
} finally {
statSpy.mockRestore();
}
});
});
});

View File

@@ -5,9 +5,10 @@
*/
import fs from 'node:fs';
import fsPromises from 'node:fs/promises';
import path from 'node:path';
import type { PartUnion } from '@google/genai';
import mime from 'mime-types';
import mime from 'mime/lite';
import type { FileSystemService } from '../services/fileSystemService.js';
import { ToolErrorType } from '../tools/tool-error.js';
import { BINARY_EXTENSIONS } from './ignorePatterns.js';
@@ -19,13 +20,145 @@ const MAX_LINE_LENGTH_TEXT_FILE = 2000;
// Default values for encoding and separator format
export const DEFAULT_ENCODING: BufferEncoding = 'utf-8';
// --- Unicode BOM detection & decoding helpers --------------------------------

type UnicodeEncoding = 'utf8' | 'utf16le' | 'utf16be' | 'utf32le' | 'utf32be';

interface BOMInfo {
  encoding: UnicodeEncoding;
  bomLength: number;
}

/**
 * Detect a Unicode BOM (Byte Order Mark) at the start of a buffer.
 *
 * Signatures are checked longest-first (UTF-32 is 4 bytes, UTF-8 is 3,
 * UTF-16 is 2) so the UTF-32 LE prefix `FF FE 00 00` is never misread as
 * the UTF-16 LE signature `FF FE`.
 *
 * @param buf Buffer whose leading bytes are inspected (at most 4 are read).
 * @returns Encoding plus BOM byte count, or null when no BOM is present.
 */
export function detectBOM(buf: Buffer): BOMInfo | null {
  const b0 = buf[0];
  const b1 = buf[1];
  const b2 = buf[2];
  const b3 = buf[3];

  if (buf.length >= 4) {
    // UTF-32 LE: FF FE 00 00
    if (b0 === 0xff && b1 === 0xfe && b2 === 0x00 && b3 === 0x00) {
      return { encoding: 'utf32le', bomLength: 4 };
    }
    // UTF-32 BE: 00 00 FE FF
    if (b0 === 0x00 && b1 === 0x00 && b2 === 0xfe && b3 === 0xff) {
      return { encoding: 'utf32be', bomLength: 4 };
    }
  }

  // UTF-8: EF BB BF
  if (buf.length >= 3 && b0 === 0xef && b1 === 0xbb && b2 === 0xbf) {
    return { encoding: 'utf8', bomLength: 3 };
  }

  if (buf.length >= 2) {
    // UTF-16 LE: FF FE — guard against the UTF-32 LE prefix handled above.
    if (
      b0 === 0xff &&
      b1 === 0xfe &&
      (buf.length < 4 || b2 !== 0x00 || b3 !== 0x00)
    ) {
      return { encoding: 'utf16le', bomLength: 2 };
    }
    // UTF-16 BE: FE FF
    if (b0 === 0xfe && b1 === 0xff) {
      return { encoding: 'utf16be', bomLength: 2 };
    }
  }

  return null;
}
/**
 * Decode a UTF-16 BE buffer into a JS string.
 *
 * Node ships a 'utf16le' decoder but no big-endian one, so we byte-swap a
 * copy of the input (swap16 mutates in place) and decode the copy as LE.
 */
function decodeUTF16BE(buf: Buffer): string {
  if (buf.length === 0) {
    return '';
  }
  const littleEndian = Buffer.from(buf); // copy: never mutate the caller's buffer
  littleEndian.swap16();
  return littleEndian.toString('utf16le');
}
/**
 * Decode a UTF-32 buffer (LE or BE) into a JS string.
 *
 * Code points outside the Unicode range or inside the surrogate area become
 * U+FFFD; a partial trailing code unit (length not divisible by 4) is dropped.
 */
function decodeUTF32(buf: Buffer, littleEndian: boolean): string {
  if (buf.length < 4) {
    return '';
  }
  const limit = buf.length - (buf.length % 4);
  const chars: string[] = [];
  for (let offset = 0; offset < limit; offset += 4) {
    const codePoint = littleEndian
      ? buf.readUInt32LE(offset)
      : buf.readUInt32BE(offset);
    const isSurrogate = codePoint >= 0xd800 && codePoint <= 0xdfff;
    // Valid planes: 0x0000..0x10FFFF excluding surrogates.
    chars.push(
      codePoint <= 0x10ffff && !isSurrogate
        ? String.fromCodePoint(codePoint)
        : '\uFFFD',
    );
  }
  return chars.join('');
}
/**
 * Read a file as text, honoring BOM encodings (UTF-8/16/32) and stripping the BOM.
 * Files with no BOM are decoded as UTF-8.
 */
export async function readFileWithEncoding(filePath: string): Promise<string> {
  // One read: BOM detection and decoding both work off this single buffer.
  const raw = await fs.promises.readFile(filePath);
  if (raw.length === 0) {
    return '';
  }

  const bom = detectBOM(raw);
  if (bom === null) {
    // No BOM → treat as UTF-8.
    return raw.toString('utf8');
  }

  // Drop the BOM bytes, then decode per detected encoding.
  const body = raw.subarray(bom.bomLength);
  switch (bom.encoding) {
    case 'utf16le':
      return body.toString('utf16le');
    case 'utf16be':
      return decodeUTF16BE(body);
    case 'utf32le':
      return decodeUTF32(body, true);
    case 'utf32be':
      return decodeUTF32(body, false);
    case 'utf8':
    default:
      // 'utf8', plus a defensive fallback for unreachable values.
      return body.toString('utf8');
  }
}
/**
 * Looks up the specific MIME type for a file path.
 * @param filePath Path to the file.
 * @returns The specific MIME type string (e.g., 'application/javascript') or undefined if not found or ambiguous.
 */
export function getSpecificMimeType(filePath: string): string | undefined {
  // mime.getType (mime v3+) returns null when the extension is unknown.
  // Fix: the block previously declared `lookedUpMime` twice (a leftover
  // `mime.lookup` call from the old 'mime-types' API next to the new
  // `mime.getType` call), which is a redeclaration error.
  const lookedUpMime = mime.getType(filePath);
  return typeof lookedUpMime === 'string' ? lookedUpMime : undefined;
}
@@ -57,59 +190,52 @@ export function isWithinRoot(
}
/**
* Determines if a file is likely binary based on content sampling.
* @param filePath Path to the file.
* @returns Promise that resolves to true if the file appears to be binary.
* Heuristic: determine if a file is likely binary.
* Now BOM-aware: if a Unicode BOM is detected, we treat it as text.
* For non-BOM files, retain the existing null-byte and non-printable ratio checks.
*/
export async function isBinaryFile(filePath: string): Promise<boolean> {
let fileHandle: fs.promises.FileHandle | undefined;
let fh: fs.promises.FileHandle | null = null;
try {
fileHandle = await fs.promises.open(filePath, 'r');
// Read up to 4KB or file size, whichever is smaller
const stats = await fileHandle.stat();
fh = await fs.promises.open(filePath, 'r');
const stats = await fh.stat();
const fileSize = stats.size;
if (fileSize === 0) {
// Empty file is not considered binary for content checking
return false;
}
const bufferSize = Math.min(4096, fileSize);
const buffer = Buffer.alloc(bufferSize);
const result = await fileHandle.read(buffer, 0, buffer.length, 0);
const bytesRead = result.bytesRead;
if (fileSize === 0) return false; // empty is not binary
// Sample up to 4KB from the head (previous behavior)
const sampleSize = Math.min(4096, fileSize);
const buf = Buffer.alloc(sampleSize);
const { bytesRead } = await fh.read(buf, 0, sampleSize, 0);
if (bytesRead === 0) return false;
// BOM → text (avoid false positives for UTF16/32 with nulls)
const bom = detectBOM(buf.subarray(0, Math.min(4, bytesRead)));
if (bom) return false;
let nonPrintableCount = 0;
for (let i = 0; i < bytesRead; i++) {
if (buffer[i] === 0) return true; // Null byte is a strong indicator
if (buffer[i] < 9 || (buffer[i] > 13 && buffer[i] < 32)) {
if (buf[i] === 0) return true; // strong indicator of binary when no BOM
if (buf[i] < 9 || (buf[i] > 13 && buf[i] < 32)) {
nonPrintableCount++;
}
}
// If >30% non-printable characters, consider it binary
return nonPrintableCount / bytesRead > 0.3;
} catch (error) {
// Log error for debugging while maintaining existing behavior
console.warn(
`Failed to check if file is binary: ${filePath}`,
error instanceof Error ? error.message : String(error),
);
// If any error occurs (e.g. file not found, permissions),
// treat as not binary here; let higher-level functions handle existence/access errors.
return false;
} finally {
// Safely close the file handle if it was successfully opened
if (fileHandle) {
if (fh) {
try {
await fileHandle.close();
await fh.close();
} catch (closeError) {
// Log close errors for debugging while continuing with cleanup
console.warn(
`Failed to close file handle for: ${filePath}`,
closeError instanceof Error ? closeError.message : String(closeError),
);
// The important thing is that we attempted to clean up
}
}
}
@@ -136,7 +262,7 @@ export async function detectFileType(
return 'svg';
}
const lookedUpMimeType = mime.lookup(filePath); // Returns false if not found, or the mime type string
const lookedUpMimeType = mime.getType(filePath); // Returns null if not found, or the mime type string
if (lookedUpMimeType) {
if (lookedUpMimeType.startsWith('image/')) {
return 'image';
@@ -244,14 +370,15 @@ export async function processSingleFileContent(
returnDisplay: `Skipped large SVG file (>1MB): ${relativePathForDisplay}`,
};
}
const content = await fileSystemService.readTextFile(filePath);
const content = await readFileWithEncoding(filePath);
return {
llmContent: content,
returnDisplay: `Read SVG as text: ${relativePathForDisplay}`,
};
}
case 'text': {
const content = await fileSystemService.readTextFile(filePath);
// Use BOM-aware reader to avoid leaving a BOM character in content and to support UTF-16/32 transparently
const content = await readFileWithEncoding(filePath);
const lines = content.split('\n');
const originalLineCount = lines.length;
@@ -311,7 +438,7 @@ export async function processSingleFileContent(
llmContent: {
inlineData: {
data: base64Data,
mimeType: mime.lookup(filePath) || 'application/octet-stream',
mimeType: mime.getType(filePath) || 'application/octet-stream',
},
},
returnDisplay: `Read ${fileType} file: ${relativePathForDisplay}`,
@@ -340,3 +467,12 @@ export async function processSingleFileContent(
};
}
}
/**
 * Check whether a path is visible to this process (F_OK access) without
 * throwing: any access failure is reported as "does not exist".
 */
export async function fileExists(filePath: string): Promise<boolean> {
  return fsPromises.access(filePath, fs.constants.F_OK).then(
    () => true,
    () => false,
  );
}

View File

@@ -32,7 +32,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
});
@@ -66,7 +66,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
});
@@ -98,7 +98,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: ['logs'],
});
@@ -130,7 +130,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
@@ -165,7 +165,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
@@ -203,7 +203,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
@@ -237,7 +237,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
@@ -269,7 +269,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
});
@@ -294,7 +294,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
@@ -320,7 +320,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
@@ -352,7 +352,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
const options = {
@@ -390,7 +390,7 @@ describe('crawler', () => {
loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
const getOptions = (ignore: Ignore) => ({
@@ -424,7 +424,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
const options = {
@@ -455,7 +455,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
const getOptions = (maxDepth?: number) => ({
@@ -507,7 +507,7 @@ describe('crawler', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
return crawl({

View File

@@ -27,7 +27,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -53,7 +53,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -81,7 +81,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: ['logs'],
cache: false,
cacheTtl: 0,
@@ -110,7 +110,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -143,7 +143,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -167,7 +167,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -202,7 +202,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -232,7 +232,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -262,7 +262,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -289,7 +289,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -315,7 +315,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -341,7 +341,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -367,7 +367,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -391,7 +391,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -422,7 +422,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -444,7 +444,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -467,7 +467,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false, // Explicitly disable .gitignore to isolate this rule
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -491,7 +491,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -528,7 +528,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: true, // Enable caching for this test
cacheTtl: 0,
@@ -568,7 +568,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -612,7 +612,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: true, // Ensure caching is enabled
cacheTtl: 10000,
@@ -650,7 +650,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -680,7 +680,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -705,7 +705,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -730,7 +730,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,
@@ -753,7 +753,7 @@ describe('FileSearch', () => {
const fileSearch = FileSearchFactory.create({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
cache: false,
cacheTtl: 0,

View File

@@ -18,7 +18,7 @@ export interface FileSearchOptions {
projectRoot: string;
ignoreDirs: string[];
useGitignore: boolean;
useGeminiignore: boolean;
useQwenignore: boolean;
cache: boolean;
cacheTtl: number;
enableRecursiveFileSearch: boolean;

View File

@@ -81,7 +81,7 @@ describe('loadIgnoreRules', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
const fileFilter = ignore.getFileFilter();
@@ -96,7 +96,7 @@ describe('loadIgnoreRules', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
});
const fileFilter = ignore.getFileFilter();
@@ -112,7 +112,7 @@ describe('loadIgnoreRules', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
});
const fileFilter = ignore.getFileFilter();
@@ -126,7 +126,7 @@ describe('loadIgnoreRules', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: ['logs/'],
});
const dirFilter = ignore.getDirectoryFilter();
@@ -139,7 +139,7 @@ describe('loadIgnoreRules', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: true,
useGeminiignore: true,
useQwenignore: true,
ignoreDirs: [],
});
const fileFilter = ignore.getFileFilter();
@@ -151,7 +151,7 @@ describe('loadIgnoreRules', () => {
const ignore = loadIgnoreRules({
projectRoot: tmpDir,
useGitignore: false,
useGeminiignore: false,
useQwenignore: false,
ignoreDirs: [],
});
const dirFilter = ignore.getDirectoryFilter();

View File

@@ -14,7 +14,7 @@ const hasFileExtension = picomatch('**/*[*.]*');
export interface LoadIgnoreRulesOptions {
projectRoot: string;
useGitignore: boolean;
useGeminiignore: boolean;
useQwenignore: boolean;
ignoreDirs: string[];
}
@@ -27,10 +27,10 @@ export function loadIgnoreRules(options: LoadIgnoreRulesOptions): Ignore {
}
}
if (options.useGeminiignore) {
const geminiignorePath = path.join(options.projectRoot, '.qwenignore');
if (fs.existsSync(geminiignorePath)) {
ignorer.add(fs.readFileSync(geminiignorePath, 'utf8'));
if (options.useQwenignore) {
const qwenignorePath = path.join(options.projectRoot, '.qwenignore');
if (fs.existsSync(qwenignorePath)) {
ignorer.add(fs.readFileSync(qwenignorePath, 'utf8'));
}
}

View File

@@ -17,12 +17,13 @@ import {
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
import { retryWithBackoff } from './retry.js';
import { AuthType } from '../core/contentGenerator.js';
// Import the new types (Assuming this test file is in packages/core/src/utils/)
import type { FallbackModelHandler } from '../fallback/types.js';
vi.mock('node:fs');
vi.mock('node:fs');
describe('Flash Fallback Integration', () => {
// Update the description to reflect that this tests the retry utility's integration
describe('Retry Utility Fallback Integration', () => {
let config: Config;
beforeEach(() => {
@@ -43,25 +44,28 @@ describe('Flash Fallback Integration', () => {
resetRequestCounter();
});
it('should automatically accept fallback', async () => {
// Set up a minimal flash fallback handler for testing
const flashFallbackHandler = async (): Promise<boolean> => true;
// This test validates the Config's ability to store and execute the handler contract.
it('should execute the injected FallbackHandler contract correctly', async () => {
// Set up a minimal handler for testing, ensuring it matches the new type.
const fallbackHandler: FallbackModelHandler = async () => 'retry';
config.setFlashFallbackHandler(flashFallbackHandler);
// Use the generalized setter
config.setFallbackModelHandler(fallbackHandler);
// Call the handler directly to test
const result = await config.flashFallbackHandler!(
// Call the handler directly via the config property
const result = await config.fallbackModelHandler!(
'gemini-2.5-pro',
DEFAULT_GEMINI_FLASH_MODEL,
);
// Verify it automatically accepts
expect(result).toBe(true);
// Verify it returns the correct intent
expect(result).toBe('retry');
});
it('should trigger fallback after 2 consecutive 429 errors for OAuth users', async () => {
// This test validates the retry utility's logic for triggering the callback.
it('should trigger onPersistent429 after 2 consecutive 429 errors for OAuth users', async () => {
let fallbackCalled = false;
let fallbackModel = '';
// Removed fallbackModel variable as it's no longer relevant here.
// Mock function that simulates exactly 2 429 errors, then succeeds after fallback
const mockApiCall = vi
@@ -70,11 +74,11 @@ describe('Flash Fallback Integration', () => {
.mockRejectedValueOnce(createSimulated429Error())
.mockResolvedValueOnce('success after fallback');
// Mock fallback handler
const mockFallbackHandler = vi.fn(async (_authType?: string) => {
// Mock the onPersistent429 callback (this is what client.ts/geminiChat.ts provides)
const mockPersistent429Callback = vi.fn(async (_authType?: string) => {
fallbackCalled = true;
fallbackModel = DEFAULT_GEMINI_FLASH_MODEL;
return fallbackModel;
// Return true to signal retryWithBackoff to reset attempts and continue.
return true;
});
// Test with OAuth personal auth type, with maxAttempts = 2 to ensure fallback triggers
@@ -82,18 +86,17 @@ describe('Flash Fallback Integration', () => {
maxAttempts: 2,
initialDelayMs: 1,
maxDelayMs: 10,
shouldRetry: (error: Error) => {
shouldRetryOnError: (error: Error) => {
const status = (error as Error & { status?: number }).status;
return status === 429;
},
onPersistent429: mockFallbackHandler,
onPersistent429: mockPersistent429Callback,
authType: AuthType.LOGIN_WITH_GOOGLE,
});
// Verify fallback was triggered
// Verify fallback mechanism was triggered
expect(fallbackCalled).toBe(true);
expect(fallbackModel).toBe(DEFAULT_GEMINI_FLASH_MODEL);
expect(mockFallbackHandler).toHaveBeenCalledWith(
expect(mockPersistent429Callback).toHaveBeenCalledWith(
AuthType.LOGIN_WITH_GOOGLE,
expect.any(Error),
);
@@ -102,16 +105,16 @@ describe('Flash Fallback Integration', () => {
expect(mockApiCall).toHaveBeenCalledTimes(3);
});
it('should not trigger fallback for API key users', async () => {
it('should not trigger onPersistent429 for API key users', async () => {
let fallbackCalled = false;
// Mock function that simulates 429 errors
const mockApiCall = vi.fn().mockRejectedValue(createSimulated429Error());
// Mock fallback handler
const mockFallbackHandler = vi.fn(async () => {
// Mock the callback
const mockPersistent429Callback = vi.fn(async () => {
fallbackCalled = true;
return DEFAULT_GEMINI_FLASH_MODEL;
return true;
});
// Test with API key auth type - should not trigger fallback
@@ -120,11 +123,11 @@ describe('Flash Fallback Integration', () => {
maxAttempts: 5,
initialDelayMs: 10,
maxDelayMs: 100,
shouldRetry: (error: Error) => {
shouldRetryOnError: (error: Error) => {
const status = (error as Error & { status?: number }).status;
return status === 429;
},
onPersistent429: mockFallbackHandler,
onPersistent429: mockPersistent429Callback,
authType: AuthType.USE_GEMINI, // API key auth type
});
} catch (error) {
@@ -134,10 +137,11 @@ describe('Flash Fallback Integration', () => {
// Verify fallback was NOT triggered for API key users
expect(fallbackCalled).toBe(false);
expect(mockFallbackHandler).not.toHaveBeenCalled();
expect(mockPersistent429Callback).not.toHaveBeenCalled();
});
it('should properly disable simulation state after fallback', () => {
// This test validates the test utilities themselves.
it('should properly disable simulation state after fallback (Test Utility)', () => {
// Enable simulation
setSimulate429(true);

View File

@@ -282,7 +282,7 @@ ${testRootDir}${path.sep}
const structure = await getFolderStructure(testRootDir, {
fileService,
fileFilteringOptions: {
respectGeminiIgnore: false,
respectQwenIgnore: false,
respectGitIgnore: false,
},
});
@@ -292,8 +292,8 @@ ${testRootDir}${path.sep}
});
});
describe('with geminiignore', () => {
it('should ignore geminiignore files by default', async () => {
describe('with qwenignore', () => {
it('should ignore qwenignore files by default', async () => {
await fsPromises.writeFile(
nodePath.join(testRootDir, '.qwenignore'),
'ignored.txt\nnode_modules/\n.gemini/\n!/.gemini/config.yaml',
@@ -313,7 +313,7 @@ ${testRootDir}${path.sep}
expect(structure).not.toContain('logs.json');
});
it('should not ignore files if respectGeminiIgnore is false', async () => {
it('should not ignore files if respectQwenIgnore is false', async () => {
await fsPromises.writeFile(
nodePath.join(testRootDir, '.qwenignore'),
'ignored.txt\nnode_modules/\n.gemini/\n!/.gemini/config.yaml',
@@ -328,7 +328,7 @@ ${testRootDir}${path.sep}
const structure = await getFolderStructure(testRootDir, {
fileService,
fileFilteringOptions: {
respectGeminiIgnore: false,
respectQwenIgnore: false,
respectGitIgnore: true, // Explicitly disable gemini ignore only
},
});

View File

@@ -9,8 +9,8 @@ import type { Dirent } from 'node:fs';
import * as path from 'node:path';
import { getErrorMessage, isNodeError } from './errors.js';
import type { FileDiscoveryService } from '../services/fileDiscoveryService.js';
import type { FileFilteringOptions } from '../config/config.js';
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/config.js';
import type { FileFilteringOptions } from '../config/constants.js';
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';
const MAX_ITEMS = 20;
const TRUNCATION_INDICATOR = '...';
@@ -132,8 +132,8 @@ async function readFullStructure(
const shouldIgnore =
(options.fileFilteringOptions.respectGitIgnore &&
options.fileService.shouldGitIgnoreFile(filePath)) ||
(options.fileFilteringOptions.respectGeminiIgnore &&
options.fileService.shouldGeminiIgnoreFile(filePath));
(options.fileFilteringOptions.respectQwenIgnore &&
options.fileService.shouldQwenIgnoreFile(filePath));
if (shouldIgnore) {
continue;
}
@@ -172,8 +172,8 @@ async function readFullStructure(
isIgnored =
(options.fileFilteringOptions.respectGitIgnore &&
options.fileService.shouldGitIgnoreFile(subFolderPath)) ||
(options.fileFilteringOptions.respectGeminiIgnore &&
options.fileService.shouldGeminiIgnoreFile(subFolderPath));
(options.fileFilteringOptions.respectQwenIgnore &&
options.fileService.shouldQwenIgnoreFile(subFolderPath));
}
if (options.ignoredFolders.has(subFolderName) || isIgnored) {

View File

@@ -33,14 +33,16 @@ describe('GitIgnoreParser', () => {
await fs.rm(projectRoot, { recursive: true, force: true });
});
describe('initialization', () => {
it('should initialize without errors when no .gitignore exists', async () => {
describe('Basic ignore behaviors', () => {
beforeEach(async () => {
await setupGitRepo();
expect(() => parser.loadGitRepoPatterns()).not.toThrow();
});
it('should load .gitignore patterns when file exists', async () => {
await setupGitRepo();
it('should not ignore files when no .gitignore exists', async () => {
expect(parser.isIgnored('file.txt')).toBe(false);
});
it('should ignore files based on a root .gitignore', async () => {
const gitignoreContent = `
# Comment
node_modules/
@@ -50,52 +52,28 @@ node_modules/
`;
await createTestFile('.gitignore', gitignoreContent);
parser.loadGitRepoPatterns();
expect(parser.getPatterns()).toEqual([
'.git',
'node_modules/',
'*.log',
'/dist',
'.env',
]);
expect(parser.isIgnored(path.join('node_modules', 'some-lib'))).toBe(
true,
);
expect(parser.isIgnored(path.join('src', 'app.log'))).toBe(true);
expect(parser.isIgnored(path.join('dist', 'index.js'))).toBe(true);
expect(parser.isIgnored('.env')).toBe(true);
expect(parser.isIgnored('src/index.js')).toBe(false);
});
it('should handle git exclude file', async () => {
await setupGitRepo();
await createTestFile(
path.join('.git', 'info', 'exclude'),
'temp/\n*.tmp',
);
parser.loadGitRepoPatterns();
expect(parser.getPatterns()).toEqual(['.git', 'temp/', '*.tmp']);
expect(parser.isIgnored(path.join('temp', 'file.txt'))).toBe(true);
expect(parser.isIgnored(path.join('src', 'file.tmp'))).toBe(true);
});
it('should handle custom patterns file name', async () => {
// No .git directory for this test
await createTestFile('.qwenignore', 'temp/\n*.tmp');
parser.loadPatterns('.qwenignore');
expect(parser.getPatterns()).toEqual(['temp/', '*.tmp']);
expect(parser.isIgnored(path.join('temp', 'file.txt'))).toBe(true);
expect(parser.isIgnored(path.join('src', 'file.tmp'))).toBe(true);
});
it('should initialize without errors when no .qwenignore exists', () => {
expect(() => parser.loadPatterns('.qwenignore')).not.toThrow();
expect(parser.isIgnored('src/file.js')).toBe(false);
});
});
describe('isIgnored', () => {
describe('isIgnored path handling', () => {
beforeEach(async () => {
await setupGitRepo();
const gitignoreContent = `
@@ -107,7 +85,6 @@ src/*.tmp
!src/important.tmp
`;
await createTestFile('.gitignore', gitignoreContent);
parser.loadGitRepoPatterns();
});
it('should always ignore .git directory', () => {
@@ -181,16 +158,80 @@ src/*.tmp
expect(() => parser.isIgnored('/node_modules')).not.toThrow();
expect(parser.isIgnored('/node_modules')).toBe(false);
});
it('should handle backslash-prefixed files without crashing', () => {
expect(() => parser.isIgnored('\\backslash-file-test.txt')).not.toThrow();
expect(parser.isIgnored('\\backslash-file-test.txt')).toBe(false);
});
it('should handle files with absolute-like names', () => {
expect(() => parser.isIgnored('/backslash-file-test.txt')).not.toThrow();
expect(parser.isIgnored('/backslash-file-test.txt')).toBe(false);
});
});
describe('getIgnoredPatterns', () => {
it('should return the raw patterns added', async () => {
describe('nested .gitignore files', () => {
beforeEach(async () => {
await setupGitRepo();
const gitignoreContent = '*.log\n!important.log';
await createTestFile('.gitignore', gitignoreContent);
// Root .gitignore
await createTestFile('.gitignore', 'root-ignored.txt');
// Nested .gitignore 1
await createTestFile('a/.gitignore', '/b\nc');
// Nested .gitignore 2
await createTestFile('a/d/.gitignore', 'e.txt\nf/g');
});
parser.loadGitRepoPatterns();
expect(parser.getPatterns()).toEqual(['.git', '*.log', '!important.log']);
it('should handle nested .gitignore files correctly', async () => {
// From root .gitignore
expect(parser.isIgnored('root-ignored.txt')).toBe(true);
expect(parser.isIgnored('a/root-ignored.txt')).toBe(true);
// From a/.gitignore: /b
expect(parser.isIgnored('a/b')).toBe(true);
expect(parser.isIgnored('b')).toBe(false);
expect(parser.isIgnored('a/x/b')).toBe(false);
// From a/.gitignore: c
expect(parser.isIgnored('a/c')).toBe(true);
expect(parser.isIgnored('a/x/y/c')).toBe(true);
expect(parser.isIgnored('c')).toBe(false);
// From a/d/.gitignore: e.txt
expect(parser.isIgnored('a/d/e.txt')).toBe(true);
expect(parser.isIgnored('a/d/x/e.txt')).toBe(true);
expect(parser.isIgnored('a/e.txt')).toBe(false);
// From a/d/.gitignore: f/g
expect(parser.isIgnored('a/d/f/g')).toBe(true);
expect(parser.isIgnored('a/f/g')).toBe(false);
});
});
describe('precedence rules', () => {
beforeEach(async () => {
await setupGitRepo();
});
it('should prioritize nested .gitignore over root .gitignore', async () => {
await createTestFile('.gitignore', '*.log');
await createTestFile('a/b/.gitignore', '!special.log');
expect(parser.isIgnored('a/b/any.log')).toBe(true);
expect(parser.isIgnored('a/b/special.log')).toBe(false);
});
it('should prioritize .gitignore over .git/info/exclude', async () => {
// Exclude all .log files
await createTestFile(path.join('.git', 'info', 'exclude'), '*.log');
// But make an exception in the root .gitignore
await createTestFile('.gitignore', '!important.log');
expect(parser.isIgnored('some.log')).toBe(true);
expect(parser.isIgnored('important.log')).toBe(false);
expect(parser.isIgnored(path.join('subdir', 'some.log'))).toBe(true);
expect(parser.isIgnored(path.join('subdir', 'important.log'))).toBe(
false,
);
});
});
});

View File

@@ -6,70 +6,184 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import ignore, { type Ignore } from 'ignore';
import { isGitRepository } from './gitUtils.js';
import ignore from 'ignore';
export interface GitIgnoreFilter {
isIgnored(filePath: string): boolean;
getPatterns(): string[];
}
export class GitIgnoreParser implements GitIgnoreFilter {
private projectRoot: string;
private ig: Ignore = ignore();
private patterns: string[] = [];
private cache: Map<string, string[]> = new Map();
private globalPatterns: string[] | undefined;
constructor(projectRoot: string) {
this.projectRoot = path.resolve(projectRoot);
}
loadGitRepoPatterns(): void {
if (!isGitRepository(this.projectRoot)) return;
// Always ignore .git directory regardless of .gitignore content
this.addPatterns(['.git']);
const patternFiles = ['.gitignore', path.join('.git', 'info', 'exclude')];
for (const pf of patternFiles) {
this.loadPatterns(pf);
}
}
loadPatterns(patternsFileName: string): void {
const patternsFilePath = path.join(this.projectRoot, patternsFileName);
private loadPatternsForFile(patternsFilePath: string): string[] {
let content: string;
try {
content = fs.readFileSync(patternsFilePath, 'utf-8');
} catch (_error) {
// ignore file not found
return;
return [];
}
const patterns = (content ?? '')
const isExcludeFile = patternsFilePath.endsWith(
path.join('.git', 'info', 'exclude'),
);
const relativeBaseDir = isExcludeFile
? '.'
: path.dirname(path.relative(this.projectRoot, patternsFilePath));
return content
.split('\n')
.map((p) => p.trim())
.filter((p) => p !== '' && !p.startsWith('#'));
this.addPatterns(patterns);
}
.filter((p) => p !== '' && !p.startsWith('#'))
.map((p) => {
const isNegative = p.startsWith('!');
if (isNegative) {
p = p.substring(1);
}
private addPatterns(patterns: string[]) {
this.ig.add(patterns);
this.patterns.push(...patterns);
const isAnchoredInFile = p.startsWith('/');
if (isAnchoredInFile) {
p = p.substring(1);
}
// An empty pattern can result from a negated pattern like `!`,
// which we can ignore.
if (p === '') {
return '';
}
let newPattern = p;
if (relativeBaseDir && relativeBaseDir !== '.') {
// Only in nested .gitignore files, the patterns need to be modified according to:
// - If `a/b/.gitignore` defines `/c` then it needs to be changed to `/a/b/c`
// - If `a/b/.gitignore` defines `c` then it needs to be changed to `/a/b/**/c`
// - If `a/b/.gitignore` defines `c/d` then it needs to be changed to `/a/b/c/d`
if (!isAnchoredInFile && !p.includes('/')) {
// If no slash and not anchored in file, it matches files in any
// subdirectory.
newPattern = path.join('**', p);
}
// Prepend the .gitignore file's directory.
newPattern = path.join(relativeBaseDir, newPattern);
// Anchor the pattern to a nested gitignore directory.
if (!newPattern.startsWith('/')) {
newPattern = '/' + newPattern;
}
}
// Anchor the pattern if originally anchored
if (isAnchoredInFile && !newPattern.startsWith('/')) {
newPattern = '/' + newPattern;
}
if (isNegative) {
newPattern = '!' + newPattern;
}
// Even in windows, Ignore expects forward slashes.
newPattern = newPattern.replace(/\\/g, '/');
return newPattern;
})
.filter((p) => p !== '');
}
isIgnored(filePath: string): boolean {
const resolved = path.resolve(this.projectRoot, filePath);
const relativePath = path.relative(this.projectRoot, resolved);
if (relativePath === '' || relativePath.startsWith('..')) {
if (!filePath || typeof filePath !== 'string') {
return false;
}
// Even in windows, Ignore expects forward slashes.
const normalizedPath = relativePath.replace(/\\/g, '/');
return this.ig.ignores(normalizedPath);
}
const absoluteFilePath = path.resolve(this.projectRoot, filePath);
if (!absoluteFilePath.startsWith(this.projectRoot)) {
return false;
}
getPatterns(): string[] {
return this.patterns;
try {
const resolved = path.resolve(this.projectRoot, filePath);
const relativePath = path.relative(this.projectRoot, resolved);
if (relativePath === '' || relativePath.startsWith('..')) {
return false;
}
// Even in windows, Ignore expects forward slashes.
const normalizedPath = relativePath.replace(/\\/g, '/');
if (normalizedPath.startsWith('/') || normalizedPath === '') {
return false;
}
const ig = ignore();
// Always ignore .git directory
ig.add('.git');
// Load global patterns from .git/info/exclude on first call
if (this.globalPatterns === undefined) {
const excludeFile = path.join(
this.projectRoot,
'.git',
'info',
'exclude',
);
this.globalPatterns = fs.existsSync(excludeFile)
? this.loadPatternsForFile(excludeFile)
: [];
}
ig.add(this.globalPatterns);
const pathParts = relativePath.split(path.sep);
const dirsToVisit = [this.projectRoot];
let currentAbsDir = this.projectRoot;
// Collect all directories in the path
for (let i = 0; i < pathParts.length - 1; i++) {
currentAbsDir = path.join(currentAbsDir, pathParts[i]);
dirsToVisit.push(currentAbsDir);
}
for (const dir of dirsToVisit) {
const relativeDir = path.relative(this.projectRoot, dir);
if (relativeDir) {
const normalizedRelativeDir = relativeDir.replace(/\\/g, '/');
if (ig.ignores(normalizedRelativeDir)) {
// This directory is ignored by an ancestor's .gitignore.
// According to git behavior, we don't need to process this
// directory's .gitignore, as nothing inside it can be
// un-ignored.
break;
}
}
if (this.cache.has(dir)) {
const patterns = this.cache.get(dir);
if (patterns) {
ig.add(patterns);
}
} else {
const gitignorePath = path.join(dir, '.gitignore');
if (fs.existsSync(gitignorePath)) {
const patterns = this.loadPatternsForFile(gitignorePath);
this.cache.set(dir, patterns);
ig.add(patterns);
} else {
this.cache.set(dir, []); // Cache miss
}
}
}
return ig.ignores(normalizedPath);
} catch (_error) {
return false;
}
}
}

View File

@@ -0,0 +1,322 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
FixLLMEditWithInstruction,
resetLlmEditFixerCaches_TEST_ONLY,
type SearchReplaceEdit,
} from './llm-edit-fixer.js';
import { promptIdContext } from './promptIdContext.js';
import type { BaseLlmClient } from '../core/baseLlmClient.js';
// Mock the BaseLlmClient
const mockGenerateJson = vi.fn();
const mockBaseLlmClient = {
generateJson: mockGenerateJson,
} as unknown as BaseLlmClient;
describe('FixLLMEditWithInstruction', () => {
const instruction = 'Replace the title';
const old_string = '<h1>Old Title</h1>';
const new_string = '<h1>New Title</h1>';
const error = 'String not found';
const current_content = '<body><h1>Old Title</h1></body>';
const abortController = new AbortController();
const abortSignal = abortController.signal;
beforeEach(() => {
vi.clearAllMocks();
resetLlmEditFixerCaches_TEST_ONLY(); // Ensure cache is cleared before each test
});
afterEach(() => {
vi.useRealTimers(); // Reset timers after each test
});
const mockApiResponse: SearchReplaceEdit = {
search: '<h1>Old Title</h1>',
replace: '<h1>New Title</h1>',
noChangesRequired: false,
explanation: 'The original search was correct.',
};
it('should use the promptId from the AsyncLocalStorage context when available', async () => {
const testPromptId = 'test-prompt-id-12345';
mockGenerateJson.mockResolvedValue(mockApiResponse);
await promptIdContext.run(testPromptId, async () => {
await FixLLMEditWithInstruction(
instruction,
old_string,
new_string,
error,
current_content,
mockBaseLlmClient,
abortSignal,
);
});
// Verify that generateJson was called with the promptId from the context
expect(mockGenerateJson).toHaveBeenCalledTimes(1);
expect(mockGenerateJson).toHaveBeenCalledWith(
expect.objectContaining({
promptId: testPromptId,
}),
);
});
it('should generate and use a fallback promptId when context is not available', async () => {
mockGenerateJson.mockResolvedValue(mockApiResponse);
const consoleWarnSpy = vi
.spyOn(console, 'warn')
.mockImplementation(() => {});
// Run the function outside of any context
await FixLLMEditWithInstruction(
instruction,
old_string,
new_string,
error,
current_content,
mockBaseLlmClient,
abortSignal,
);
// Verify the warning was logged
expect(consoleWarnSpy).toHaveBeenCalledWith(
expect.stringContaining(
'Could not find promptId in context. This is unexpected. Using a fallback ID: llm-fixer-fallback-',
),
);
// Verify that generateJson was called with the generated fallback promptId
expect(mockGenerateJson).toHaveBeenCalledTimes(1);
expect(mockGenerateJson).toHaveBeenCalledWith(
expect.objectContaining({
promptId: expect.stringContaining('llm-fixer-fallback-'),
}),
);
// Restore mocks
consoleWarnSpy.mockRestore();
});
it('should construct the user prompt correctly', async () => {
mockGenerateJson.mockResolvedValue(mockApiResponse);
const promptId = 'test-prompt-id-prompt-construction';
await promptIdContext.run(promptId, async () => {
await FixLLMEditWithInstruction(
instruction,
old_string,
new_string,
error,
current_content,
mockBaseLlmClient,
abortSignal,
);
});
const generateJsonCall = mockGenerateJson.mock.calls[0][0];
const userPromptContent = generateJsonCall.contents[0].parts[0].text;
expect(userPromptContent).toContain(
`<instruction>\n${instruction}\n</instruction>`,
);
expect(userPromptContent).toContain(`<search>\n${old_string}\n</search>`);
expect(userPromptContent).toContain(`<replace>\n${new_string}\n</replace>`);
expect(userPromptContent).toContain(`<error>\n${error}\n</error>`);
expect(userPromptContent).toContain(
`<file_content>\n${current_content}\n</file_content>`,
);
});
it('should return a cached result on subsequent identical calls', async () => {
mockGenerateJson.mockResolvedValue(mockApiResponse);
const testPromptId = 'test-prompt-id-caching';
await promptIdContext.run(testPromptId, async () => {
// First call - should call the API
const result1 = await FixLLMEditWithInstruction(
instruction,
old_string,
new_string,
error,
current_content,
mockBaseLlmClient,
abortSignal,
);
// Second call with identical parameters - should hit the cache
const result2 = await FixLLMEditWithInstruction(
instruction,
old_string,
new_string,
error,
current_content,
mockBaseLlmClient,
abortSignal,
);
expect(result1).toEqual(mockApiResponse);
expect(result2).toEqual(mockApiResponse);
// Verify the underlying service was only called ONCE
expect(mockGenerateJson).toHaveBeenCalledTimes(1);
});
});
it('should not use cache for calls with different parameters', async () => {
mockGenerateJson.mockResolvedValue(mockApiResponse);
const testPromptId = 'test-prompt-id-cache-miss';
await promptIdContext.run(testPromptId, async () => {
// First call
await FixLLMEditWithInstruction(
instruction,
old_string,
new_string,
error,
current_content,
mockBaseLlmClient,
abortSignal,
);
// Second call with a different instruction
await FixLLMEditWithInstruction(
'A different instruction',
old_string,
new_string,
error,
current_content,
mockBaseLlmClient,
abortSignal,
);
// Verify the underlying service was called TWICE
expect(mockGenerateJson).toHaveBeenCalledTimes(2);
});
});
describe('cache collision prevention', () => {
it('should prevent cache collisions when parameters contain separator sequences', async () => {
// This test would have failed with the old string concatenation approach
// but passes with JSON.stringify implementation
const firstResponse: SearchReplaceEdit = {
search: 'original text',
replace: 'first replacement',
noChangesRequired: false,
explanation: 'First edit correction',
};
const secondResponse: SearchReplaceEdit = {
search: 'different text',
replace: 'second replacement',
noChangesRequired: false,
explanation: 'Second edit correction',
};
mockGenerateJson
.mockResolvedValueOnce(firstResponse)
.mockResolvedValueOnce(secondResponse);
const testPromptId = 'cache-collision-test';
await promptIdContext.run(testPromptId, async () => {
// Scenario 1: Parameters that would create collision with string concatenation
// Cache key with old method would be: "Fix YAML---content---update--some---data--error"
const call1 = await FixLLMEditWithInstruction(
'Fix YAML', // instruction
'content', // old_string
'update--some', // new_string (contains --)
'data', // current_content
'error', // error
mockBaseLlmClient,
abortSignal,
);
// Scenario 2: Different parameters that would create same cache key with concatenation
// Cache key with old method would be: "Fix YAML---content---update--some---data--error"
const call2 = await FixLLMEditWithInstruction(
'Fix YAML---content---update', // instruction (contains ---)
'some---data', // old_string (contains ---)
'error', // new_string
'', // current_content
'', // error
mockBaseLlmClient,
abortSignal,
);
// With the fixed JSON.stringify approach, these should be different
// and each should get its own LLM response
expect(call1).toEqual(firstResponse);
expect(call2).toEqual(secondResponse);
expect(call1).not.toEqual(call2);
// Most importantly: the LLM should be called TWICE, not once
// (proving no cache collision occurred)
expect(mockGenerateJson).toHaveBeenCalledTimes(2);
});
});
it('should handle YAML frontmatter without cache collisions', async () => {
// Real-world test case with YAML frontmatter containing ---
const yamlResponse: SearchReplaceEdit = {
search: '---\ntitle: Old\n---',
replace: '---\ntitle: New\n---',
noChangesRequired: false,
explanation: 'Updated YAML frontmatter',
};
const contentResponse: SearchReplaceEdit = {
search: 'old content',
replace: 'new content',
noChangesRequired: false,
explanation: 'Updated content',
};
mockGenerateJson
.mockResolvedValueOnce(yamlResponse)
.mockResolvedValueOnce(contentResponse);
const testPromptId = 'yaml-frontmatter-test';
await promptIdContext.run(testPromptId, async () => {
// Call 1: Edit YAML frontmatter
const yamlEdit = await FixLLMEditWithInstruction(
'Update YAML frontmatter',
'---\ntitle: Old\n---', // Contains ---
'---\ntitle: New\n---', // Contains ---
'Some markdown content',
'YAML parse error',
mockBaseLlmClient,
abortSignal,
);
// Call 2: Edit regular content
const contentEdit = await FixLLMEditWithInstruction(
'Update content',
'old content',
'new content',
'Different file content',
'Content not found',
mockBaseLlmClient,
abortSignal,
);
// Verify both calls succeeded with different results
expect(yamlEdit).toEqual(yamlResponse);
expect(contentEdit).toEqual(contentResponse);
expect(yamlEdit).not.toEqual(contentEdit);
// Verify no cache collision - both calls should hit the LLM
expect(mockGenerateJson).toHaveBeenCalledTimes(2);
});
});
});
});

View File

@@ -0,0 +1,164 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { createHash } from 'node:crypto';
import { type Content, Type } from '@google/genai';
import { type BaseLlmClient } from '../core/baseLlmClient.js';
import { LruCache } from './LruCache.js';
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
import { promptIdContext } from './promptIdContext.js';
const MAX_CACHE_SIZE = 50;
const EDIT_SYS_PROMPT = `
You are an expert code-editing assistant specializing in debugging and correcting failed search-and-replace operations.
# Primary Goal
Your task is to analyze a failed edit attempt and provide a corrected \`search\` string that will match the text in the file precisely. The correction should be as minimal as possible, staying very close to the original, failed \`search\` string. Do NOT invent a completely new edit based on the instruction; your job is to fix the provided parameters.
It is important that you do no try to figure out if the instruction is correct. DO NOT GIVE ADVICE. Your only goal here is to do your best to perform the search and replace task!
# Input Context
You will be given:
1. The high-level instruction for the original edit.
2. The exact \`search\` and \`replace\` strings that failed.
3. The error message that was produced.
4. The full content of the source file.
# Rules for Correction
1. **Minimal Correction:** Your new \`search\` string must be a close variation of the original. Focus on fixing issues like whitespace, indentation, line endings, or small contextual differences.
2. **Explain the Fix:** Your \`explanation\` MUST state exactly why the original \`search\` failed and how your new \`search\` string resolves that specific failure. (e.g., "The original search failed due to incorrect indentation; the new search corrects the indentation to match the source file.").
3. **Preserve the \`replace\` String:** Do NOT modify the \`replace\` string unless the instruction explicitly requires it and it was the source of the error. Your primary focus is fixing the \`search\` string.
4. **No Changes Case:** CRUCIAL: if the change is already present in the file, set \`noChangesRequired\` to True and explain why in the \`explanation\`. It is crucial that you only do this if the changes outline in \`replace\` are alredy in the file and suits the instruction!!
5. **Exactness:** The final \`search\` field must be the EXACT literal text from the file. Do not escape characters.
`;
const EDIT_USER_PROMPT = `
# Goal of the Original Edit
<instruction>
{instruction}
</instruction>
# Failed Attempt Details
- **Original \`search\` parameter (failed):**
<search>
{old_string}
</search>
- **Original \`replace\` parameter:**
<replace>
{new_string}
</replace>
- **Error Encountered:**
<error>
{error}
</error>
# Full File Content
<file_content>
{current_content}
</file_content>
# Your Task
Based on the error and the file content, provide a corrected \`search\` string that will succeed. Remember to keep your correction minimal and explain the precise reason for the failure in your \`explanation\`.
`;
export interface SearchReplaceEdit {
search: string;
replace: string;
noChangesRequired: boolean;
explanation: string;
}
const SearchReplaceEditSchema = {
type: Type.OBJECT,
properties: {
explanation: { type: Type.STRING },
search: { type: Type.STRING },
replace: { type: Type.STRING },
noChangesRequired: { type: Type.BOOLEAN },
},
required: ['search', 'replace', 'explanation'],
};
const editCorrectionWithInstructionCache = new LruCache<
string,
SearchReplaceEdit
>(MAX_CACHE_SIZE);
/**
* Attempts to fix a failed edit by using an LLM to generate a new search and replace pair.
* @param instruction The instruction for what needs to be done.
* @param old_string The original string to be replaced.
* @param new_string The original replacement string.
* @param error The error that occurred during the initial edit.
* @param current_content The current content of the file.
* @param baseLlmClient The BaseLlmClient to use for the LLM call.
* @param abortSignal An abort signal to cancel the operation.
* @param promptId A unique ID for the prompt.
* @returns A new search and replace pair.
*/
export async function FixLLMEditWithInstruction(
instruction: string,
old_string: string,
new_string: string,
error: string,
current_content: string,
baseLlmClient: BaseLlmClient,
abortSignal: AbortSignal,
): Promise<SearchReplaceEdit> {
let promptId = promptIdContext.getStore();
if (!promptId) {
promptId = `llm-fixer-fallback-${Date.now()}-${Math.random().toString(16).slice(2)}`;
console.warn(
`Could not find promptId in context. This is unexpected. Using a fallback ID: ${promptId}`,
);
}
const cacheKey = createHash('sha256')
.update(
JSON.stringify([
current_content,
old_string,
new_string,
instruction,
error,
]),
)
.digest('hex');
const cachedResult = editCorrectionWithInstructionCache.get(cacheKey);
if (cachedResult) {
return cachedResult;
}
const userPrompt = EDIT_USER_PROMPT.replace('{instruction}', instruction)
.replace('{old_string}', old_string)
.replace('{new_string}', new_string)
.replace('{error}', error)
.replace('{current_content}', current_content);
const contents: Content[] = [
{
role: 'user',
parts: [{ text: userPrompt }],
},
];
const result = (await baseLlmClient.generateJson({
contents,
schema: SearchReplaceEditSchema,
abortSignal,
model: DEFAULT_GEMINI_FLASH_MODEL,
systemInstruction: EDIT_SYS_PROMPT,
promptId,
maxAttempts: 1,
})) as unknown as SearchReplaceEdit;
editCorrectionWithInstructionCache.set(cacheKey, result);
return result;
}
export function resetLlmEditFixerCaches_TEST_ONLY() {
editCorrectionWithInstructionCache.clear();
}

View File

@@ -10,11 +10,11 @@ import * as os from 'node:os';
import * as path from 'node:path';
import { loadServerHierarchicalMemory } from './memoryDiscovery.js';
import {
GEMINI_CONFIG_DIR,
setGeminiMdFilename,
DEFAULT_CONTEXT_FILENAME,
} from '../tools/memoryTool.js';
import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
import { QWEN_DIR } from './paths.js';
vi.mock('os', async (importOriginal) => {
const actualOs = await importOriginal<typeof os>();
@@ -25,6 +25,7 @@ vi.mock('os', async (importOriginal) => {
});
describe('loadServerHierarchicalMemory', () => {
const DEFAULT_FOLDER_TRUST = true;
let testRootDir: string;
let cwd: string;
let projectRoot: string;
@@ -62,7 +63,61 @@ describe('loadServerHierarchicalMemory', () => {
// Some tests set this to a different value.
setGeminiMdFilename(DEFAULT_CONTEXT_FILENAME);
// Clean up the temporary directory to prevent resource leaks.
await fsPromises.rm(testRootDir, { recursive: true, force: true });
// Use maxRetries option for robust cleanup without race conditions
await fsPromises.rm(testRootDir, {
recursive: true,
force: true,
maxRetries: 3,
retryDelay: 10,
});
});
describe('when untrusted', () => {
it('does not load context files from untrusted workspaces', async () => {
await createTestFile(
path.join(projectRoot, DEFAULT_CONTEXT_FILENAME),
'Project root memory',
);
await createTestFile(
path.join(cwd, DEFAULT_CONTEXT_FILENAME),
'Src directory memory',
);
const { fileCount } = await loadServerHierarchicalMemory(
cwd,
[],
false,
new FileDiscoveryService(projectRoot),
[],
false, // untrusted
);
expect(fileCount).toEqual(0);
});
it('loads context from outside the untrusted workspace', async () => {
await createTestFile(
path.join(projectRoot, DEFAULT_CONTEXT_FILENAME),
'Project root memory',
); // Untrusted
await createTestFile(
path.join(cwd, DEFAULT_CONTEXT_FILENAME),
'Src directory memory',
); // Untrusted
const filepath = path.join(homedir, QWEN_DIR, DEFAULT_CONTEXT_FILENAME);
await createTestFile(filepath, 'default context content'); // In user home dir (outside untrusted space).
const { fileCount, memoryContent } = await loadServerHierarchicalMemory(
cwd,
[],
false,
new FileDiscoveryService(projectRoot),
[],
false, // untrusted
);
expect(fileCount).toEqual(1);
expect(memoryContent).toContain(path.relative(cwd, filepath).toString());
});
});
it('should return empty memory and count if no context files are found', async () => {
@@ -71,6 +126,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -81,7 +138,7 @@ describe('loadServerHierarchicalMemory', () => {
it('should load only the global context file if present and others are not (default filename)', async () => {
const defaultContextFile = await createTestFile(
path.join(homedir, GEMINI_CONFIG_DIR, DEFAULT_CONTEXT_FILENAME),
path.join(homedir, QWEN_DIR, DEFAULT_CONTEXT_FILENAME),
'default context content',
);
@@ -90,6 +147,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -103,7 +162,7 @@ describe('loadServerHierarchicalMemory', () => {
setGeminiMdFilename(customFilename);
const customContextFile = await createTestFile(
path.join(homedir, GEMINI_CONFIG_DIR, customFilename),
path.join(homedir, QWEN_DIR, customFilename),
'custom context content',
);
@@ -112,6 +171,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -138,6 +199,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -161,6 +224,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -184,6 +249,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -207,6 +274,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -217,7 +286,7 @@ describe('loadServerHierarchicalMemory', () => {
it('should load and correctly order global, upward, and downward ORIGINAL_GEMINI_MD_FILENAME files', async () => {
const defaultContextFile = await createTestFile(
path.join(homedir, GEMINI_CONFIG_DIR, DEFAULT_CONTEXT_FILENAME),
path.join(homedir, QWEN_DIR, DEFAULT_CONTEXT_FILENAME),
'default context content',
);
const rootGeminiFile = await createTestFile(
@@ -242,6 +311,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -269,10 +340,11 @@ describe('loadServerHierarchicalMemory', () => {
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
'tree',
{
respectGitIgnore: true,
respectGeminiIgnore: true,
respectQwenIgnore: true,
},
200, // maxDirs parameter
);
@@ -288,9 +360,11 @@ describe('loadServerHierarchicalMemory', () => {
.spyOn(console, 'debug')
.mockImplementation(() => {});
for (let i = 0; i < 100; i++) {
await createEmptyDir(path.join(cwd, `deep_dir_${i}`));
}
// Create directories in parallel for better performance
const dirPromises = Array.from({ length: 2 }, (_, i) =>
createEmptyDir(path.join(cwd, `deep_dir_${i}`)),
);
await Promise.all(dirPromises);
// Pass the custom limit directly to the function
await loadServerHierarchicalMemory(
@@ -299,17 +373,18 @@ describe('loadServerHierarchicalMemory', () => {
true,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
'tree', // importFormat
{
respectGitIgnore: true,
respectGeminiIgnore: true,
respectQwenIgnore: true,
},
50, // maxDirs
1, // maxDirs
);
expect(consoleDebugSpy).toHaveBeenCalledWith(
expect.stringContaining('[DEBUG] [BfsFileSearch]'),
expect.stringContaining('Scanning [50/50]:'),
expect.stringContaining('Scanning [1/1]:'),
);
vi.mocked(console.debug).mockRestore();
@@ -319,6 +394,8 @@ describe('loadServerHierarchicalMemory', () => {
[],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -339,6 +416,7 @@ describe('loadServerHierarchicalMemory', () => {
false,
new FileDiscoveryService(projectRoot),
[extensionFilePath],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -361,6 +439,8 @@ describe('loadServerHierarchicalMemory', () => {
[includedDir],
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
expect(result).toEqual({
@@ -391,6 +471,8 @@ describe('loadServerHierarchicalMemory', () => {
createdFiles.map((f) => path.dirname(f)),
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
// Should have loaded all files
@@ -422,6 +504,8 @@ describe('loadServerHierarchicalMemory', () => {
[childDir, parentDir], // Deliberately include duplicates
false,
new FileDiscoveryService(projectRoot),
[],
DEFAULT_FOLDER_TRUST,
);
// Should have both files without duplicates

View File

@@ -9,14 +9,12 @@ import * as fsSync from 'node:fs';
import * as path from 'node:path';
import { homedir } from 'node:os';
import { bfsFileSearch } from './bfsFileSearch.js';
import {
GEMINI_CONFIG_DIR,
getAllGeminiMdFilenames,
} from '../tools/memoryTool.js';
import { getAllGeminiMdFilenames } from '../tools/memoryTool.js';
import type { FileDiscoveryService } from '../services/fileDiscoveryService.js';
import { processImports } from './memoryImportProcessor.js';
import type { FileFilteringOptions } from '../config/config.js';
import { DEFAULT_MEMORY_FILE_FILTERING_OPTIONS } from '../config/config.js';
import type { FileFilteringOptions } from '../config/constants.js';
import { DEFAULT_MEMORY_FILE_FILTERING_OPTIONS } from '../config/constants.js';
import { QWEN_DIR } from './paths.js';
// Simple console logger, similar to the one previously in CLI's config.ts
// TODO: Integrate with a more robust server-side logger if available/appropriate.
@@ -87,6 +85,7 @@ async function getGeminiMdFilePathsInternal(
debugMode: boolean,
fileService: FileDiscoveryService,
extensionContextFilePaths: string[] = [],
folderTrust: boolean,
fileFilteringOptions: FileFilteringOptions,
maxDirs: number,
): Promise<string[]> {
@@ -109,6 +108,7 @@ async function getGeminiMdFilePathsInternal(
debugMode,
fileService,
extensionContextFilePaths,
folderTrust,
fileFilteringOptions,
maxDirs,
),
@@ -138,6 +138,7 @@ async function getGeminiMdFilePathsInternalForEachDir(
debugMode: boolean,
fileService: FileDiscoveryService,
extensionContextFilePaths: string[] = [],
folderTrust: boolean,
fileFilteringOptions: FileFilteringOptions,
maxDirs: number,
): Promise<string[]> {
@@ -148,7 +149,7 @@ async function getGeminiMdFilePathsInternalForEachDir(
const resolvedHome = path.resolve(userHomePath);
const globalMemoryPath = path.join(
resolvedHome,
GEMINI_CONFIG_DIR,
QWEN_DIR,
geminiMdFilename,
);
@@ -183,7 +184,7 @@ async function getGeminiMdFilePathsInternalForEachDir(
} catch {
// Not found, which is okay
}
} else if (dir) {
} else if (dir && folderTrust) {
// FIX: Only perform the workspace search (upward and downward scans)
// if a valid currentWorkingDirectory is provided and it's not the home directory.
const resolvedCwd = path.resolve(dir);
@@ -203,7 +204,7 @@ async function getGeminiMdFilePathsInternalForEachDir(
: path.dirname(resolvedHome);
while (currentDir && currentDir !== path.dirname(currentDir)) {
if (currentDir === path.join(resolvedHome, GEMINI_CONFIG_DIR)) {
if (currentDir === path.join(resolvedHome, QWEN_DIR)) {
break;
}
@@ -225,7 +226,7 @@ async function getGeminiMdFilePathsInternalForEachDir(
}
upwardPaths.forEach((p) => allPaths.add(p));
const mergedOptions = {
const mergedOptions: FileFilteringOptions = {
...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
...fileFilteringOptions,
};
@@ -346,6 +347,11 @@ function concatenateInstructions(
.join('\n\n');
}
export interface LoadServerHierarchicalMemoryResponse {
memoryContent: string;
fileCount: number;
}
/**
* Loads hierarchical QWEN.md files and concatenates their content.
* This function is intended for use by the server.
@@ -356,10 +362,11 @@ export async function loadServerHierarchicalMemory(
debugMode: boolean,
fileService: FileDiscoveryService,
extensionContextFilePaths: string[] = [],
folderTrust: boolean,
importFormat: 'flat' | 'tree' = 'tree',
fileFilteringOptions?: FileFilteringOptions,
maxDirs: number = 200,
): Promise<{ memoryContent: string; fileCount: number }> {
): Promise<LoadServerHierarchicalMemoryResponse> {
if (debugMode)
logger.debug(
`Loading server hierarchical memory for CWD: ${currentWorkingDirectory} (importFormat: ${importFormat})`,
@@ -375,6 +382,7 @@ export async function loadServerHierarchicalMemory(
debugMode,
fileService,
extensionContextFilePaths,
folderTrust,
fileFilteringOptions || DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
maxDirs,
);

View File

@@ -423,6 +423,31 @@ describe('memoryImportProcessor', () => {
);
});
it('should not process imports in repeated inline code blocks', async () => {
const content = '`@noimport` and `@noimport`';
const projectRoot = testPath('test', 'project');
const basePath = testPath(projectRoot, 'src');
const result = await processImports(
content,
basePath,
true,
undefined,
projectRoot,
);
expect(result.content).toBe(content);
});
it('should not import when @ is inside an inline code block', async () => {
const content =
'We should not ` @import` when the symbol is inside an inline code string.';
const testRootDir = testPath('test', 'project');
const result = await processImports(content, testRootDir);
expect(result.content).toBe(content);
expect(result.importTree.imports).toBeUndefined();
});
it('should allow imports from parent and subdirectories within project root', async () => {
const content =
'Parent import: @../parent.md Subdir import: @./components/sub.md';

View File

@@ -7,7 +7,7 @@
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { isSubpath } from './paths.js';
import { marked } from 'marked';
import { marked, type Token } from 'marked';
// Simple console logger for import processing
const logger = {
@@ -153,39 +153,32 @@ function isLetter(char: string): boolean {
function findCodeRegions(content: string): Array<[number, number]> {
const regions: Array<[number, number]> = [];
const tokens = marked.lexer(content);
let offset = 0;
// Map from raw content to a queue of its start indices in the original content.
const rawContentIndices = new Map<string, number[]>();
function walk(token: { type: string; raw: string; tokens?: unknown[] }) {
function walk(token: Token, baseOffset: number) {
if (token.type === 'code' || token.type === 'codespan') {
if (!rawContentIndices.has(token.raw)) {
const indices: number[] = [];
let lastIndex = -1;
while ((lastIndex = content.indexOf(token.raw, lastIndex + 1)) !== -1) {
indices.push(lastIndex);
}
rawContentIndices.set(token.raw, indices);
}
const indices = rawContentIndices.get(token.raw);
if (indices && indices.length > 0) {
// Assume tokens are processed in order of appearance.
// Dequeue the next available index for this raw content.
const idx = indices.shift()!;
regions.push([idx, idx + token.raw.length]);
}
regions.push([baseOffset, baseOffset + token.raw.length]);
}
if ('tokens' in token && token.tokens) {
let childOffset = 0;
for (const child of token.tokens) {
walk(child as { type: string; raw: string; tokens?: unknown[] });
const childIndexInParent = token.raw.indexOf(child.raw, childOffset);
if (childIndexInParent === -1) {
logger.error(
`Could not find child token in parent raw content. Aborting parsing for this branch. Child raw: "${child.raw}"`,
);
break;
}
walk(child, baseOffset + childIndexInParent);
childOffset = childIndexInParent + child.raw.length;
}
}
}
for (const token of tokens) {
walk(token);
walk(token, offset);
offset += token.raw.length;
}
return regions;

View File

@@ -4,83 +4,82 @@
* SPDX-License-Identifier: Apache-2.0
*/
import {
describe,
it,
expect,
vi,
beforeEach,
type Mock,
afterEach,
} from 'vitest';
import type { Content, GoogleGenAI, Models } from '@google/genai';
import { DEFAULT_QWEN_FLASH_MODEL } from '../config/models.js';
import { GeminiClient } from '../core/client.js';
import { Config } from '../config/config.js';
import type { Mock } from 'vitest';
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { Content } from '@google/genai';
import { BaseLlmClient } from '../core/baseLlmClient.js';
import type { ContentGenerator } from '../core/contentGenerator.js';
import type { Config } from '../config/config.js';
import type { NextSpeakerResponse } from './nextSpeakerChecker.js';
import { checkNextSpeaker } from './nextSpeakerChecker.js';
import { GeminiChat } from '../core/geminiChat.js';
// Mock GeminiClient and Config constructor
vi.mock('../core/client.js');
vi.mock('../config/config.js');
// Mock fs module to prevent actual file system operations during tests
const mockFileSystem = new Map<string, string>();
// Define mocks for GoogleGenAI and Models instances that will be used across tests
const mockModelsInstance = {
generateContent: vi.fn(),
generateContentStream: vi.fn(),
countTokens: vi.fn(),
embedContent: vi.fn(),
batchEmbedContents: vi.fn(),
} as unknown as Models;
vi.mock('node:fs', () => {
const fsModule = {
mkdirSync: vi.fn(),
writeFileSync: vi.fn((path: string, data: string) => {
mockFileSystem.set(path, data);
}),
readFileSync: vi.fn((path: string) => {
if (mockFileSystem.has(path)) {
return mockFileSystem.get(path);
}
throw Object.assign(new Error('ENOENT: no such file or directory'), {
code: 'ENOENT',
});
}),
existsSync: vi.fn((path: string) => mockFileSystem.has(path)),
};
const mockGoogleGenAIInstance = {
getGenerativeModel: vi.fn().mockReturnValue(mockModelsInstance),
// Add other methods of GoogleGenAI if they are directly used by GeminiChat constructor or its methods
} as unknown as GoogleGenAI;
vi.mock('@google/genai', async () => {
const actualGenAI =
await vi.importActual<typeof import('@google/genai')>('@google/genai');
return {
...actualGenAI,
GoogleGenAI: vi.fn(() => mockGoogleGenAIInstance), // Mock constructor to return the predefined instance
// If Models is instantiated directly in GeminiChat, mock its constructor too
// For now, assuming Models instance is obtained via getGenerativeModel
default: fsModule,
...fsModule,
};
});
// Mock GeminiClient and Config constructor
vi.mock('../core/baseLlmClient.js');
vi.mock('../config/config.js');
describe('checkNextSpeaker', () => {
let chatInstance: GeminiChat;
let mockGeminiClient: GeminiClient;
let MockConfig: Mock;
let mockConfig: Config;
let mockBaseLlmClient: BaseLlmClient;
const abortSignal = new AbortController().signal;
const promptId = 'test-prompt-id';
beforeEach(() => {
MockConfig = vi.mocked(Config);
const mockConfigInstance = new MockConfig(
'test-api-key',
'gemini-pro',
false,
'.',
false,
undefined,
false,
undefined,
undefined,
undefined,
vi.resetAllMocks();
mockBaseLlmClient = new BaseLlmClient(
{
generateContent: vi.fn(),
generateContentStream: vi.fn(),
countTokens: vi.fn(),
embedContent: vi.fn(),
} as ContentGenerator,
{} as Config,
);
mockGeminiClient = new GeminiClient(mockConfigInstance);
// Add generateJson mock to the client
mockBaseLlmClient.generateJson = vi.fn();
// Reset mocks before each test to ensure test isolation
vi.mocked(mockModelsInstance.generateContent).mockReset();
vi.mocked(mockModelsInstance.generateContentStream).mockReset();
mockConfig = {
getProjectRoot: vi.fn().mockReturnValue('/test/project/root'),
getSessionId: vi.fn().mockReturnValue('test-session-id'),
getModel: () => 'test-model',
getBaseLlmClient: vi.fn().mockReturnValue(mockBaseLlmClient),
storage: {
getProjectTempDir: vi.fn().mockReturnValue('/test/temp'),
},
} as unknown as Config;
// GeminiChat will receive the mocked instances via the mocked GoogleGenAI constructor
chatInstance = new GeminiChat(
mockConfigInstance,
mockModelsInstance, // This is the instance returned by mockGoogleGenAIInstance.getGenerativeModel
mockConfig,
{},
[], // initial history
);
@@ -90,31 +89,33 @@ describe('checkNextSpeaker', () => {
});
afterEach(() => {
vi.clearAllMocks();
vi.restoreAllMocks();
});
it('should return null if history is empty', async () => {
(chatInstance.getHistory as Mock).mockReturnValue([]);
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toBeNull();
expect(mockGeminiClient.generateJson).not.toHaveBeenCalled();
expect(mockBaseLlmClient.generateJson).not.toHaveBeenCalled();
});
it('should return null if the last speaker was the user', async () => {
(chatInstance.getHistory as Mock).mockReturnValue([
vi.mocked(chatInstance.getHistory).mockReturnValue([
{ role: 'user', parts: [{ text: 'Hello' }] },
] as Content[]);
]);
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toBeNull();
expect(mockGeminiClient.generateJson).not.toHaveBeenCalled();
expect(mockBaseLlmClient.generateJson).not.toHaveBeenCalled();
});
it("should return { next_speaker: 'model' } when model intends to continue", async () => {
@@ -125,15 +126,16 @@ describe('checkNextSpeaker', () => {
reasoning: 'Model stated it will do something.',
next_speaker: 'model',
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
(mockBaseLlmClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toEqual(mockApiResponse);
expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1);
expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1);
});
it("should return { next_speaker: 'user' } when model asks a question", async () => {
@@ -144,12 +146,13 @@ describe('checkNextSpeaker', () => {
reasoning: 'Model asked a question.',
next_speaker: 'user',
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
(mockBaseLlmClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toEqual(mockApiResponse);
});
@@ -162,87 +165,92 @@ describe('checkNextSpeaker', () => {
reasoning: 'Model made a statement, awaiting user input.',
next_speaker: 'user',
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
(mockBaseLlmClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toEqual(mockApiResponse);
});
it('should return null if geminiClient.generateJson throws an error', async () => {
it('should return null if baseLlmClient.generateJson throws an error', async () => {
const consoleWarnSpy = vi
.spyOn(console, 'warn')
.mockImplementation(() => {});
(chatInstance.getHistory as Mock).mockReturnValue([
{ role: 'model', parts: [{ text: 'Some model output.' }] },
] as Content[]);
(mockGeminiClient.generateJson as Mock).mockRejectedValue(
(mockBaseLlmClient.generateJson as Mock).mockRejectedValue(
new Error('API Error'),
);
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toBeNull();
consoleWarnSpy.mockRestore();
});
it('should return null if geminiClient.generateJson returns invalid JSON (missing next_speaker)', async () => {
it('should return null if baseLlmClient.generateJson returns invalid JSON (missing next_speaker)', async () => {
(chatInstance.getHistory as Mock).mockReturnValue([
{ role: 'model', parts: [{ text: 'Some model output.' }] },
] as Content[]);
(mockGeminiClient.generateJson as Mock).mockResolvedValue({
(mockBaseLlmClient.generateJson as Mock).mockResolvedValue({
reasoning: 'This is incomplete.',
} as unknown as NextSpeakerResponse); // Type assertion to simulate invalid response
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toBeNull();
});
it('should return null if geminiClient.generateJson returns a non-string next_speaker', async () => {
it('should return null if baseLlmClient.generateJson returns a non-string next_speaker', async () => {
(chatInstance.getHistory as Mock).mockReturnValue([
{ role: 'model', parts: [{ text: 'Some model output.' }] },
] as Content[]);
(mockGeminiClient.generateJson as Mock).mockResolvedValue({
(mockBaseLlmClient.generateJson as Mock).mockResolvedValue({
reasoning: 'Model made a statement, awaiting user input.',
next_speaker: 123, // Invalid type
} as unknown as NextSpeakerResponse);
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toBeNull();
});
it('should return null if geminiClient.generateJson returns an invalid next_speaker string value', async () => {
it('should return null if baseLlmClient.generateJson returns an invalid next_speaker string value', async () => {
(chatInstance.getHistory as Mock).mockReturnValue([
{ role: 'model', parts: [{ text: 'Some model output.' }] },
] as Content[]);
(mockGeminiClient.generateJson as Mock).mockResolvedValue({
(mockBaseLlmClient.generateJson as Mock).mockResolvedValue({
reasoning: 'Model made a statement, awaiting user input.',
next_speaker: 'neither', // Invalid enum value
} as unknown as NextSpeakerResponse);
const result = await checkNextSpeaker(
chatInstance,
mockGeminiClient,
mockConfig,
abortSignal,
promptId,
);
expect(result).toBeNull();
});
it('should call generateJson with DEFAULT_QWEN_FLASH_MODEL', async () => {
it('should call generateJson with the correct parameters', async () => {
(chatInstance.getHistory as Mock).mockReturnValue([
{ role: 'model', parts: [{ text: 'Some model output.' }] },
] as Content[]);
@@ -250,13 +258,14 @@ describe('checkNextSpeaker', () => {
reasoning: 'Model made a statement, awaiting user input.',
next_speaker: 'user',
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
(mockBaseLlmClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
await checkNextSpeaker(chatInstance, mockGeminiClient, abortSignal);
await checkNextSpeaker(chatInstance, mockConfig, abortSignal, promptId);
expect(mockGeminiClient.generateJson).toHaveBeenCalled();
const generateJsonCall = (mockGeminiClient.generateJson as Mock).mock
expect(mockBaseLlmClient.generateJson).toHaveBeenCalled();
const generateJsonCall = (mockBaseLlmClient.generateJson as Mock).mock
.calls[0];
expect(generateJsonCall[3]).toBe(DEFAULT_QWEN_FLASH_MODEL);
expect(generateJsonCall[0].model).toBe('test-model');
expect(generateJsonCall[0].promptId).toBe(promptId);
});
});

View File

@@ -5,10 +5,10 @@
*/
import type { Content } from '@google/genai';
import { DEFAULT_QWEN_FLASH_MODEL } from '../config/models.js';
import type { GeminiClient } from '../core/client.js';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { GeminiChat } from '../core/geminiChat.js';
import { isFunctionResponse } from './messageInspectors.js';
import type { Config } from '../config/config.js';
const CHECK_PROMPT = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
**Decision Rules (apply in order):**
@@ -41,8 +41,9 @@ export interface NextSpeakerResponse {
export async function checkNextSpeaker(
chat: GeminiChat,
geminiClient: GeminiClient,
config: Config,
abortSignal: AbortSignal,
promptId: string,
): Promise<NextSpeakerResponse | null> {
// We need to capture the curated history because there are many moments when the model will return invalid turns
// that when passed back up to the endpoint will break subsequent calls. An example of this is when the model decides
@@ -108,12 +109,13 @@ export async function checkNextSpeaker(
];
try {
const parsedResponse = (await geminiClient.generateJson(
const parsedResponse = (await config.getBaseLlmClient().generateJson({
contents,
RESPONSE_SCHEMA,
schema: RESPONSE_SCHEMA,
model: config.getModel() || DEFAULT_QWEN_MODEL,
abortSignal,
DEFAULT_QWEN_FLASH_MODEL,
)) as unknown as NextSpeakerResponse;
promptId,
})) as unknown as NextSpeakerResponse;
if (
parsedResponse &&

View File

@@ -300,7 +300,7 @@ describe('readPathFromWorkspace', () => {
['ignored.txt'],
{
respectGitIgnore: true,
respectGeminiIgnore: true,
respectQwenIgnore: true,
},
);
});

View File

@@ -74,7 +74,7 @@ export async function readPathFromWorkspace(
);
const filteredFiles = fileService.filterFiles(relativeFiles, {
respectGitIgnore: true,
respectGeminiIgnore: true,
respectQwenIgnore: true,
});
const finalFiles = filteredFiles.map((p) =>
path.resolve(config.getTargetDir(), p),
@@ -99,7 +99,7 @@ export async function readPathFromWorkspace(
const relativePath = path.relative(config.getTargetDir(), absolutePath);
const filtered = fileService.filterFiles([relativePath], {
respectGitIgnore: true,
respectGeminiIgnore: true,
respectQwenIgnore: true,
});
if (filtered.length === 0) {

View File

@@ -0,0 +1,9 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { AsyncLocalStorage } from 'node:async_hooks';

// Async-local store carrying the current prompt id across async call chains,
// so deeply nested code (e.g. logging/telemetry) can read the id without it
// being threaded through every function signature.
export const promptIdContext = new AsyncLocalStorage<string>();

View File

@@ -0,0 +1,68 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
import { QwenIgnoreParser } from './qwenIgnoreParser.js';
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import * as os from 'node:os';
// Integration-style tests for QwenIgnoreParser: each test builds a real
// temporary project directory on disk, then verifies pattern loading and
// path matching against it.
describe('QwenIgnoreParser', () => {
  // Absolute path of the throwaway project root created for each test.
  let projectRoot: string;

  // Writes `content` to `filePath` (relative to projectRoot), creating any
  // missing parent directories first.
  async function createTestFile(filePath: string, content = '') {
    const fullPath = path.join(projectRoot, filePath);
    await fs.mkdir(path.dirname(fullPath), { recursive: true });
    await fs.writeFile(fullPath, content);
  }

  beforeEach(async () => {
    // A fresh temp directory keeps tests isolated from one another.
    projectRoot = await fs.mkdtemp(path.join(os.tmpdir(), 'qwenignore-test-'));
  });

  afterEach(async () => {
    await fs.rm(projectRoot, { recursive: true, force: true });
    vi.restoreAllMocks();
  });

  describe('when .qwenignore exists', () => {
    beforeEach(async () => {
      // One literal file pattern, a comment line, and a rooted directory rule.
      await createTestFile(
        '.qwenignore',
        'ignored.txt\n# A comment\n/ignored_dir/\n',
      );
      await createTestFile('ignored.txt', 'ignored');
      await createTestFile('not_ignored.txt', 'not ignored');
      await createTestFile(
        path.join('ignored_dir', 'file.txt'),
        'in ignored dir',
      );
      await createTestFile(
        path.join('subdir', 'not_ignored.txt'),
        'not ignored',
      );
    });

    it('should ignore files specified in .qwenignore', () => {
      const parser = new QwenIgnoreParser(projectRoot);
      // Comment lines are dropped; only the two real patterns survive.
      expect(parser.getPatterns()).toEqual(['ignored.txt', '/ignored_dir/']);
      expect(parser.isIgnored('ignored.txt')).toBe(true);
      expect(parser.isIgnored('not_ignored.txt')).toBe(false);
      expect(parser.isIgnored(path.join('ignored_dir', 'file.txt'))).toBe(true);
      expect(parser.isIgnored(path.join('subdir', 'not_ignored.txt'))).toBe(
        false,
      );
    });
  });

  describe('when .qwenignore does not exist', () => {
    it('should not load any patterns and not ignore any files', () => {
      const parser = new QwenIgnoreParser(projectRoot);
      expect(parser.getPatterns()).toEqual([]);
      expect(parser.isIgnored('any_file.txt')).toBe(false);
    });
  });
});

View File

@@ -0,0 +1,81 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'node:fs';
import * as path from 'node:path';
import ignore from 'ignore';
/** Minimal contract for consulting `.qwenignore` exclusion rules. */
export interface QwenIgnoreFilter {
  /** Returns true when the given path matches a loaded ignore pattern. */
  isIgnored(filePath: string): boolean;
  /** Returns the loaded ignore patterns, in file order. */
  getPatterns(): string[];
}
/**
 * Loads ignore rules from a project's `.qwenignore` file and answers whether
 * a given path should be excluded, using gitignore-style matching provided by
 * the `ignore` package.
 */
export class QwenIgnoreParser implements QwenIgnoreFilter {
  private readonly projectRoot: string;
  private readonly ig = ignore();
  private patterns: string[] = [];

  /**
   * @param projectRoot Root directory that may contain a `.qwenignore` file.
   */
  constructor(projectRoot: string) {
    this.projectRoot = path.resolve(projectRoot);
    this.loadPatterns();
  }

  /** Reads `.qwenignore` (if present) and registers its non-comment lines. */
  private loadPatterns(): void {
    const ignoreFilePath = path.join(this.projectRoot, '.qwenignore');
    let raw: string;
    try {
      raw = fs.readFileSync(ignoreFilePath, 'utf-8');
    } catch (_error) {
      // A missing .qwenignore simply means nothing is ignored.
      return;
    }
    this.patterns = raw
      .split('\n')
      .map((line) => line.trim())
      .filter((line) => line !== '' && !line.startsWith('#'));
    this.ig.add(this.patterns);
  }

  /**
   * Returns true when `filePath` (absolute, or relative to the project root)
   * matches one of the loaded ignore patterns. Paths outside the project
   * root, and obviously malformed inputs, are never reported as ignored.
   */
  isIgnored(filePath: string): boolean {
    if (this.patterns.length === 0) {
      return false;
    }
    if (!filePath || typeof filePath !== 'string') {
      return false;
    }
    // Reject inputs the matcher cannot meaningfully handle.
    if (
      filePath.startsWith('\\') ||
      filePath === '/' ||
      filePath.includes('\0')
    ) {
      return false;
    }
    const absolutePath = path.resolve(this.projectRoot, filePath);
    const relativePath = path.relative(this.projectRoot, absolutePath);
    // Anything at or above the project root is out of scope.
    if (relativePath === '' || relativePath.startsWith('..')) {
      return false;
    }
    // The ignore matcher expects forward slashes, even on Windows.
    const posixPath = relativePath.replace(/\\/g, '/');
    if (posixPath === '' || posixPath.startsWith('/')) {
      return false;
    }
    return this.ig.ignores(posixPath);
  }

  /** The raw pattern list parsed from `.qwenignore`. */
  getPatterns(): string[] {
    return this.patterns;
  }
}

View File

@@ -100,14 +100,49 @@ describe('retryWithBackoff', () => {
expect(mockFn).toHaveBeenCalledTimes(3);
});
it('should default to 5 maxAttempts if no options are provided', async () => {
// This function will fail more than 5 times to ensure all retries are used.
const mockFn = createFailingFunction(10);
const promise = retryWithBackoff(mockFn);
// Expect it to fail with the error from the 5th attempt.
// eslint-disable-next-line vitest/valid-expect
const assertionPromise = expect(promise).rejects.toThrow(
'Simulated error attempt 5',
);
await vi.runAllTimersAsync();
await assertionPromise;
expect(mockFn).toHaveBeenCalledTimes(5);
});
it('should default to 5 maxAttempts if options.maxAttempts is undefined', async () => {
// This function will fail more than 5 times to ensure all retries are used.
const mockFn = createFailingFunction(10);
const promise = retryWithBackoff(mockFn, { maxAttempts: undefined });
// Expect it to fail with the error from the 5th attempt.
// eslint-disable-next-line vitest/valid-expect
const assertionPromise = expect(promise).rejects.toThrow(
'Simulated error attempt 5',
);
await vi.runAllTimersAsync();
await assertionPromise;
expect(mockFn).toHaveBeenCalledTimes(5);
});
it('should not retry if shouldRetry returns false', async () => {
const mockFn = vi.fn(async () => {
throw new NonRetryableError('Non-retryable error');
});
const shouldRetry = (error: Error) => !(error instanceof NonRetryableError);
const shouldRetryOnError = (error: Error) =>
!(error instanceof NonRetryableError);
const promise = retryWithBackoff(mockFn, {
shouldRetry,
shouldRetryOnError,
initialDelayMs: 10,
});
@@ -115,6 +150,18 @@ describe('retryWithBackoff', () => {
expect(mockFn).toHaveBeenCalledTimes(1);
});
it('should throw an error if maxAttempts is not a positive number', async () => {
const mockFn = createFailingFunction(1);
// Test with 0
await expect(retryWithBackoff(mockFn, { maxAttempts: 0 })).rejects.toThrow(
'maxAttempts must be a positive number.',
);
// The function should not be called at all if validation fails
expect(mockFn).not.toHaveBeenCalled();
});
it('should use default shouldRetry if not provided, retrying on 429', async () => {
const mockFn = vi.fn(async () => {
const error = new Error('Too Many Requests') as any;
@@ -419,7 +466,6 @@ describe('retryWithBackoff', () => {
maxAttempts: 5,
initialDelayMs: 100,
maxDelayMs: 1000,
shouldRetry: () => true,
authType: AuthType.QWEN_OAUTH,
});
@@ -441,7 +487,6 @@ describe('retryWithBackoff', () => {
maxAttempts: 5,
initialDelayMs: 1000,
maxDelayMs: 5000,
shouldRetry: () => true,
authType: AuthType.QWEN_OAUTH,
});
@@ -462,7 +507,6 @@ describe('retryWithBackoff', () => {
maxAttempts: 5,
initialDelayMs: 1000,
maxDelayMs: 5000,
shouldRetry: () => true,
authType: AuthType.QWEN_OAUTH,
});
@@ -488,7 +532,6 @@ describe('retryWithBackoff', () => {
maxAttempts: 5,
initialDelayMs: 100,
maxDelayMs: 1000,
shouldRetry: () => true,
authType: AuthType.QWEN_OAUTH,
});
@@ -514,7 +557,6 @@ describe('retryWithBackoff', () => {
maxAttempts: 5,
initialDelayMs: 100,
maxDelayMs: 1000,
shouldRetry: () => true,
authType: AuthType.QWEN_OAUTH,
});
@@ -536,7 +578,6 @@ describe('retryWithBackoff', () => {
maxAttempts: 5,
initialDelayMs: 1000,
maxDelayMs: 5000,
shouldRetry: () => true,
authType: AuthType.QWEN_OAUTH,
});
@@ -560,7 +601,6 @@ describe('retryWithBackoff', () => {
maxAttempts: 5,
initialDelayMs: 100,
maxDelayMs: 1000,
shouldRetry: () => true,
authType: AuthType.QWEN_OAUTH,
});

View File

@@ -4,6 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { GenerateContentResponse } from '@google/genai';
import { AuthType } from '../core/contentGenerator.js';
import {
isProQuotaExceededError,
@@ -20,7 +21,8 @@ export interface RetryOptions {
maxAttempts: number;
initialDelayMs: number;
maxDelayMs: number;
shouldRetry: (error: Error) => boolean;
shouldRetryOnError: (error: Error) => boolean;
shouldRetryOnContent?: (content: GenerateContentResponse) => boolean;
onPersistent429?: (
authType?: string,
error?: unknown,
@@ -32,7 +34,7 @@ const DEFAULT_RETRY_OPTIONS: RetryOptions = {
maxAttempts: 5,
initialDelayMs: 5000,
maxDelayMs: 30000, // 30 seconds
shouldRetry: defaultShouldRetry,
shouldRetryOnError: defaultShouldRetry,
};
/**
@@ -76,16 +78,25 @@ export async function retryWithBackoff<T>(
fn: () => Promise<T>,
options?: Partial<RetryOptions>,
): Promise<T> {
if (options?.maxAttempts !== undefined && options.maxAttempts <= 0) {
throw new Error('maxAttempts must be a positive number.');
}
const cleanOptions = options
? Object.fromEntries(Object.entries(options).filter(([_, v]) => v != null))
: {};
const {
maxAttempts,
initialDelayMs,
maxDelayMs,
onPersistent429,
authType,
shouldRetry,
shouldRetryOnError,
shouldRetryOnContent,
} = {
...DEFAULT_RETRY_OPTIONS,
...options,
...cleanOptions,
};
let attempt = 0;
@@ -95,7 +106,20 @@ export async function retryWithBackoff<T>(
while (attempt < maxAttempts) {
attempt++;
try {
return await fn();
const result = await fn();
if (
shouldRetryOnContent &&
shouldRetryOnContent(result as GenerateContentResponse)
) {
const jitter = currentDelay * 0.3 * (Math.random() * 2 - 1);
const delayWithJitter = Math.max(0, currentDelay + jitter);
await delay(delayWithJitter);
currentDelay = Math.min(maxDelayMs, currentDelay * 2);
continue;
}
return result;
} catch (error) {
const errorStatus = getErrorStatus(error);
@@ -199,7 +223,7 @@ export async function retryWithBackoff<T>(
}
// Check if we've exhausted retries or shouldn't retry
if (attempt >= maxAttempts || !shouldRetry(error as Error)) {
if (attempt >= maxAttempts || !shouldRetryOnError(error as Error)) {
throw error;
}

View File

@@ -0,0 +1,125 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect, it } from 'vitest';
import { SchemaValidator } from './schemaValidator.js';
// Unit tests for SchemaValidator.validate, which returns null when `params`
// satisfies `schema` and an error string describing the failure otherwise.
describe('SchemaValidator', () => {
  it('should allow any params if schema is undefined', () => {
    const params = {
      foo: 'bar',
    };
    // No schema means there is nothing to validate against, so anything passes.
    expect(SchemaValidator.validate(undefined, params)).toBeNull();
  });

  it('rejects null params', () => {
    const schema = {
      type: 'object',
      properties: {
        foo: {
          type: 'string',
        },
      },
    };
    // null is rejected up front, before schema matching.
    expect(SchemaValidator.validate(schema, null)).toBe(
      'Value of params must be an object',
    );
  });

  it('rejects params that are not objects', () => {
    const schema = {
      type: 'object',
      properties: {
        foo: {
          type: 'string',
        },
      },
    };
    // Primitives get the same top-level rejection as null.
    expect(SchemaValidator.validate(schema, 'not an object')).toBe(
      'Value of params must be an object',
    );
  });

  it('allows schema with extra properties', () => {
    const schema = {
      type: 'object',
      properties: {
        example_enum: {
          type: 'string',
          enum: ['FOO', 'BAR'],
          // enum-descriptions is not part of the JSON schema spec.
          // This test verifies that the SchemaValidator allows the
          // use of extra keywords, like this one, in the schema.
          'enum-descriptions': ['a foo', 'a bar'],
        },
      },
    };
    const params = {
      example_enum: 'BAR',
    };
    expect(SchemaValidator.validate(schema, params)).toBeNull();
  });

  it('allows custom format values', () => {
    // Unknown `format` values must not make a schema invalid; the JSON schema
    // spec treats unrecognized formats as annotations to be ignored.
    const schema = {
      type: 'object',
      properties: {
        duration: {
          type: 'string',
          // See: https://cloud.google.com/docs/discovery/type-format
          format: 'google-duration',
        },
        mask: {
          type: 'string',
          format: 'google-fieldmask',
        },
        foo: {
          type: 'string',
          format: 'something-totally-custom',
        },
      },
    };
    const params = {
      duration: '10s',
      mask: 'foo.bar,biz.baz',
      foo: 'some value',
    };
    expect(SchemaValidator.validate(schema, params)).toBeNull();
  });

  it('allows valid values for known formats', () => {
    const schema = {
      type: 'object',
      properties: {
        today: {
          type: 'string',
          format: 'date',
        },
      },
    };
    const params = {
      today: '2025-04-08',
    };
    expect(SchemaValidator.validate(schema, params)).toBeNull();
  });

  it('rejects invalid values for known formats', () => {
    const schema = {
      type: 'object',
      properties: {
        today: {
          type: 'string',
          format: 'date',
        },
      },
    };
    const params = {
      today: 'this is not a date',
    };
    // Standard formats like `date` are still enforced, so this must fail.
    expect(SchemaValidator.validate(schema, params)).not.toBeNull();
  });
});

View File

@@ -9,7 +9,18 @@ import * as addFormats from 'ajv-formats';
// Ajv's ESM/CJS interop: use 'any' for compatibility as recommended by Ajv docs
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const AjvClass = (AjvPkg as any).default || AjvPkg;
const ajValidator = new AjvClass({ coerceTypes: true });
const ajValidator = new AjvClass(
// See: https://ajv.js.org/options.html#strict-mode-options
{
// strictSchema defaults to true and prevents use of JSON schemas that
// include unrecognized keywords. The JSON schema spec specifically allows
// for the use of non-standard keywords and the spec-compliant behavior
// is to ignore those keywords. Note that setting this to false also
// allows use of non-standard or custom formats (the unknown format value
// will be logged but the schema will still be considered valid).
strictSchema: false,
},
);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const addFormatsFunc = (addFormats as any).default || addFormats;
addFormatsFunc(ajValidator);

View File

@@ -149,6 +149,15 @@ describe('isCommandAllowed', () => {
expect(result.reason).toContain('Command substitution');
});
it('should block command substitution using `>(...)`', () => {
const result = isCommandAllowed(
'echo "Log message" > >(tee log.txt)',
config,
);
expect(result.allowed).toBe(false);
expect(result.reason).toContain('Command substitution');
});
it('should block command substitution using backticks', () => {
const result = isCommandAllowed('echo `rm -rf /`', config);
expect(result.allowed).toBe(false);

View File

@@ -10,6 +10,7 @@ import os from 'node:os';
import { quote } from 'shell-quote';
import { doesToolInvocationMatch } from './tool-utils.js';
import { isShellCommandReadOnly } from './shellReadOnlyChecker.js';
import { spawn, type SpawnOptionsWithoutStdio } from 'node:child_process';
const SHELL_TOOL_NAMES = ['run_shell_command', 'ShellTool'];
@@ -266,6 +267,11 @@ export function detectCommandSubstitution(command: string): boolean {
return true;
}
// >(...) process substitution - works unquoted only (not in double quotes)
if (char === '>' && nextChar === '(' && !inDoubleQuotes && !inBackticks) {
return true;
}
// Backtick command substitution - check for opening backtick
// (We track the state above, so this catches the start of backtick substitution)
if (char === '`' && !inBackticks) {
@@ -319,7 +325,7 @@ export function checkCommandPermissions(
allAllowed: false,
disallowedCommands: [command],
blockReason:
'Command substitution using $(), <(), or >() is not allowed for security reasons',
'Command substitution using $(), `` ` ``, <(), or >() is not allowed for security reasons',
isHardDenial: true,
};
}
@@ -459,6 +465,37 @@ export function checkCommandPermissions(
* @param config The application configuration.
* @returns An object with 'allowed' boolean and optional 'reason' string if not allowed.
*/
/**
 * Promise-based wrapper around `child_process.spawn`.
 *
 * Captures stdout and stderr as strings. Resolves with both streams when
 * the process exits with code 0; rejects with an Error containing the
 * captured stderr on a non-zero exit, or with the underlying error if the
 * process could not be spawned at all.
 */
export const spawnAsync = (
  command: string,
  args: string[],
  options?: SpawnOptionsWithoutStdio,
): Promise<{ stdout: string; stderr: string }> =>
  new Promise((resolve, reject) => {
    const child = spawn(command, args, options);
    const outChunks: string[] = [];
    const errChunks: string[] = [];
    child.stdout.on('data', (chunk) => outChunks.push(chunk.toString()));
    child.stderr.on('data', (chunk) => errChunks.push(chunk.toString()));
    // Spawn failure (e.g. command not found) never emits 'close' with a code.
    child.on('error', reject);
    child.on('close', (code) => {
      const stdout = outChunks.join('');
      const stderr = errChunks.join('');
      if (code === 0) {
        resolve({ stdout, stderr });
      } else {
        reject(new Error(`Command failed with exit code ${code}:\n${stderr}`));
      }
    });
  });
export function isCommandAllowed(
command: string,
config: Config,

View File

@@ -13,66 +13,29 @@ import {
type Mock,
afterEach,
} from 'vitest';
import type { Content, GoogleGenAI, Models } from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import { GeminiClient } from '../core/client.js';
import { Config } from '../config/config.js';
import type { BaseLlmClient } from '../core/baseLlmClient.js';
import type { Config } from '../config/config.js';
import {
subagentGenerator,
type SubagentGeneratedContent,
} from './subagentGenerator.js';
// Mock GeminiClient and Config constructor
vi.mock('../core/client.js');
vi.mock('../config/config.js');
// Define mocks for GoogleGenAI and Models instances that will be used across tests
const mockModelsInstance = {
generateContent: vi.fn(),
generateContentStream: vi.fn(),
countTokens: vi.fn(),
embedContent: vi.fn(),
batchEmbedContents: vi.fn(),
} as unknown as Models;
const mockGoogleGenAIInstance = {
getGenerativeModel: vi.fn().mockReturnValue(mockModelsInstance),
} as unknown as GoogleGenAI;
vi.mock('@google/genai', async () => {
const actualGenAI =
await vi.importActual<typeof import('@google/genai')>('@google/genai');
return {
...actualGenAI,
GoogleGenAI: vi.fn(() => mockGoogleGenAIInstance),
};
});
describe('subagentGenerator', () => {
let mockGeminiClient: GeminiClient;
let MockConfig: Mock;
let mockClient: BaseLlmClient;
let mockConfig: Config;
const abortSignal = new AbortController().signal;
beforeEach(() => {
MockConfig = vi.mocked(Config);
const mockConfigInstance = new MockConfig(
'test-api-key',
'gemini-pro',
false,
'.',
false,
undefined,
false,
undefined,
undefined,
undefined,
);
// Create a mock client with generateJson method
mockClient = {
generateJson: vi.fn(),
} as unknown as BaseLlmClient;
mockGeminiClient = new GeminiClient(mockConfigInstance);
// Reset mocks before each test to ensure test isolation
vi.mocked(mockModelsInstance.generateContent).mockReset();
vi.mocked(mockModelsInstance.generateContentStream).mockReset();
// Create a mock config that returns the mock client and model
mockConfig = {
getBaseLlmClient: vi.fn().mockReturnValue(mockClient),
getModel: vi.fn().mockReturnValue('qwen3-coder-plus'),
} as unknown as Config;
});
afterEach(() => {
@@ -81,14 +44,14 @@ describe('subagentGenerator', () => {
it('should throw error for empty user description', async () => {
await expect(
subagentGenerator('', mockGeminiClient, abortSignal),
subagentGenerator('', mockConfig, abortSignal),
).rejects.toThrow('User description cannot be empty');
await expect(
subagentGenerator(' ', mockGeminiClient, abortSignal),
subagentGenerator(' ', mockConfig, abortSignal),
).rejects.toThrow('User description cannot be empty');
expect(mockGeminiClient.generateJson).not.toHaveBeenCalled();
expect(mockClient.generateJson).not.toHaveBeenCalled();
});
it('should successfully generate content with valid LLM response', async () => {
@@ -101,38 +64,33 @@ describe('subagentGenerator', () => {
'You are a code review expert. Analyze code for best practices, bugs, and improvements.',
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
(mockClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
const result = await subagentGenerator(
userDescription,
mockGeminiClient,
mockConfig,
abortSignal,
);
expect(result).toEqual(mockApiResponse);
expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1);
expect(mockClient.generateJson).toHaveBeenCalledTimes(1);
// Verify the call parameters
const generateJsonCall = (mockGeminiClient.generateJson as Mock).mock
.calls[0];
const contents = generateJsonCall[0] as Content[];
// Verify the call parameters - now it's a single object parameter
const generateJsonCall = (mockClient.generateJson as Mock).mock.calls[0];
const callParams = generateJsonCall[0];
// Should have 1 user message with the query
expect(contents).toHaveLength(1);
expect(contents[0]?.role).toBe('user');
expect(contents[0]?.parts?.[0]?.text).toContain(
// Check the contents
expect(callParams.contents).toHaveLength(1);
expect(callParams.contents[0]?.role).toBe('user');
expect(callParams.contents[0]?.parts?.[0]?.text).toContain(
`Create an agent configuration based on this request: "${userDescription}"`,
);
// Check that system prompt is passed in the config parameter
expect(generateJsonCall[2]).toBe(abortSignal);
expect(generateJsonCall[3]).toBe(DEFAULT_QWEN_MODEL);
expect(generateJsonCall[4]).toEqual(
expect.objectContaining({
systemInstruction: expect.stringContaining(
'You are an elite AI agent architect',
),
}),
// Check other parameters
expect(callParams.abortSignal).toBe(abortSignal);
expect(callParams.model).toBe('qwen3-coder-plus');
expect(callParams.systemInstruction).toContain(
'You are an elite AI agent architect',
);
});
@@ -144,15 +102,13 @@ describe('subagentGenerator', () => {
// Missing systemPrompt
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(
incompleteResponse,
);
(mockClient.generateJson as Mock).mockResolvedValue(incompleteResponse);
await expect(
subagentGenerator(userDescription, mockGeminiClient, abortSignal),
subagentGenerator(userDescription, mockConfig, abortSignal),
).rejects.toThrow('Invalid response from LLM: missing required fields');
expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1);
expect(mockClient.generateJson).toHaveBeenCalledTimes(1);
});
it('should throw error when LLM response has empty fields', async () => {
@@ -163,23 +119,19 @@ describe('subagentGenerator', () => {
systemPrompt: 'You are a database expert.',
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(
emptyFieldsResponse,
);
(mockClient.generateJson as Mock).mockResolvedValue(emptyFieldsResponse);
await expect(
subagentGenerator(userDescription, mockGeminiClient, abortSignal),
subagentGenerator(userDescription, mockConfig, abortSignal),
).rejects.toThrow('Invalid response from LLM: missing required fields');
});
it('should throw error when generateJson throws an error', async () => {
const userDescription = 'testing automation';
(mockGeminiClient.generateJson as Mock).mockRejectedValue(
new Error('API Error'),
);
(mockClient.generateJson as Mock).mockRejectedValue(new Error('API Error'));
await expect(
subagentGenerator(userDescription, mockGeminiClient, abortSignal),
subagentGenerator(userDescription, mockConfig, abortSignal),
).rejects.toThrow('API Error');
});
@@ -191,24 +143,24 @@ describe('subagentGenerator', () => {
systemPrompt: 'You are a data analysis expert.',
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(mockResponse);
(mockClient.generateJson as Mock).mockResolvedValue(mockResponse);
await subagentGenerator(userDescription, mockGeminiClient, abortSignal);
await subagentGenerator(userDescription, mockConfig, abortSignal);
expect(mockGeminiClient.generateJson).toHaveBeenCalledWith(
expect.any(Array),
expect(mockClient.generateJson).toHaveBeenCalledWith(
expect.objectContaining({
type: 'object',
properties: expect.objectContaining({
name: expect.objectContaining({ type: 'string' }),
description: expect.objectContaining({ type: 'string' }),
systemPrompt: expect.objectContaining({ type: 'string' }),
model: 'qwen3-coder-plus',
contents: expect.any(Object),
schema: expect.objectContaining({
type: 'object',
properties: expect.objectContaining({
name: expect.objectContaining({ type: 'string' }),
description: expect.objectContaining({ type: 'string' }),
systemPrompt: expect.objectContaining({ type: 'string' }),
}),
required: ['name', 'description', 'systemPrompt'],
}),
required: ['name', 'description', 'systemPrompt'],
}),
abortSignal,
DEFAULT_QWEN_MODEL,
expect.objectContaining({
abortSignal,
systemInstruction: expect.stringContaining(
'You are an elite AI agent architect',
),
@@ -224,47 +176,42 @@ describe('subagentGenerator', () => {
systemPrompt: 'You are an ML expert.',
};
(mockGeminiClient.generateJson as Mock).mockResolvedValue(mockResponse);
(mockClient.generateJson as Mock).mockResolvedValue(mockResponse);
await subagentGenerator(userDescription, mockGeminiClient, abortSignal);
await subagentGenerator(userDescription, mockConfig, abortSignal);
const generateJsonCall = (mockGeminiClient.generateJson as Mock).mock
.calls[0];
const contents = generateJsonCall[0] as Content[];
const generateJsonCall = (mockClient.generateJson as Mock).mock.calls[0];
const callParams = generateJsonCall[0];
// Check user query (only message)
expect(contents).toHaveLength(1);
const userQueryContent = contents[0]?.parts?.[0]?.text;
expect(callParams.contents).toHaveLength(1);
const userQueryContent = callParams.contents[0]?.parts?.[0]?.text;
expect(userQueryContent).toContain(userDescription);
expect(userQueryContent).toContain(
'Create an agent configuration based on this request:',
);
// Check that system prompt is passed in the config parameter
expect(generateJsonCall[4]).toEqual(
expect.objectContaining({
systemInstruction: expect.stringContaining(
'You are an elite AI agent architect',
),
}),
// Check that system prompt is passed correctly
expect(callParams.systemInstruction).toContain(
'You are an elite AI agent architect',
);
});
it('should throw error for null response from generateJson', async () => {
const userDescription = 'security auditing';
(mockGeminiClient.generateJson as Mock).mockResolvedValue(null);
(mockClient.generateJson as Mock).mockResolvedValue(null);
await expect(
subagentGenerator(userDescription, mockGeminiClient, abortSignal),
subagentGenerator(userDescription, mockConfig, abortSignal),
).rejects.toThrow('Invalid response from LLM: missing required fields');
});
it('should throw error for undefined response from generateJson', async () => {
const userDescription = 'api documentation';
(mockGeminiClient.generateJson as Mock).mockResolvedValue(undefined);
(mockClient.generateJson as Mock).mockResolvedValue(undefined);
await expect(
subagentGenerator(userDescription, mockGeminiClient, abortSignal),
subagentGenerator(userDescription, mockConfig, abortSignal),
).rejects.toThrow('Invalid response from LLM: missing required fields');
});
});

View File

@@ -6,7 +6,7 @@
import type { Content } from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { GeminiClient } from '../core/client.js';
import type { Config } from '../config/config.js';
const SYSTEM_PROMPT = `You are an elite AI agent architect specializing in crafting high-performance agent configurations. Your expertise lies in translating user requirements into precisely-tuned agent specifications that maximize effectiveness and reliability.
@@ -115,7 +115,7 @@ export interface SubagentGeneratedContent {
*/
export async function subagentGenerator(
userDescription: string,
geminiClient: GeminiClient,
config: Config,
abortSignal: AbortSignal,
): Promise<SubagentGeneratedContent> {
if (!userDescription.trim()) {
@@ -125,15 +125,13 @@ export async function subagentGenerator(
const userPrompt = createUserPrompt(userDescription);
const contents: Content[] = [{ role: 'user', parts: [{ text: userPrompt }] }];
const parsedResponse = (await geminiClient.generateJson(
const parsedResponse = (await config.getBaseLlmClient().generateJson({
model: config.getModel() || DEFAULT_QWEN_MODEL,
contents,
RESPONSE_SCHEMA,
schema: RESPONSE_SCHEMA,
abortSignal,
DEFAULT_QWEN_MODEL,
{
systemInstruction: SYSTEM_PROMPT,
},
)) as unknown as SubagentGeneratedContent;
systemInstruction: SYSTEM_PROMPT,
})) as unknown as SubagentGeneratedContent;
if (
!parsedResponse ||

View File

@@ -0,0 +1,197 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { Terminal } from '@xterm/headless';
import {
serializeTerminalToObject,
convertColorToHex,
ColorMode,
} from './terminalSerializer.js';
const RED_FG = '\x1b[31m';
const RESET = '\x1b[0m';
/** Resolves once xterm has fully processed `data` (terminal.write is async). */
function writeToTerminal(terminal: Terminal, data: string): Promise<void> {
  return new Promise((onProcessed) => terminal.write(data, onProcessed));
}
describe('terminalSerializer', () => {
describe('serializeTerminalToObject', () => {
it('should handle an empty terminal', () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
const result = serializeTerminalToObject(terminal);
expect(result).toHaveLength(24);
result.forEach((line) => {
// Expect each line to be either empty or contain a single token with spaces
if (line.length > 0) {
expect(line[0].text.trim()).toBe('');
}
});
});
it('should serialize a single line of text', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, 'Hello, world!');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].text).toContain('Hello, world!');
});
it('should serialize multiple lines of text', async () => {
const terminal = new Terminal({
cols: 7,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, 'Line 1\r\nLine 2');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].text).toBe('Line 1 ');
expect(result[1][0].text).toBe('Line 2');
});
it('should handle bold text', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, '\x1b[1mBold text\x1b[0m');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].bold).toBe(true);
expect(result[0][0].text).toBe('Bold text');
});
it('should handle italic text', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, '\x1b[3mItalic text\x1b[0m');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].italic).toBe(true);
expect(result[0][0].text).toBe('Italic text');
});
it('should handle underlined text', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, '\x1b[4mUnderlined text\x1b[0m');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].underline).toBe(true);
expect(result[0][0].text).toBe('Underlined text');
});
it('should handle dim text', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, '\x1b[2mDim text\x1b[0m');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].dim).toBe(true);
expect(result[0][0].text).toBe('Dim text');
});
it('should handle inverse text', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, '\x1b[7mInverse text\x1b[0m');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].inverse).toBe(true);
expect(result[0][0].text).toBe('Inverse text');
});
it('should handle foreground colors', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, `${RED_FG}Red text${RESET}`);
const result = serializeTerminalToObject(terminal);
expect(result[0][0].fg).toBe('#800000');
expect(result[0][0].text).toBe('Red text');
});
it('should handle background colors', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, '\x1b[42mGreen background\x1b[0m');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].bg).toBe('#008000');
expect(result[0][0].text).toBe('Green background');
});
it('should handle RGB colors', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, '\x1b[38;2;100;200;50mRGB text\x1b[0m');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].fg).toBe('#64c832');
expect(result[0][0].text).toBe('RGB text');
});
it('should handle a combination of styles', async () => {
const terminal = new Terminal({
cols: 80,
rows: 24,
allowProposedApi: true,
});
await writeToTerminal(terminal, '\x1b[1;31;42mStyled text\x1b[0m');
const result = serializeTerminalToObject(terminal);
expect(result[0][0].bold).toBe(true);
expect(result[0][0].fg).toBe('#800000');
expect(result[0][0].bg).toBe('#008000');
expect(result[0][0].text).toBe('Styled text');
});
});
describe('convertColorToHex', () => {
it('should convert RGB color to hex', () => {
const color = (100 << 16) | (200 << 8) | 50;
const hex = convertColorToHex(color, ColorMode.RGB, '#000000');
expect(hex).toBe('#64c832');
});
it('should convert palette color to hex', () => {
const hex = convertColorToHex(1, ColorMode.PALETTE, '#000000');
expect(hex).toBe('#800000');
});
it('should return default color for ColorMode.DEFAULT', () => {
const hex = convertColorToHex(0, ColorMode.DEFAULT, '#ffffff');
expect(hex).toBe('#ffffff');
});
it('should return default color for invalid palette index', () => {
const hex = convertColorToHex(999, ColorMode.PALETTE, '#000000');
expect(hex).toBe('#000000');
});
});
});

View File

@@ -0,0 +1,476 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { IBufferCell, Terminal } from '@xterm/headless';
/**
 * A run of consecutive terminal cells that share identical styling.
 */
export interface AnsiToken {
  text: string;
  bold: boolean;
  italic: boolean;
  underline: boolean;
  dim: boolean;
  inverse: boolean;
  // Foreground color as '#rrggbb', or '' for the terminal default.
  fg: string;
  // Background color as '#rrggbb', or '' for the terminal default.
  bg: string;
}
/** One terminal row, as an ordered sequence of styled tokens. */
export type AnsiLine = AnsiToken[];
/** The visible terminal viewport: one AnsiLine per row. */
export type AnsiOutput = AnsiLine[];
// Bit flags for cell text attributes, combined into a single number so
// runs of identically-styled cells can be compared cheaply (see Cell).
const enum Attribute {
  inverse = 1,
  bold = 2,
  italic = 4,
  underline = 8,
  dim = 16,
}
// How a cell's numeric color value should be interpreted.
export const enum ColorMode {
  DEFAULT = 0, // No explicit color; fall back to the terminal default.
  PALETTE = 1, // Index into the 256-entry ANSI palette.
  RGB = 2, // Packed 24-bit RGB value (0xRRGGBB).
}
/**
 * Snapshot of a single terminal buffer cell plus its position, used to
 * decide where one styled token ends and the next begins.
 *
 * Constructed with a null cell to represent "no previous cell" (the
 * sentinel used at the start of each row).
 */
class Cell {
  private readonly cell: IBufferCell | null;
  private readonly x: number;
  private readonly y: number;
  private readonly cursorX: number;
  private readonly cursorY: number;
  // Bitmask of Attribute flags; readonly but populated in the constructor.
  private readonly attributes: number = 0;
  // Color values; -1 when the cell uses the terminal default color.
  fg = 0;
  bg = 0;
  fgColorMode: ColorMode = ColorMode.DEFAULT;
  bgColorMode: ColorMode = ColorMode.DEFAULT;
  constructor(
    cell: IBufferCell | null,
    x: number,
    y: number,
    cursorX: number,
    cursorY: number,
  ) {
    this.cell = cell;
    this.x = x;
    this.y = y;
    this.cursorX = cursorX;
    this.cursorY = cursorY;
    if (!cell) {
      // Sentinel cell: keep default (unstyled) state.
      return;
    }
    // Each flag is tested exactly once, so += behaves like |= here.
    if (cell.isInverse()) {
      this.attributes += Attribute.inverse;
    }
    if (cell.isBold()) {
      this.attributes += Attribute.bold;
    }
    if (cell.isItalic()) {
      this.attributes += Attribute.italic;
    }
    if (cell.isUnderline()) {
      this.attributes += Attribute.underline;
    }
    if (cell.isDim()) {
      this.attributes += Attribute.dim;
    }
    if (cell.isFgRGB()) {
      this.fgColorMode = ColorMode.RGB;
    } else if (cell.isFgPalette()) {
      this.fgColorMode = ColorMode.PALETTE;
    } else {
      this.fgColorMode = ColorMode.DEFAULT;
    }
    if (cell.isBgRGB()) {
      this.bgColorMode = ColorMode.RGB;
    } else if (cell.isBgPalette()) {
      this.bgColorMode = ColorMode.PALETTE;
    } else {
      this.bgColorMode = ColorMode.DEFAULT;
    }
    // Default-colored cells get -1 so they compare equal to each other
    // regardless of whatever raw value xterm reports.
    if (this.fgColorMode === ColorMode.DEFAULT) {
      this.fg = -1;
    } else {
      this.fg = cell.getFgColor();
    }
    if (this.bgColorMode === ColorMode.DEFAULT) {
      this.bg = -1;
    } else {
      this.bg = cell.getBgColor();
    }
  }
  /** True when this cell is at the cursor position. */
  isCursor(): boolean {
    return this.x === this.cursorX && this.y === this.cursorY;
  }
  /** The cell's character content; a space for empty/sentinel cells. */
  getChars(): string {
    return this.cell?.getChars() || ' ';
  }
  /** Whether the given Attribute flag is set on this cell. */
  isAttribute(attribute: Attribute): boolean {
    return (this.attributes & attribute) !== 0;
  }
  /**
   * Style equality: two cells can be merged into one token iff they have
   * the same attributes, colors, and cursor status (position is ignored).
   */
  equals(other: Cell): boolean {
    return (
      this.attributes === other.attributes &&
      this.fg === other.fg &&
      this.bg === other.bg &&
      this.fgColorMode === other.fgColorMode &&
      this.bgColorMode === other.bgColorMode &&
      this.isCursor() === other.isCursor()
    );
  }
}
/**
 * Serializes the visible viewport of an xterm terminal into a structured,
 * style-preserving representation.
 *
 * Consecutive cells with identical styling are merged into a single
 * AnsiToken. The cell under the cursor is rendered as inverse so the
 * cursor position survives serialization.
 *
 * @param terminal The terminal whose active buffer viewport to serialize.
 * @returns One AnsiLine per visible row (empty line for missing rows).
 */
export function serializeTerminalToObject(terminal: Terminal): AnsiOutput {
  const buffer = terminal.buffer.active;
  const cursorX = buffer.cursorX;
  const cursorY = buffer.cursorY;
  // '' means "no explicit color" (see convertColorToHex).
  const defaultFg = '';
  const defaultBg = '';
  // Builds one token for `text` using the styling captured in `cell`.
  // (Previously this literal was duplicated for the mid-row and end-of-row
  // flushes; keeping it in one place avoids the two copies drifting.)
  const makeToken = (cell: Cell, text: string): AnsiToken => ({
    text,
    bold: cell.isAttribute(Attribute.bold),
    italic: cell.isAttribute(Attribute.italic),
    underline: cell.isAttribute(Attribute.underline),
    dim: cell.isAttribute(Attribute.dim),
    // Render the cursor cell as inverse so its position is visible.
    inverse: cell.isAttribute(Attribute.inverse) || cell.isCursor(),
    fg: convertColorToHex(cell.fg, cell.fgColorMode, defaultFg),
    bg: convertColorToHex(cell.bg, cell.bgColorMode, defaultBg),
  });
  const result: AnsiOutput = [];
  for (let y = 0; y < terminal.rows; y++) {
    const line = buffer.getLine(buffer.viewportY + y);
    const currentLine: AnsiLine = [];
    if (!line) {
      result.push(currentLine);
      continue;
    }
    let lastCell = new Cell(null, -1, -1, cursorX, cursorY);
    let currentText = '';
    for (let x = 0; x < terminal.cols; x++) {
      const cellData = line.getCell(x);
      const cell = new Cell(cellData || null, x, y, cursorX, cursorY);
      // Styling changed: flush the accumulated run as one token.
      if (x > 0 && !cell.equals(lastCell)) {
        if (currentText) {
          currentLine.push(makeToken(lastCell, currentText));
        }
        currentText = '';
      }
      currentText += cell.getChars();
      lastCell = cell;
    }
    // Flush the trailing run of the row.
    if (currentText) {
      currentLine.push(makeToken(lastCell, currentText));
    }
    result.push(currentLine);
  }
  return result;
}
// ANSI color palette from https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit
const ANSI_COLORS = [
'#000000',
'#800000',
'#008000',
'#808000',
'#000080',
'#800080',
'#008080',
'#c0c0c0',
'#808080',
'#ff0000',
'#00ff00',
'#ffff00',
'#0000ff',
'#ff00ff',
'#00ffff',
'#ffffff',
'#000000',
'#00005f',
'#000087',
'#0000af',
'#0000d7',
'#0000ff',
'#005f00',
'#005f5f',
'#005f87',
'#005faf',
'#005fd7',
'#005fff',
'#008700',
'#00875f',
'#008787',
'#0087af',
'#0087d7',
'#0087ff',
'#00af00',
'#00af5f',
'#00af87',
'#00afaf',
'#00afd7',
'#00afff',
'#00d700',
'#00d75f',
'#00d787',
'#00d7af',
'#00d7d7',
'#00d7ff',
'#00ff00',
'#00ff5f',
'#00ff87',
'#00ffaf',
'#00ffd7',
'#00ffff',
'#5f0000',
'#5f005f',
'#5f0087',
'#5f00af',
'#5f00d7',
'#5f00ff',
'#5f5f00',
'#5f5f5f',
'#5f5f87',
'#5f5faf',
'#5f5fd7',
'#5f5fff',
'#5f8700',
'#5f875f',
'#5f8787',
'#5f87af',
'#5f87d7',
'#5f87ff',
'#5faf00',
'#5faf5f',
'#5faf87',
'#5fafaf',
'#5fafd7',
'#5fafff',
'#5fd700',
'#5fd75f',
'#5fd787',
'#5fd7af',
'#5fd7d7',
'#5fd7ff',
'#5fff00',
'#5fff5f',
'#5fff87',
'#5fffaf',
'#5fffd7',
'#5fffff',
'#870000',
'#87005f',
'#870087',
'#8700af',
'#8700d7',
'#8700ff',
'#875f00',
'#875f5f',
'#875f87',
'#875faf',
'#875fd7',
'#875fff',
'#878700',
'#87875f',
'#878787',
'#8787af',
'#8787d7',
'#8787ff',
'#87af00',
'#87af5f',
'#87af87',
'#87afaf',
'#87afd7',
'#87afff',
'#87d700',
'#87d75f',
'#87d787',
'#87d7af',
'#87d7d7',
'#87d7ff',
'#87ff00',
'#87ff5f',
'#87ff87',
'#87ffaf',
'#87ffd7',
'#87ffff',
'#af0000',
'#af005f',
'#af0087',
'#af00af',
'#af00d7',
'#af00ff',
'#af5f00',
'#af5f5f',
'#af5f87',
'#af5faf',
'#af5fd7',
'#af5fff',
'#af8700',
'#af875f',
'#af8787',
'#af87af',
'#af87d7',
'#af87ff',
'#afaf00',
'#afaf5f',
'#afaf87',
'#afafaf',
'#afafd7',
'#afafff',
'#afd700',
'#afd75f',
'#afd787',
'#afd7af',
'#afd7d7',
'#afd7ff',
'#afff00',
'#afff5f',
'#afff87',
'#afffaf',
'#afffd7',
'#afffff',
'#d70000',
'#d7005f',
'#d70087',
'#d700af',
'#d700d7',
'#d700ff',
'#d75f00',
'#d75f5f',
'#d75f87',
'#d75faf',
'#d75fd7',
'#d75fff',
'#d78700',
'#d7875f',
'#d78787',
'#d787af',
'#d787d7',
'#d787ff',
'#d7af00',
'#d7af5f',
'#d7af87',
'#d7afaf',
'#d7afd7',
'#d7afff',
'#d7d700',
'#d7d75f',
'#d7d787',
'#d7d7af',
'#d7d7d7',
'#d7d7ff',
'#d7ff00',
'#d7ff5f',
'#d7ff87',
'#d7ffaf',
'#d7ffd7',
'#d7ffff',
'#ff0000',
'#ff005f',
'#ff0087',
'#ff00af',
'#ff00d7',
'#ff00ff',
'#ff5f00',
'#ff5f5f',
'#ff5f87',
'#ff5faf',
'#ff5fd7',
'#ff5fff',
'#ff8700',
'#ff875f',
'#ff8787',
'#ff87af',
'#ff87d7',
'#ff87ff',
'#ffaf00',
'#ffaf5f',
'#ffaf87',
'#ffafaf',
'#ffafd7',
'#ffafff',
'#ffd700',
'#ffd75f',
'#ffd787',
'#ffd7af',
'#ffd7d7',
'#ffd7ff',
'#ffff00',
'#ffff5f',
'#ffff87',
'#ffffaf',
'#ffffd7',
'#ffffff',
'#080808',
'#121212',
'#1c1c1c',
'#262626',
'#303030',
'#3a3a3a',
'#444444',
'#4e4e4e',
'#585858',
'#626262',
'#6c6c6c',
'#767676',
'#808080',
'#8a8a8a',
'#949494',
'#9e9e9e',
'#a8a8a8',
'#b2b2b2',
'#bcbcbc',
'#c6c6c6',
'#d0d0d0',
'#dadada',
'#e4e4e4',
'#eeeeee',
];
/**
 * Converts a terminal cell color value to a '#rrggbb' hex string.
 *
 * @param color Packed 24-bit RGB value (RGB mode) or palette index
 *   (PALETTE mode); ignored in DEFAULT mode.
 * @param colorMode How `color` should be interpreted.
 * @param defaultColor Returned for DEFAULT mode or an out-of-range
 *   palette index.
 */
export function convertColorToHex(
  color: number,
  colorMode: ColorMode,
  defaultColor: string,
): string {
  switch (colorMode) {
    case ColorMode.RGB: {
      // Extract one 8-bit channel and render it as two hex digits.
      const channel = (shift: number): string =>
        ((color >> shift) & 255).toString(16).padStart(2, '0');
      return `#${channel(16)}${channel(8)}${channel(0)}`;
    }
    case ColorMode.PALETTE:
      return ANSI_COLORS[color] || defaultColor;
    default:
      return defaultColor;
  }
}

View File

@@ -0,0 +1,79 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { safeLiteralReplace } from './textUtils.js';
describe('safeLiteralReplace', () => {
it('returns original string when oldString empty or not found', () => {
expect(safeLiteralReplace('abc', '', 'X')).toBe('abc');
expect(safeLiteralReplace('abc', 'z', 'X')).toBe('abc');
});
it('fast path when newString has no $', () => {
expect(safeLiteralReplace('abc', 'b', 'X')).toBe('aXc');
});
it('treats $ literally', () => {
expect(safeLiteralReplace('foo', 'foo', "bar$'baz")).toBe("bar$'baz");
});
it("does not interpret replacement patterns like $&, $', $` and $1", () => {
expect(safeLiteralReplace('hello', 'hello', '$&-replacement')).toBe(
'$&-replacement',
);
expect(safeLiteralReplace('mid', 'mid', 'new$`content')).toBe(
'new$`content',
);
expect(safeLiteralReplace('test', 'test', '$1$2value')).toBe('$1$2value');
});
it('preserves end-of-line $ in regex-like text', () => {
const current = "| select('match', '^[sv]d[a-z]$')";
const oldStr = "'^[sv]d[a-z]$'";
const newStr = "'^[sv]d[a-z]$' # updated";
const expected = "| select('match', '^[sv]d[a-z]$' # updated)";
expect(safeLiteralReplace(current, oldStr, newStr)).toBe(expected);
});
it('handles multiple $ characters', () => {
expect(safeLiteralReplace('x', 'x', '$$$')).toBe('$$$');
});
it('preserves pre-escaped $$ literally', () => {
expect(safeLiteralReplace('x', 'x', '$$value')).toBe('$$value');
});
it('handles complex malicious patterns from PR #7871', () => {
const original = 'The price is PRICE.';
const result = safeLiteralReplace(
original,
'PRICE',
"$& Wow, that's a lot! $'",
);
expect(result).toBe("The price is $& Wow, that's a lot! $'.");
});
it('handles multiple replacements correctly', () => {
const text = 'Replace FOO and FOO again';
const result = safeLiteralReplace(text, 'FOO', '$100');
expect(result).toBe('Replace $100 and $100 again');
});
it('preserves $ at different positions', () => {
expect(safeLiteralReplace('test', 'test', '$')).toBe('$');
expect(safeLiteralReplace('test', 'test', 'prefix$')).toBe('prefix$');
expect(safeLiteralReplace('test', 'test', '$suffix')).toBe('$suffix');
});
it('handles edge case with $$$$', () => {
expect(safeLiteralReplace('x', 'x', '$$$$')).toBe('$$$$');
});
it('handles newString with only dollar signs', () => {
expect(safeLiteralReplace('abc', 'b', '$$')).toBe('a$$c');
});
});

View File

@@ -4,6 +4,27 @@
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * Replaces every occurrence of `oldString` in `str` with `newString`,
 * treating `newString` as literal text.
 *
 * Plain `String.prototype.replaceAll` interprets `$`-sequences in the
 * replacement (ECMAScript GetSubstitution: `$&`, `$'`, `` $` ``, `$1`, ...),
 * which lets replacement text corrupt the result. Splitting on the needle
 * and re-joining sidesteps substitution processing entirely.
 */
export function safeLiteralReplace(
  str: string,
  oldString: string,
  newString: string,
): string {
  // An empty needle would match everywhere; an absent needle changes nothing.
  if (oldString === '' || !str.includes(oldString)) {
    return str;
  }
  return str.split(oldString).join(newString);
}
/**
* Checks if a Buffer is likely binary by testing for the presence of a NULL byte.
* The presence of a NULL byte is a strong indicator that the data is not plain text.

View File

@@ -0,0 +1,80 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { parseThought } from './thoughtUtils.js';
describe('parseThought', () => {
it.each([
{
name: 'a standard thought with subject and description',
rawText: '**Subject:** This is the description.',
expected: {
subject: 'Subject:',
description: 'This is the description.',
},
},
{
name: 'leading and trailing whitespace in the raw string',
rawText: ' **Subject** description with spaces ',
expected: { subject: 'Subject', description: 'description with spaces' },
},
{
name: 'whitespace surrounding the subject content',
rawText: '** Subject **',
expected: { subject: 'Subject', description: '' },
},
{
name: 'a thought with only a subject',
rawText: '**Only Subject**',
expected: { subject: 'Only Subject', description: '' },
},
{
name: 'a thought with only a description (no subject)',
rawText: 'This is just a description.',
expected: { subject: '', description: 'This is just a description.' },
},
{
name: 'an empty string input',
rawText: '',
expected: { subject: '', description: '' },
},
{
name: 'newlines within the subject and description',
rawText:
'**Multi-line\nSubject**\nHere is a description\nspread across lines.',
expected: {
subject: 'Multi-line\nSubject',
description: 'Here is a description\nspread across lines.',
},
},
{
name: 'only the first subject if multiple are present',
rawText: '**First** some text **Second**',
expected: { subject: 'First', description: 'some text **Second**' },
},
{
name: 'text before and after the subject',
rawText: 'Prefix text **Subject** Suffix text.',
expected: {
subject: 'Subject',
description: 'Prefix text Suffix text.',
},
},
{
name: 'an unclosed subject tag',
rawText: 'Text with **an unclosed subject',
expected: { subject: '', description: 'Text with **an unclosed subject' },
},
{
name: 'an empty subject tag',
rawText: 'A thought with **** in the middle.',
expected: { subject: '', description: 'A thought with in the middle.' },
},
])('should correctly parse $name', ({ rawText, expected }) => {
expect(parseThought(rawText)).toEqual(expected);
});
});

View File

@@ -0,0 +1,54 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * A model "thought", split into a short bold subject line and the
 * remaining descriptive text.
 */
export type ThoughtSummary = {
  subject: string;
  description: string;
};
// Subjects are wrapped in double asterisks, e.g. `**Subject**`.
const START_DELIMITER = '**';
const END_DELIMITER = '**';
/**
 * Parses a raw thought string into a structured ThoughtSummary object.
 *
 * Thoughts are expected to have a bold "subject" part enclosed in double
 * asterisks (e.g., **Subject**). The rest of the string is considered the
 * description. Only the first complete delimiter pair is treated as the
 * subject; any later pairs remain part of the description.
 *
 * @param rawText The raw text of the thought.
 * @returns A ThoughtSummary. When no complete `**...**` pair exists, the
 *   whole (trimmed) string becomes the description and subject is ''.
 */
export function parseThought(rawText: string): ThoughtSummary {
  const open = rawText.indexOf(START_DELIMITER);
  const close =
    open === -1
      ? -1
      : rawText.indexOf(END_DELIMITER, open + START_DELIMITER.length);
  if (open === -1 || close === -1) {
    // No opening delimiter, or an unclosed one: everything is description.
    return { subject: '', description: rawText.trim() };
  }
  const subject = rawText.slice(open + START_DELIMITER.length, close).trim();
  // The description is whatever surrounds the delimited subject.
  const before = rawText.slice(0, open);
  const after = rawText.slice(close + END_DELIMITER.length);
  return { subject, description: (before + after).trim() };
}